diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index dc003ca202..69a35f66ac 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,7 +2,6 @@ // Reference Doc: https://code.visualstudio.com/remote/advancedcontainers/overview "name": "OpenIM Dev Environment", // Update the container version when you publish dev-container - "dockerComposeFile": "docker-compose.yml", "build": { "dockerfile": "Dockerfile" }, // Replace with uncommented line below to build your own local copy of the image // "dockerFile": "../docker/Dockerfile-dev", diff --git a/.github/workflows/help-comment-issue.yml b/.github/workflows/help-comment-issue.yml index 73cf105a8e..dce8581978 100644 --- a/.github/workflows/help-comment-issue.yml +++ b/.github/workflows/help-comment-issue.yml @@ -17,6 +17,7 @@ on: issues: types: - labeled + jobs: add-comment: if: github.event.label.name == 'help wanted' || github.event.label.name == 'good first issue' diff --git a/.github/workflows/openimci.yml b/.github/workflows/openimci.yml index dce5649df4..8aa38d941c 100644 --- a/.github/workflows/openimci.yml +++ b/.github/workflows/openimci.yml @@ -23,6 +23,8 @@ on: - "docs/**" - "README.md" - "README_zh-CN.md" + - "**.md" + - "docs/**" - "CONTRIBUTING.md" pull_request: branches: @@ -31,7 +33,8 @@ on: paths-ignore: - "README.md" - "README_zh-CN.md" - - "CONTRIBUTING.md" + - "CONTRIBUTING/**" + - "**.md" - "docs/**" env: @@ -67,6 +70,9 @@ jobs: version: '3.x' # If available, use the latest major version that's compatible repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: OpenIM Scripts Verification(make verify) + run: sudo make verify + - name: Module Operations run: | sudo make tidy @@ -91,13 +97,6 @@ jobs: - name: Cleanup Build run: sudo make clean - - name: Push Changes to Main - uses: stefanzweifel/git-auto-commit-action@v5 - with: - commit_message: "cicd: robot automated Change" - branch: main - continue-on-error: true - - name: Set Current Directory id: set_directory run: echo "::set-output name=directory::$(pwd)" diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index a03eade82e..828d30d7d2 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -78,9 +78,6 @@ jobs: echo "Run unit test and get test coverage successfully" continue-on-error: true - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 - - name: OpenIM verify copyright run: | sudo make verify-copyright diff --git a/.github/workflows/sync-release.yml b/.github/workflows/sync-release.yml index 2a29f6218d..e156e07d80 100644 --- a/.github/workflows/sync-release.yml +++ b/.github/workflows/sync-release.yml @@ -3,7 +3,7 @@ # you may not use this file except in compliance with the License. # https://github.com/BetaHuhn/repo-file-sync-action -name: Synchronize kubecub public code to other repositories +name: Synchronize OpenIM Release Branch Public Code To Other Repositories on: push: paths: @@ -41,3 +41,4 @@ jobs: automerge ASSIGNEES: | kubbot + continue-on-error: true \ No newline at end of file diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml index 6e5c797998..77ed2f881b 100644 --- a/.github/workflows/sync.yml +++ b/.github/workflows/sync.yml @@ -3,7 +3,7 @@ # you may not use this file except in compliance with the License. 
# https://github.com/BetaHuhn/repo-file-sync-action -name: Synchronize kubecub public code to other repositories +name: Synchronize OpenIM Main Branch Public Code To Other Repositories on: push: branches: @@ -36,4 +36,5 @@ jobs: file-sync automerge ASSIGNEES: | - kubbot \ No newline at end of file + kubbot + continue-on-error: true \ No newline at end of file diff --git a/.gitignore b/.gitignore index fb8d428d24..5142fe5513 100644 --- a/.gitignore +++ b/.gitignore @@ -34,6 +34,7 @@ deployments/charts/generated-configs/ ### OpenIM Config ### .env config/config.yaml +config/openim.yaml config/alertmanager.yml config/prometheus.yml config/email.tmpl diff --git a/CHANGELOG/CHANGELOG-3.5.0.md b/CHANGELOG/CHANGELOG-3.5.0.md deleted file mode 100644 index 02c28308d8..0000000000 --- a/CHANGELOG/CHANGELOG-3.5.0.md +++ /dev/null @@ -1,15 +0,0 @@ -# Version logging for OpenIM - - - - - - -## [Unreleased] - - - -## [v3.5.0+3.97baaac] - 2024-01-12 - -[Unreleased]: https://github.com/openimsdk/open-im-server/compare/v3.5.0+3.97baaac...HEAD -[v3.5.0+3.97baaac]: https://github.com/openimsdk/open-im-server/compare/v3.5.0+5.950e970...v3.5.0+3.97baaac diff --git a/README.md b/README.md index 551ff27f86..3876fcd7ad 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ OpenIM is a service platform specifically designed for integrating chat, audio-v - 🛡️ API wrapping - 🌐 Connection management - ## 📚 Main Modules: ++ 📚 Main Modules: 1. 🚀 Initialization and Login 2. 👤 User Management @@ -70,11 +70,16 @@ It is built using Golang and supports cross-platform deployment, ensuring a cons ## :rocket: Quick Start +We support many platforms. Here are the addresses for quick experience on the web side: + +👉 **[OpenIM online web demo](https://web-enterprise.rentsoft.cn/)** + 🤲 To facilitate user experience, we offer various deployment solutions. 
You can choose your deployment method from the list below: + **[Source Code Deployment Guide](https://docs.openim.io/guides/gettingStarted/imSourceCodeDeployment)** + **[Docker Deployment Guide](https://docs.openim.io/guides/gettingStarted/dockerCompose)** + **[Kubernetes Deployment Guide](https://docs.openim.io/guides/gettingStarted/k8s-deployment)** ++ **[Mac Developer Deployment Guide](https://docs.openim.io/guides/gettingstarted/mac-deployment-guide)** ## :hammer_and_wrench: To Start Developing OpenIM diff --git a/config/templates/config.yaml.template b/config/templates/config.yaml.template index 9954a8863b..32ac14361c 100644 --- a/config/templates/config.yaml.template +++ b/config/templates/config.yaml.template @@ -14,7 +14,7 @@ # ----------------------------------------------------------------- # TODO: This config file is the template file -# --| source: deployments/templates/openim.yaml +# --| source: deployments/templates/config.yaml # --| env: scripts/install/environment # --| target: config/config.yaml # ----------------------------------------------------------------- @@ -52,8 +52,8 @@ mongo: # Default MongoDB database name # Maximum connection pool size address: [ 172.28.0.1:37017 ] - database: openIM_v3 - username: root + database: openim_v3 + username: openIM password: openIM123 maxPoolSize: 100 @@ -122,14 +122,14 @@ api: # minio.signEndpoint is minio public network address object: enable: "minio" - apiURL: "http://14.155.64.202:10002" + apiURL: "http://172.28.0.1:10002" minio: bucket: "openim" endpoint: "http://172.28.0.1:10005" accessKeyID: "root" secretAccessKey: "openIM123" sessionToken: '' - signEndpoint: "http://14.155.64.202:10005" + signEndpoint: "http://172.28.0.1:10005" publicRead: false cos: bucketURL: https://temp-1252357374.cos.ap-chengdu.myqcloud.com @@ -193,7 +193,7 @@ rpcRegisterName: # Whether to output in json format # Whether to include stack trace in logs log: - storageLocation: /data/workspaces/open-im-server/logs/ + storageLocation: /workspaces/open-im-server/logs/ rotationTime: 24 remainRotationCount: 2 remainLogLevel: 6 @@ -247,6 +247,14 @@ manager: userID: [ "openIM123456", "openIM654321", "openIMAdmin" ] nickname: [ "system1", "system2", "system3" ] +# chatAdmin, use for send notification +# +# Built-in app system notification account ID +# Built-in app system notification account nickname +im-admin: + userID: [ "imAdmin" ] + nickname: [ "imAdmin" ] + # Multi-platform login policy # For each platform(Android, iOS, Windows, Mac, web), only one can be online at a time multiLoginPolicy: 1 @@ -307,21 +315,21 @@ iosPush: # Timeout in seconds # Whether to continue execution if callback fails callback: - url: "" + url: "http://127.0.0.1:10008/callbackExample" beforeSendSingleMsg: enable: false timeout: 5 failedContinue: true beforeUpdateUserInfoEx: - enable: false + enable: false timeout: 5 failedContinue: true afterUpdateUserInfoEx: - enable: false + enable: false timeout: 5 failedContinue: true afterSendSingleMsg: - enable: false + enable: true timeout: 5 failedContinue: true beforeSendGroupMsg: @@ -505,8 +513,8 @@ callback: # The number of Prometheus ports per service needs to correspond to rpcPort # The number of ports needs to be consistent with msg_transfer_service_num in script/path_info.sh prometheus: - enable: false - grafanaUrl: 172.28.0.1:13000 + enable: true + grafanaUrl: http://172.28.0.1:13000/ apiPrometheusPort: [20100] userPrometheusPort: [ 20110 ] friendPrometheusPort: [ 20120 ] diff --git a/config/templates/env.template 
b/config/templates/env.template index a606704c5b..b4f9c868de 100644 --- a/config/templates/env.template +++ b/config/templates/env.template @@ -1,4 +1,4 @@ -# Copyright © 2023 OpenIM. All rights reserved. +# Copyright © 2024 OpenIM. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,31 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. -# ====================================== -# ========= Basic Configuration ======== -# ====================================== - -# The user for authentication or system operations. -# Default: OPENIM_USER=root -USER=root - -# Password associated with the specified user for authentication. -# Default: PASSWORD=openIM123 -PASSWORD=openIM123 - -# Base URL for the application programming interface (API). -# Default: API_URL=http://172.28.0.1:10002 -API_URL=http://14.155.64.202:10002 - -# Directory path for storing data files or related information. -# Default: DATA_DIR=./ -DATA_DIR=/data/workspaces/open-im-server - -# Choose the appropriate image address, the default is GITHUB image, -# you can choose docker hub, for Chinese users can choose Ali Cloud -# export IMAGE_REGISTRY="ghcr.io/openimsdk" -# export IMAGE_REGISTRY="openim" -# export IMAGE_REGISTRY="registry.cn-hangzhou.aliyuncs.com/openimsdk" +# ----------------------------------------------------------------------------- +# General Configuration +# This section contains general configuration options for the entire environment. +# These options can be set via environment variables. If both environment variables +# and settings in this .env file exist, the environment variables take precedence. +# ----------------------------------------------------------------------------- +# ========================== +# General Configuration +# ========================== +# These settings apply to the overall environment. + +# Data storage directory for persistent data. +# Example: DATA_DIR=/path/to/data +DATA_DIR=/workspaces/open-im-server + +# Docker image registry. Uncomment the preferred one. +# Options: ghcr.io/openimsdk, openim, registry.cn-hangzhou.aliyuncs.com/openimsdk +# IMAGE_REGISTRY="ghcr.io/openimsdk" +# IMAGE_REGISTRY="openim" +# IMAGE_REGISTRY="registry.cn-hangzhou.aliyuncs.com/openimsdk" IMAGE_REGISTRY=ghcr.io/openimsdk # ====================================== @@ -47,10 +42,9 @@ IMAGE_REGISTRY=ghcr.io/openimsdk # Default: DOCKER_BRIDGE_SUBNET=172.28.0.0/16 DOCKER_BRIDGE_SUBNET=172.28.0.0/16 -# Gateway for the Docker network. -# Default: DOCKER_BRIDGE_GATEWAY=172.28.0.1 +# Set and specify the IP addresses of some containers. Generally speaking, +# you do not need to modify these configurations to facilitate debugging DOCKER_BRIDGE_GATEWAY=172.28.0.1 - MONGO_NETWORK_ADDRESS=172.28.0.2 REDIS_NETWORK_ADDRESS=172.28.0.3 KAFKA_NETWORK_ADDRESS=172.28.0.4 @@ -65,45 +59,66 @@ NODE_EXPORTER_NETWORK_ADDRESS=172.28.0.12 OPENIM_ADMIN_FRONT_NETWORK_ADDRESS=172.28.0.13 ALERT_MANAGER_NETWORK_ADDRESS=172.28.0.14 -# =============================================== -# = Component Extension Configuration = -# =============================================== +# ============================================================================== +# Configuration Update Instructions +# ============================================================================== +# This header outlines the methods to update common variables in config.yaml and .env files. 
+# These instructions are vital for maintaining the OpenIM environment's configuration. +# +# METHOD 1: Regenerate All Configurations +# ---------------------------------------- +# Use this method to regenerate all configurations. +# Steps: +# 1. Delete existing config files: +# - openim-server/config/config.yaml +# - openim-chat/config/config.yaml +# 2. Modify the .env file as required. +# 3. Run 'docker compose up -d'. This will regenerate: +# - config/config.yaml +# +# METHOD 2: Modify Individual Configuration Files +# ----------------------------------------------- +# Use this method to update specific configuration files. +# Steps: +# 1. Modify the .env file as necessary. +# 2. Update the corresponding entries in: +# - config/config.yaml +# 3. Restart the services with 'docker compose up -d'. +# 4. Special Note: If you modify OPENIM_IP, API_OPENIM_PORT, or MINIO_PORT in .env, +# ensure to update the corresponding services and configurations accordingly. +# +# It is essential to follow these methods to ensure consistent and correct application behavior. +# ============================================================================== +# Local IP address of the service. Modify if necessary. +# Example: OPENIM_IP=172.28.0.1, +OPENIM_IP=172.28.0.1 -# ============ Component Extension Configuration ========== # ----- ZooKeeper Configuration ----- -# Address or hostname for the ZooKeeper service. -# Default: ZOOKEEPER_ADDRESS=172.28.0.1 -ZOOKEEPER_ADDRESS=172.28.0.5 - # Port for ZooKeeper service. # Default: ZOOKEEPER_PORT=12181 ZOOKEEPER_PORT=12181 -# ----- MongoDB Configuration ----- -# Address or hostname for the MongoDB service. -# Default: MONGO_ADDRESS=172.28.0.1 -MONGO_ADDRESS=172.28.0.2 - -# Port on which MongoDB service is running. +# MongoDB service port configuration. # Default: MONGO_PORT=37017 # MONGO_PORT=37017 -# Username to authenticate with the MongoDB service. -# Default: MONGO_USERNAME=root -# MONGO_USERNAME=root - -# Password to authenticate with the MongoDB service. +# Password for MongoDB admin user. Used for service authentication. # Default: MONGO_PASSWORD=openIM123 MONGO_PASSWORD=openIM123 -# Name of the database in MongoDB to be used. -# Default: MONGO_DATABASE=openIM_v3 -MONGO_DATABASE=openIM_v3 +# Username for a regular OpenIM user in MongoDB. +# Default: MONGO_OPENIM_USERNAME=openIM +MONGO_OPENIM_USERNAME=openIM + +# Password for a regular OpenIM user in MongoDB. +# Default: MONGO_OPENIM_PASSWORD=openIM123456 +MONGO_OPENIM_PASSWORD=openIM123 + +# Specifies the database name to be used within MongoDB. +# Default: MONGO_DATABASE=openim_v3 +MONGO_DATABASE=openim_v3 # ----- Redis Configuration ----- -# Address or hostname for the Redis service. -# Default: REDIS_ADDRESS=172.28.0.1 -REDIS_ADDRESS=172.28.0.3 # Port on which Redis in-memory data structure store is running. # Default: REDIS_PORT=16379 @@ -113,11 +128,6 @@ REDIS_PORT=16379 # Default: REDIS_PASSWORD=openIM123 REDIS_PASSWORD=openIM123 -# ----- Kafka Configuration ----- -# Address or hostname for the Kafka service. -# Default: KAFKA_ADDRESS=172.28.0.1 -KAFKA_ADDRESS=172.28.0.4 - # Kakfa username to authenticate with the Kafka service. # KAFKA_USERNAME='' @@ -129,20 +139,13 @@ KAFKA_PORT=19094 # Default: KAFKA_LATESTMSG_REDIS_TOPIC=latestMsgToRedis KAFKA_LATESTMSG_REDIS_TOPIC=latestMsgToRedis -# Topic in Kafka for pushing messages (e.g. notifications or updates). -# Default: KAFKA_MSG_PUSH_TOPIC=msgToPush -KAFKA_MSG_PUSH_TOPIC=msgToPush - -# Topic in Kafka for storing offline messages in MongoDB. 
-# Default: KAFKA_OFFLINEMSG_MONGO_TOPIC=offlineMsgToMongoMysql -KAFKA_OFFLINEMSG_MONGO_TOPIC=offlineMsgToMongoMysql - -# ----- MinIO Configuration ---- -# Address or hostname for the MinIO object storage service. -# Default: MINIO_ADDRESS=172.28.0.1 -MINIO_ADDRESS=172.28.0.6 - -# Port on which MinIO object storage service is running. +# MINIO_PORT +# ---------- +# MINIO_PORT sets the port for the MinIO object storage service. +# Upon changing this port, the MinIO endpoint URLs in the file must be updated +# to reflect this change. The endpoints include both the 'endpoint' and 'signEndpoint' +# under the MinIO configuration. +# # Default: MINIO_PORT=10005 MINIO_PORT=10005 @@ -155,19 +158,11 @@ MINIO_PORT=10005 MINIO_SECRET_KEY=openIM123 # ----- Prometheus Configuration ----- -# Address or hostname for the Prometheus service. -# Default: PROMETHEUS_ADDRESS=172.28.0.1 -PROMETHEUS_ADDRESS=172.28.0.10 - # Port on which Prometheus service is running. # Default: PROMETHEUS_PORT=19090 PROMETHEUS_PORT=19090 # ----- Grafana Configuration ----- -# Address or hostname for the Grafana service. -# Default: GRAFANA_ADDRESS=172.28.0.1 -GRAFANA_ADDRESS=172.28.0.11 - # Port on which Grafana service is running. # Default: GRAFANA_PORT=13000 GRAFANA_PORT=13000 @@ -183,41 +178,34 @@ OPENIM_WEB_PORT=11001 # ====================================== # ========= OpenIM Server ============== # ====================================== - -# Address or hostname for the OpenIM server. -# Default: OPENIM_SERVER_ADDRESS=172.28.0.1 -OPENIM_SERVER_ADDRESS=172.28.0.8 - # Port for the OpenIM WebSockets. # Default: OPENIM_WS_PORT=10001 OPENIM_WS_PORT=10001 -# Port for the OpenIM API. +# API_OPENIM_PORT +# --------------- +# This variable defines the port on which the OpenIM API service will listen. +# When changing this port, it's essential to update the apiURL in the config.yaml file +# to ensure the API service is accessible at the new port. +# # Default: API_OPENIM_PORT=10002 API_OPENIM_PORT=10002 - # ====================================== # ========== OpenIM Chat =============== # ====================================== # Branch name for OpenIM chat. # Default: CHAT_IMAGE_VERSION=main -# https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/version.md CHAT_IMAGE_VERSION=main -# Address or hostname for the OpenIM chat service. -# Default: OPENIM_CHAT_ADDRESS=172.28.0.1 -OPENIM_CHAT_ADDRESS=172.28.0.9 - # Port for the OpenIM chat API. # Default: OPENIM_CHAT_API_PORT=10008 OPENIM_CHAT_API_PORT=10008 -# Directory path for storing data files or related information for OpenIM chat. -# Default: OPENIM_CHAT_DATA_DIR=./openim-chat/main -OPENIM_CHAT_DATA_DIR=./openim-chat/main - +# Port for the OpenIM admin API. +# Default: OPENIM_ADMIN_API_PORT=10009 +OPENIM_ADMIN_API_PORT=10009 # ====================================== # ========== OpenIM Admin ============== @@ -226,10 +214,6 @@ OPENIM_CHAT_DATA_DIR=./openim-chat/main # Branch name for OpenIM server. # Default: SERVER_IMAGE_VERSION=main SERVER_IMAGE_VERSION=main - -# Port for the OpenIM admin API. -# Default: OPENIM_ADMIN_API_PORT=10009 -OPENIM_ADMIN_API_PORT=10009 # Port for the node exporter. 
# Default: NODE_EXPORTER_PORT=19100 diff --git a/config/templates/open-im-ng-example.conf b/config/templates/open-im-ng-example.conf index 10a2ecb52f..62befa6385 100644 --- a/config/templates/open-im-ng-example.conf +++ b/config/templates/open-im-ng-example.conf @@ -169,4 +169,4 @@ server { proxy_pass http://minio_console_2; # This uses the upstream directive definition to load balance } -} +} \ No newline at end of file diff --git a/config/templates/prometheus-dashboard.yaml b/config/templates/prometheus-dashboard.yaml index e1b569df3e..417f3d343b 100644 --- a/config/templates/prometheus-dashboard.yaml +++ b/config/templates/prometheus-dashboard.yaml @@ -1471,4 +1471,4 @@ "uid": "f5f5de9a-6ec5-499a-841e-6e901c33b1f7", "version": 16, "weekStart": "" -} \ No newline at end of file +} diff --git a/deployments/Readme.md b/deployments/Readme.md index 03f6d112bb..a7b288130d 100644 --- a/deployments/Readme.md +++ b/deployments/Readme.md @@ -165,7 +165,7 @@ export MINIO_ENDPOINT="http://im-minio:9000" export MINIO_SIGN_ENDPOINT="https://openim.server.com/im-minio-api" mkdir ./charts/generated-configs -../scripts/genconfig.sh ../scripts/install/environment.sh ./templates/openim.yaml > ./charts/generated-configs/config.yaml +../scripts/genconfig.sh ../scripts/install/environment.sh ./templates/config.yaml > ./charts/generated-configs/config.yaml cp ../config/notification.yaml ./charts/generated-configs/notification.yaml ../scripts/genconfig.sh ../scripts/install/environment.sh ./templates/helm-image.yaml > ./charts/generated-configs/helm-image.yaml ``` diff --git a/deployments/openim-charts.yaml b/deployments/openim-charts.yaml deleted file mode 100644 index 2b468dad06..0000000000 --- a/deployments/openim-charts.yaml +++ /dev/null @@ -1,1276 +0,0 @@ -# Copyright © 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- ---- -# Source: openim-api/templates/app-cm.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: openim-cm -data: - config.yaml: |+ - notification.yaml: |+ ---- -# Source: openim-api/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-api - labels: - helm.sh/chart: openim-api-0.1.0 - app.kubernetes.io/name: openim-api - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-api - app.kubernetes.io/instance: release-name ---- -# Source: openim-api/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-api - labels: - helm.sh/chart: openim-api-0.1.0 - app.kubernetes.io/name: openim-api - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-api - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-api - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-api - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-api/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-api - labels: - helm.sh/chart: openim-api-0.1.0 - app.kubernetes.io/name: openim-api - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-api - port: - number: 80 ---- -# Source: openim-msggateway/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-msggateway - labels: - helm.sh/chart: openim-msggateway-0.1.0 - app.kubernetes.io/name: openim-msggateway - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - - port: 88 - targetPort: rpc - protocol: TCP - name: rpc - selector: - app.kubernetes.io/name: openim-msggateway - app.kubernetes.io/instance: release-name ---- -# Source: openim-msggateway/templates/deployment.yaml -# Copyright © 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-msggateway - labels: - helm.sh/chart: openim-msggateway-0.1.0 - app.kubernetes.io/name: openim-msggateway - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-msggateway - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-msggateway - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-msggateway - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - - name: rpc - containerPort: 88 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-msggateway/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-msggateway - labels: - helm.sh/chart: openim-msggateway-0.1.0 - app.kubernetes.io/name: openim-msggateway - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-msggateway - port: - number: 80 ---- -# Source: openim-msgtransfer/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-msgtransfer - labels: - helm.sh/chart: openim-msgtransfer-0.1.0 - app.kubernetes.io/name: openim-msgtransfer - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-msgtransfer - app.kubernetes.io/instance: release-name ---- -# Source: openim-msgtransfer/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-msgtransfer - labels: - helm.sh/chart: openim-msgtransfer-0.1.0 - app.kubernetes.io/name: openim-msgtransfer - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-msgtransfer - app.kubernetes.io/instance: 
release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-msgtransfer - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-msgtransfer - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-msgtransfer/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-msgtransfer - labels: - helm.sh/chart: openim-msgtransfer-0.1.0 - app.kubernetes.io/name: openim-msgtransfer - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-msgtransfer - port: - number: 80 ---- -# Source: openim-push/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-push - labels: - helm.sh/chart: openim-push-0.1.0 - app.kubernetes.io/name: openim-push - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-push - app.kubernetes.io/instance: release-name ---- -# Source: openim-push/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-push - labels: - helm.sh/chart: openim-push-0.1.0 - app.kubernetes.io/name: openim-push - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-push - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-push - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-push - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-push/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-push - labels: - 
helm.sh/chart: openim-push-0.1.0 - app.kubernetes.io/name: openim-push - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-push - port: - number: 80 ---- -# Source: openim-rpc-auth/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-rpc-auth - labels: - helm.sh/chart: openim-rpc-auth-0.1.0 - app.kubernetes.io/name: openim-rpc-auth - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-rpc-auth - app.kubernetes.io/instance: release-name ---- -# Source: openim-rpc-auth/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-rpc-auth - labels: - helm.sh/chart: openim-rpc-auth-0.1.0 - app.kubernetes.io/name: openim-rpc-auth - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-rpc-auth - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-rpc-auth - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-rpc-auth - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-rpc-auth/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-rpc-auth - labels: - helm.sh/chart: openim-rpc-auth-0.1.0 - app.kubernetes.io/name: openim-rpc-auth - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-rpc-auth - port: - number: 80 ---- -# Source: openim-rpc-conversation/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-rpc-conversation - labels: - helm.sh/chart: openim-rpc-conversation-0.1.0 - app.kubernetes.io/name: openim-rpc-conversation - app.kubernetes.io/instance: release-name - 
app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-rpc-conversation - app.kubernetes.io/instance: release-name ---- -# Source: openim-rpc-conversation/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-rpc-conversation - labels: - helm.sh/chart: openim-rpc-conversation-0.1.0 - app.kubernetes.io/name: openim-rpc-conversation - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-rpc-conversation - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-rpc-conversation - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-rpc-conversation - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-rpc-conversation/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-rpc-conversation - labels: - helm.sh/chart: openim-rpc-conversation-0.1.0 - app.kubernetes.io/name: openim-rpc-conversation - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-rpc-conversation - port: - number: 80 ---- -# Source: openim-rpc-friend/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-rpc-friend - labels: - helm.sh/chart: openim-rpc-friend-0.1.0 - app.kubernetes.io/name: openim-rpc-friend - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-rpc-friend - app.kubernetes.io/instance: release-name ---- -# Source: openim-rpc-friend/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-rpc-friend - labels: - helm.sh/chart: openim-rpc-friend-0.1.0 - app.kubernetes.io/name: openim-rpc-friend - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-rpc-friend - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: 
openim-rpc-friend - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-rpc-friend - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-rpc-friend/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-rpc-friend - labels: - helm.sh/chart: openim-rpc-friend-0.1.0 - app.kubernetes.io/name: openim-rpc-friend - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-rpc-friend - port: - number: 80 ---- -# Source: openim-rpc-group/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-rpc-group - labels: - helm.sh/chart: openim-rpc-group-0.1.0 - app.kubernetes.io/name: openim-rpc-group - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-rpc-group - app.kubernetes.io/instance: release-name ---- -# Source: openim-rpc-group/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-rpc-group - labels: - helm.sh/chart: openim-rpc-group-0.1.0 - app.kubernetes.io/name: openim-rpc-group - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-rpc-group - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-rpc-group - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-rpc-group - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-rpc-group/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-rpc-group - labels: - 
helm.sh/chart: openim-rpc-group-0.1.0 - app.kubernetes.io/name: openim-rpc-group - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-rpc-group - port: - number: 80 ---- -# Source: openim-rpc-msg/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-rpc-msg - labels: - helm.sh/chart: openim-rpc-msg-0.1.0 - app.kubernetes.io/name: openim-rpc-msg - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-rpc-msg - app.kubernetes.io/instance: release-name ---- -# Source: openim-rpc-msg/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-rpc-msg - labels: - helm.sh/chart: openim-rpc-msg-0.1.0 - app.kubernetes.io/name: openim-rpc-msg - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-rpc-msg - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-rpc-msg - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-rpc-msg - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-rpc-msg/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-rpc-msg - labels: - helm.sh/chart: openim-rpc-msg-0.1.0 - app.kubernetes.io/name: openim-rpc-msg - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-rpc-msg - port: - number: 80 ---- -# Source: openim-rpc-third/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-rpc-third - labels: - helm.sh/chart: openim-rpc-third-0.1.0 - app.kubernetes.io/name: openim-rpc-third - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - 
app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-rpc-third - app.kubernetes.io/instance: release-name ---- -# Source: openim-rpc-third/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-rpc-third - labels: - helm.sh/chart: openim-rpc-third-0.1.0 - app.kubernetes.io/name: openim-rpc-third - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-rpc-third - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-rpc-third - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: openim-rpc-third - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-rpc-third/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-rpc-third - labels: - helm.sh/chart: openim-rpc-third-0.1.0 - app.kubernetes.io/name: openim-rpc-third - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-rpc-third - port: - number: 80 ---- -# Source: openim-rpc-user/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: release-name-openim-rpc-user - labels: - helm.sh/chart: openim-rpc-user-0.1.0 - app.kubernetes.io/name: openim-rpc-user - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: openim-rpc-user - app.kubernetes.io/instance: release-name ---- -# Source: openim-rpc-user/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: release-name-openim-rpc-user - labels: - helm.sh/chart: openim-rpc-user-0.1.0 - app.kubernetes.io/name: openim-rpc-user - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: openim-rpc-user - app.kubernetes.io/instance: release-name - template: - metadata: - labels: - app.kubernetes.io/name: openim-rpc-user - app.kubernetes.io/instance: release-name - spec: - serviceAccountName: default - securityContext: - {} - containers: - - name: 
openim-rpc-user - securityContext: - {} - image: "registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api:latest" - imagePullPolicy: Always - ports: - - name: http - containerPort: 80 - protocol: TCP - #livenessProbe: - # httpGet: - # path: / - # port: http - #readinessProbe: - # httpGet: - # path: / - # port: http - resources: - {} - volumeMounts: - - mountPath: /openim/openim-server/config/config.yaml - name: config - subPath: config.yaml - - mountPath: /openim/openim-server/config/ - name: config - subPath: notification.yaml - volumes: - - name: config - configMap: - name: openim-cm ---- -# Source: openim-rpc-user/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: release-name-openim-rpc-user - labels: - helm.sh/chart: openim-rpc-user-0.1.0 - app.kubernetes.io/name: openim-rpc-user - app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "1.16.0" - app.kubernetes.io/managed-by: Helm - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /./templates/helm-image.yaml - nginx.ingress.kubernetes.io/use-regex: "true" -spec: - ingressClassName: nginx - tls: - - hosts: - - "openim.server.com" - secretName: webapitls - rules: - - host: "openim.server.com" - http: - paths: - - path: /api(/|$)(.*) - pathType: ImplementationSpecific - backend: - service: - name: release-name-openim-rpc-user - port: - number: 80 diff --git a/deployments/templates/openim.yaml b/deployments/templates/config.yaml similarity index 94% rename from deployments/templates/openim.yaml rename to deployments/templates/config.yaml index 82087bc4d0..b76a1371d1 100644 --- a/deployments/templates/openim.yaml +++ b/deployments/templates/config.yaml @@ -14,7 +14,7 @@ # ----------------------------------------------------------------- # TODO: This config file is the template file -# --| source: deployments/templates/openim.yaml +# --| source: deployments/templates/config.yaml # --| env: scripts/install/environment # --| target: config/config.yaml # ----------------------------------------------------------------- @@ -527,3 +527,39 @@ prometheus: rtcPrometheusPort: [ ${RTC_PROM_PORT} ] thirdPrometheusPort: [ ${THIRD_PROM_PORT} ] messageTransferPrometheusPort: [ ${MSG_TRANSFER_PROM_PORT} ] # List of ports + +###################### LocalCache configuration information ###################### +# topic: redis subscriber channel +# slotNum: number of slots, multiple slots can prevent too many keys from competing for a lock +# slotSize: number of slots, the number of cached keys per slot, the overall cache quantity is slotNum * slotSize +# successExpire: successful cache time seconds +# failedExpire: failed cache time seconds +# disable local caching and annotate topic, slotNum, and slotSize +localCache: + user: + topic: DELETE_CACHE_USER + slotNum: 100 + slotSize: 2000 + successExpire: 300 + failedExpire: 5 + + group: + topic: DELETE_CACHE_GROUP + slotNum: 100 + slotSize: 2000 + successExpire: 300 + failedExpire: 5 + + friend: + topic: DELETE_CACHE_FRIEND + slotNum: 100 + slotSize: 2000 + successExpire: 300 + failedExpire: 5 + + conversation: + topic: DELETE_CACHE_CONVERSATION + slotNum: 100 + slotSize: 2000 + successExpire: 300 + failedExpire: 5 \ No newline at end of file diff --git a/deployments/templates/env-template.yaml b/deployments/templates/env-template.yaml index cbe900c19d..9b21c8c657 100644 --- a/deployments/templates/env-template.yaml +++ b/deployments/templates/env-template.yaml @@ -115,7 +115,7 @@ MONGO_OPENIM_USERNAME=${MONGO_OPENIM_USERNAME} 
MONGO_OPENIM_PASSWORD=${MONGO_OPENIM_PASSWORD} # Specifies the database name to be used within MongoDB. -# Default: MONGO_DATABASE=openIM_v3 +# Default: MONGO_DATABASE=openim_v3 MONGO_DATABASE=${MONGO_DATABASE} # ----- Redis Configuration ----- diff --git a/docker-compose.yml b/docker-compose.yml index 6d0f3c25a7..dcf7518e21 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -27,7 +27,7 @@ services: - wiredTigerCacheSizeGB=1 - MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME:-root} - MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD:-openIM123} - - MONGO_INITDB_DATABASE=${MONGO_DATABASE:-openIM_v3} + - MONGO_INITDB_DATABASE=${MONGO_DATABASE:-openim_v3} - MONGO_OPENIM_USERNAME=${MONGO_OPENIM_USERNAME:-openIM} # Non-root username - MONGO_OPENIM_PASSWORD=${MONGO_OPENIM_PASSWORD:-openIM123456} # Non-root password restart: always @@ -87,6 +87,7 @@ services: - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@:9093 - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094 - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://${DOCKER_BRIDGE_GATEWAY:-172.28.0.1}:${KAFKA_PORT:-19094} + # - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://127.0.0.1:${KAFKA_PORT:-19094} # Mac Deployment - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER networks: @@ -213,6 +214,7 @@ services: # - "${OPENIM_ADMIN_API_PORT:-10009}:10009" # volumes: # - "${DATA_DIR:-./}/components/openim-chat/logs:/openim/openim-chat/logs" + # - "${DATA_DIR:-./}/components/openim-chat/_output/logs:/openim/openim-chat/_output/logs" # - "${DATA_DIR:-./}/components/openim-chat/config:/openim/openim-chat/config" # restart: always # # user: root:root diff --git a/docs/.generated_docs b/docs/.generated_docs index 0867d721c2..f9b8da6757 100644 --- a/docs/.generated_docs +++ b/docs/.generated_docs @@ -31,7 +31,7 @@ docs/guide/en-US/cmd/openim/openim-rpc-user_list.md docs/guide/en-US/cmd/openim/openim-rpc-user_update.md docs/guide/en-US/cmd/openim/openim_validate.md docs/guide/en-US/cmd/openim/openim_version.md -docs/guide/en-US/yaml/openim/openim.yaml +docs/guide/en-US/yaml/openim/config.yaml docs/guide/en-US/yaml/openim/openim_color.yaml docs/guide/en-US/yaml/openim/openim_completion.yaml docs/guide/en-US/yaml/openim/openim_info.yaml diff --git a/docs/contrib/environment.md b/docs/contrib/environment.md index 7d672eda56..366a1d94fc 100644 --- a/docs/contrib/environment.md +++ b/docs/contrib/environment.md @@ -89,7 +89,7 @@ While the first two methods will be our main focus, it's worth noting that the t ### 1.2. Source Code Deployment -In the source code deployment method, the configuration generation process involves executing `make init`, which fundamentally runs the script `./scripts/init-config.sh`. This script utilizes variables defined in the [`environment.sh`](https://github.com/openimsdk/open-im-server/blob/main/scripts/install/environment.sh) script to render the [`openim.yaml`](https://github.com/openimsdk/open-im-server/blob/main/deployments/templates/openim.yaml) template file, subsequently generating the [`config.yaml`](https://github.com/openimsdk/open-im-server/blob/main/config/config.yaml) configuration file. +In the source code deployment method, the configuration generation process involves executing `make init`, which fundamentally runs the script `./scripts/init-config.sh`. 
This script utilizes variables defined in the [`environment.sh`](https://github.com/openimsdk/open-im-server/blob/main/scripts/install/environment.sh) script to render the [`config.yaml`](https://github.com/openimsdk/open-im-server/blob/main/deployments/templates/config.yaml) template file, subsequently generating the [`config.yaml`](https://github.com/openimsdk/open-im-server/blob/main/config/config.yaml) configuration file. ### 1.3. Docker Compose Deployment diff --git a/docs/contrib/mac-developer-deployment-guide.md b/docs/contrib/mac-developer-deployment-guide.md index 69e4e75030..af8d0b768d 100644 --- a/docs/contrib/mac-developer-deployment-guide.md +++ b/docs/contrib/mac-developer-deployment-guide.md @@ -45,86 +45,162 @@ Homebrew is an essential package manager for macOS. Install it using: git config --global user.email "your.email@example.com" ``` -### Forking and Cloning the Repository +### Setting Up the Devcontainer -To optimize and add logic to your instructions, particularly regarding deploying on a Mac, you can modify them as follows: +`Devcontainers` provide a Docker-based isolated development environment. -1. **Fork the OpenIM Repository**: Fork the OpenIM repository on GitHub to your account. +Read [README.md](https://github.com/openimsdk/open-im-server/tree/main/.devcontainer) in the `.devcontainer` directory of the project to learn more about the devcontainer. -2. **Clone Your Fork to Your Local Machine**: - Open your terminal and execute the following commands: +To set it up: - ```sh - # Clone the repository - git clone https://github.com//open-im-server.git +1. Install Docker Desktop for Mac from [Docker Hub](https://docs.docker.com/desktop/install/mac-install/). +2. Install Visual Studio Code and the Remote - Containers extension. +3. Open the cloned OpenIM repository in VS Code. +4. VS Code will prompt to reopen the project in a container. Accept this to set up the environment automatically. - # Set Docker bridging network mode for Mac - export DOCKER_BRIDGE_SUBNET=127.0.0.0/16 +### Installing Go and Dependencies - # Set OpenIM IP - export OPENIM_IP= +Use Homebrew to install Go: - # Initialize configuration - make init +```sh +brew install go +``` - # Start components using Docker - docker compose up -d +Ensure the version of Go is compatible with the version required by OpenIM (refer to the main documentation for version requirements). - # Start OpenIM Server - make start - ``` +### Additional Tools -3. **Additional Steps for Mac Deployment**: - If you are deploying on a Mac and wish to use the chat feature, you need to modify the `docker-compose` file. Specifically, you'll need to uncomment the sections related to `openim-chat` and `mysql`. +Install other required tools like Docker, Vagrant, and necessary GNU utils as described in the main documentation. - Here's how to do it: +## Mac Deployment openim-chat and openim-server - - Open the `docker-compose.yml` file in a text editor. - - Find the sections for `openim-chat` and `mysql`. - - Remove the comment marks (`#`) at the beginning of the lines in these sections to enable them. - - Save the file after making these changes. +To integrate the Chinese document into an English document for Linux deployment, we will first translate the content and then adapt it to suit the Linux environment. Here's how the translated and adapted content might look: -4. **Update and Restart Services**: - After modifying the `docker-compose` file, you need to update and restart the services to apply these changes. 
Run the following command in your terminal: +### Ensure a Clean Environment - ```sh - # Update and restart services - docker compose up -d +- It's recommended to execute in a new directory. +- Run `ps -ef | grep openim` to ensure no OpenIM processes are running. +- Run `ps -ef | grep chat` to check for absence of chat-related processes. +- Execute `docker ps` to verify there are no related containers running. - # Check openim-chat start - docker compose logs openim-chat - ``` +### Source Code Deployment - This command will re-read the `docker-compose.yml` file, apply the new configuration, and restart the necessary containers. +#### Deploying openim-server -Remember, replacing `` and `` with your actual GitHub username and desired IP address for OpenIM is crucial. These steps should streamline the setup process, particularly for Mac users wishing to use the chat feature. +Source code deployment is slightly more complex because Docker's networking on Linux differs from Mac. -### Setting Up the Devcontainer +```bash +git clone https://github.com/openimsdk/open-im-server +cd open-im-server -`Devcontainers` provide a Docker-based isolated development environment. +export OPENIM_IP="Your IP" # If it's a cloud server, setting might not be needed +make init # Generates configuration files +``` -Read [README.md](https://github.com/openimsdk/open-im-server/tree/main/.devcontainer) in the `.devcontainer` directory of the project to learn more about the devcontainer. +Before deploying openim-server, modify the Kafka logic in the docker-compose.yml file. Replace: -To set it up: +```yaml +- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://${DOCKER_BRIDGE_GATEWAY:-172.28.0.1}:${KAFKA_PORT:-19094} +``` -1. Install Docker Desktop for Mac from [Docker Hub](https://docs.docker.com/desktop/install/mac-install/). -2. Install Visual Studio Code and the Remote - Containers extension. -3. Open the cloned OpenIM repository in VS Code. -4. VS Code will prompt to reopen the project in a container. Accept this to set up the environment automatically. +With: -### Installing Go and Dependencies +```yaml +- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://127.0.0.1:${KAFKA_PORT:-19094} +``` -Use Homebrew to install Go: +Then start the service: -```sh -brew install go +```bash +docker compose up -d ``` -Ensure the version of Go is compatible with the version required by OpenIM (refer to the main documentation for version requirements). +Before starting the openim-server source, set `config/config.yaml` by replacing all instances of `172.28.0.1` with `127.0.0.1`: -### Additional Tools +```bash +vim config/config.yaml -c "%s/172\.28\.0\.1/127.0.0.1/g" -c "wq" +``` -Install other required tools like Docker, Vagrant, and necessary GNU utils as described in the main documentation. +Then start openim-server: + +```bash +make start +``` + +To check the startup: + +```bash +make check +``` + + + +#### Deploying openim-chat + +There are several ways to deploy openim-chat, either by source code or using Docker. + +Navigate back to the parent directory: + +```bash +cd .. +``` + +First, let's look at deploying chat from source: + +```bash +git clone https://github.com/openimsdk/chat +cd chat +make init # Generates configuration files +``` + +If openim-chat has not deployed MySQL, you will need to deploy it. 
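Before doing so, it is worth confirming whether a MySQL/MariaDB instance is already running (an illustrative check, not part of the official steps):

```bash
# List any running MySQL/MariaDB containers; an empty result means none is deployed yet.
docker ps --format '{{.Names}}\t{{.Image}}' | grep -Ei 'mysql|mariadb'
```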
Note that the official Docker Hub for MySQL does not support architectures like ARM, so you can use the newer version of the open-source edition: + +```bash +docker run -d \ + --name mysql \ + -p 13306:3306 \ + -p 3306:33060 \ + -v "$(pwd)/components/mysql/data:/var/lib/mysql" \ + -v "/etc/localtime:/etc/localtime" \ + -e MYSQL_ROOT_PASSWORD="openIM123" \ + --restart always \ + mariadb:10.6 +``` + +Before starting the source code of openim-chat, set `config/config.yaml` by replacing all instances of `172.28.0.1` with `127.0.0.1`: + +```bash +vim config/config.yaml -c "%s/172\.28\.0\.1/127.0.0.1/g" -c "wq" +``` + +Then start openim-chat from source: + +```bash +make start +``` + +To check, ensure the following four processes start successfully: + +```bash +make check +``` + +### Docker Deployment + +Refer to https://github.com/openimsdk/openim-docker for Docker deployment instructions, which can be followed similarly on Linux. + +```bash +git clone https://github.com/openimsdk/openim-docker +cd openim-docker +export OPENIM_IP="Your IP" +make init +docker compose up -d +docker compose logs -f openim-server +docker compose logs -f openim-chat +``` ## GitHub Development Workflow diff --git a/go.mod b/go.mod index 739cf6b8e7..acb825dba9 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.19 require ( firebase.google.com/go v3.13.0+incompatible - github.com/OpenIMSDK/protocol v0.0.48 - github.com/OpenIMSDK/tools v0.0.23 + github.com/OpenIMSDK/protocol v0.0.49 + github.com/OpenIMSDK/tools v0.0.28 github.com/bwmarrin/snowflake v0.3.0 // indirect github.com/dtm-labs/rockscache v0.1.1 github.com/gin-gonic/gin v1.9.1 @@ -18,6 +18,7 @@ require ( github.com/minio/minio-go/v7 v7.0.63 github.com/mitchellh/mapstructure v1.5.0 github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect + github.com/openimsdk/localcache v0.0.1 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.17.0 github.com/robfig/cron/v3 v3.0.1 @@ -82,6 +83,7 @@ require ( github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect @@ -157,4 +159,4 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect ) -replace github.com/OpenIMSDK/protocol v0.0.47 => github.com/AndrewZuo01/protocol v0.0.0-20240112093520-fd9c53e27b94 +replace github.com/openimsdk/localcache => ./pkg/localcache diff --git a/go.sum b/go.sum index 659ff30369..0e744ac87b 100644 --- a/go.sum +++ b/go.sum @@ -18,10 +18,10 @@ firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIw github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/IBM/sarama v1.41.3 h1:MWBEJ12vHC8coMjdEXFq/6ftO6DUZnQlFYcxtOJFa7c= github.com/IBM/sarama v1.41.3/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ= -github.com/OpenIMSDK/protocol v0.0.48 h1:8MIMjyzJRsruYhVv2ZKArFiOveroaofDOb3dlAdgjsw= -github.com/OpenIMSDK/protocol v0.0.48/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y= -github.com/OpenIMSDK/tools v0.0.23 h1:xozfrGzhbpNPlDTap5DLVPk+JfgZ/ZyIj4Cuu3/bm9w= -github.com/OpenIMSDK/tools v0.0.23/go.mod h1:eg+q4A34Qmu73xkY0mt37FHGMCMfC6CtmOnm0kFEGFI= +github.com/OpenIMSDK/protocol v0.0.49 h1:wcqJOMBis7f153zNI7V82Fc4WyqA1GanMgXUQgL618k= +github.com/OpenIMSDK/protocol 
v0.0.49/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y= +github.com/OpenIMSDK/tools v0.0.28 h1:UT0rN1ysCFvsxQXyuxAj2TEkHt4C/sUezy+ChKpgt2Y= +github.com/OpenIMSDK/tools v0.0.28/go.mod h1:eg+q4A34Qmu73xkY0mt37FHGMCMfC6CtmOnm0kFEGFI= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= @@ -171,6 +171,8 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9 github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= diff --git a/internal/api/route.go b/internal/api/route.go index 3f16d3e509..10907d086d 100644 --- a/internal/api/route.go +++ b/internal/api/route.go @@ -16,6 +16,7 @@ package api import ( "context" + "fmt" "net/http" "github.com/OpenIMSDK/protocol/constant" @@ -43,7 +44,7 @@ import ( ) func NewGinRouter(discov discoveryregistry.SvcDiscoveryRegistry, rdb redis.UniversalClient) *gin.Engine { - discov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials())) // 默认RPC中间件 + discov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) // 默认RPC中间件 gin.SetMode(gin.ReleaseMode) r := gin.New() if v, ok := binding.Validator.Engine().(*validator.Validate); ok { diff --git a/internal/api/user.go b/internal/api/user.go index 03d22c3540..e7bbd4bfbc 100644 --- a/internal/api/user.go +++ b/internal/api/user.go @@ -201,27 +201,27 @@ func (u *UserApi) GetSubscribeUsersStatus(c *gin.Context) { a2r.Call(user.UserClient.GetSubscribeUsersStatus, u.Client, c) } -// ProcessUserCommandAdd user general function add +// ProcessUserCommandAdd user general function add. func (u *UserApi) ProcessUserCommandAdd(c *gin.Context) { a2r.Call(user.UserClient.ProcessUserCommandAdd, u.Client, c) } -// ProcessUserCommandDelete user general function delete +// ProcessUserCommandDelete user general function delete. func (u *UserApi) ProcessUserCommandDelete(c *gin.Context) { a2r.Call(user.UserClient.ProcessUserCommandDelete, u.Client, c) } -// ProcessUserCommandUpdate user general function update +// ProcessUserCommandUpdate user general function update. func (u *UserApi) ProcessUserCommandUpdate(c *gin.Context) { a2r.Call(user.UserClient.ProcessUserCommandUpdate, u.Client, c) } -// ProcessUserCommandGet user general function get +// ProcessUserCommandGet user general function get. 
func (u *UserApi) ProcessUserCommandGet(c *gin.Context) { a2r.Call(user.UserClient.ProcessUserCommandGet, u.Client, c) } -// ProcessUserCommandGet user general function get all +// ProcessUserCommandGet user general function get all. func (u *UserApi) ProcessUserCommandGetAll(c *gin.Context) { a2r.Call(user.UserClient.ProcessUserCommandGetAll, u.Client, c) } diff --git a/internal/msggateway/callback.go b/internal/msggateway/callback.go index 7d53817547..d9507c85ef 100644 --- a/internal/msggateway/callback.go +++ b/internal/msggateway/callback.go @@ -105,7 +105,7 @@ func CallbackUserKickOff(ctx context.Context, userID string, platformID int) err // func callbackUserOnline(operationID, userID string, platformID int, token string, isAppBackground bool, connID // string) cbApi.CommonCallbackResp { // callbackResp := cbApi.CommonCallbackResp{OperationID: operationID} -// if !config.Config.Callback.CallbackUserOnline.Enable { +// if !config.Config.Callback.CallbackUserOnline.WithEnable { // return callbackResp // } // callbackUserOnlineReq := cbApi.CallbackUserOnlineReq{ @@ -134,7 +134,7 @@ func CallbackUserKickOff(ctx context.Context, userID string, platformID int) err //} //func callbackUserOffline(operationID, userID string, platformID int, connID string) cbApi.CommonCallbackResp { // callbackResp := cbApi.CommonCallbackResp{OperationID: operationID} -// if !config.Config.Callback.CallbackUserOffline.Enable { +// if !config.Config.Callback.CallbackUserOffline.WithEnable { // return callbackResp // } // callbackOfflineReq := cbApi.CallbackUserOfflineReq{ @@ -161,7 +161,7 @@ func CallbackUserKickOff(ctx context.Context, userID string, platformID int) err //} //func callbackUserKickOff(operationID string, userID string, platformID int) cbApi.CommonCallbackResp { // callbackResp := cbApi.CommonCallbackResp{OperationID: operationID} -// if !config.Config.Callback.CallbackUserKickOff.Enable { +// if !config.Config.Callback.CallbackUserKickOff.WithEnable { // return callbackResp // } // callbackUserKickOffReq := cbApi.CallbackUserKickOffReq{ diff --git a/internal/msggateway/hub_server.go b/internal/msggateway/hub_server.go index 807c4af3bc..abf8342250 100644 --- a/internal/msggateway/hub_server.go +++ b/internal/msggateway/hub_server.go @@ -25,8 +25,6 @@ import ( "github.com/OpenIMSDK/tools/errs" "github.com/OpenIMSDK/tools/log" "github.com/OpenIMSDK/tools/mcontext" - "github.com/OpenIMSDK/tools/utils" - "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache" @@ -59,7 +57,7 @@ type Server struct { rpcPort int prometheusPort int LongConnServer LongConnServer - pushTerminal []int + pushTerminal map[int]struct{} } func (s *Server) SetLongConnServer(LongConnServer LongConnServer) { @@ -67,12 +65,15 @@ func (s *Server) SetLongConnServer(LongConnServer LongConnServer) { } func NewServer(rpcPort int, proPort int, longConnServer LongConnServer) *Server { - return &Server{ + s := &Server{ rpcPort: rpcPort, prometheusPort: proPort, LongConnServer: longConnServer, - pushTerminal: []int{constant.IOSPlatformID, constant.AndroidPlatformID}, + pushTerminal: make(map[int]struct{}), } + s.pushTerminal[constant.IOSPlatformID] = struct{}{} + s.pushTerminal[constant.AndroidPlatformID] = struct{}{} + return s } func (s *Server) OnlinePushMsg( @@ -126,13 +127,9 @@ func (s *Server) OnlineBatchPushOneMsg( panic("implement me") } -func (s *Server) SuperGroupOnlineBatchPushOneMsg( - ctx context.Context, - 
req *msggateway.OnlineBatchPushOneMsgReq, +func (s *Server) SuperGroupOnlineBatchPushOneMsg(ctx context.Context, req *msggateway.OnlineBatchPushOneMsgReq, ) (*msggateway.OnlineBatchPushOneMsgResp, error) { - var singleUserResults []*msggateway.SingleMsgToUserResults - for _, v := range req.PushToUserIDs { var resp []*msggateway.SingleMsgToUserPlatform results := &msggateway.SingleMsgToUserResults{ @@ -153,23 +150,22 @@ func (s *Server) SuperGroupOnlineBatchPushOneMsg( } userPlatform := &msggateway.SingleMsgToUserPlatform{ - RecvID: v, - RecvPlatFormID: int32(client.PlatformID), + PlatFormID: int32(client.PlatformID), } if !client.IsBackground || (client.IsBackground && client.PlatformID != constant.IOSPlatformID) { err := client.PushMessage(ctx, req.MsgData) if err != nil { - userPlatform.ResultCode = -2 + userPlatform.ResultCode = int64(errs.ErrPushMsgErr.Code()) resp = append(resp, userPlatform) } else { - if utils.IsContainInt(client.PlatformID, s.pushTerminal) { + if _, ok := s.pushTerminal[client.PlatformID]; ok { results.OnlinePush = true resp = append(resp, userPlatform) } } } else { - userPlatform.ResultCode = -3 + userPlatform.ResultCode = int64(errs.ErrIOSBackgroundPushErr.Code()) resp = append(resp, userPlatform) } } diff --git a/internal/msgtransfer/init.go b/internal/msgtransfer/init.go index a8d10799f2..7d692662d3 100644 --- a/internal/msgtransfer/init.go +++ b/internal/msgtransfer/init.go @@ -66,7 +66,7 @@ func StartTransfer(prometheusPort int) error { if err := client.CreateRpcRootNodes(config.Config.GetServiceNames()); err != nil { return err } - client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials())) + client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) msgModel := cache.NewMsgCacheModel(rdb) msgDocModel := unrelation.NewMsgMongoDriver(mongo.GetDatabase()) msgDatabase := controller.NewCommonMsgDatabase(msgDocModel, msgModel) diff --git a/internal/push/callback.go b/internal/push/callback.go index 99a58fb07f..90f918fc0f 100644 --- a/internal/push/callback.go +++ b/internal/push/callback.go @@ -16,6 +16,7 @@ package push import ( "context" + "encoding/json" "github.com/OpenIMSDK/protocol/constant" "github.com/OpenIMSDK/protocol/sdkws" @@ -136,3 +137,14 @@ func callbackBeforeSuperGroupOnlinePush( } return nil } +func GetContent(msg *sdkws.MsgData) string { + if msg.ContentType >= constant.NotificationBegin && msg.ContentType <= constant.NotificationEnd { + var notification sdkws.NotificationElem + if err := json.Unmarshal(msg.Content, ¬ification); err != nil { + return "" + } + return notification.Detail + } else { + return string(msg.Content) + } +} diff --git a/internal/push/consumer_init.go b/internal/push/consumer_init.go deleted file mode 100644 index b72c32bb13..0000000000 --- a/internal/push/consumer_init.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package push - -type Consumer struct { - pushCh ConsumerHandler - successCount uint64 -} - -func NewConsumer(pusher *Pusher) *Consumer { - return &Consumer{ - pushCh: *NewConsumerHandler(pusher), - } -} - -func (c *Consumer) Start() { - // statistics.NewStatistics(&c.successCount, config.Config.ModuleName.PushName, fmt.Sprintf("%d second push to - // msg_gateway count", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval) - go c.pushCh.pushConsumerGroup.RegisterHandleAndConsumer(&c.pushCh) -} diff --git a/internal/push/offlinepush/dummy/push.go b/internal/push/offlinepush/dummy/push.go index f147886d9a..395c2f45e8 100644 --- a/internal/push/offlinepush/dummy/push.go +++ b/internal/push/offlinepush/dummy/push.go @@ -16,17 +16,16 @@ package dummy import ( "context" - - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" ) -func NewClient() *Dummy { +func NewDummy() *Dummy { return &Dummy{} } type Dummy struct { } -func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *offlinepush.Opts) error { +func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error { return nil } diff --git a/internal/push/offlinepush/fcm/push.go b/internal/push/offlinepush/fcm/push.go index 8145d4c170..508c70f559 100644 --- a/internal/push/offlinepush/fcm/push.go +++ b/internal/push/offlinepush/fcm/push.go @@ -16,6 +16,7 @@ package fcm import ( "context" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "path/filepath" firebase "firebase.google.com/go" @@ -25,7 +26,6 @@ import ( "github.com/OpenIMSDK/protocol/constant" - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache" ) @@ -39,7 +39,7 @@ type Fcm struct { cache cache.MsgModel } -func NewClient(cache cache.MsgModel) *Fcm { +func NewFcm(cache cache.MsgModel) *Fcm { projectRoot := config.GetProjectRoot() credentialsFilePath := filepath.Join(projectRoot, "config", config.Config.Push.Fcm.ServiceAccount) opt := option.WithCredentialsFile(credentialsFilePath) @@ -56,7 +56,7 @@ func NewClient(cache cache.MsgModel) *Fcm { return &Fcm{fcmMsgCli: fcmMsgClient, cache: cache} } -func (f *Fcm) Push(ctx context.Context, userIDs []string, title, content string, opts *offlinepush.Opts) error { +func (f *Fcm) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error { // accounts->registrationToken allTokens := make(map[string][]string, 0) for _, account := range userIDs { diff --git a/internal/push/offlinepush/getui/push.go b/internal/push/offlinepush/getui/push.go index b657c9c23c..50308f6aba 100644 --- a/internal/push/offlinepush/getui/push.go +++ b/internal/push/offlinepush/getui/push.go @@ -19,6 +19,7 @@ import ( "crypto/sha256" "encoding/hex" "errors" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "strconv" "sync" "time" @@ -30,7 +31,6 @@ import ( "github.com/OpenIMSDK/tools/mcontext" "github.com/OpenIMSDK/tools/utils/splitter" - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache" http2 
"github.com/openimsdk/open-im-server/v3/pkg/common/http" @@ -55,17 +55,17 @@ const ( taskIDTTL = 1000 * 60 * 60 * 24 ) -type Client struct { +type GeTui struct { cache cache.MsgModel tokenExpireTime int64 taskIDTTL int64 } -func NewClient(cache cache.MsgModel) *Client { - return &Client{cache: cache, tokenExpireTime: tokenExpireTime, taskIDTTL: taskIDTTL} +func NewGeTui(cache cache.MsgModel) *GeTui { + return &GeTui{cache: cache, tokenExpireTime: tokenExpireTime, taskIDTTL: taskIDTTL} } -func (g *Client) Push(ctx context.Context, userIDs []string, title, content string, opts *offlinepush.Opts) error { +func (g *GeTui) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error { token, err := g.cache.GetGetuiToken(ctx) if err != nil { if errs.Unwrap(err) == redis.Nil { @@ -111,7 +111,7 @@ func (g *Client) Push(ctx context.Context, userIDs []string, title, content stri return err } -func (g *Client) Auth(ctx context.Context, timeStamp int64) (token string, expireTime int64, err error) { +func (g *GeTui) Auth(ctx context.Context, timeStamp int64) (token string, expireTime int64, err error) { h := sha256.New() h.Write( []byte(config.Config.Push.GeTui.AppKey + strconv.Itoa(int(timeStamp)) + config.Config.Push.GeTui.MasterSecret), @@ -131,7 +131,7 @@ func (g *Client) Auth(ctx context.Context, timeStamp int64) (token string, expir return respAuth.Token, int64(expire), err } -func (g *Client) GetTaskID(ctx context.Context, token string, pushReq PushReq) (string, error) { +func (g *GeTui) GetTaskID(ctx context.Context, token string, pushReq PushReq) (string, error) { respTask := TaskResp{} ttl := int64(1000 * 60 * 5) pushReq.Settings = &Settings{TTL: &ttl} @@ -143,7 +143,7 @@ func (g *Client) GetTaskID(ctx context.Context, token string, pushReq PushReq) ( } // max num is 999. 
-func (g *Client) batchPush(ctx context.Context, token string, userIDs []string, pushReq PushReq) error { +func (g *GeTui) batchPush(ctx context.Context, token string, userIDs []string, pushReq PushReq) error { taskID, err := g.GetTaskID(ctx, token, pushReq) if err != nil { return err @@ -152,21 +152,21 @@ func (g *Client) batchPush(ctx context.Context, token string, userIDs []string, return g.request(ctx, batchPushURL, pushReq, token, nil) } -func (g *Client) singlePush(ctx context.Context, token, userID string, pushReq PushReq) error { +func (g *GeTui) singlePush(ctx context.Context, token, userID string, pushReq PushReq) error { operationID := mcontext.GetOperationID(ctx) pushReq.RequestID = &operationID pushReq.Audience = &Audience{Alias: []string{userID}} return g.request(ctx, pushURL, pushReq, token, nil) } -func (g *Client) request(ctx context.Context, url string, input any, token string, output any) error { +func (g *GeTui) request(ctx context.Context, url string, input any, token string, output any) error { header := map[string]string{"token": token} resp := &Resp{} resp.Data = output return g.postReturn(ctx, config.Config.Push.GeTui.PushUrl+url, header, input, resp, 3) } -func (g *Client) postReturn( +func (g *GeTui) postReturn( ctx context.Context, url string, header map[string]string, @@ -181,7 +181,7 @@ func (g *Client) postReturn( return output.parseError() } -func (g *Client) getTokenAndSave2Redis(ctx context.Context) (token string, err error) { +func (g *GeTui) getTokenAndSave2Redis(ctx context.Context) (token string, err error) { token, _, err = g.Auth(ctx, time.Now().UnixNano()/1e6) if err != nil { return @@ -193,7 +193,7 @@ func (g *Client) getTokenAndSave2Redis(ctx context.Context) (token string, err e return token, nil } -func (g *Client) GetTaskIDAndSave2Redis(ctx context.Context, token string, pushReq PushReq) (taskID string, err error) { +func (g *GeTui) GetTaskIDAndSave2Redis(ctx context.Context, token string, pushReq PushReq) (taskID string, err error) { pushReq.Settings = &Settings{TTL: &g.taskIDTTL} taskID, err = g.GetTaskID(ctx, token, pushReq) if err != nil { diff --git a/internal/push/offlinepush/jpush/push.go b/internal/push/offlinepush/jpush/push.go index 567269f3c6..f25ff6f4c6 100644 --- a/internal/push/offlinepush/jpush/push.go +++ b/internal/push/offlinepush/jpush/push.go @@ -18,8 +18,8 @@ import ( "context" "encoding/base64" "fmt" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/jpush/body" "github.com/openimsdk/open-im-server/v3/pkg/common/config" http2 "github.com/openimsdk/open-im-server/v3/pkg/common/http" @@ -27,7 +27,7 @@ import ( type JPush struct{} -func NewClient() *JPush { +func NewJPush() *JPush { return &JPush{} } @@ -46,7 +46,7 @@ func (j *JPush) getAuthorization(appKey string, masterSecret string) string { return Authorization } -func (j *JPush) Push(ctx context.Context, userIDs []string, title, content string, opts *offlinepush.Opts) error { +func (j *JPush) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error { var pf body.Platform pf.SetAll() var au body.Audience diff --git a/internal/push/offlinepush/offlinepush_interface.go b/internal/push/offlinepush/offlinepush_interface.go deleted file mode 100644 index a5d4051f90..0000000000 --- a/internal/push/offlinepush/offlinepush_interface.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2023 
OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package offlinepush - -import ( - "context" -) - -// OfflinePusher Offline Pusher. -type OfflinePusher interface { - Push(ctx context.Context, userIDs []string, title, content string, opts *Opts) error -} - -// Opts opts. -type Opts struct { - Signal *Signal - IOSPushSound string - IOSBadgeCount bool - Ex string -} - -// Signal message id. -type Signal struct { - ClientMsgID string -} diff --git a/internal/push/offlinepush/offlinepusher.go b/internal/push/offlinepush/offlinepusher.go new file mode 100644 index 0000000000..83bf8e66e7 --- /dev/null +++ b/internal/push/offlinepush/offlinepusher.go @@ -0,0 +1,52 @@ +// Copyright © 2023 OpenIM. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package offlinepush + +import ( + "context" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/dummy" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/fcm" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/getui" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/jpush" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache" +) + +const ( + GETUI = "getui" + FIREBASE = "fcm" + JPUSH = "jpush" +) + +// OfflinePusher Offline Pusher. +type OfflinePusher interface { + Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error +} + +func NewOfflinePusher(cache cache.MsgModel) OfflinePusher { + var offlinePusher OfflinePusher + switch config.Config.Push.Enable { + case GETUI: + offlinePusher = getui.NewGeTui(cache) + case FIREBASE: + offlinePusher = fcm.NewFcm(cache) + case JPUSH: + offlinePusher = jpush.NewJPush() + default: + offlinePusher = dummy.NewDummy() + } + return offlinePusher +} diff --git a/internal/push/offlinepush/options/options.go b/internal/push/offlinepush/options/options.go new file mode 100644 index 0000000000..056f6b7113 --- /dev/null +++ b/internal/push/offlinepush/options/options.go @@ -0,0 +1,14 @@ +package options + +// Opts opts. +type Opts struct { + Signal *Signal + IOSPushSound string + IOSBadgeCount bool + Ex string +} + +// Signal message id. 
+type Signal struct { + ClientMsgID string +} diff --git a/internal/push/onlinepusher.go b/internal/push/onlinepusher.go new file mode 100644 index 0000000000..35b9a97b7f --- /dev/null +++ b/internal/push/onlinepusher.go @@ -0,0 +1,211 @@ +package push + +import ( + "context" + "github.com/OpenIMSDK/protocol/msggateway" + "github.com/OpenIMSDK/protocol/sdkws" + "github.com/OpenIMSDK/tools/discoveryregistry" + "github.com/OpenIMSDK/tools/log" + "github.com/OpenIMSDK/tools/utils" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "os" + "sync" +) + +const ( + ENVNAME = "ENVS_DISCOVERY" + KUBERNETES = "k8s" + ZOOKEEPER = "zookeeper" +) + +type OnlinePusher interface { + GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, + pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) + GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, wsResults []*msggateway.SingleMsgToUserResults, + pushToUserIDs *[]string) []string +} + +type emptyOnlinePUsher struct{} + +func newEmptyOnlinePUsher() *emptyOnlinePUsher { + return &emptyOnlinePUsher{} +} + +func (emptyOnlinePUsher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, + pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) { + log.ZWarn(ctx, "emptyOnlinePUsher GetConnsAndOnlinePush", nil) + return nil, nil +} +func (u emptyOnlinePUsher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, + wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string { + log.ZWarn(ctx, "emptyOnlinePUsher GetOnlinePushFailedUserIDs", nil) + return nil +} + +func NewOnlinePusher(disCov discoveryregistry.SvcDiscoveryRegistry) OnlinePusher { + var envType string + if value := os.Getenv(ENVNAME); value != "" { + envType = os.Getenv(ENVNAME) + } else { + envType = config.Config.Envs.Discovery + } + switch envType { + case KUBERNETES: + return NewK8sStaticConsistentHash(disCov) + case ZOOKEEPER: + return NewDefaultAllNode(disCov) + default: + return newEmptyOnlinePUsher() + } +} + +type DefaultAllNode struct { + disCov discoveryregistry.SvcDiscoveryRegistry +} + +func NewDefaultAllNode(disCov discoveryregistry.SvcDiscoveryRegistry) *DefaultAllNode { + return &DefaultAllNode{disCov: disCov} +} + +func (d *DefaultAllNode) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, + pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) { + conns, err := d.disCov.GetConns(ctx, config.Config.RpcRegisterName.OpenImMessageGatewayName) + log.ZDebug(ctx, "get gateway conn", "conn length", len(conns)) + if err != nil { + return nil, err + } + + var ( + mu sync.Mutex + wg = errgroup.Group{} + input = &msggateway.OnlineBatchPushOneMsgReq{MsgData: msg, PushToUserIDs: pushToUserIDs} + maxWorkers = config.Config.Push.MaxConcurrentWorkers + ) + + if maxWorkers < 3 { + maxWorkers = 3 + } + + wg.SetLimit(maxWorkers) + + // Online push message + for _, conn := range conns { + conn := conn // loop var safe + wg.Go(func() error { + msgClient := msggateway.NewMsgGatewayClient(conn) + reply, err := msgClient.SuperGroupOnlineBatchPushOneMsg(ctx, input) + if err != nil { + return nil + } + + log.ZDebug(ctx, "push result", "reply", reply) + if reply != nil && reply.SinglePushResult != nil { + mu.Lock() + wsResults = append(wsResults, reply.SinglePushResult...) 
+ mu.Unlock() + } + + return nil + }) + } + + _ = wg.Wait() + + // always return nil + return wsResults, nil +} + +func (d *DefaultAllNode) GetOnlinePushFailedUserIDs(_ context.Context, msg *sdkws.MsgData, + wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string { + + onlineSuccessUserIDs := []string{msg.SendID} + for _, v := range wsResults { + //message sender do not need offline push + if msg.SendID == v.UserID { + continue + } + // mobile online push success + if v.OnlinePush { + onlineSuccessUserIDs = append(onlineSuccessUserIDs, v.UserID) + } + + } + + return utils.SliceSub(*pushToUserIDs, onlineSuccessUserIDs) +} + +type K8sStaticConsistentHash struct { + disCov discoveryregistry.SvcDiscoveryRegistry +} + +func NewK8sStaticConsistentHash(disCov discoveryregistry.SvcDiscoveryRegistry) *K8sStaticConsistentHash { + return &K8sStaticConsistentHash{disCov: disCov} +} + +func (k *K8sStaticConsistentHash) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, + pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) { + + var usersHost = make(map[string][]string) + for _, v := range pushToUserIDs { + tHost, err := k.disCov.GetUserIdHashGatewayHost(ctx, v) + if err != nil { + log.ZError(ctx, "get msg gateway hash error", err) + return nil, err + } + tUsers, tbl := usersHost[tHost] + if tbl { + tUsers = append(tUsers, v) + usersHost[tHost] = tUsers + } else { + usersHost[tHost] = []string{v} + } + } + log.ZDebug(ctx, "genUsers send hosts struct:", "usersHost", usersHost) + var usersConns = make(map[*grpc.ClientConn][]string) + for host, userIds := range usersHost { + tconn, _ := k.disCov.GetConn(ctx, host) + usersConns[tconn] = userIds + } + var ( + mu sync.Mutex + wg = errgroup.Group{} + maxWorkers = config.Config.Push.MaxConcurrentWorkers + ) + if maxWorkers < 3 { + maxWorkers = 3 + } + wg.SetLimit(maxWorkers) + for conn, userIds := range usersConns { + tcon := conn + tuserIds := userIds + wg.Go(func() error { + input := &msggateway.OnlineBatchPushOneMsgReq{MsgData: msg, PushToUserIDs: tuserIds} + msgClient := msggateway.NewMsgGatewayClient(tcon) + reply, err := msgClient.SuperGroupOnlineBatchPushOneMsg(ctx, input) + if err != nil { + return nil + } + log.ZDebug(ctx, "push result", "reply", reply) + if reply != nil && reply.SinglePushResult != nil { + mu.Lock() + wsResults = append(wsResults, reply.SinglePushResult...) 
+ mu.Unlock() + } + return nil + }) + } + _ = wg.Wait() + return wsResults, nil +} +func (k *K8sStaticConsistentHash) GetOnlinePushFailedUserIDs(_ context.Context, _ *sdkws.MsgData, + wsResults []*msggateway.SingleMsgToUserResults, _ *[]string) []string { + var needOfflinePushUserIDs []string + for _, v := range wsResults { + if !v.OnlinePush { + needOfflinePushUserIDs = append(needOfflinePushUserIDs, v.UserID) + } + } + return needOfflinePushUserIDs +} diff --git a/internal/push/push.go b/internal/push/push.go new file mode 100644 index 0000000000..90e62ae03e --- /dev/null +++ b/internal/push/push.go @@ -0,0 +1,51 @@ +package push + +import ( + "context" + pbpush "github.com/OpenIMSDK/protocol/push" + "github.com/OpenIMSDK/tools/discoveryregistry" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" + "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/db/controller" + "google.golang.org/grpc" +) + +type pushServer struct { + database controller.PushDatabase + disCov discoveryregistry.SvcDiscoveryRegistry + offlinePusher offlinepush.OfflinePusher + pushCh *ConsumerHandler +} + +func (p pushServer) PushMsg(ctx context.Context, req *pbpush.PushMsgReq) (*pbpush.PushMsgResp, error) { + //todo reserved Interface + return nil, nil +} + +func (p pushServer) DelUserPushToken(ctx context.Context, + req *pbpush.DelUserPushTokenReq) (resp *pbpush.DelUserPushTokenResp, err error) { + if err = p.database.DelFcmToken(ctx, req.UserID, int(req.PlatformID)); err != nil { + return nil, err + } + return &pbpush.DelUserPushTokenResp{}, nil +} + +func Start(disCov discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error { + rdb, err := cache.NewRedis() + if err != nil { + return err + } + cacheModel := cache.NewMsgCacheModel(rdb) + offlinePusher := offlinepush.NewOfflinePusher(cacheModel) + database := controller.NewPushDatabase(cacheModel) + + consumer := NewConsumerHandler(offlinePusher, rdb, disCov) + pbpush.RegisterPushMsgServiceServer(server, &pushServer{ + database: database, + disCov: disCov, + offlinePusher: offlinePusher, + pushCh: consumer, + }) + go consumer.pushConsumerGroup.RegisterHandleAndConsumer(consumer) + return nil +} diff --git a/internal/push/push_handler.go b/internal/push/push_handler.go index 90e1c0756f..8e3edf250b 100644 --- a/internal/push/push_handler.go +++ b/internal/push/push_handler.go @@ -16,6 +16,17 @@ package push import ( "context" + "encoding/json" + "github.com/OpenIMSDK/protocol/sdkws" + "github.com/OpenIMSDK/tools/discoveryregistry" + "github.com/OpenIMSDK/tools/mcontext" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" + "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" + "github.com/openimsdk/open-im-server/v3/pkg/rpccache" + "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/redis/go-redis/v9" "github.com/IBM/sarama" "google.golang.org/protobuf/proto" @@ -31,18 +42,31 @@ import ( ) type ConsumerHandler struct { - pushConsumerGroup *kfk.MConsumerGroup - pusher *Pusher + pushConsumerGroup *kfk.MConsumerGroup + offlinePusher offlinepush.OfflinePusher + onlinePusher OnlinePusher + groupLocalCache *rpccache.GroupLocalCache + conversationLocalCache *rpccache.ConversationLocalCache + msgRpcClient rpcclient.MessageRpcClient + conversationRpcClient rpcclient.ConversationRpcClient + groupRpcClient 
rpcclient.GroupRpcClient } -func NewConsumerHandler(pusher *Pusher) *ConsumerHandler { +func NewConsumerHandler(offlinePusher offlinepush.OfflinePusher, + rdb redis.UniversalClient, disCov discoveryregistry.SvcDiscoveryRegistry) *ConsumerHandler { var consumerHandler ConsumerHandler - consumerHandler.pusher = pusher consumerHandler.pushConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{ KafkaVersion: sarama.V2_0_0_0, OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false, }, []string{config.Config.Kafka.MsgToPush.Topic}, config.Config.Kafka.Addr, config.Config.Kafka.ConsumerGroupID.MsgToPush) + consumerHandler.offlinePusher = offlinePusher + consumerHandler.onlinePusher = NewOnlinePusher(disCov) + consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(disCov) + consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupRpcClient, rdb) + consumerHandler.msgRpcClient = rpcclient.NewMessageRpcClient(disCov) + consumerHandler.conversationRpcClient = rpcclient.NewConversationRpcClient(disCov) + consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, rdb) return &consumerHandler } @@ -65,7 +89,7 @@ func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) { var err error switch msgFromMQ.MsgData.SessionType { case constant.SuperGroupChatType: - err = c.pusher.Push2SuperGroup(ctx, pbData.MsgData.GroupID, pbData.MsgData) + err = c.Push2SuperGroup(ctx, pbData.MsgData.GroupID, pbData.MsgData) default: var pushUserIDList []string isSenderSync := utils.GetSwitchFromOptions(pbData.MsgData.Options, constant.IsSenderSync) @@ -74,18 +98,14 @@ func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) { } else { pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID, pbData.MsgData.SendID) } - err = c.pusher.Push2User(ctx, pushUserIDList, pbData.MsgData) + err = c.Push2User(ctx, pushUserIDList, pbData.MsgData) } if err != nil { - if err == errNoOfflinePusher { - log.ZWarn(ctx, "offline push failed", err, "msg", pbData.String()) - } else { - log.ZError(ctx, "push failed", err, "msg", pbData.String()) - } + log.ZError(ctx, "push failed", err, "msg", pbData.String()) } } -func (ConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } -func (ConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } +func (*ConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } +func (*ConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim, ) error { @@ -96,3 +116,243 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, } return nil } + +// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType. 
+func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) error { + log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String()) + if err := callbackOnlinePush(ctx, userIDs, msg); err != nil { + return err + } + + wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, userIDs) + if err != nil { + return err + } + + log.ZDebug(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs) + + if !c.shouldPushOffline(ctx, msg) { + return nil + } + + for _, v := range wsResults { + //message sender do not need offline push + if msg.SendID == v.UserID { + continue + } + //receiver online push success + if v.OnlinePush { + return nil + } + } + offlinePUshUserID := []string{msg.RecvID} + //receiver offline push + if err = callbackOfflinePush(ctx, offlinePUshUserID, msg, nil); err != nil { + return err + } + + err = c.offlinePushMsg(ctx, msg, offlinePUshUserID) + if err != nil { + return err + } + + return nil +} + +func (c *ConsumerHandler) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) { + log.ZDebug(ctx, "Get super group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID) + var pushToUserIDs []string + if err = callbackBeforeSuperGroupOnlinePush(ctx, groupID, msg, &pushToUserIDs); err != nil { + return err + } + + err = c.groupMessagesHandler(ctx, groupID, &pushToUserIDs, msg) + if err != nil { + return err + } + + wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs) + if err != nil { + return err + } + + log.ZDebug(ctx, "group push result", "result", wsResults, "msg", msg) + + if !c.shouldPushOffline(ctx, msg) { + return nil + } + needOfflinePushUserIDs := c.onlinePusher.GetOnlinePushFailedUserIDs(ctx, msg, wsResults, &pushToUserIDs) + + //filter some user, like don not disturb or don't need offline push etc. + needOfflinePushUserIDs, err = c.filterGroupMessageOfflinePush(ctx, groupID, msg, needOfflinePushUserIDs) + if err != nil { + return err + } + // Use offline push messaging + if len(needOfflinePushUserIDs) > 0 { + var offlinePushUserIDs []string + err = callbackOfflinePush(ctx, needOfflinePushUserIDs, msg, &offlinePushUserIDs) + if err != nil { + return err + } + + if len(offlinePushUserIDs) > 0 { + needOfflinePushUserIDs = offlinePushUserIDs + } + + err = c.offlinePushMsg(ctx, msg, needOfflinePushUserIDs) + if err != nil { + log.ZError(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg) + return err + } + + } + + return nil +} + +func (c *ConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error { + title, content, opts, err := c.getOfflinePushInfos(msg) + if err != nil { + return err + } + err = c.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts) + if err != nil { + prommetrics.MsgOfflinePushFailedCounter.Inc() + return err + } + return nil +} + +func (c *ConsumerHandler) filterGroupMessageOfflinePush(ctx context.Context, groupID string, msg *sdkws.MsgData, + offlinePushUserIDs []string) (userIDs []string, err error) { + + //todo local cache Obtain the difference set through local comparison. 
+ needOfflinePushUserIDs, err := c.conversationRpcClient.GetConversationOfflinePushUserIDs( + ctx, utils.GenGroupConversationID(groupID), offlinePushUserIDs) + if err != nil { + return nil, err + } + return needOfflinePushUserIDs, nil +} + +func (c *ConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) { + type AtTextElem struct { + Text string `json:"text,omitempty"` + AtUserList []string `json:"atUserList,omitempty"` + IsAtSelf bool `json:"isAtSelf"` + } + + opts = &options.Opts{Signal: &options.Signal{}} + if msg.OfflinePushInfo != nil { + opts.IOSBadgeCount = msg.OfflinePushInfo.IOSBadgeCount + opts.IOSPushSound = msg.OfflinePushInfo.IOSPushSound + opts.Ex = msg.OfflinePushInfo.Ex + } + + if msg.OfflinePushInfo != nil { + title = msg.OfflinePushInfo.Title + content = msg.OfflinePushInfo.Desc + } + if title == "" { + switch msg.ContentType { + case constant.Text: + fallthrough + case constant.Picture: + fallthrough + case constant.Voice: + fallthrough + case constant.Video: + fallthrough + case constant.File: + title = constant.ContentType2PushContent[int64(msg.ContentType)] + case constant.AtText: + ac := AtTextElem{} + _ = utils.JsonStringToStruct(string(msg.Content), &ac) + case constant.SignalingNotification: + title = constant.ContentType2PushContent[constant.SignalMsg] + default: + title = constant.ContentType2PushContent[constant.Common] + } + } + if content == "" { + content = title + } + return +} +func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID string, pushToUserIDs *[]string, msg *sdkws.MsgData) (err error) { + if len(*pushToUserIDs) == 0 { + *pushToUserIDs, err = c.groupLocalCache.GetGroupMemberIDs(ctx, groupID) + if err != nil { + return err + } + switch msg.ContentType { + case constant.MemberQuitNotification: + var tips sdkws.MemberQuitTips + if unmarshalNotificationElem(msg.Content, &tips) != nil { + return err + } + if err = c.DeleteMemberAndSetConversationSeq(ctx, groupID, []string{tips.QuitUser.UserID}); err != nil { + log.ZError(ctx, "MemberQuitNotification DeleteMemberAndSetConversationSeq", err, "groupID", groupID, "userID", tips.QuitUser.UserID) + } + *pushToUserIDs = append(*pushToUserIDs, tips.QuitUser.UserID) + case constant.MemberKickedNotification: + var tips sdkws.MemberKickedTips + if unmarshalNotificationElem(msg.Content, &tips) != nil { + return err + } + kickedUsers := utils.Slice(tips.KickedUserList, func(e *sdkws.GroupMemberFullInfo) string { return e.UserID }) + if err = c.DeleteMemberAndSetConversationSeq(ctx, groupID, kickedUsers); err != nil { + log.ZError(ctx, "MemberKickedNotification DeleteMemberAndSetConversationSeq", err, "groupID", groupID, "userIDs", kickedUsers) + } + + *pushToUserIDs = append(*pushToUserIDs, kickedUsers...) 
+ case constant.GroupDismissedNotification: + if msgprocessor.IsNotification(msgprocessor.GetConversationIDByMsg(msg)) { // 消息先到,通知后到 + var tips sdkws.GroupDismissedTips + if unmarshalNotificationElem(msg.Content, &tips) != nil { + return err + } + log.ZInfo(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs) + if len(config.Config.Manager.UserID) > 0 { + ctx = mcontext.WithOpUserIDContext(ctx, config.Config.Manager.UserID[0]) + } + defer func(groupID string) { + if err = c.groupRpcClient.DismissGroup(ctx, groupID); err != nil { + log.ZError(ctx, "DismissGroup Notification clear members", err, "groupID", groupID) + } + }(groupID) + } + } + } + return err +} + +func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { + conversationID := msgprocessor.GetConversationIDBySessionType(constant.SuperGroupChatType, groupID) + maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID) + if err != nil { + return err + } + return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq) +} + +func unmarshalNotificationElem(bytes []byte, t any) error { + var notification sdkws.NotificationElem + if err := json.Unmarshal(bytes, ¬ification); err != nil { + return err + } + + return json.Unmarshal([]byte(notification.Detail), t) +} + +func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgData) bool { + isOfflinePush := utils.GetSwitchFromOptions(msg.Options, constant.IsOfflinePush) + if !isOfflinePush { + return false + } + if msg.ContentType == constant.SignalingNotification { + return false + } + return true +} diff --git a/internal/push/push_rpc_server.go b/internal/push/push_rpc_server.go deleted file mode 100644 index 188ddc0e15..0000000000 --- a/internal/push/push_rpc_server.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package push - -import ( - "context" - "sync" - - "github.com/OpenIMSDK/tools/utils" - - "google.golang.org/grpc" - - "github.com/OpenIMSDK/protocol/constant" - pbpush "github.com/OpenIMSDK/protocol/push" - "github.com/OpenIMSDK/tools/discoveryregistry" - "github.com/OpenIMSDK/tools/log" - - "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache" - "github.com/openimsdk/open-im-server/v3/pkg/common/db/controller" - "github.com/openimsdk/open-im-server/v3/pkg/common/db/localcache" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" -) - -type pushServer struct { - pusher *Pusher -} - -func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error { - rdb, err := cache.NewRedis() - if err != nil { - return err - } - cacheModel := cache.NewMsgCacheModel(rdb) - offlinePusher := NewOfflinePusher(cacheModel) - database := controller.NewPushDatabase(cacheModel) - groupRpcClient := rpcclient.NewGroupRpcClient(client) - conversationRpcClient := rpcclient.NewConversationRpcClient(client) - msgRpcClient := rpcclient.NewMessageRpcClient(client) - pusher := NewPusher( - client, - offlinePusher, - database, - localcache.NewGroupLocalCache(&groupRpcClient), - localcache.NewConversationLocalCache(&conversationRpcClient), - &conversationRpcClient, - &groupRpcClient, - &msgRpcClient, - ) - var wg sync.WaitGroup - wg.Add(2) - go func() { - defer wg.Done() - pbpush.RegisterPushMsgServiceServer(server, &pushServer{ - pusher: pusher, - }) - }() - go func() { - defer wg.Done() - consumer := NewConsumer(pusher) - consumer.Start() - }() - wg.Wait() - return nil -} - -func (r *pushServer) PushMsg(ctx context.Context, pbData *pbpush.PushMsgReq) (resp *pbpush.PushMsgResp, err error) { - switch pbData.MsgData.SessionType { - case constant.SuperGroupChatType: - err = r.pusher.Push2SuperGroup(ctx, pbData.MsgData.GroupID, pbData.MsgData) - default: - var pushUserIDList []string - isSenderSync := utils.GetSwitchFromOptions(pbData.MsgData.Options, constant.IsSenderSync) - if !isSenderSync { - pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID) - } else { - pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID, pbData.MsgData.SendID) - } - err = r.pusher.Push2User(ctx, pushUserIDList, pbData.MsgData) - } - if err != nil { - if err != errNoOfflinePusher { - return nil, err - } else { - log.ZWarn(ctx, "offline push failed", err, "msg", pbData.String()) - } - } - return &pbpush.PushMsgResp{}, nil -} - -func (r *pushServer) DelUserPushToken( - ctx context.Context, - req *pbpush.DelUserPushTokenReq, -) (resp *pbpush.DelUserPushTokenResp, err error) { - if err = r.pusher.database.DelFcmToken(ctx, req.UserID, int(req.PlatformID)); err != nil { - return nil, err - } - return &pbpush.DelUserPushTokenResp{}, nil -} diff --git a/internal/push/push_to_client.go b/internal/push/push_to_client.go deleted file mode 100644 index ca90046058..0000000000 --- a/internal/push/push_to_client.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package push - -import ( - "context" - "encoding/json" - "errors" - "sync" - - "google.golang.org/grpc" - - "golang.org/x/sync/errgroup" - - "github.com/OpenIMSDK/protocol/constant" - "github.com/OpenIMSDK/protocol/conversation" - "github.com/OpenIMSDK/protocol/msggateway" - "github.com/OpenIMSDK/protocol/sdkws" - "github.com/OpenIMSDK/tools/discoveryregistry" - "github.com/OpenIMSDK/tools/log" - "github.com/OpenIMSDK/tools/mcontext" - "github.com/OpenIMSDK/tools/utils" - - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/dummy" - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/fcm" - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/getui" - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/jpush" - "github.com/openimsdk/open-im-server/v3/pkg/common/config" - "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache" - "github.com/openimsdk/open-im-server/v3/pkg/common/db/controller" - "github.com/openimsdk/open-im-server/v3/pkg/common/db/localcache" - "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" - "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" -) - -type Pusher struct { - database controller.PushDatabase - discov discoveryregistry.SvcDiscoveryRegistry - offlinePusher offlinepush.OfflinePusher - groupLocalCache *localcache.GroupLocalCache - conversationLocalCache *localcache.ConversationLocalCache - msgRpcClient *rpcclient.MessageRpcClient - conversationRpcClient *rpcclient.ConversationRpcClient - groupRpcClient *rpcclient.GroupRpcClient -} - -var errNoOfflinePusher = errors.New("no offlinePusher is configured") - -func NewPusher(discov discoveryregistry.SvcDiscoveryRegistry, offlinePusher offlinepush.OfflinePusher, database controller.PushDatabase, - groupLocalCache *localcache.GroupLocalCache, conversationLocalCache *localcache.ConversationLocalCache, - conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient, msgRpcClient *rpcclient.MessageRpcClient, -) *Pusher { - return &Pusher{ - discov: discov, - database: database, - offlinePusher: offlinePusher, - groupLocalCache: groupLocalCache, - conversationLocalCache: conversationLocalCache, - msgRpcClient: msgRpcClient, - conversationRpcClient: conversationRpcClient, - groupRpcClient: groupRpcClient, - } -} - -func NewOfflinePusher(cache cache.MsgModel) offlinepush.OfflinePusher { - var offlinePusher offlinepush.OfflinePusher - switch config.Config.Push.Enable { - case "getui": - offlinePusher = getui.NewClient(cache) - case "fcm": - offlinePusher = fcm.NewClient(cache) - case "jpush": - offlinePusher = jpush.NewClient() - default: - offlinePusher = dummy.NewClient() - } - return offlinePusher -} - -func (p *Pusher) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { - conevrsationID := msgprocessor.GetConversationIDBySessionType(constant.SuperGroupChatType, groupID) - maxSeq, err := p.msgRpcClient.GetConversationMaxSeq(ctx, conevrsationID) - if err != nil { - return err - } - return p.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conevrsationID, maxSeq) -} - -func (p *Pusher) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) error { - log.ZDebug(ctx, "Get msg from msg_transfer And push msg", 
"userIDs", userIDs, "msg", msg.String()) - if err := callbackOnlinePush(ctx, userIDs, msg); err != nil { - return err - } - // push - wsResults, err := p.GetConnsAndOnlinePush(ctx, msg, userIDs) - if err != nil { - return err - } - - isOfflinePush := utils.GetSwitchFromOptions(msg.Options, constant.IsOfflinePush) - log.ZDebug(ctx, "push_result", "ws push result", wsResults, "sendData", msg, "isOfflinePush", isOfflinePush, "push_to_userID", userIDs) - - if !isOfflinePush { - return nil - } - - for _, v := range wsResults { - if !v.OnlinePush && msg.SendID == v.UserID { - if err = callbackOfflinePush(ctx, userIDs, msg, &[]string{}); err != nil { - return err - } - - err = p.offlinePushMsg(ctx, msg.SendID, msg, []string{v.UserID}) - if err != nil { - return err - } - } - - } - return nil -} - -func (p *Pusher) UnmarshalNotificationElem(bytes []byte, t any) error { - var notification sdkws.NotificationElem - if err := json.Unmarshal(bytes, ¬ification); err != nil { - return err - } - - return json.Unmarshal([]byte(notification.Detail), t) -} - -/* -k8s deployment,offline push group messages function -*/ -func (p *Pusher) k8sOfflinePush2SuperGroup(ctx context.Context, groupID string, msg *sdkws.MsgData, wsResults []*msggateway.SingleMsgToUserResults) error { - - var needOfflinePushUserIDs []string - for _, v := range wsResults { - if !v.OnlinePush { - needOfflinePushUserIDs = append(needOfflinePushUserIDs, v.UserID) - } - } - if len(needOfflinePushUserIDs) > 0 { - var offlinePushUserIDs []string - err := callbackOfflinePush(ctx, needOfflinePushUserIDs, msg, &offlinePushUserIDs) - if err != nil { - return err - } - - if len(offlinePushUserIDs) > 0 { - needOfflinePushUserIDs = offlinePushUserIDs - } - if msg.ContentType != constant.SignalingNotification { - resp, err := p.conversationRpcClient.Client.GetConversationOfflinePushUserIDs( - ctx, - &conversation.GetConversationOfflinePushUserIDsReq{ConversationID: utils.GenGroupConversationID(groupID), UserIDs: needOfflinePushUserIDs}, - ) - if err != nil { - return err - } - if len(resp.UserIDs) > 0 { - err = p.offlinePushMsg(ctx, groupID, msg, resp.UserIDs) - if err != nil { - log.ZError(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg) - return err - } - } - } - - } - return nil -} -func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) { - log.ZDebug(ctx, "Get super group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID) - var pushToUserIDs []string - if err = callbackBeforeSuperGroupOnlinePush(ctx, groupID, msg, &pushToUserIDs); err != nil { - return err - } - - if len(pushToUserIDs) == 0 { - pushToUserIDs, err = p.groupLocalCache.GetGroupMemberIDs(ctx, groupID) - if err != nil { - return err - } - - switch msg.ContentType { - case constant.MemberQuitNotification: - var tips sdkws.MemberQuitTips - if p.UnmarshalNotificationElem(msg.Content, &tips) != nil { - return err - } - defer func(groupID string, userIDs []string) { - if err = p.DeleteMemberAndSetConversationSeq(ctx, groupID, userIDs); err != nil { - log.ZError(ctx, "MemberQuitNotification DeleteMemberAndSetConversationSeq", err, "groupID", groupID, "userIDs", userIDs) - } - }(groupID, []string{tips.QuitUser.UserID}) - pushToUserIDs = append(pushToUserIDs, tips.QuitUser.UserID) - case constant.MemberKickedNotification: - var tips sdkws.MemberKickedTips - if p.UnmarshalNotificationElem(msg.Content, &tips) != nil { - return err - } - kickedUsers := utils.Slice(tips.KickedUserList, func(e 
*sdkws.GroupMemberFullInfo) string { return e.UserID }) - defer func(groupID string, userIDs []string) { - if err = p.DeleteMemberAndSetConversationSeq(ctx, groupID, userIDs); err != nil { - log.ZError(ctx, "MemberKickedNotification DeleteMemberAndSetConversationSeq", err, "groupID", groupID, "userIDs", userIDs) - } - }(groupID, kickedUsers) - pushToUserIDs = append(pushToUserIDs, kickedUsers...) - case constant.GroupDismissedNotification: - if msgprocessor.IsNotification(msgprocessor.GetConversationIDByMsg(msg)) { // 消息先到,通知后到 - var tips sdkws.GroupDismissedTips - if p.UnmarshalNotificationElem(msg.Content, &tips) != nil { - return err - } - log.ZInfo(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(pushToUserIDs), "list", pushToUserIDs) - if len(config.Config.Manager.UserID) > 0 { - ctx = mcontext.WithOpUserIDContext(ctx, config.Config.Manager.UserID[0]) - } - defer func(groupID string) { - if err = p.groupRpcClient.DismissGroup(ctx, groupID); err != nil { - log.ZError(ctx, "DismissGroup Notification clear members", err, "groupID", groupID) - } - }(groupID) - } - } - } - - wsResults, err := p.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs) - if err != nil { - return err - } - - log.ZDebug(ctx, "get conn and online push success", "result", wsResults, "msg", msg) - isOfflinePush := utils.GetSwitchFromOptions(msg.Options, constant.IsOfflinePush) - if isOfflinePush && config.Config.Envs.Discovery == "k8s" { - return p.k8sOfflinePush2SuperGroup(ctx, groupID, msg, wsResults) - } - if isOfflinePush && config.Config.Envs.Discovery == "zookeeper" { - var ( - onlineSuccessUserIDs = []string{msg.SendID} - webAndPcBackgroundUserIDs []string - ) - - for _, v := range wsResults { - if v.OnlinePush && v.UserID != msg.SendID { - onlineSuccessUserIDs = append(onlineSuccessUserIDs, v.UserID) - } - - if v.OnlinePush { - continue - } - - if len(v.Resp) == 0 { - continue - } - - for _, singleResult := range v.Resp { - if singleResult.ResultCode != -2 { - continue - } - - isPC := constant.PlatformIDToName(int(singleResult.RecvPlatFormID)) == constant.TerminalPC - isWebID := singleResult.RecvPlatFormID == constant.WebPlatformID - - if isPC || isWebID { - webAndPcBackgroundUserIDs = append(webAndPcBackgroundUserIDs, v.UserID) - } - } - } - - needOfflinePushUserIDs := utils.DifferenceString(onlineSuccessUserIDs, pushToUserIDs) - - // Use offline push messaging - if len(needOfflinePushUserIDs) > 0 { - var offlinePushUserIDs []string - err = callbackOfflinePush(ctx, needOfflinePushUserIDs, msg, &offlinePushUserIDs) - if err != nil { - return err - } - - if len(offlinePushUserIDs) > 0 { - needOfflinePushUserIDs = offlinePushUserIDs - } - if msg.ContentType != constant.SignalingNotification { - resp, err := p.conversationRpcClient.Client.GetConversationOfflinePushUserIDs( - ctx, - &conversation.GetConversationOfflinePushUserIDsReq{ConversationID: utils.GenGroupConversationID(groupID), UserIDs: needOfflinePushUserIDs}, - ) - if err != nil { - return err - } - if len(resp.UserIDs) > 0 { - err = p.offlinePushMsg(ctx, groupID, msg, resp.UserIDs) - if err != nil { - log.ZError(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg) - return err - } - if _, err := p.GetConnsAndOnlinePush(ctx, msg, utils.IntersectString(resp.UserIDs, webAndPcBackgroundUserIDs)); err != nil { - log.ZError(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg, "userIDs", utils.IntersectString(needOfflinePushUserIDs, webAndPcBackgroundUserIDs)) - return err - } - } - } - - } - } - return nil -} - 
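The offline-push targeting in Push2SuperGroup above is set arithmetic over user-ID slices: take everyone the message was addressed to, remove everyone reached online, then intersect what is left with the conversation's offline-push list and, for the online re-push, the web/PC-background users. The patch delegates this to utils.DifferenceString and utils.IntersectString, whose exact signatures are not shown in these hunks, so the sketch below re-implements the assumed semantics with plain map-based helpers.

package main

import "fmt"

// difference returns the IDs in all that are not present in reached; this is the
// assumed behaviour of utils.DifferenceString(onlineSuccessUserIDs, pushToUserIDs).
func difference(reached, all []string) []string {
	seen := make(map[string]struct{}, len(reached))
	for _, id := range reached {
		seen[id] = struct{}{}
	}
	var out []string
	for _, id := range all {
		if _, ok := seen[id]; !ok {
			out = append(out, id)
		}
	}
	return out
}

// intersect returns the IDs present in both slices, the assumed behaviour of utils.IntersectString.
func intersect(a, b []string) []string {
	seen := make(map[string]struct{}, len(a))
	for _, id := range a {
		seen[id] = struct{}{}
	}
	var out []string
	for _, id := range b {
		if _, ok := seen[id]; ok {
			out = append(out, id)
		}
	}
	return out
}

func main() {
	pushToUserIDs := []string{"u1", "u2", "u3", "u4"}      // everyone the super-group message targets
	onlineSuccessUserIDs := []string{"sender", "u1", "u3"} // sender plus users reached via the gateway
	webAndPcBackgroundUserIDs := []string{"u2"}            // online on web/PC but backgrounded

	needOfflinePush := difference(onlineSuccessUserIDs, pushToUserIDs)
	fmt.Println("offline push candidates:", needOfflinePush) // [u2 u4]
	fmt.Println("re-push online (web/PC background):", intersect(needOfflinePush, webAndPcBackgroundUserIDs)) // [u2]
}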
-func (p *Pusher) k8sOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) { - var usersHost = make(map[string][]string) - for _, v := range pushToUserIDs { - tHost, err := p.discov.GetUserIdHashGatewayHost(ctx, v) - if err != nil { - log.ZError(ctx, "get msggateway hash error", err) - return nil, err - } - tUsers, tbl := usersHost[tHost] - if tbl { - tUsers = append(tUsers, v) - usersHost[tHost] = tUsers - } else { - usersHost[tHost] = []string{v} - } - } - log.ZDebug(ctx, "genUsers send hosts struct:", "usersHost", usersHost) - var usersConns = make(map[*grpc.ClientConn][]string) - for host, userIds := range usersHost { - tconn, _ := p.discov.GetConn(ctx, host) - usersConns[tconn] = userIds - } - var ( - mu sync.Mutex - wg = errgroup.Group{} - maxWorkers = config.Config.Push.MaxConcurrentWorkers - ) - if maxWorkers < 3 { - maxWorkers = 3 - } - wg.SetLimit(maxWorkers) - for conn, userIds := range usersConns { - tcon := conn - tuserIds := userIds - wg.Go(func() error { - input := &msggateway.OnlineBatchPushOneMsgReq{MsgData: msg, PushToUserIDs: tuserIds} - msgClient := msggateway.NewMsgGatewayClient(tcon) - reply, err := msgClient.SuperGroupOnlineBatchPushOneMsg(ctx, input) - if err != nil { - return nil - } - log.ZDebug(ctx, "push result", "reply", reply) - if reply != nil && reply.SinglePushResult != nil { - mu.Lock() - wsResults = append(wsResults, reply.SinglePushResult...) - mu.Unlock() - } - return nil - }) - } - _ = wg.Wait() - return wsResults, nil -} -func (p *Pusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) { - if config.Config.Envs.Discovery == "k8s" { - return p.k8sOnlinePush(ctx, msg, pushToUserIDs) - } - conns, err := p.discov.GetConns(ctx, config.Config.RpcRegisterName.OpenImMessageGatewayName) - log.ZDebug(ctx, "get gateway conn", "conn length", len(conns)) - if err != nil { - return nil, err - } - - var ( - mu sync.Mutex - wg = errgroup.Group{} - input = &msggateway.OnlineBatchPushOneMsgReq{MsgData: msg, PushToUserIDs: pushToUserIDs} - maxWorkers = config.Config.Push.MaxConcurrentWorkers - ) - - if maxWorkers < 3 { - maxWorkers = 3 - } - - wg.SetLimit(maxWorkers) - - // Online push message - for _, conn := range conns { - conn := conn // loop var safe - wg.Go(func() error { - msgClient := msggateway.NewMsgGatewayClient(conn) - reply, err := msgClient.SuperGroupOnlineBatchPushOneMsg(ctx, input) - if err != nil { - return nil - } - - log.ZDebug(ctx, "push result", "reply", reply) - if reply != nil && reply.SinglePushResult != nil { - mu.Lock() - wsResults = append(wsResults, reply.SinglePushResult...) 
- mu.Unlock() - } - - return nil - }) - } - - _ = wg.Wait() - - // always return nil - return wsResults, nil -} - -func (p *Pusher) offlinePushMsg(ctx context.Context, conversationID string, msg *sdkws.MsgData, offlinePushUserIDs []string) error { - title, content, opts, err := p.getOfflinePushInfos(conversationID, msg) - if err != nil { - return err - } - err = p.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts) - if err != nil { - prommetrics.MsgOfflinePushFailedCounter.Inc() - return err - } - return nil -} - -func (p *Pusher) GetOfflinePushOpts(msg *sdkws.MsgData) (opts *offlinepush.Opts, err error) { - opts = &offlinepush.Opts{Signal: &offlinepush.Signal{}} - // if msg.ContentType > constant.SignalingNotificationBegin && msg.ContentType < constant.SignalingNotificationEnd { - // req := &sdkws.SignalReq{} - // if err := proto.Unmarshal(msg.Content, req); err != nil { - // return nil, utils.Wrap(err, "") - // } - // switch req.Payload.(type) { - // case *sdkws.SignalReq_Invite, *sdkws.SignalReq_InviteInGroup: - // opts.Signal = &offlinepush.Signal{ClientMsgID: msg.ClientMsgID} - // } - // } - if msg.OfflinePushInfo != nil { - opts.IOSBadgeCount = msg.OfflinePushInfo.IOSBadgeCount - opts.IOSPushSound = msg.OfflinePushInfo.IOSPushSound - opts.Ex = msg.OfflinePushInfo.Ex - } - return opts, nil -} - -func (p *Pusher) getOfflinePushInfos(conversationID string, msg *sdkws.MsgData) (title, content string, opts *offlinepush.Opts, err error) { - if p.offlinePusher == nil { - err = errNoOfflinePusher - return - } - - type atContent struct { - Text string `json:"text"` - AtUserList []string `json:"atUserList"` - IsAtSelf bool `json:"isAtSelf"` - } - - opts, err = p.GetOfflinePushOpts(msg) - if err != nil { - return - } - - if msg.OfflinePushInfo != nil { - title = msg.OfflinePushInfo.Title - content = msg.OfflinePushInfo.Desc - } - if title == "" { - switch msg.ContentType { - case constant.Text: - fallthrough - case constant.Picture: - fallthrough - case constant.Voice: - fallthrough - case constant.Video: - fallthrough - case constant.File: - title = constant.ContentType2PushContent[int64(msg.ContentType)] - case constant.AtText: - ac := atContent{} - _ = utils.JsonStringToStruct(string(msg.Content), &ac) - if utils.IsContain(conversationID, ac.AtUserList) { - title = constant.ContentType2PushContent[constant.AtText] + constant.ContentType2PushContent[constant.Common] - } else { - title = constant.ContentType2PushContent[constant.GroupMsg] - } - case constant.SignalingNotification: - title = constant.ContentType2PushContent[constant.SignalMsg] - default: - title = constant.ContentType2PushContent[constant.Common] - } - } - if content == "" { - content = title - } - return -} diff --git a/internal/push/tools.go b/internal/push/tools.go deleted file mode 100644 index 3242767b16..0000000000 --- a/internal/push/tools.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
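GetConnsAndOnlinePush and k8sOnlinePush above share one pattern: fan the batch-push RPC out across gateway connections with golang.org/x/sync/errgroup, cap concurrency with SetLimit (floored at 3, mirroring how config.Config.Push.MaxConcurrentWorkers is handled), and append per-user results under a mutex while swallowing per-connection errors. A reduced, self-contained sketch of that pattern, with the msggateway client call replaced by a placeholder function:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

type pushResult struct {
	UserID     string
	OnlinePush bool
}

// fanOut calls push once per gateway connection with bounded concurrency and
// merges the results, mirroring the structure of the deleted GetConnsAndOnlinePush.
func fanOut(ctx context.Context, conns []string, push func(ctx context.Context, conn string) ([]pushResult, error), maxWorkers int) []pushResult {
	if maxWorkers < 3 {
		maxWorkers = 3 // same floor applied to MaxConcurrentWorkers above
	}
	var (
		mu      sync.Mutex
		results []pushResult
		wg      errgroup.Group
	)
	wg.SetLimit(maxWorkers)
	for _, conn := range conns {
		conn := conn // capture loop variable (pre-Go 1.22 semantics, as in the original)
		wg.Go(func() error {
			res, err := push(ctx, conn)
			if err != nil {
				return nil // per-connection errors are logged and swallowed in the original
			}
			mu.Lock()
			results = append(results, res...)
			mu.Unlock()
			return nil
		})
	}
	_ = wg.Wait()
	return results
}

func main() {
	conns := []string{"gateway-0", "gateway-1", "gateway-2"}
	out := fanOut(context.Background(), conns, func(ctx context.Context, conn string) ([]pushResult, error) {
		return []pushResult{{UserID: "u-" + conn, OnlinePush: true}}, nil
	}, 3)
	fmt.Println(out)
}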
- -package push - -import ( - "github.com/OpenIMSDK/protocol/constant" - "github.com/OpenIMSDK/protocol/sdkws" - "google.golang.org/protobuf/proto" -) - -func GetContent(msg *sdkws.MsgData) string { - if msg.ContentType >= constant.NotificationBegin && msg.ContentType <= constant.NotificationEnd { - var tips sdkws.TipsComm - _ = proto.Unmarshal(msg.Content, &tips) - content := tips.JsonDetail - return content - } else { - return string(msg.Content) - } -} diff --git a/internal/rpc/friend/friend.go b/internal/rpc/friend/friend.go index 84702f5481..9c76c6009e 100644 --- a/internal/rpc/friend/friend.go +++ b/internal/rpc/friend/friend.go @@ -16,7 +16,6 @@ package friend import ( "context" - "github.com/OpenIMSDK/tools/tx" "github.com/OpenIMSDK/protocol/sdkws" diff --git a/internal/rpc/msg/as_read.go b/internal/rpc/msg/as_read.go index cb292421e9..5a4f8b5f37 100644 --- a/internal/rpc/msg/as_read.go +++ b/internal/rpc/msg/as_read.go @@ -44,7 +44,7 @@ func (m *msgServer) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *m if err != nil { return nil, err } - conversations, err := m.Conversation.GetConversations(ctx, req.UserID, conversationIDs) + conversations, err := m.ConversationLocalCache.GetConversations(ctx, req.UserID, conversationIDs) if err != nil { return nil, err } @@ -107,7 +107,7 @@ func (m *msgServer) MarkMsgsAsRead( if hasReadSeq > maxSeq { return nil, errs.ErrArgs.Wrap("hasReadSeq must not be bigger than maxSeq") } - conversation, err := m.Conversation.GetConversation(ctx, req.UserID, req.ConversationID) + conversation, err := m.ConversationLocalCache.GetConversation(ctx, req.UserID, req.ConversationID) if err != nil { return } @@ -147,7 +147,7 @@ func (m *msgServer) MarkConversationAsRead( ctx context.Context, req *msg.MarkConversationAsReadReq, ) (resp *msg.MarkConversationAsReadResp, err error) { - conversation, err := m.Conversation.GetConversation(ctx, req.UserID, req.ConversationID) + conversation, err := m.ConversationLocalCache.GetConversation(ctx, req.UserID, req.ConversationID) if err != nil { return nil, err } diff --git a/internal/rpc/msg/revoke.go b/internal/rpc/msg/revoke.go index d7362d3392..8640524ecb 100644 --- a/internal/rpc/msg/revoke.go +++ b/internal/rpc/msg/revoke.go @@ -47,7 +47,7 @@ func (m *msgServer) RevokeMsg(ctx context.Context, req *msg.RevokeMsgReq) (*msg. if err := authverify.CheckAccessV3(ctx, req.UserID); err != nil { return nil, err } - user, err := m.User.GetUserInfo(ctx, req.UserID) + user, err := m.UserLocalCache.GetUserInfo(ctx, req.UserID) if err != nil { return nil, err } @@ -73,12 +73,7 @@ func (m *msgServer) RevokeMsg(ctx context.Context, req *msg.RevokeMsgReq) (*msg. 
} role = user.AppMangerLevel case constant.SuperGroupChatType: - members, err := m.Group.GetGroupMemberInfoMap( - ctx, - msgs[0].GroupID, - utils.Distinct([]string{req.UserID, msgs[0].SendID}), - true, - ) + members, err := m.GroupLocalCache.GetGroupMemberInfoMap(ctx, msgs[0].GroupID, utils.Distinct([]string{req.UserID, msgs[0].SendID})) if err != nil { return nil, err } diff --git a/internal/rpc/msg/send.go b/internal/rpc/msg/send.go index 630b74a4a1..20e3f85f13 100644 --- a/internal/rpc/msg/send.go +++ b/internal/rpc/msg/send.go @@ -98,7 +98,7 @@ func (m *msgServer) setConversationAtInfo(nctx context.Context, msg *sdkws.MsgDa } tagAll := utils.IsContain(constant.AtAllString, msg.AtUserIDList) if tagAll { - memberUserIDList, err := m.Group.GetGroupMemberIDs(ctx, msg.GroupID) + memberUserIDList, err := m.GroupLocalCache.GetGroupMemberIDs(ctx, msg.GroupID) if err != nil { log.ZWarn(ctx, "GetGroupMemberIDs", err) return @@ -144,6 +144,7 @@ func (m *msgServer) sendMsgNotification( } func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq) (resp *pbmsg.SendMsgResp, err error) { + log.ZDebug(ctx, "sendMsgSingleChat return") if err := m.messageVerification(ctx, req); err != nil { return nil, err } diff --git a/internal/rpc/msg/server.go b/internal/rpc/msg/server.go index 4854af459d..b4056b46e5 100644 --- a/internal/rpc/msg/server.go +++ b/internal/rpc/msg/server.go @@ -16,6 +16,7 @@ package msg import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/rpccache" "log" "net/http" _ "net/http/pprof" @@ -29,7 +30,6 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/db/controller" - "github.com/openimsdk/open-im-server/v3/pkg/common/db/localcache" "github.com/openimsdk/open-im-server/v3/pkg/common/db/unrelation" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" ) @@ -39,12 +39,11 @@ type ( msgServer struct { RegisterCenter discoveryregistry.SvcDiscoveryRegistry MsgDatabase controller.CommonMsgDatabase - Group *rpcclient.GroupRpcClient - User *rpcclient.UserRpcClient Conversation *rpcclient.ConversationRpcClient - friend *rpcclient.FriendRpcClient - GroupLocalCache *localcache.GroupLocalCache - ConversationLocalCache *localcache.ConversationLocalCache + UserLocalCache *rpccache.UserLocalCache + FriendLocalCache *rpccache.FriendLocalCache + GroupLocalCache *rpccache.GroupLocalCache + ConversationLocalCache *rpccache.ConversationLocalCache Handlers MessageInterceptorChain notificationSender *rpcclient.NotificationSender } @@ -84,15 +83,15 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e groupRpcClient := rpcclient.NewGroupRpcClient(client) friendRpcClient := rpcclient.NewFriendRpcClient(client) msgDatabase := controller.NewCommonMsgDatabase(msgDocModel, cacheModel) + s := &msgServer{ Conversation: &conversationClient, - User: &userRpcClient, - Group: &groupRpcClient, MsgDatabase: msgDatabase, RegisterCenter: client, - GroupLocalCache: localcache.NewGroupLocalCache(&groupRpcClient), - ConversationLocalCache: localcache.NewConversationLocalCache(&conversationClient), - friend: &friendRpcClient, + UserLocalCache: rpccache.NewUserLocalCache(userRpcClient, rdb), + GroupLocalCache: rpccache.NewGroupLocalCache(groupRpcClient, rdb), + ConversationLocalCache: rpccache.NewConversationLocalCache(conversationClient, rdb), + FriendLocalCache: rpccache.NewFriendLocalCache(friendRpcClient, rdb), } go func() { log.Println(http.ListenAndServe("0.0.0.0:6061", nil)) diff --git 
a/internal/rpc/msg/statistics.go b/internal/rpc/msg/statistics.go index ac09e3f69c..620e6c7b03 100644 --- a/internal/rpc/msg/statistics.go +++ b/internal/rpc/msg/statistics.go @@ -41,7 +41,7 @@ func (m *msgServer) GetActiveUser(ctx context.Context, req *msg.GetActiveUserReq var pbUsers []*msg.ActiveUser if len(users) > 0 { userIDs := utils.Slice(users, func(e *unrelation.UserCount) string { return e.UserID }) - userMap, err := m.User.GetUsersInfoMap(ctx, userIDs) + userMap, err := m.UserLocalCache.GetUsersInfoMap(ctx, userIDs) if err != nil { return nil, err } @@ -83,7 +83,7 @@ func (m *msgServer) GetActiveGroup(ctx context.Context, req *msg.GetActiveGroupR var pbgroups []*msg.ActiveGroup if len(groups) > 0 { groupIDs := utils.Slice(groups, func(e *unrelation.GroupCount) string { return e.GroupID }) - resp, err := m.Group.GetGroupInfos(ctx, groupIDs, false) + resp, err := m.GroupLocalCache.GetGroupInfos(ctx, groupIDs) if err != nil { return nil, err } diff --git a/internal/rpc/msg/sync_msg.go b/internal/rpc/msg/sync_msg.go index dbd8da4d80..404ca62188 100644 --- a/internal/rpc/msg/sync_msg.go +++ b/internal/rpc/msg/sync_msg.go @@ -37,7 +37,7 @@ func (m *msgServer) PullMessageBySeqs( resp.NotificationMsgs = make(map[string]*sdkws.PullMsgs) for _, seq := range req.SeqRanges { if !msgprocessor.IsNotification(seq.ConversationID) { - conversation, err := m.Conversation.GetConversation(ctx, req.UserID, seq.ConversationID) + conversation, err := m.ConversationLocalCache.GetConversation(ctx, req.UserID, seq.ConversationID) if err != nil { log.ZError(ctx, "GetConversation error", err, "conversationID", seq.ConversationID) continue @@ -140,7 +140,7 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq } } if len(sendIDs) != 0 { - sendInfos, err := m.User.GetUsersInfo(ctx, sendIDs) + sendInfos, err := m.UserLocalCache.GetUsersInfo(ctx, sendIDs) if err != nil { return nil, err } @@ -149,7 +149,7 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq } } if len(recvIDs) != 0 { - recvInfos, err := m.User.GetUsersInfo(ctx, recvIDs) + recvInfos, err := m.UserLocalCache.GetUsersInfo(ctx, recvIDs) if err != nil { return nil, err } @@ -158,7 +158,7 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq } } if len(groupIDs) != 0 { - groupInfos, err := m.Group.GetGroupInfos(ctx, groupIDs, true) + groupInfos, err := m.GroupLocalCache.GetGroupInfos(ctx, groupIDs) if err != nil { return nil, err } diff --git a/internal/rpc/msg/verify.go b/internal/rpc/msg/verify.go index 2837cb944c..50b7718cec 100644 --- a/internal/rpc/msg/verify.go +++ b/internal/rpc/msg/verify.go @@ -16,6 +16,7 @@ package msg import ( "context" + "github.com/OpenIMSDK/tools/log" "math/rand" "strconv" "time" @@ -58,7 +59,7 @@ func (m *msgServer) messageVerification(ctx context.Context, data *msg.SendMsgRe data.MsgData.ContentType >= constant.NotificationBegin { return nil } - black, err := m.friend.IsBlocked(ctx, data.MsgData.SendID, data.MsgData.RecvID) + black, err := m.FriendLocalCache.IsBlack(ctx, data.MsgData.SendID, data.MsgData.RecvID) if err != nil { return err } @@ -66,7 +67,7 @@ func (m *msgServer) messageVerification(ctx context.Context, data *msg.SendMsgRe return errs.ErrBlockedByPeer.Wrap() } if *config.Config.MessageVerify.FriendVerify { - friend, err := m.friend.IsFriend(ctx, data.MsgData.SendID, data.MsgData.RecvID) + friend, err := m.FriendLocalCache.IsFriend(ctx, data.MsgData.SendID, data.MsgData.RecvID) if err != nil { return err } @@ -77,7 
+78,7 @@ func (m *msgServer) messageVerification(ctx context.Context, data *msg.SendMsgRe } return nil case constant.SuperGroupChatType: - groupInfo, err := m.Group.GetGroupInfoCache(ctx, data.MsgData.GroupID) + groupInfo, err := m.GroupLocalCache.GetGroupInfo(ctx, data.MsgData.GroupID) if err != nil { return err } @@ -95,17 +96,17 @@ func (m *msgServer) messageVerification(ctx context.Context, data *msg.SendMsgRe data.MsgData.ContentType >= constant.NotificationBegin { return nil } - // memberIDs, err := m.GroupLocalCache.GetGroupMemberIDs(ctx, data.MsgData.GroupID) - // if err != nil { - // return err - // } - // if !utils.IsContain(data.MsgData.SendID, memberIDs) { - // return errs.ErrNotInGroupYet.Wrap() - // } + memberIDs, err := m.GroupLocalCache.GetGroupMemberIDMap(ctx, data.MsgData.GroupID) + if err != nil { + return err + } + if _, ok := memberIDs[data.MsgData.SendID]; !ok { + return errs.ErrNotInGroupYet.Wrap() + } - groupMemberInfo, err := m.Group.GetGroupMemberCache(ctx, data.MsgData.GroupID, data.MsgData.SendID) + groupMemberInfo, err := m.GroupLocalCache.GetGroupMember(ctx, data.MsgData.GroupID, data.MsgData.SendID) if err != nil { - if err == errs.ErrRecordNotFound { + if errs.ErrRecordNotFound.Is(err) { return errs.ErrNotInGroupYet.Wrap(err.Error()) } return err @@ -186,7 +187,8 @@ func (m *msgServer) modifyMessageByUserMessageReceiveOpt( sessionType int, pb *msg.SendMsgReq, ) (bool, error) { - opt, err := m.User.GetUserGlobalMsgRecvOpt(ctx, userID) + defer log.ZDebug(ctx, "modifyMessageByUserMessageReceiveOpt return") + opt, err := m.UserLocalCache.GetUserGlobalMsgRecvOpt(ctx, userID) if err != nil { return false, err } @@ -202,7 +204,7 @@ func (m *msgServer) modifyMessageByUserMessageReceiveOpt( return true, nil } // conversationID := utils.GetConversationIDBySessionType(conversationID, sessionType) - singleOpt, err := m.Conversation.GetSingleConversationRecvMsgOpt(ctx, userID, conversationID) + singleOpt, err := m.ConversationLocalCache.GetSingleConversationRecvMsgOpt(ctx, userID, conversationID) if errs.ErrRecordNotFound.Is(err) { return true, nil } else if err != nil { diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index e09c3299a1..0be5f97f48 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -243,6 +243,7 @@ func (s *userServer) GetPaginationUsers(ctx context.Context, req *pbuser.GetPagi return nil, err } return &pbuser.GetPaginationUsersResp{Total: int32(total), Users: convert.UsersDB2Pb(users)}, err + } } @@ -389,7 +390,7 @@ func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil } -// ProcessUserCommandAdd user general function add +// ProcessUserCommandAdd user general function add. func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.ProcessUserCommandAddReq) (*pbuser.ProcessUserCommandAddResp, error) { err := authverify.CheckAccessV3(ctx, req.UserID) if err != nil { @@ -420,7 +421,7 @@ func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.Proc return &pbuser.ProcessUserCommandAddResp{}, nil } -// ProcessUserCommandDelete user general function delete +// ProcessUserCommandDelete user general function delete. 
func (s *userServer) ProcessUserCommandDelete(ctx context.Context, req *pbuser.ProcessUserCommandDeleteReq) (*pbuser.ProcessUserCommandDeleteResp, error) { err := authverify.CheckAccessV3(ctx, req.UserID) if err != nil { @@ -439,10 +440,11 @@ func (s *userServer) ProcessUserCommandDelete(ctx context.Context, req *pbuser.P if err != nil { return nil, err } + return &pbuser.ProcessUserCommandDeleteResp{}, nil } -// ProcessUserCommandUpdate user general function update +// ProcessUserCommandUpdate user general function update. func (s *userServer) ProcessUserCommandUpdate(ctx context.Context, req *pbuser.ProcessUserCommandUpdateReq) (*pbuser.ProcessUserCommandUpdateResp, error) { err := authverify.CheckAccessV3(ctx, req.UserID) if err != nil { @@ -475,6 +477,7 @@ func (s *userServer) ProcessUserCommandUpdate(ctx context.Context, req *pbuser.P } func (s *userServer) ProcessUserCommandGet(ctx context.Context, req *pbuser.ProcessUserCommandGetReq) (*pbuser.ProcessUserCommandGetResp, error) { + err := authverify.CheckAccessV3(ctx, req.UserID) if err != nil { return nil, err diff --git a/internal/tools/msg.go b/internal/tools/msg.go index 97bb2988e7..30006670e5 100644 --- a/internal/tools/msg.go +++ b/internal/tools/msg.go @@ -79,7 +79,7 @@ func InitMsgTool() (*MsgTool, error) { if err != nil { return nil, err } - discov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials())) + discov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) userDB, err := mgo.NewUserMongo(mongo.GetDatabase()) if err != nil { return nil, err diff --git a/pkg/common/cachekey/black.go b/pkg/common/cachekey/black.go new file mode 100644 index 0000000000..527ad14dc2 --- /dev/null +++ b/pkg/common/cachekey/black.go @@ -0,0 +1,15 @@ +package cachekey + +const ( + BlackIDsKey = "BLACK_IDS:" + IsBlackKey = "IS_BLACK:" // local cache +) + +func GetBlackIDsKey(ownerUserID string) string { + return BlackIDsKey + ownerUserID + +} + +func GetIsBlackIDsKey(possibleBlackUserID, userID string) string { + return IsBlackKey + userID + "-" + possibleBlackUserID +} diff --git a/pkg/common/cachekey/conversation.go b/pkg/common/cachekey/conversation.go new file mode 100644 index 0000000000..665ca11c6c --- /dev/null +++ b/pkg/common/cachekey/conversation.go @@ -0,0 +1,44 @@ +package cachekey + +const ( + ConversationKey = "CONVERSATION:" + ConversationIDsKey = "CONVERSATION_IDS:" + ConversationIDsHashKey = "CONVERSATION_IDS_HASH:" + ConversationHasReadSeqKey = "CONVERSATION_HAS_READ_SEQ:" + RecvMsgOptKey = "RECV_MSG_OPT:" + SuperGroupRecvMsgNotNotifyUserIDsKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS:" + SuperGroupRecvMsgNotNotifyUserIDsHashKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS_HASH:" + ConversationNotReceiveMessageUserIDsKey = "CONVERSATION_NOT_RECEIVE_MESSAGE_USER_IDS:" +) + +func GetConversationKey(ownerUserID, conversationID string) string { + return ConversationKey + ownerUserID + ":" + conversationID +} + +func GetConversationIDsKey(ownerUserID string) string { + return ConversationIDsKey + ownerUserID +} + +func GetSuperGroupRecvNotNotifyUserIDsKey(groupID string) string { + return SuperGroupRecvMsgNotNotifyUserIDsKey + groupID +} + +func GetRecvMsgOptKey(ownerUserID, conversationID string) string { + return RecvMsgOptKey + ownerUserID + ":" + conversationID +} + +func GetSuperGroupRecvNotNotifyUserIDsHashKey(groupID string) string { + return 
SuperGroupRecvMsgNotNotifyUserIDsHashKey + groupID +} + +func GetConversationHasReadSeqKey(ownerUserID, conversationID string) string { + return ConversationHasReadSeqKey + ownerUserID + ":" + conversationID +} + +func GetConversationNotReceiveMessageUserIDsKey(conversationID string) string { + return ConversationNotReceiveMessageUserIDsKey + conversationID +} + +func GetUserConversationIDsHashKey(ownerUserID string) string { + return ConversationIDsHashKey + ownerUserID +} diff --git a/pkg/common/cachekey/friend.go b/pkg/common/cachekey/friend.go new file mode 100644 index 0000000000..f37c9da37d --- /dev/null +++ b/pkg/common/cachekey/friend.go @@ -0,0 +1,24 @@ +package cachekey + +const ( + FriendIDsKey = "FRIEND_IDS:" + TwoWayFriendsIDsKey = "COMMON_FRIENDS_IDS:" + FriendKey = "FRIEND_INFO:" + IsFriendKey = "IS_FRIEND:" // local cache key +) + +func GetFriendIDsKey(ownerUserID string) string { + return FriendIDsKey + ownerUserID +} + +func GetTwoWayFriendsIDsKey(ownerUserID string) string { + return TwoWayFriendsIDsKey + ownerUserID +} + +func GetFriendKey(ownerUserID, friendUserID string) string { + return FriendKey + ownerUserID + "-" + friendUserID +} + +func GetIsFriendKey(possibleFriendUserID, userID string) string { + return IsFriendKey + possibleFriendUserID + "-" + userID +} diff --git a/pkg/common/cachekey/group.go b/pkg/common/cachekey/group.go new file mode 100644 index 0000000000..1dcf0ffcef --- /dev/null +++ b/pkg/common/cachekey/group.go @@ -0,0 +1,45 @@ +package cachekey + +import ( + "strconv" + "time" +) + +const ( + groupExpireTime = time.Second * 60 * 60 * 12 + GroupInfoKey = "GROUP_INFO:" + GroupMemberIDsKey = "GROUP_MEMBER_IDS:" + GroupMembersHashKey = "GROUP_MEMBERS_HASH2:" + GroupMemberInfoKey = "GROUP_MEMBER_INFO:" + JoinedGroupsKey = "JOIN_GROUPS_KEY:" + GroupMemberNumKey = "GROUP_MEMBER_NUM_CACHE:" + GroupRoleLevelMemberIDsKey = "GROUP_ROLE_LEVEL_MEMBER_IDS:" +) + +func GetGroupInfoKey(groupID string) string { + return GroupInfoKey + groupID +} + +func GetJoinedGroupsKey(userID string) string { + return JoinedGroupsKey + userID +} + +func GetGroupMembersHashKey(groupID string) string { + return GroupMembersHashKey + groupID +} + +func GetGroupMemberIDsKey(groupID string) string { + return GroupMemberIDsKey + groupID +} + +func GetGroupMemberInfoKey(groupID, userID string) string { + return GroupMemberInfoKey + groupID + "-" + userID +} + +func GetGroupMemberNumKey(groupID string) string { + return GroupMemberNumKey + groupID +} + +func GetGroupRoleLevelMemberIDsKey(groupID string, roleLevel int32) string { + return GroupRoleLevelMemberIDsKey + groupID + "-" + strconv.Itoa(int(roleLevel)) +} diff --git a/pkg/common/cachekey/user.go b/pkg/common/cachekey/user.go new file mode 100644 index 0000000000..3fb877e222 --- /dev/null +++ b/pkg/common/cachekey/user.go @@ -0,0 +1,14 @@ +package cachekey + +const ( + UserInfoKey = "USER_INFO:" + UserGlobalRecvMsgOptKey = "USER_GLOBAL_RECV_MSG_OPT_KEY:" +) + +func GetUserInfoKey(userID string) string { + return UserInfoKey + userID +} + +func GetUserGlobalRecvMsgOptKey(userID string) string { + return UserGlobalRecvMsgOptKey + userID +} diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index 9696e9367e..20eea04645 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -16,6 +16,7 @@ package config import ( "bytes" + "time" "github.com/OpenIMSDK/tools/discoveryregistry" "gopkg.in/yaml.v3" @@ -258,6 +259,8 @@ type configStruct struct { FriendVerify *bool `yaml:"friendVerify"` } 
`yaml:"messageVerify"` + LocalCache localCache `yaml:"localCache"` + IOSPush struct { PushSound string `yaml:"pushSound"` BadgeCount bool `yaml:"badgeCount"` @@ -370,6 +373,33 @@ type notification struct { ConversationSetPrivate NotificationConf `yaml:"conversationSetPrivate"` } +type LocalCache struct { + Topic string `yaml:"topic"` + SlotNum int `yaml:"slotNum"` + SlotSize int `yaml:"slotSize"` + SuccessExpire int `yaml:"successExpire"` // second + FailedExpire int `yaml:"failedExpire"` // second +} + +func (l LocalCache) Failed() time.Duration { + return time.Second * time.Duration(l.FailedExpire) +} + +func (l LocalCache) Success() time.Duration { + return time.Second * time.Duration(l.SuccessExpire) +} + +func (l LocalCache) Enable() bool { + return l.Topic != "" && l.SlotNum > 0 && l.SlotSize > 0 +} + +type localCache struct { + User LocalCache `yaml:"user"` + Group LocalCache `yaml:"group"` + Friend LocalCache `yaml:"friend"` + Conversation LocalCache `yaml:"conversation"` +} + func (c *configStruct) GetServiceNames() []string { return []string{ c.RpcRegisterName.OpenImUserName, diff --git a/pkg/common/config/parse_test.go b/pkg/common/config/parse_test.go index 38171ec088..30cb270fef 100644 --- a/pkg/common/config/parse_test.go +++ b/pkg/common/config/parse_test.go @@ -16,6 +16,8 @@ package config import ( _ "embed" + "fmt" + "gopkg.in/yaml.v3" "reflect" "testing" @@ -115,3 +117,13 @@ func TestInitConfig(t *testing.T) { }) } } + +func TestName(t *testing.T) { + Config.LocalCache.Friend.Topic = "friend" + Config.LocalCache.Friend.SlotNum = 500 + Config.LocalCache.Friend.SlotSize = 20000 + + data, _ := yaml.Marshal(&Config) + + fmt.Println(string(data)) +} diff --git a/pkg/common/db/cache/black.go b/pkg/common/db/cache/black.go index d1abe945ca..8328306ff7 100644 --- a/pkg/common/db/cache/black.go +++ b/pkg/common/db/cache/black.go @@ -16,6 +16,9 @@ package cache import ( "context" + "github.com/OpenIMSDK/tools/log" + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "time" "github.com/dtm-labs/rockscache" @@ -52,11 +55,15 @@ func NewBlackCacheRedis( options rockscache.Options, ) BlackCache { rcClient := rockscache.NewClient(rdb, options) - + mc := NewMetaCacheRedis(rcClient) + b := config.Config.LocalCache.Friend + log.ZDebug(context.Background(), "black local cache init", "Topic", b.Topic, "SlotNum", b.SlotNum, "SlotSize", b.SlotSize, "enable", b.Enable()) + mc.SetTopic(b.Topic) + mc.SetRawRedisClient(rdb) return &BlackCacheRedis{ expireTime: blackExpireTime, rcClient: rcClient, - metaCache: NewMetaCacheRedis(rcClient), + metaCache: mc, blackDB: blackDB, } } @@ -66,12 +73,12 @@ func (b *BlackCacheRedis) NewCache() BlackCache { expireTime: b.expireTime, rcClient: b.rcClient, blackDB: b.blackDB, - metaCache: NewMetaCacheRedis(b.rcClient, b.metaCache.GetPreDelKeys()...), + metaCache: b.Copy(), } } func (b *BlackCacheRedis) getBlackIDsKey(ownerUserID string) string { - return blackIDsKey + ownerUserID + return cachekey.GetBlackIDsKey(ownerUserID) } func (b *BlackCacheRedis) GetBlackIDs(ctx context.Context, userID string) (blackIDs []string, err error) { diff --git a/pkg/common/db/cache/config.go b/pkg/common/db/cache/config.go new file mode 100644 index 0000000000..52ece95f79 --- /dev/null +++ b/pkg/common/db/cache/config.go @@ -0,0 +1,66 @@ +package cache + +import ( + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "strings" + "sync" +) + 
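The new LocalCache/localCache config types above drive the cache-invalidation topics used throughout the rest of this patch: a per-resource section counts as enabled only when topic, slotNum and slotSize are all set. A small standalone sketch of how such a section round-trips through the declared yaml tags, in the spirit of the TestName test above; the topic name and sizes are illustrative values, not shipped defaults.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type LocalCache struct {
	Topic         string `yaml:"topic"`
	SlotNum       int    `yaml:"slotNum"`
	SlotSize      int    `yaml:"slotSize"`
	SuccessExpire int    `yaml:"successExpire"` // seconds
	FailedExpire  int    `yaml:"failedExpire"`  // seconds
}

// Enable mirrors the method added in config.go: the section is active only when
// topic and slot dimensions are all configured.
func (l LocalCache) Enable() bool { return l.Topic != "" && l.SlotNum > 0 && l.SlotSize > 0 }

type localCache struct {
	User         LocalCache `yaml:"user"`
	Group        LocalCache `yaml:"group"`
	Friend       LocalCache `yaml:"friend"`
	Conversation LocalCache `yaml:"conversation"`
}

func main() {
	cfg := localCache{
		Friend: LocalCache{Topic: "friend", SlotNum: 500, SlotSize: 20000, SuccessExpire: 300, FailedExpire: 5},
	}
	out, _ := yaml.Marshal(cfg)
	fmt.Println(string(out)) // block-style YAML; sections left zero keep Enable() == false
	fmt.Println("friend enabled:", cfg.Friend.Enable(), "user enabled:", cfg.User.Enable())
}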
+var ( + once sync.Once + subscribe map[string][]string +) + +func getPublishKey(topic string, key []string) []string { + if topic == "" || len(key) == 0 { + return nil + } + once.Do(func() { + list := []struct { + Local config.LocalCache + Keys []string + }{ + { + Local: config.Config.LocalCache.User, + Keys: []string{cachekey.UserInfoKey, cachekey.UserGlobalRecvMsgOptKey}, + }, + { + Local: config.Config.LocalCache.Group, + Keys: []string{cachekey.GroupMemberIDsKey, cachekey.GroupInfoKey, cachekey.GroupMemberInfoKey}, + }, + { + Local: config.Config.LocalCache.Friend, + Keys: []string{cachekey.FriendIDsKey, cachekey.BlackIDsKey}, + }, + { + Local: config.Config.LocalCache.Conversation, + Keys: []string{cachekey.ConversationIDsKey, cachekey.ConversationKey}, + }, + } + subscribe = make(map[string][]string) + for _, v := range list { + if v.Local.Enable() { + subscribe[v.Local.Topic] = v.Keys + } + } + }) + prefix, ok := subscribe[topic] + if !ok { + return nil + } + res := make([]string, 0, len(key)) + for _, k := range key { + var exist bool + for _, p := range prefix { + if strings.HasPrefix(k, p) { + exist = true + break + } + } + if exist { + res = append(res, k) + } + } + return res +} diff --git a/pkg/common/db/cache/conversation.go b/pkg/common/db/cache/conversation.go index a7018bc18c..61489ff923 100644 --- a/pkg/common/db/cache/conversation.go +++ b/pkg/common/db/cache/conversation.go @@ -17,6 +17,9 @@ package cache import ( "context" "errors" + "github.com/OpenIMSDK/tools/log" + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "math/big" "strings" "time" @@ -30,14 +33,14 @@ import ( ) const ( - conversationKey = "CONVERSATION:" - conversationIDsKey = "CONVERSATION_IDS:" - conversationIDsHashKey = "CONVERSATION_IDS_HASH:" - conversationHasReadSeqKey = "CONVERSATION_HAS_READ_SEQ:" - recvMsgOptKey = "RECV_MSG_OPT:" - superGroupRecvMsgNotNotifyUserIDsKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS:" - superGroupRecvMsgNotNotifyUserIDsHashKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS_HASH:" - conversationNotReceiveMessageUserIDsKey = "CONVERSATION_NOT_RECEIVE_MESSAGE_USER_IDS:" + //conversationKey = "CONVERSATION:" + //conversationIDsKey = "CONVERSATION_IDS:" + //conversationIDsHashKey = "CONVERSATION_IDS_HASH:" + //conversationHasReadSeqKey = "CONVERSATION_HAS_READ_SEQ:" + //recvMsgOptKey = "RECV_MSG_OPT:" + //superGroupRecvMsgNotNotifyUserIDsKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS:" + //superGroupRecvMsgNotNotifyUserIDsHashKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS_HASH:" + //conversationNotReceiveMessageUserIDsKey = "CONVERSATION_NOT_RECEIVE_MESSAGE_USER_IDS:" conversationExpireTime = time.Second * 60 * 60 * 12 ) @@ -84,10 +87,14 @@ type ConversationCache interface { func NewConversationRedis(rdb redis.UniversalClient, opts rockscache.Options, db relationtb.ConversationModelInterface) ConversationCache { rcClient := rockscache.NewClient(rdb, opts) - + mc := NewMetaCacheRedis(rcClient) + c := config.Config.LocalCache.Conversation + log.ZDebug(context.Background(), "black local cache init", "Topic", c.Topic, "SlotNum", c.SlotNum, "SlotSize", c.SlotSize, "enable", c.Enable()) + mc.SetTopic(c.Topic) + mc.SetRawRedisClient(rdb) return &ConversationRedisCache{ rcClient: rcClient, - metaCache: NewMetaCacheRedis(rcClient), + metaCache: mc, conversationDB: db, expireTime: conversationExpireTime, } @@ -118,38 +125,42 @@ type ConversationRedisCache struct { func (c *ConversationRedisCache) NewCache() 
ConversationCache { return &ConversationRedisCache{ rcClient: c.rcClient, - metaCache: NewMetaCacheRedis(c.rcClient, c.metaCache.GetPreDelKeys()...), + metaCache: c.Copy(), conversationDB: c.conversationDB, expireTime: c.expireTime, } } func (c *ConversationRedisCache) getConversationKey(ownerUserID, conversationID string) string { - return conversationKey + ownerUserID + ":" + conversationID + return cachekey.GetConversationKey(ownerUserID, conversationID) } func (c *ConversationRedisCache) getConversationIDsKey(ownerUserID string) string { - return conversationIDsKey + ownerUserID + return cachekey.GetConversationIDsKey(ownerUserID) } func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsKey(groupID string) string { - return superGroupRecvMsgNotNotifyUserIDsKey + groupID + return cachekey.GetSuperGroupRecvNotNotifyUserIDsKey(groupID) } func (c *ConversationRedisCache) getRecvMsgOptKey(ownerUserID, conversationID string) string { - return recvMsgOptKey + ownerUserID + ":" + conversationID + return cachekey.GetRecvMsgOptKey(ownerUserID, conversationID) } func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsHashKey(groupID string) string { - return superGroupRecvMsgNotNotifyUserIDsHashKey + groupID + return cachekey.GetSuperGroupRecvNotNotifyUserIDsHashKey(groupID) } func (c *ConversationRedisCache) getConversationHasReadSeqKey(ownerUserID, conversationID string) string { - return conversationHasReadSeqKey + ownerUserID + ":" + conversationID + return cachekey.GetConversationHasReadSeqKey(ownerUserID, conversationID) } func (c *ConversationRedisCache) getConversationNotReceiveMessageUserIDsKey(conversationID string) string { - return conversationNotReceiveMessageUserIDsKey + conversationID + return cachekey.GetConversationNotReceiveMessageUserIDsKey(conversationID) +} + +func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID string) string { + return cachekey.GetUserConversationIDsHashKey(ownerUserID) } func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) { @@ -169,10 +180,6 @@ func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) Conversat return cache } -func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID string) string { - return conversationIDsHashKey + ownerUserID -} - func (c *ConversationRedisCache) GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error) { return getCache( ctx, diff --git a/pkg/common/db/cache/friend.go b/pkg/common/db/cache/friend.go index a2b60d48fb..d09d00312a 100644 --- a/pkg/common/db/cache/friend.go +++ b/pkg/common/db/cache/friend.go @@ -16,6 +16,9 @@ package cache import ( "context" + "github.com/OpenIMSDK/tools/log" + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "time" "github.com/dtm-labs/rockscache" @@ -27,10 +30,10 @@ import ( ) const ( - friendExpireTime = time.Second * 60 * 60 * 12 - friendIDsKey = "FRIEND_IDS:" - TwoWayFriendsIDsKey = "COMMON_FRIENDS_IDS:" - friendKey = "FRIEND_INFO:" + friendExpireTime = time.Second * 60 * 60 * 12 + //friendIDsKey = "FRIEND_IDS:" + //TwoWayFriendsIDsKey = "COMMON_FRIENDS_IDS:" + //friendKey = "FRIEND_INFO:" ) // FriendCache is an interface for caching friend-related data. 
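getPublishKey above routes only the relevant deleted keys to a topic by matching them against that topic's key prefixes, built once from config.Config.LocalCache and the cachekey constants. A standalone sketch of the same prefix filter with the topic-to-prefix table hard-coded, since the config package is not available outside the server:

package main

import (
	"fmt"
	"strings"
)

// subscribe maps an invalidation topic to the cache-key prefixes it cares about,
// using the prefix values declared in pkg/common/cachekey above.
var subscribe = map[string][]string{
	"friend":       {"FRIEND_IDS:", "BLACK_IDS:"},
	"conversation": {"CONVERSATION_IDS:", "CONVERSATION:"},
}

// publishKeys keeps only the deleted keys that match one of the topic's prefixes.
func publishKeys(topic string, keys []string) []string {
	prefixes, ok := subscribe[topic]
	if !ok || len(keys) == 0 {
		return nil
	}
	var out []string
	for _, k := range keys {
		for _, p := range prefixes {
			if strings.HasPrefix(k, p) {
				out = append(out, k)
				break
			}
		}
	}
	return out
}

func main() {
	deleted := []string{"FRIEND_IDS:u100", "FRIEND_INFO:u100-u200", "BLACK_IDS:u100"}
	fmt.Println(publishKeys("friend", deleted)) // [FRIEND_IDS:u100 BLACK_IDS:u100]
}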
@@ -60,8 +63,13 @@ type FriendCacheRedis struct { func NewFriendCacheRedis(rdb redis.UniversalClient, friendDB relationtb.FriendModelInterface, options rockscache.Options) FriendCache { rcClient := rockscache.NewClient(rdb, options) + mc := NewMetaCacheRedis(rcClient) + f := config.Config.LocalCache.Friend + log.ZDebug(context.Background(), "friend local cache init", "Topic", f.Topic, "SlotNum", f.SlotNum, "SlotSize", f.SlotSize, "enable", f.Enable()) + mc.SetTopic(f.Topic) + mc.SetRawRedisClient(rdb) return &FriendCacheRedis{ - metaCache: NewMetaCacheRedis(rcClient), + metaCache: mc, friendDB: friendDB, expireTime: friendExpireTime, rcClient: rcClient, @@ -72,7 +80,7 @@ func NewFriendCacheRedis(rdb redis.UniversalClient, friendDB relationtb.FriendMo func (f *FriendCacheRedis) NewCache() FriendCache { return &FriendCacheRedis{ rcClient: f.rcClient, - metaCache: NewMetaCacheRedis(f.rcClient, f.metaCache.GetPreDelKeys()...), + metaCache: f.Copy(), friendDB: f.friendDB, expireTime: f.expireTime, } @@ -80,17 +88,17 @@ func (f *FriendCacheRedis) NewCache() FriendCache { // getFriendIDsKey returns the key for storing friend IDs in the cache. func (f *FriendCacheRedis) getFriendIDsKey(ownerUserID string) string { - return friendIDsKey + ownerUserID + return cachekey.GetFriendIDsKey(ownerUserID) } // getTwoWayFriendsIDsKey returns the key for storing two-way friend IDs in the cache. func (f *FriendCacheRedis) getTwoWayFriendsIDsKey(ownerUserID string) string { - return TwoWayFriendsIDsKey + ownerUserID + return cachekey.GetTwoWayFriendsIDsKey(ownerUserID) } // getFriendKey returns the key for storing friend info in the cache. func (f *FriendCacheRedis) getFriendKey(ownerUserID, friendUserID string) string { - return friendKey + ownerUserID + "-" + friendUserID + return cachekey.GetFriendKey(ownerUserID, friendUserID) } // GetFriendIDs retrieves friend IDs from the cache or the database if not found. diff --git a/pkg/common/db/cache/group.go b/pkg/common/db/cache/group.go index 57fcf1a9b4..71f5d06fd3 100644 --- a/pkg/common/db/cache/group.go +++ b/pkg/common/db/cache/group.go @@ -17,7 +17,8 @@ package cache import ( "context" "fmt" - "strconv" + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "time" "github.com/OpenIMSDK/protocol/constant" @@ -34,15 +35,14 @@ import ( ) const ( - groupExpireTime = time.Second * 60 * 60 * 12 - groupInfoKey = "GROUP_INFO:" - groupMemberIDsKey = "GROUP_MEMBER_IDS:" - groupMembersHashKey = "GROUP_MEMBERS_HASH2:" - groupMemberInfoKey = "GROUP_MEMBER_INFO:" - //groupOwnerInfoKey = "GROUP_OWNER_INFO:". 
- joinedGroupsKey = "JOIN_GROUPS_KEY:" - groupMemberNumKey = "GROUP_MEMBER_NUM_CACHE:" - groupRoleLevelMemberIDsKey = "GROUP_ROLE_LEVEL_MEMBER_IDS:" + groupExpireTime = time.Second * 60 * 60 * 12 + //groupInfoKey = "GROUP_INFO:" + //groupMemberIDsKey = "GROUP_MEMBER_IDS:" + //groupMembersHashKey = "GROUP_MEMBERS_HASH2:" + //groupMemberInfoKey = "GROUP_MEMBER_INFO:" + //joinedGroupsKey = "JOIN_GROUPS_KEY:" + //groupMemberNumKey = "GROUP_MEMBER_NUM_CACHE:" + //groupRoleLevelMemberIDsKey = "GROUP_ROLE_LEVEL_MEMBER_IDS:" ) type GroupHash interface { @@ -105,12 +105,16 @@ func NewGroupCacheRedis( opts rockscache.Options, ) GroupCache { rcClient := rockscache.NewClient(rdb, opts) - + mc := NewMetaCacheRedis(rcClient) + g := config.Config.LocalCache.Group + mc.SetTopic(g.Topic) + log.ZDebug(context.Background(), "group local cache init", "Topic", g.Topic, "SlotNum", g.SlotNum, "SlotSize", g.SlotSize, "enable", g.Enable()) + mc.SetRawRedisClient(rdb) return &GroupCacheRedis{ rcClient: rcClient, expireTime: groupExpireTime, groupDB: groupDB, groupMemberDB: groupMemberDB, groupRequestDB: groupRequestDB, groupHash: hashCode, - metaCache: NewMetaCacheRedis(rcClient), + metaCache: mc, } } @@ -121,36 +125,36 @@ func (g *GroupCacheRedis) NewCache() GroupCache { groupDB: g.groupDB, groupMemberDB: g.groupMemberDB, groupRequestDB: g.groupRequestDB, - metaCache: NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...), + metaCache: g.Copy(), } } func (g *GroupCacheRedis) getGroupInfoKey(groupID string) string { - return groupInfoKey + groupID + return cachekey.GetGroupInfoKey(groupID) } func (g *GroupCacheRedis) getJoinedGroupsKey(userID string) string { - return joinedGroupsKey + userID + return cachekey.GetJoinedGroupsKey(userID) } func (g *GroupCacheRedis) getGroupMembersHashKey(groupID string) string { - return groupMembersHashKey + groupID + return cachekey.GetGroupMembersHashKey(groupID) } func (g *GroupCacheRedis) getGroupMemberIDsKey(groupID string) string { - return groupMemberIDsKey + groupID + return cachekey.GetGroupMemberIDsKey(groupID) } func (g *GroupCacheRedis) getGroupMemberInfoKey(groupID, userID string) string { - return groupMemberInfoKey + groupID + "-" + userID + return cachekey.GetGroupMemberInfoKey(groupID, userID) } func (g *GroupCacheRedis) getGroupMemberNumKey(groupID string) string { - return groupMemberNumKey + groupID + return cachekey.GetGroupMemberNumKey(groupID) } func (g *GroupCacheRedis) getGroupRoleLevelMemberIDsKey(groupID string, roleLevel int32) string { - return groupRoleLevelMemberIDsKey + groupID + "-" + strconv.Itoa(int(roleLevel)) + return cachekey.GetGroupRoleLevelMemberIDsKey(groupID, roleLevel) } func (g *GroupCacheRedis) GetGroupIndex(group *relationtb.GroupModel, keys []string) (int, error) { diff --git a/pkg/common/db/cache/meta_cache.go b/pkg/common/db/cache/meta_cache.go index 4bc2a046aa..86ade0b681 100644 --- a/pkg/common/db/cache/meta_cache.go +++ b/pkg/common/db/cache/meta_cache.go @@ -18,6 +18,7 @@ import ( "context" "encoding/json" "errors" + "github.com/redis/go-redis/v9" "time" "github.com/OpenIMSDK/tools/mw/specialerror" @@ -44,6 +45,9 @@ type metaCache interface { AddKeys(keys ...string) ClearKeys() GetPreDelKeys() []string + SetTopic(topic string) + SetRawRedisClient(cli redis.UniversalClient) + Copy() metaCache } func NewMetaCacheRedis(rcClient *rockscache.Client, keys ...string) metaCache { @@ -51,10 +55,36 @@ func NewMetaCacheRedis(rcClient *rockscache.Client, keys ...string) metaCache { } type metaCacheRedis struct { + topic string 
rcClient *rockscache.Client keys []string maxRetryTimes int retryInterval time.Duration + redisClient redis.UniversalClient +} + +func (m *metaCacheRedis) Copy() metaCache { + var keys []string + if len(m.keys) > 0 { + keys = make([]string, 0, len(m.keys)*2) + keys = append(keys, m.keys...) + } + return &metaCacheRedis{ + topic: m.topic, + rcClient: m.rcClient, + keys: keys, + maxRetryTimes: m.maxRetryTimes, + retryInterval: m.retryInterval, + redisClient: redisClient, + } +} + +func (m *metaCacheRedis) SetTopic(topic string) { + m.topic = topic +} + +func (m *metaCacheRedis) SetRawRedisClient(cli redis.UniversalClient) { + m.redisClient = cli } func (m *metaCacheRedis) ExecDel(ctx context.Context, distinct ...bool) error { @@ -62,7 +92,7 @@ func (m *metaCacheRedis) ExecDel(ctx context.Context, distinct ...bool) error { m.keys = utils.Distinct(m.keys) } if len(m.keys) > 0 { - log.ZDebug(ctx, "delete cache", "keys", m.keys) + log.ZDebug(ctx, "delete cache", "topic", m.topic, "keys", m.keys) for _, key := range m.keys { for i := 0; i < m.maxRetryTimes; i++ { if err := m.rcClient.TagAsDeleted(key); err != nil { @@ -72,31 +102,18 @@ func (m *metaCacheRedis) ExecDel(ctx context.Context, distinct ...bool) error { } break } - - //retryTimes := 0 - //for { - // m.rcClient.TagAsDeleted() - // if err := m.rcClient.TagAsDeletedBatch2(ctx, []string{key}); err != nil { - // if retryTimes >= m.maxRetryTimes { - // err = errs.ErrInternalServer.Wrap( - // fmt.Sprintf( - // "delete cache error: %v, keys: %v, retry times %d, please check redis server", - // err, - // key, - // retryTimes, - // ), - // ) - // log.ZWarn(ctx, "delete cache failed, please handle keys", err, "keys", key) - // return err - // } - // retryTimes++ - // } else { - // break - // } - //} + } + if pk := getPublishKey(m.topic, m.keys); len(pk) > 0 { + data, err := json.Marshal(pk) + if err != nil { + log.ZError(ctx, "keys json marshal failed", err, "topic", m.topic, "keys", pk) + } else { + if err := m.redisClient.Publish(ctx, m.topic, string(data)).Err(); err != nil { + log.ZError(ctx, "redis publish cache delete error", err, "topic", m.topic, "keys", pk) + } + } } } - return nil } diff --git a/pkg/common/db/cache/msg.go b/pkg/common/db/cache/msg.go index 5cd3cb22c5..768e75a27d 100644 --- a/pkg/common/db/cache/msg.go +++ b/pkg/common/db/cache/msg.go @@ -17,6 +17,7 @@ package cache import ( "context" "errors" + "github.com/dtm-labs/rockscache" "strconv" "time" @@ -44,12 +45,12 @@ const ( conversationUserMinSeq = "CON_USER_MIN_SEQ:" hasReadSeq = "HAS_READ_SEQ:" - appleDeviceToken = "DEVICE_TOKEN" - getuiToken = "GETUI_TOKEN" - getuiTaskID = "GETUI_TASK_ID" - signalCache = "SIGNAL_CACHE:" - signalListCache = "SIGNAL_LIST_CACHE:" - FCM_TOKEN = "FCM_TOKEN:" + //appleDeviceToken = "DEVICE_TOKEN" + getuiToken = "GETUI_TOKEN" + getuiTaskID = "GETUI_TASK_ID" + //signalCache = "SIGNAL_CACHE:" + //signalListCache = "SIGNAL_LIST_CACHE:" + FCM_TOKEN = "FCM_TOKEN:" messageCache = "MESSAGE_CACHE:" messageDelUserList = "MESSAGE_DEL_USER_LIST:" @@ -128,7 +129,8 @@ type MsgModel interface { } func NewMsgCacheModel(client redis.UniversalClient) MsgModel { - return &msgCache{rdb: client} + rcClient := rockscache.NewClient(client, rockscache.NewDefaultOptions()) + return &msgCache{metaCache: NewMetaCacheRedis(rcClient), rdb: client} } type msgCache struct { @@ -148,6 +150,10 @@ func (c *msgCache) getHasReadSeqKey(conversationID string, userID string) string return hasReadSeq + userID + ":" + conversationID } +func (c *msgCache) 
getConversationUserMinSeqKey(conversationID, userID string) string { + return conversationUserMinSeq + conversationID + "u:" + userID +} + func (c *msgCache) setSeq(ctx context.Context, conversationID string, seq int64, getkey func(conversationID string) string) error { return utils.Wrap1(c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err()) } @@ -209,10 +215,6 @@ func (c *msgCache) GetMinSeq(ctx context.Context, conversationID string) (int64, return c.getSeq(ctx, conversationID, c.getMinSeqKey) } -func (c *msgCache) getConversationUserMinSeqKey(conversationID, userID string) string { - return conversationUserMinSeq + conversationID + "u:" + userID -} - func (c *msgCache) GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) { return utils.Wrap2(c.rdb.Get(ctx, c.getConversationUserMinSeqKey(conversationID, userID)).Int64()) } diff --git a/pkg/common/db/cache/user.go b/pkg/common/db/cache/user.go index 979bd06e4c..c18f2af25f 100644 --- a/pkg/common/db/cache/user.go +++ b/pkg/common/db/cache/user.go @@ -18,6 +18,8 @@ import ( "context" "encoding/json" "errors" + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "hash/crc32" "strconv" "time" @@ -36,8 +38,8 @@ import ( ) const ( - userExpireTime = time.Second * 60 * 60 * 12 - userInfoKey = "USER_INFO:" + userExpireTime = time.Second * 60 * 60 * 12 + //userInfoKey = "USER_INFO:" userGlobalRecvMsgOptKey = "USER_GLOBAL_RECV_MSG_OPT_KEY:" olineStatusKey = "ONLINE_STATUS:" userOlineStatusExpireTime = time.Second * 60 * 60 * 24 @@ -72,7 +74,11 @@ func NewUserCacheRedis( options rockscache.Options, ) UserCache { rcClient := rockscache.NewClient(rdb, options) - + mc := NewMetaCacheRedis(rcClient) + u := config.Config.LocalCache.User + log.ZDebug(context.Background(), "user local cache init", "Topic", u.Topic, "SlotNum", u.SlotNum, "SlotSize", u.SlotSize, "enable", u.Enable()) + mc.SetTopic(u.Topic) + mc.SetRawRedisClient(rdb) return &UserCacheRedis{ rdb: rdb, metaCache: NewMetaCacheRedis(rcClient), @@ -85,7 +91,7 @@ func NewUserCacheRedis( func (u *UserCacheRedis) NewCache() UserCache { return &UserCacheRedis{ rdb: u.rdb, - metaCache: NewMetaCacheRedis(u.rcClient, u.metaCache.GetPreDelKeys()...), + metaCache: u.Copy(), userDB: u.userDB, expireTime: u.expireTime, rcClient: u.rcClient, @@ -93,18 +99,17 @@ func (u *UserCacheRedis) NewCache() UserCache { } func (u *UserCacheRedis) getUserInfoKey(userID string) string { - return userInfoKey + userID + return cachekey.GetUserInfoKey(userID) } func (u *UserCacheRedis) getUserGlobalRecvMsgOptKey(userID string) string { - return userGlobalRecvMsgOptKey + userID + return cachekey.GetUserGlobalRecvMsgOptKey(userID) } func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userInfo *relationtb.UserModel, err error) { return getCache(ctx, u.rcClient, u.getUserInfoKey(userID), u.expireTime, func(ctx context.Context) (*relationtb.UserModel, error) { return u.userDB.Take(ctx, userID) - }, - ) + }) } func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*relationtb.UserModel, error) { diff --git a/pkg/common/db/controller/msg_test.go b/pkg/common/db/controller/msg_test.go index cfb969b3e6..70c055bf31 100644 --- a/pkg/common/db/controller/msg_test.go +++ b/pkg/common/db/controller/msg_test.go @@ -146,7 +146,7 @@ func Test_BatchInsertChat2DB(t *testing.T) { func GetDB() *commonMsgDatabase { config.Config.Mongo.Address = []string{"203.56.175.233:37017"} // 
config.Config.Mongo.Timeout = 60 - config.Config.Mongo.Database = "openIM_v3" + config.Config.Mongo.Database = "openim_v3" // config.Config.Mongo.Source = "admin" config.Config.Mongo.Username = "root" config.Config.Mongo.Password = "openIM123" diff --git a/pkg/common/db/controller/user.go b/pkg/common/db/controller/user.go index 78ac5a7010..8ba1c01d31 100644 --- a/pkg/common/db/controller/user.go +++ b/pkg/common/db/controller/user.go @@ -142,12 +142,12 @@ func (u *userDatabase) Find(ctx context.Context, userIDs []string) (users []*rel return u.cache.GetUsersInfo(ctx, userIDs) } -// Find userInfo By Nickname +// Find userInfo By Nickname. func (u *userDatabase) FindByNickname(ctx context.Context, nickname string) (users []*relation.UserModel, err error) { return u.userDB.TakeByNickname(ctx, nickname) } -// Find notificationAccouts +// Find notificationAccounts. func (u *userDatabase) FindNotification(ctx context.Context, level int64) (users []*relation.UserModel, err error) { return u.userDB.TakeNotification(ctx, level) } diff --git a/pkg/common/db/localcache/conversation.go b/pkg/common/db/localcache/conversation.go deleted file mode 100644 index c40bcdbce6..0000000000 --- a/pkg/common/db/localcache/conversation.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -package localcache - -import ( - "context" - "sync" - - "github.com/OpenIMSDK/protocol/conversation" - - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" -) - -type ConversationLocalCache struct { - lock sync.Mutex - superGroupRecvMsgNotNotifyUserIDs map[string]Hash - conversationIDs map[string]Hash - client *rpcclient.ConversationRpcClient -} - -type Hash struct { - hash uint64 - ids []string -} - -func NewConversationLocalCache(client *rpcclient.ConversationRpcClient) *ConversationLocalCache { - return &ConversationLocalCache{ - superGroupRecvMsgNotNotifyUserIDs: make(map[string]Hash), - conversationIDs: make(map[string]Hash), - client: client, - } -} - -func (g *ConversationLocalCache) GetRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) ([]string, error) { - resp, err := g.client.Client.GetRecvMsgNotNotifyUserIDs(ctx, &conversation.GetRecvMsgNotNotifyUserIDsReq{ - GroupID: groupID, - }) - if err != nil { - return nil, err - } - return resp.UserIDs, nil -} - -func (g *ConversationLocalCache) GetConversationIDs(ctx context.Context, userID string) ([]string, error) { - resp, err := g.client.Client.GetUserConversationIDsHash(ctx, &conversation.GetUserConversationIDsHashReq{ - OwnerUserID: userID, - }) - if err != nil { - return nil, err - } - - g.lock.Lock() - hash, ok := g.conversationIDs[userID] - g.lock.Unlock() - - if !ok || hash.hash != resp.Hash { - conversationIDsResp, err := g.client.Client.GetConversationIDs(ctx, &conversation.GetConversationIDsReq{ - UserID: userID, - }) - if err != nil { - return nil, err - } - - g.lock.Lock() - defer g.lock.Unlock() - g.conversationIDs[userID] = Hash{ - hash: resp.Hash, - ids: conversationIDsResp.ConversationIDs, - } - - return conversationIDsResp.ConversationIDs, nil - } - - return hash.ids, nil -} diff --git a/pkg/common/db/localcache/doc.go b/pkg/common/db/localcache/doc.go deleted file mode 100644 index d349373eee..0000000000 --- a/pkg/common/db/localcache/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package localcache // import "github.com/openimsdk/open-im-server/v3/pkg/common/db/localcache" diff --git a/pkg/common/db/localcache/group.go b/pkg/common/db/localcache/group.go deleted file mode 100644 index 4958d91eee..0000000000 --- a/pkg/common/db/localcache/group.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package localcache - -import ( - "context" - "sync" - - "github.com/OpenIMSDK/protocol/group" - "github.com/OpenIMSDK/tools/errs" - - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" -) - -type GroupLocalCache struct { - lock sync.Mutex - cache map[string]GroupMemberIDsHash - client *rpcclient.GroupRpcClient -} - -type GroupMemberIDsHash struct { - memberListHash uint64 - userIDs []string -} - -func NewGroupLocalCache(client *rpcclient.GroupRpcClient) *GroupLocalCache { - return &GroupLocalCache{ - cache: make(map[string]GroupMemberIDsHash, 0), - client: client, - } -} - -func (g *GroupLocalCache) GetGroupMemberIDs(ctx context.Context, groupID string) ([]string, error) { - resp, err := g.client.Client.GetGroupAbstractInfo(ctx, &group.GetGroupAbstractInfoReq{ - GroupIDs: []string{groupID}, - }) - if err != nil { - return nil, err - } - if len(resp.GroupAbstractInfos) < 1 { - return nil, errs.ErrGroupIDNotFound - } - - g.lock.Lock() - localHashInfo, ok := g.cache[groupID] - if ok && localHashInfo.memberListHash == resp.GroupAbstractInfos[0].GroupMemberListHash { - g.lock.Unlock() - return localHashInfo.userIDs, nil - } - g.lock.Unlock() - - groupMembersResp, err := g.client.Client.GetGroupMemberUserIDs(ctx, &group.GetGroupMemberUserIDsReq{ - GroupID: groupID, - }) - if err != nil { - return nil, err - } - - g.lock.Lock() - defer g.lock.Unlock() - g.cache[groupID] = GroupMemberIDsHash{ - memberListHash: resp.GroupAbstractInfos[0].GroupMemberListHash, - userIDs: groupMembersResp.UserIDs, - } - return g.cache[groupID].userIDs, nil -} diff --git a/pkg/common/db/localcache/meta_local_cache.go b/pkg/common/db/localcache/meta_local_cache.go deleted file mode 100644 index ed9389c27f..0000000000 --- a/pkg/common/db/localcache/meta_local_cache.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package localcache diff --git a/pkg/common/db/s3/cos/cos.go b/pkg/common/db/s3/cos/cos.go index 7d2c0befe9..a82ffe6709 100644 --- a/pkg/common/db/s3/cos/cos.go +++ b/pkg/common/db/s3/cos/cos.go @@ -36,9 +36,9 @@ import ( ) const ( - minPartSize = 1024 * 1024 * 1 // 1MB - maxPartSize = 1024 * 1024 * 1024 * 5 // 5GB - maxNumSize = 1000 + minPartSize int64 = 1024 * 1024 * 1 // 1MB + maxPartSize int64 = 1024 * 1024 * 1024 * 5 // 5GB + maxNumSize int64 = 1000 ) const ( @@ -133,7 +133,7 @@ func (c *Cos) PartSize(ctx context.Context, size int64) (int64, error) { return 0, errors.New("size must be greater than 0") } if size > maxPartSize*maxNumSize { - return 0, fmt.Errorf("size must be less than %db", maxPartSize*maxNumSize) + return 0, fmt.Errorf("COS size must be less than the maximum allowed limit") } if size <= minPartSize*maxNumSize { return minPartSize, nil diff --git a/pkg/common/db/s3/minio/minio.go b/pkg/common/db/s3/minio/minio.go index 7dfe35b513..5a615dcfd3 100644 --- a/pkg/common/db/s3/minio/minio.go +++ b/pkg/common/db/s3/minio/minio.go @@ -45,9 +45,9 @@ const ( ) const ( - minPartSize = 1024 * 1024 * 5 // 1MB - maxPartSize = 1024 * 1024 * 1024 * 5 // 5GB - maxNumSize = 10000 + minPartSize int64 = 1024 * 1024 * 5 // 5MB + maxPartSize int64 = 1024 * 1024 * 1024 * 5 // 5GB + maxNumSize int64 = 10000 ) const ( @@ -240,7 +240,7 @@ func (m *Minio) PartSize(ctx context.Context, size int64) (int64, error) { return 0, errors.New("size must be greater than 0") } if size > maxPartSize*maxNumSize { - return 0, fmt.Errorf("size must be less than %db", maxPartSize*maxNumSize) + return 0, fmt.Errorf("MINIO size must be less than the maximum allowed limit") } if size <= minPartSize*maxNumSize { return minPartSize, nil diff --git a/pkg/common/db/s3/oss/oss.go b/pkg/common/db/s3/oss/oss.go index 8fa2a538e8..0bba97ee78 100644 --- a/pkg/common/db/s3/oss/oss.go +++ b/pkg/common/db/s3/oss/oss.go @@ -37,9 +37,9 @@ import ( ) const ( - minPartSize = 1024 * 1024 * 1 // 1MB - maxPartSize = 1024 * 1024 * 1024 * 5 // 5GB - maxNumSize = 10000 + minPartSize int64 = 1024 * 1024 * 1 // 1MB + maxPartSize int64 = 1024 * 1024 * 1024 * 5 // 5GB + maxNumSize int64 = 10000 ) const ( @@ -141,7 +141,7 @@ func (o *OSS) PartSize(ctx context.Context, size int64) (int64, error) { return 0, errors.New("size must be greater than 0") } if size > maxPartSize*maxNumSize { - return 0, fmt.Errorf("size must be less than %db", maxPartSize*maxNumSize) + return 0, fmt.Errorf("OSS size must be less than the maximum allowed limit") } if size <= minPartSize*maxNumSize { return minPartSize, nil diff --git a/pkg/common/db/s3/s3.go b/pkg/common/db/s3/s3.go index 0352004b58..d3dd90ae98 100644 --- a/pkg/common/db/s3/s3.go +++ b/pkg/common/db/s3/s3.go @@ -24,7 +24,7 @@ import ( type PartLimit struct { MinPartSize int64 `json:"minPartSize"` MaxPartSize int64 `json:"maxPartSize"` - MaxNumSize int `json:"maxNumSize"` + MaxNumSize int64 `json:"maxNumSize"` } type InitiateMultipartUploadResult struct { diff --git a/pkg/common/discoveryregister/direct/directResolver.go b/pkg/common/discoveryregister/direct/directResolver.go new file mode 100644 index 0000000000..285f551127 --- /dev/null +++ b/pkg/common/discoveryregister/direct/directResolver.go @@ -0,0 +1,81 @@ +package direct + +import ( + "context" + "github.com/OpenIMSDK/tools/log" + "google.golang.org/grpc/resolver" + "math/rand" + "strings" +) + +const ( + slashSeparator = "/" + // EndpointSepChar is the separator char in endpoints.
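+ // For example, a resolver target of the form "direct:///127.0.0.1:10110,127.0.0.1:10120" (addresses shown here are only illustrative) is split on this character in Build below.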
+ EndpointSepChar = ',' + + subsetSize = 32 + scheme = "direct" +) + +type ResolverDirect struct { +} + +func NewResolverDirect() *ResolverDirect { + return &ResolverDirect{} +} + +func (rd *ResolverDirect) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) ( + resolver.Resolver, error) { + log.ZDebug(context.Background(), "Build", "target", target) + endpoints := strings.FieldsFunc(GetEndpoints(target), func(r rune) bool { + return r == EndpointSepChar + }) + endpoints = subset(endpoints, subsetSize) + addrs := make([]resolver.Address, 0, len(endpoints)) + + for _, val := range endpoints { + addrs = append(addrs, resolver.Address{ + Addr: val, + }) + } + if err := cc.UpdateState(resolver.State{ + Addresses: addrs, + }); err != nil { + return nil, err + } + + return &nopResolver{cc: cc}, nil +} +func init() { + resolver.Register(&ResolverDirect{}) +} +func (rd *ResolverDirect) Scheme() string { + return scheme // return your custom scheme name +} + +// GetEndpoints returns the endpoints from the given target. +func GetEndpoints(target resolver.Target) string { + return strings.Trim(target.URL.Path, slashSeparator) +} +func subset(set []string, sub int) []string { + rand.Shuffle(len(set), func(i, j int) { + set[i], set[j] = set[j], set[i] + }) + if len(set) <= sub { + return set + } + + return set[:sub] +} + +type nopResolver struct { + cc resolver.ClientConn +} + +func (n nopResolver) ResolveNow(options resolver.ResolveNowOptions) { + +} + +func (n nopResolver) Close() { + +} diff --git a/pkg/common/discoveryregister/direct/directconn.go b/pkg/common/discoveryregister/direct/directconn.go new file mode 100644 index 0000000000..3eaa6fa194 --- /dev/null +++ b/pkg/common/discoveryregister/direct/directconn.go @@ -0,0 +1,154 @@ +package direct + +import ( + "context" + "errors" + "fmt" + "github.com/OpenIMSDK/tools/errs" + config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +type ServiceAddresses map[string][]int + +func getServiceAddresses() ServiceAddresses { + return ServiceAddresses{ + config2.Config.RpcRegisterName.OpenImUserName: config2.Config.RpcPort.OpenImUserPort, + config2.Config.RpcRegisterName.OpenImFriendName: config2.Config.RpcPort.OpenImFriendPort, + config2.Config.RpcRegisterName.OpenImMsgName: config2.Config.RpcPort.OpenImMessagePort, + config2.Config.RpcRegisterName.OpenImMessageGatewayName: config2.Config.LongConnSvr.OpenImMessageGatewayPort, + config2.Config.RpcRegisterName.OpenImGroupName: config2.Config.RpcPort.OpenImGroupPort, + config2.Config.RpcRegisterName.OpenImAuthName: config2.Config.RpcPort.OpenImAuthPort, + config2.Config.RpcRegisterName.OpenImPushName: config2.Config.RpcPort.OpenImPushPort, + config2.Config.RpcRegisterName.OpenImConversationName: config2.Config.RpcPort.OpenImConversationPort, + config2.Config.RpcRegisterName.OpenImThirdName: config2.Config.RpcPort.OpenImThirdPort, + } +} + +type ConnDirect struct { + additionalOpts []grpc.DialOption + currentServiceAddress string + conns map[string][]*grpc.ClientConn + resolverDirect *ResolverDirect +} + +func (cd *ConnDirect) GetClientLocalConns() map[string][]*grpc.ClientConn { + return nil +} + +func (cd *ConnDirect) GetUserIdHashGatewayHost(ctx context.Context, userId string) (string, error) { + return "", nil +} + +func (cd *ConnDirect) Register(serviceName, host string, port int, opts ...grpc.DialOption) error { + return nil +} + +func (cd *ConnDirect) UnRegister() error { + return 
nil +} + +func (cd *ConnDirect) CreateRpcRootNodes(serviceNames []string) error { + return nil +} + +func (cd *ConnDirect) RegisterConf2Registry(key string, conf []byte) error { + return nil +} + +func (cd *ConnDirect) GetConfFromRegistry(key string) ([]byte, error) { + return nil, nil +} + +func (cd *ConnDirect) Close() { + +} + +func NewConnDirect() (*ConnDirect, error) { + return &ConnDirect{ + conns: make(map[string][]*grpc.ClientConn), + resolverDirect: NewResolverDirect(), + }, nil +} + +func (cd *ConnDirect) GetConns(ctx context.Context, + serviceName string, opts ...grpc.DialOption) ([]*grpc.ClientConn, error) { + + if conns, exists := cd.conns[serviceName]; exists { + return conns, nil + } + ports := getServiceAddresses()[serviceName] + var connections []*grpc.ClientConn + for _, port := range ports { + conn, err := cd.dialServiceWithoutResolver(ctx, fmt.Sprintf(config2.Config.Rpc.ListenIP+":%d", port), append(cd.additionalOpts, opts...)...) + if err != nil { + fmt.Printf("connect to port %d failed,serviceName %s, IP %s\n", port, serviceName, config2.Config.Rpc.ListenIP) + } + connections = append(connections, conn) + } + + if len(connections) == 0 { + return nil, fmt.Errorf("no connections found for service: %s", serviceName) + } + return connections, nil +} + +func (cd *ConnDirect) GetConn(ctx context.Context, serviceName string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + // Get service addresses + addresses := getServiceAddresses() + address, ok := addresses[serviceName] + if !ok { + return nil, errs.Wrap(errors.New("unknown service name"), "serviceName", serviceName) + } + var result string + for _, addr := range address { + if result != "" { + result = result + "," + fmt.Sprintf(config2.Config.Rpc.ListenIP+":%d", addr) + } else { + result = fmt.Sprintf(config2.Config.Rpc.ListenIP+":%d", addr) + } + } + // Try to dial a new connection + conn, err := cd.dialService(ctx, result, append(cd.additionalOpts, opts...)...) + if err != nil { + return nil, errs.Wrap(err, "address", result) + } + + // Store the new connection + cd.conns[serviceName] = append(cd.conns[serviceName], conn) + return conn, nil +} + +func (cd *ConnDirect) GetSelfConnTarget() string { + return cd.currentServiceAddress +} + +func (cd *ConnDirect) AddOption(opts ...grpc.DialOption) { + cd.additionalOpts = append(cd.additionalOpts, opts...) +} + +func (cd *ConnDirect) CloseConn(conn *grpc.ClientConn) { + if conn != nil { + conn.Close() + } +} + +func (cd *ConnDirect) dialService(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + options := append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.DialContext(ctx, cd.resolverDirect.Scheme()+":///"+address, options...) + + if err != nil { + return nil, err + } + return conn, nil +} +func (cd *ConnDirect) dialServiceWithoutResolver(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + options := append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.DialContext(ctx, address, options...) 
+ + if err != nil { + return nil, err + } + return conn, nil +} diff --git a/pkg/common/discoveryregister/discoveryregister.go b/pkg/common/discoveryregister/discoveryregister.go index c143230279..76c8fb2672 100644 --- a/pkg/common/discoveryregister/discoveryregister.go +++ b/pkg/common/discoveryregister/discoveryregister.go @@ -16,6 +16,7 @@ package discoveryregister import ( "errors" + "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister/direct" "os" "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister/kubernetes" @@ -36,6 +37,8 @@ func NewDiscoveryRegister(envType string) (discoveryregistry.SvcDiscoveryRegistr return zookeeper.NewZookeeperDiscoveryRegister() case "k8s": return kubernetes.NewK8sDiscoveryRegister() + case "direct": + return direct.NewConnDirect() default: return nil, errors.New("envType not correct") } diff --git a/pkg/common/discoveryregister/discoveryregister_test.go b/pkg/common/discoveryregister/discoveryregister_test.go index d83da12855..5317db5c69 100644 --- a/pkg/common/discoveryregister/discoveryregister_test.go +++ b/pkg/common/discoveryregister/discoveryregister_test.go @@ -40,6 +40,7 @@ func TestNewDiscoveryRegister(t *testing.T) { }{ {"zookeeper", false, true}, {"k8s", false, true}, // 假设 k8s 配置也已正确设置 + {"direct", false, true}, {"invalid", true, false}, } diff --git a/pkg/common/discoveryregister/kubernetes/kubernetes.go b/pkg/common/discoveryregister/kubernetes/kubernetes.go index 06c58d9616..7c40399a3a 100644 --- a/pkg/common/discoveryregister/kubernetes/kubernetes.go +++ b/pkg/common/discoveryregister/kubernetes/kubernetes.go @@ -104,7 +104,7 @@ func getSelfHost(ctx context.Context) string { return host } -// like openimserver-openim-msggateway-0.openimserver-openim-msggateway-headless.openim-lin.svc.cluster.local:88 +// like openimserver-openim-msggateway-0.openimserver-openim-msggateway-headless.openim-lin.svc.cluster.local:88. func getMsgGatewayHost(ctx context.Context) []string { port := 88 instance := "openimserver" @@ -132,20 +132,40 @@ func getMsgGatewayHost(ctx context.Context) []string { // GetConns returns the gRPC client connections to the specified service. func (cli *K8sDR) GetConns(ctx context.Context, serviceName string, opts ...grpc.DialOption) ([]*grpc.ClientConn, error) { + // This conditional checks if the serviceName is not the OpenImMessageGatewayName. + // It seems to handle a special case for the OpenImMessageGateway. if serviceName != config.Config.RpcRegisterName.OpenImMessageGatewayName { + // DialContext creates a client connection to the given target (serviceName) using the specified context. + // 'cli.options' are likely default or common options for all connections in this struct. + // 'opts...' allows for additional gRPC dial options to be passed and used. conn, err := grpc.DialContext(ctx, serviceName, append(cli.options, opts...)...) + + // The function returns a slice of client connections with the new connection, or an error if occurred. return []*grpc.ClientConn{conn}, err } else { + // This block is executed if the serviceName is OpenImMessageGatewayName. + // 'ret' will accumulate the connections to return. var ret []*grpc.ClientConn + + // getMsgGatewayHost presumably retrieves hosts for the message gateway service. + // The context is passed, likely for cancellation and timeout control. gatewayHosts := getMsgGatewayHost(ctx) + + // Iterating over the retrieved gateway hosts. for _, host := range gatewayHosts { + // Establishes a connection to each host. 
+ // Again, appending cli.options with any additional opts provided. conn, err := grpc.DialContext(ctx, host, append(cli.options, opts...)...) + + // If there's an error while dialing any host, the function returns immediately with the error. if err != nil { return nil, err } else { + // If the connection is successful, it is added to the 'ret' slice. ret = append(ret, conn) } } + // After all hosts are processed, the slice of connections is returned. return ret, nil } } @@ -168,7 +188,7 @@ func (cli *K8sDR) CloseConn(conn *grpc.ClientConn) { conn.Close() } -// do not use this method for call rpc +// do not use this method for call rpc. func (cli *K8sDR) GetClientLocalConns() map[string][]*grpc.ClientConn { fmt.Println("should not call this function!!!!!!!!!!!!!!!!!!!!!!!!!") return nil diff --git a/pkg/common/redispubsub/redispubliser.go b/pkg/common/redispubsub/redispubliser.go new file mode 100644 index 0000000000..822b25bf9d --- /dev/null +++ b/pkg/common/redispubsub/redispubliser.go @@ -0,0 +1,16 @@ +package redispubsub + +import "github.com/redis/go-redis/v9" + +type Publisher struct { + client redis.UniversalClient + channel string +} + +func NewPublisher(client redis.UniversalClient, channel string) *Publisher { + return &Publisher{client: client, channel: channel} +} + +func (p *Publisher) Publish(message string) error { + return p.client.Publish(ctx, p.channel, message).Err() +} diff --git a/pkg/common/redispubsub/redissubscriber.go b/pkg/common/redispubsub/redissubscriber.go new file mode 100644 index 0000000000..a7029a9932 --- /dev/null +++ b/pkg/common/redispubsub/redissubscriber.go @@ -0,0 +1,34 @@ +package redispubsub + +import ( + "context" + "github.com/redis/go-redis/v9" +) + +var ctx = context.Background() + +type Subscriber struct { + client redis.UniversalClient + channel string +} + +func NewSubscriber(client redis.UniversalClient, channel string) *Subscriber { + return &Subscriber{client: client, channel: channel} +} + +func (s *Subscriber) OnMessage(ctx context.Context, callback func(string)) error { + messageChannel := s.client.Subscribe(ctx, s.channel).Channel() + + go func() { + for { + select { + case <-ctx.Done(): + return + case msg := <-messageChannel: + callback(msg.Payload) + } + } + }() + + return nil +} diff --git a/pkg/common/startrpc/start.go b/pkg/common/startrpc/start.go index 23ff6640b8..25fe320cc9 100644 --- a/pkg/common/startrpc/start.go +++ b/pkg/common/startrpc/start.go @@ -71,7 +71,7 @@ func Start( } defer client.Close() - client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials())) + client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) registerIP, err := network.GetRpcRegisterIP(config.Config.Rpc.RegisterIP) if err != nil { return err diff --git a/pkg/localcache/cache.go b/pkg/localcache/cache.go new file mode 100644 index 0000000000..4b405b46ac --- /dev/null +++ b/pkg/localcache/cache.go @@ -0,0 +1,112 @@ +package localcache + +import ( + "context" + "github.com/openimsdk/localcache/link" + "github.com/openimsdk/localcache/lru" + "hash/fnv" + "unsafe" +) + +type Cache[V any] interface { + Get(ctx context.Context, key string, fetch func(ctx context.Context) (V, error)) (V, error) + GetLink(ctx context.Context, key string, fetch func(ctx context.Context) (V, error), link ...string) (V, error) + Del(ctx context.Context, key ...string) + DelLocal(ctx context.Context, key ...string) + 
Stop() +} + +func New[V any](opts ...Option) Cache[V] { + opt := defaultOption() + for _, o := range opts { + o(opt) + } + + c := cache[V]{opt: opt} + if opt.localSlotNum > 0 && opt.localSlotSize > 0 { + createSimpleLRU := func() lru.LRU[string, V] { + if opt.expirationEvict { + return lru.NewExpirationLRU[string, V](opt.localSlotSize, opt.localSuccessTTL, opt.localFailedTTL, opt.target, c.onEvict) + } else { + return lru.NewLayLRU[string, V](opt.localSlotSize, opt.localSuccessTTL, opt.localFailedTTL, opt.target, c.onEvict) + } + } + if opt.localSlotNum == 1 { + c.local = createSimpleLRU() + } else { + c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, func(key string) uint64 { + h := fnv.New64a() + h.Write(*(*[]byte)(unsafe.Pointer(&key))) + return h.Sum64() + }, createSimpleLRU) + } + if opt.linkSlotNum > 0 { + c.link = link.New(opt.linkSlotNum) + } + } + return &c +} + +type cache[V any] struct { + opt *option + link link.Link + local lru.LRU[string, V] +} + +func (c *cache[V]) onEvict(key string, value V) { + if c.link != nil { + lks := c.link.Del(key) + for k := range lks { + if key != k { // prevent deadlock + c.local.Del(k) + } + } + } +} + +func (c *cache[V]) del(key ...string) { + if c.local == nil { + return + } + for _, k := range key { + c.local.Del(k) + if c.link != nil { + lks := c.link.Del(k) + for k := range lks { + c.local.Del(k) + } + } + } +} + +func (c *cache[V]) Get(ctx context.Context, key string, fetch func(ctx context.Context) (V, error)) (V, error) { + return c.GetLink(ctx, key, fetch) +} + +func (c *cache[V]) GetLink(ctx context.Context, key string, fetch func(ctx context.Context) (V, error), link ...string) (V, error) { + if c.local != nil { + return c.local.Get(key, func() (V, error) { + if len(link) > 0 { + c.link.Link(key, link...) + } + return fetch(ctx) + }) + } else { + return fetch(ctx) + } +} + +func (c *cache[V]) Del(ctx context.Context, key ...string) { + for _, fn := range c.opt.delFn { + fn(ctx, key...) + } + c.del(key...) +} + +func (c *cache[V]) DelLocal(ctx context.Context, key ...string) { + c.del(key...) 
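+ // Unlike Del, DelLocal does not invoke the registered delFn callbacks; it only evicts the keys from this process's cache.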
+} + +func (c *cache[V]) Stop() { + c.local.Stop() +} diff --git a/pkg/localcache/cache_test.go b/pkg/localcache/cache_test.go new file mode 100644 index 0000000000..c497b7b4ab --- /dev/null +++ b/pkg/localcache/cache_test.go @@ -0,0 +1,79 @@ +package localcache + +import ( + "context" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestName(t *testing.T) { + c := New[string](WithExpirationEvict()) + //c := New[string]() + ctx := context.Background() + + const ( + num = 10000 + tNum = 10000 + kNum = 100000 + pNum = 100 + ) + + getKey := func(v uint64) string { + return fmt.Sprintf("key_%d", v%kNum) + } + + start := time.Now() + t.Log("start", start) + + var ( + get atomic.Int64 + del atomic.Int64 + ) + + incrGet := func() { + if v := get.Add(1); v%pNum == 0 { + //t.Log("#get count", v/pNum) + } + } + incrDel := func() { + if v := del.Add(1); v%pNum == 0 { + //t.Log("@del count", v/pNum) + } + } + + var wg sync.WaitGroup + + for i := 0; i < tNum; i++ { + wg.Add(2) + go func() { + defer wg.Done() + for i := 0; i < num; i++ { + c.Get(ctx, getKey(rand.Uint64()), func(ctx context.Context) (string, error) { + return fmt.Sprintf("index_%d", i), nil + }) + incrGet() + } + }() + + go func() { + defer wg.Done() + time.Sleep(time.Second / 10) + for i := 0; i < num; i++ { + c.Del(ctx, getKey(rand.Uint64())) + incrDel() + } + }() + } + + wg.Wait() + end := time.Now() + t.Log("end", end) + t.Log("time", end.Sub(start)) + t.Log("get", get.Load()) + t.Log("del", del.Load()) + // 137.35s +} diff --git a/pkg/localcache/go.mod b/pkg/localcache/go.mod new file mode 100644 index 0000000000..5f0793042e --- /dev/null +++ b/pkg/localcache/go.mod @@ -0,0 +1,5 @@ +module github.com/openimsdk/localcache + +go 1.19 + +require github.com/hashicorp/golang-lru/v2 v2.0.7 diff --git a/pkg/localcache/link/link.go b/pkg/localcache/link/link.go new file mode 100644 index 0000000000..4f238907b9 --- /dev/null +++ b/pkg/localcache/link/link.go @@ -0,0 +1,109 @@ +package link + +import ( + "hash/fnv" + "sync" + "unsafe" +) + +type Link interface { + Link(key string, link ...string) + Del(key string) map[string]struct{} +} + +func newLinkKey() *linkKey { + return &linkKey{ + data: make(map[string]map[string]struct{}), + } +} + +type linkKey struct { + lock sync.Mutex + data map[string]map[string]struct{} +} + +func (x *linkKey) link(key string, link ...string) { + x.lock.Lock() + defer x.lock.Unlock() + v, ok := x.data[key] + if !ok { + v = make(map[string]struct{}) + x.data[key] = v + } + for _, k := range link { + v[k] = struct{}{} + } +} + +func (x *linkKey) del(key string) map[string]struct{} { + x.lock.Lock() + defer x.lock.Unlock() + ks, ok := x.data[key] + if !ok { + return nil + } + delete(x.data, key) + return ks +} + +func New(n int) Link { + if n <= 0 { + panic("must be greater than 0") + } + slots := make([]*linkKey, n) + for i := 0; i < len(slots); i++ { + slots[i] = newLinkKey() + } + return &slot{ + n: uint64(n), + slots: slots, + } +} + +type slot struct { + n uint64 + slots []*linkKey +} + +func (x *slot) index(s string) uint64 { + h := fnv.New64a() + _, _ = h.Write(*(*[]byte)(unsafe.Pointer(&s))) + return h.Sum64() % x.n +} + +func (x *slot) Link(key string, link ...string) { + if len(link) == 0 { + return + } + mk := key + lks := make([]string, len(link)) + for i, k := range link { + lks[i] = k + } + x.slots[x.index(mk)].link(mk, lks...) 
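+ // Link each related key back to the main key as well, so that deleting any key in the group cascades to the others via Del.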
+ for _, lk := range lks { + x.slots[x.index(lk)].link(lk, mk) + } +} + +func (x *slot) Del(key string) map[string]struct{} { + return x.delKey(key) +} + +func (x *slot) delKey(k string) map[string]struct{} { + del := make(map[string]struct{}) + stack := []string{k} + for len(stack) > 0 { + curr := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if _, ok := del[curr]; ok { + continue + } + del[curr] = struct{}{} + childKeys := x.slots[x.index(curr)].del(curr) + for ck := range childKeys { + stack = append(stack, ck) + } + } + return del +} diff --git a/pkg/localcache/link/link_test.go b/pkg/localcache/link/link_test.go new file mode 100644 index 0000000000..ed684e6939 --- /dev/null +++ b/pkg/localcache/link/link_test.go @@ -0,0 +1,20 @@ +package link + +import ( + "testing" +) + +func TestName(t *testing.T) { + + v := New(1) + + //v.Link("a:1", "b:1", "c:1", "d:1") + v.Link("a:1", "b:1", "c:1") + v.Link("z:1", "b:1") + + //v.DelKey("a:1") + v.Del("z:1") + + t.Log(v) + +} diff --git a/pkg/localcache/lru/lru.go b/pkg/localcache/lru/lru.go new file mode 100644 index 0000000000..2995ec028d --- /dev/null +++ b/pkg/localcache/lru/lru.go @@ -0,0 +1,20 @@ +package lru + +import "github.com/hashicorp/golang-lru/v2/simplelru" + +type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V] + +type LRU[K comparable, V any] interface { + Get(key K, fetch func() (V, error)) (V, error) + Del(key K) bool + Stop() +} + +type Target interface { + IncrGetHit() + IncrGetSuccess() + IncrGetFailed() + + IncrDelHit() + IncrDelNotFound() +} diff --git a/pkg/localcache/lru/lru_expiration.go b/pkg/localcache/lru/lru_expiration.go new file mode 100644 index 0000000000..3cf61f0616 --- /dev/null +++ b/pkg/localcache/lru/lru_expiration.go @@ -0,0 +1,78 @@ +package lru + +import ( + "github.com/hashicorp/golang-lru/v2/expirable" + "sync" + "time" +) + +func NewExpirationLRU[K comparable, V any](size int, successTTL, failedTTL time.Duration, target Target, onEvict EvictCallback[K, V]) LRU[K, V] { + var cb expirable.EvictCallback[K, *expirationLruItem[V]] + if onEvict != nil { + cb = func(key K, value *expirationLruItem[V]) { + onEvict(key, value.value) + } + } + core := expirable.NewLRU[K, *expirationLruItem[V]](size, cb, successTTL) + return &ExpirationLRU[K, V]{ + core: core, + successTTL: successTTL, + failedTTL: failedTTL, + target: target, + } +} + +type expirationLruItem[V any] struct { + lock sync.RWMutex + err error + value V +} + +type ExpirationLRU[K comparable, V any] struct { + lock sync.Mutex + core *expirable.LRU[K, *expirationLruItem[V]] + successTTL time.Duration + failedTTL time.Duration + target Target +} + +func (x *ExpirationLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { + x.lock.Lock() + v, ok := x.core.Get(key) + if ok { + x.lock.Unlock() + x.target.IncrGetSuccess() + v.lock.RLock() + defer v.lock.RUnlock() + return v.value, v.err + } else { + v = &expirationLruItem[V]{} + x.core.Add(key, v) + v.lock.Lock() + x.lock.Unlock() + defer v.lock.Unlock() + v.value, v.err = fetch() + if v.err == nil { + x.target.IncrGetSuccess() + } else { + x.target.IncrGetFailed() + x.core.Remove(key) + } + return v.value, v.err + } +} + +func (x *ExpirationLRU[K, V]) Del(key K) bool { + x.lock.Lock() + ok := x.core.Remove(key) + x.lock.Unlock() + if ok { + x.target.IncrDelHit() + } else { + x.target.IncrDelNotFound() + } + return ok +} + +func (x *ExpirationLRU[K, V]) Stop() { +} diff --git a/pkg/localcache/lru/lru_lazy.go b/pkg/localcache/lru/lru_lazy.go new file mode 100644 index 
0000000000..a9270ea4a8 --- /dev/null +++ b/pkg/localcache/lru/lru_lazy.go @@ -0,0 +1,90 @@ +package lru + +import ( + "github.com/hashicorp/golang-lru/v2/simplelru" + "sync" + "time" +) + +type layLruItem[V any] struct { + lock sync.Mutex + expires int64 + err error + value V +} + +func NewLayLRU[K comparable, V any](size int, successTTL, failedTTL time.Duration, target Target, onEvict EvictCallback[K, V]) *LayLRU[K, V] { + var cb simplelru.EvictCallback[K, *layLruItem[V]] + if onEvict != nil { + cb = func(key K, value *layLruItem[V]) { + onEvict(key, value.value) + } + } + core, err := simplelru.NewLRU[K, *layLruItem[V]](size, cb) + if err != nil { + panic(err) + } + return &LayLRU[K, V]{ + core: core, + successTTL: successTTL, + failedTTL: failedTTL, + target: target, + } +} + +type LayLRU[K comparable, V any] struct { + lock sync.Mutex + core *simplelru.LRU[K, *layLruItem[V]] + successTTL time.Duration + failedTTL time.Duration + target Target +} + +func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { + x.lock.Lock() + v, ok := x.core.Get(key) + if ok { + x.lock.Unlock() + v.lock.Lock() + expires, value, err := v.expires, v.value, v.err + if expires != 0 && expires > time.Now().UnixMilli() { + v.lock.Unlock() + x.target.IncrGetHit() + return value, err + } + } else { + v = &layLruItem[V]{} + x.core.Add(key, v) + v.lock.Lock() + x.lock.Unlock() + } + defer v.lock.Unlock() + if v.expires > time.Now().UnixMilli() { + return v.value, v.err + } + v.value, v.err = fetch() + if v.err == nil { + v.expires = time.Now().Add(x.successTTL).UnixMilli() + x.target.IncrGetSuccess() + } else { + v.expires = time.Now().Add(x.failedTTL).UnixMilli() + x.target.IncrGetFailed() + } + return v.value, v.err +} + +func (x *LayLRU[K, V]) Del(key K) bool { + x.lock.Lock() + ok := x.core.Remove(key) + x.lock.Unlock() + if ok { + x.target.IncrDelHit() + } else { + x.target.IncrDelNotFound() + } + return ok +} + +func (x *LayLRU[K, V]) Stop() { + +} diff --git a/pkg/localcache/lru/lru_lazy_test.go b/pkg/localcache/lru/lru_lazy_test.go new file mode 100644 index 0000000000..09fd04cd3b --- /dev/null +++ b/pkg/localcache/lru/lru_lazy_test.go @@ -0,0 +1,104 @@ +package lru + +import ( + "fmt" + "hash/fnv" + "sync" + "sync/atomic" + "testing" + "time" + "unsafe" +) + +type cacheTarget struct { + getHit int64 + getSuccess int64 + getFailed int64 + delHit int64 + delNotFound int64 +} + +func (r *cacheTarget) IncrGetHit() { + atomic.AddInt64(&r.getHit, 1) +} + +func (r *cacheTarget) IncrGetSuccess() { + atomic.AddInt64(&r.getSuccess, 1) +} + +func (r *cacheTarget) IncrGetFailed() { + atomic.AddInt64(&r.getFailed, 1) +} + +func (r *cacheTarget) IncrDelHit() { + atomic.AddInt64(&r.delHit, 1) +} + +func (r *cacheTarget) IncrDelNotFound() { + atomic.AddInt64(&r.delNotFound, 1) +} + +func (r *cacheTarget) String() string { + return fmt.Sprintf("getHit: %d, getSuccess: %d, getFailed: %d, delHit: %d, delNotFound: %d", r.getHit, r.getSuccess, r.getFailed, r.delHit, r.delNotFound) +} + +func TestName(t *testing.T) { + target := &cacheTarget{} + l := NewSlotLRU[string, string](100, func(k string) uint64 { + h := fnv.New64a() + h.Write(*(*[]byte)(unsafe.Pointer(&k))) + return h.Sum64() + }, func() LRU[string, string] { + return NewExpirationLRU[string, string](100, time.Second*60, time.Second, target, nil) + }) + //l := NewInertiaLRU[string, string](1000, time.Second*20, time.Second*5, target) + + fn := func(key string, n int, fetch func() (string, error)) { + for i := 0; i < n; i++ { + //v, err := l.Get(key, 
fetch) + //if err == nil { + // t.Log("key", key, "value", v) + //} else { + // t.Error("key", key, err) + //} + v, err := l.Get(key, fetch) + //time.Sleep(time.Second / 100) + func(v ...any) {}(v, err) + } + } + + tmp := make(map[string]struct{}) + + var wg sync.WaitGroup + for i := 0; i < 10000; i++ { + wg.Add(1) + key := fmt.Sprintf("key_%d", i%200) + tmp[key] = struct{}{} + go func() { + defer wg.Done() + //t.Log(key) + fn(key, 10000, func() (string, error) { + //time.Sleep(time.Second * 3) + //t.Log(time.Now(), "key", key, "fetch") + //if rand.Uint32()%5 == 0 { + // return "value_" + key, nil + //} + //return "", errors.New("rand error") + return "value_" + key, nil + }) + }() + + //wg.Add(1) + //go func() { + // defer wg.Done() + // for i := 0; i < 10; i++ { + // l.Del(key) + // time.Sleep(time.Second / 3) + // } + //}() + } + wg.Wait() + t.Log(len(tmp)) + t.Log(target.String()) + +} diff --git a/pkg/localcache/lru/lru_slot.go b/pkg/localcache/lru/lru_slot.go new file mode 100644 index 0000000000..c1b8b94d07 --- /dev/null +++ b/pkg/localcache/lru/lru_slot.go @@ -0,0 +1,37 @@ +package lru + +func NewSlotLRU[K comparable, V any](slotNum int, hash func(K) uint64, create func() LRU[K, V]) LRU[K, V] { + x := &slotLRU[K, V]{ + n: uint64(slotNum), + slots: make([]LRU[K, V], slotNum), + hash: hash, + } + for i := 0; i < slotNum; i++ { + x.slots[i] = create() + } + return x +} + +type slotLRU[K comparable, V any] struct { + n uint64 + slots []LRU[K, V] + hash func(k K) uint64 +} + +func (x *slotLRU[K, V]) getIndex(k K) uint64 { + return x.hash(k) % x.n +} + +func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { + return x.slots[x.getIndex(key)].Get(key, fetch) +} + +func (x *slotLRU[K, V]) Del(key K) bool { + return x.slots[x.getIndex(key)].Del(key) +} + +func (x *slotLRU[K, V]) Stop() { + for _, slot := range x.slots { + slot.Stop() + } +} diff --git a/pkg/localcache/option.go b/pkg/localcache/option.go new file mode 100644 index 0000000000..ecb5da0e63 --- /dev/null +++ b/pkg/localcache/option.go @@ -0,0 +1,121 @@ +package localcache + +import ( + "context" + "github.com/openimsdk/localcache/lru" + "time" +) + +func defaultOption() *option { + return &option{ + localSlotNum: 500, + localSlotSize: 20000, + linkSlotNum: 500, + expirationEvict: false, + localSuccessTTL: time.Minute, + localFailedTTL: time.Second * 5, + delFn: make([]func(ctx context.Context, key ...string), 0, 2), + target: emptyTarget{}, + } +} + +type option struct { + localSlotNum int + localSlotSize int + linkSlotNum int + // expirationEvict: true means that the cache will be actively cleared when the timer expires, + // false means that the cache will be lazily deleted. 
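+ // The WithExpirationEvict and WithLazy options below switch between these two modes.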
+ expirationEvict bool + localSuccessTTL time.Duration + localFailedTTL time.Duration + delFn []func(ctx context.Context, key ...string) + target lru.Target +} + +type Option func(o *option) + +func WithExpirationEvict() Option { + return func(o *option) { + o.expirationEvict = true + } +} + +func WithLazy() Option { + return func(o *option) { + o.expirationEvict = false + } +} + +func WithLocalDisable() Option { + return WithLinkSlotNum(0) +} + +func WithLinkDisable() Option { + return WithLinkSlotNum(0) +} + +func WithLinkSlotNum(linkSlotNum int) Option { + return func(o *option) { + o.linkSlotNum = linkSlotNum + } +} + +func WithLocalSlotNum(localSlotNum int) Option { + return func(o *option) { + o.localSlotNum = localSlotNum + } +} + +func WithLocalSlotSize(localSlotSize int) Option { + return func(o *option) { + o.localSlotSize = localSlotSize + } +} + +func WithLocalSuccessTTL(localSuccessTTL time.Duration) Option { + if localSuccessTTL < 0 { + panic("localSuccessTTL should be greater than 0") + } + return func(o *option) { + o.localSuccessTTL = localSuccessTTL + } +} + +func WithLocalFailedTTL(localFailedTTL time.Duration) Option { + if localFailedTTL < 0 { + panic("localFailedTTL should be greater than 0") + } + return func(o *option) { + o.localFailedTTL = localFailedTTL + } +} + +func WithTarget(target lru.Target) Option { + if target == nil { + panic("target should not be nil") + } + return func(o *option) { + o.target = target + } +} + +func WithDeleteKeyBefore(fn func(ctx context.Context, key ...string)) Option { + if fn == nil { + panic("fn should not be nil") + } + return func(o *option) { + o.delFn = append(o.delFn, fn) + } +} + +type emptyTarget struct{} + +func (e emptyTarget) IncrGetHit() {} + +func (e emptyTarget) IncrGetSuccess() {} + +func (e emptyTarget) IncrGetFailed() {} + +func (e emptyTarget) IncrDelHit() {} + +func (e emptyTarget) IncrDelNotFound() {} diff --git a/pkg/localcache/tool.go b/pkg/localcache/tool.go new file mode 100644 index 0000000000..ea35908233 --- /dev/null +++ b/pkg/localcache/tool.go @@ -0,0 +1,9 @@ +package localcache + +func AnyValue[V any](v any, err error) (V, error) { + if err != nil { + var zero V + return zero, err + } + return v.(V), nil +} diff --git a/pkg/rpccache/common.go b/pkg/rpccache/common.go new file mode 100644 index 0000000000..6dc826e305 --- /dev/null +++ b/pkg/rpccache/common.go @@ -0,0 +1,20 @@ +package rpccache + +func newListMap[V comparable](values []V, err error) (*listMap[V], error) { + if err != nil { + return nil, err + } + lm := &listMap[V]{ + List: values, + Map: make(map[V]struct{}, len(values)), + } + for _, value := range values { + lm.Map[value] = struct{}{} + } + return lm, nil +} + +type listMap[V comparable] struct { + List []V + Map map[V]struct{} +} diff --git a/pkg/rpccache/conversation.go b/pkg/rpccache/conversation.go new file mode 100644 index 0000000000..8eadad9d46 --- /dev/null +++ b/pkg/rpccache/conversation.go @@ -0,0 +1,90 @@ +package rpccache + +import ( + "context" + pbconversation "github.com/OpenIMSDK/protocol/conversation" + "github.com/OpenIMSDK/tools/errs" + "github.com/OpenIMSDK/tools/log" + "github.com/openimsdk/localcache" + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/redis/go-redis/v9" +) + +func NewConversationLocalCache(client rpcclient.ConversationRpcClient, cli redis.UniversalClient) *ConversationLocalCache { + lc := 
config.Config.LocalCache.Conversation + log.ZDebug(context.Background(), "ConversationLocalCache", "topic", lc.Topic, "slotNum", lc.SlotNum, "slotSize", lc.SlotSize, "enable", lc.Enable()) + x := &ConversationLocalCache{ + client: client, + local: localcache.New[any]( + localcache.WithLocalSlotNum(lc.SlotNum), + localcache.WithLocalSlotSize(lc.SlotSize), + localcache.WithLinkSlotNum(lc.SlotNum), + localcache.WithLocalSuccessTTL(lc.Success()), + localcache.WithLocalFailedTTL(lc.Failed()), + ), + } + if lc.Enable() { + go subscriberRedisDeleteCache(context.Background(), cli, lc.Topic, x.local.DelLocal) + } + return x +} + +type ConversationLocalCache struct { + client rpcclient.ConversationRpcClient + local localcache.Cache[any] +} + +func (c *ConversationLocalCache) GetConversationIDs(ctx context.Context, ownerUserID string) (val []string, err error) { + log.ZDebug(ctx, "ConversationLocalCache GetConversationIDs req", "ownerUserID", ownerUserID) + defer func() { + if err == nil { + log.ZDebug(ctx, "ConversationLocalCache GetConversationIDs return", "value", val) + } else { + log.ZError(ctx, "ConversationLocalCache GetConversationIDs return", err) + } + }() + return localcache.AnyValue[[]string](c.local.Get(ctx, cachekey.GetConversationIDsKey(ownerUserID), func(ctx context.Context) (any, error) { + log.ZDebug(ctx, "ConversationLocalCache GetConversationIDs rpc", "ownerUserID", ownerUserID) + return c.client.GetConversationIDs(ctx, ownerUserID) + })) +} + +func (c *ConversationLocalCache) GetConversation(ctx context.Context, userID, conversationID string) (val *pbconversation.Conversation, err error) { + log.ZDebug(ctx, "ConversationLocalCache GetConversation req", "userID", userID, "conversationID", conversationID) + defer func() { + if err == nil { + log.ZDebug(ctx, "ConversationLocalCache GetConversation return", "value", val) + } else { + log.ZError(ctx, "ConversationLocalCache GetConversation return", err) + } + }() + return localcache.AnyValue[*pbconversation.Conversation](c.local.Get(ctx, cachekey.GetConversationKey(userID, conversationID), func(ctx context.Context) (any, error) { + log.ZDebug(ctx, "ConversationLocalCache GetConversation rpc", "userID", userID, "conversationID", conversationID) + return c.client.GetConversation(ctx, userID, conversationID) + })) +} + +func (c *ConversationLocalCache) GetSingleConversationRecvMsgOpt(ctx context.Context, userID, conversationID string) (int32, error) { + conv, err := c.GetConversation(ctx, userID, conversationID) + if err != nil { + return 0, err + } + return conv.RecvMsgOpt, nil +} + +func (c *ConversationLocalCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*pbconversation.Conversation, error) { + conversations := make([]*pbconversation.Conversation, 0, len(conversationIDs)) + for _, conversationID := range conversationIDs { + conversation, err := c.GetConversation(ctx, ownerUserID, conversationID) + if err != nil { + if errs.ErrRecordNotFound.Is(err) { + continue + } + return nil, err + } + conversations = append(conversations, conversation) + } + return conversations, nil +} diff --git a/pkg/rpccache/friend.go b/pkg/rpccache/friend.go new file mode 100644 index 0000000000..66694f6181 --- /dev/null +++ b/pkg/rpccache/friend.go @@ -0,0 +1,66 @@ +package rpccache + +import ( + "context" + "github.com/OpenIMSDK/tools/log" + "github.com/openimsdk/localcache" + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + 
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/redis/go-redis/v9" +) + +func NewFriendLocalCache(client rpcclient.FriendRpcClient, cli redis.UniversalClient) *FriendLocalCache { + lc := config.Config.LocalCache.Friend + log.ZDebug(context.Background(), "FriendLocalCache", "topic", lc.Topic, "slotNum", lc.SlotNum, "slotSize", lc.SlotSize, "enable", lc.Enable()) + x := &FriendLocalCache{ + client: client, + local: localcache.New[any]( + localcache.WithLocalSlotNum(lc.SlotNum), + localcache.WithLocalSlotSize(lc.SlotSize), + localcache.WithLinkSlotNum(lc.SlotNum), + localcache.WithLocalSuccessTTL(lc.Success()), + localcache.WithLocalFailedTTL(lc.Failed()), + ), + } + if lc.Enable() { + go subscriberRedisDeleteCache(context.Background(), cli, lc.Topic, x.local.DelLocal) + } + return x +} + +type FriendLocalCache struct { + client rpcclient.FriendRpcClient + local localcache.Cache[any] +} + +func (f *FriendLocalCache) IsFriend(ctx context.Context, possibleFriendUserID, userID string) (val bool, err error) { + log.ZDebug(ctx, "FriendLocalCache IsFriend req", "possibleFriendUserID", possibleFriendUserID, "userID", userID) + defer func() { + if err == nil { + log.ZDebug(ctx, "FriendLocalCache IsFriend return", "value", val) + } else { + log.ZError(ctx, "FriendLocalCache IsFriend return", err) + } + }() + return localcache.AnyValue[bool](f.local.GetLink(ctx, cachekey.GetIsFriendKey(possibleFriendUserID, userID), func(ctx context.Context) (any, error) { + log.ZDebug(ctx, "FriendLocalCache IsFriend rpc", "possibleFriendUserID", possibleFriendUserID, "userID", userID) + return f.client.IsFriend(ctx, possibleFriendUserID, userID) + }, cachekey.GetFriendIDsKey(possibleFriendUserID))) +} + +// IsBlack possibleBlackUserID selfUserID +func (f *FriendLocalCache) IsBlack(ctx context.Context, possibleBlackUserID, userID string) (val bool, err error) { + log.ZDebug(ctx, "FriendLocalCache IsBlack req", "possibleBlackUserID", possibleBlackUserID, "userID", userID) + defer func() { + if err == nil { + log.ZDebug(ctx, "FriendLocalCache IsBlack return", "value", val) + } else { + log.ZError(ctx, "FriendLocalCache IsBlack return", err) + } + }() + return localcache.AnyValue[bool](f.local.GetLink(ctx, cachekey.GetIsBlackIDsKey(possibleBlackUserID, userID), func(ctx context.Context) (any, error) { + log.ZDebug(ctx, "FriendLocalCache IsBlack rpc", "possibleBlackUserID", possibleBlackUserID, "userID", userID) + return f.client.IsBlack(ctx, possibleBlackUserID, userID) + }, cachekey.GetBlackIDsKey(userID))) +} diff --git a/pkg/rpccache/group.go b/pkg/rpccache/group.go new file mode 100644 index 0000000000..b3c666da4a --- /dev/null +++ b/pkg/rpccache/group.go @@ -0,0 +1,143 @@ +package rpccache + +import ( + "context" + "github.com/OpenIMSDK/protocol/sdkws" + "github.com/OpenIMSDK/tools/errs" + "github.com/OpenIMSDK/tools/log" + "github.com/openimsdk/localcache" + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/redis/go-redis/v9" +) + +func NewGroupLocalCache(client rpcclient.GroupRpcClient, cli redis.UniversalClient) *GroupLocalCache { + lc := config.Config.LocalCache.Group + log.ZDebug(context.Background(), "GroupLocalCache", "topic", lc.Topic, "slotNum", lc.SlotNum, "slotSize", lc.SlotSize, "enable", lc.Enable()) + x := &GroupLocalCache{ + client: client, + local: localcache.New[any]( + localcache.WithLocalSlotNum(lc.SlotNum), + 
localcache.WithLocalSlotSize(lc.SlotSize), + localcache.WithLinkSlotNum(lc.SlotNum), + localcache.WithLocalSuccessTTL(lc.Success()), + localcache.WithLocalFailedTTL(lc.Failed()), + ), + } + if lc.Enable() { + go subscriberRedisDeleteCache(context.Background(), cli, lc.Topic, x.local.DelLocal) + } + return x +} + +type GroupLocalCache struct { + client rpcclient.GroupRpcClient + local localcache.Cache[any] +} + +func (g *GroupLocalCache) getGroupMemberIDs(ctx context.Context, groupID string) (val *listMap[string], err error) { + log.ZDebug(ctx, "GroupLocalCache getGroupMemberIDs req", "groupID", groupID) + defer func() { + if err == nil { + log.ZDebug(ctx, "GroupLocalCache getGroupMemberIDs return", "value", val) + } else { + log.ZError(ctx, "GroupLocalCache getGroupMemberIDs return", err) + } + }() + return localcache.AnyValue[*listMap[string]](g.local.Get(ctx, cachekey.GetGroupMemberIDsKey(groupID), func(ctx context.Context) (any, error) { + log.ZDebug(ctx, "GroupLocalCache getGroupMemberIDs rpc", "groupID", groupID) + return newListMap(g.client.GetGroupMemberIDs(ctx, groupID)) + })) +} + +func (g *GroupLocalCache) GetGroupMember(ctx context.Context, groupID, userID string) (val *sdkws.GroupMemberFullInfo, err error) { + log.ZDebug(ctx, "GroupLocalCache GetGroupInfo req", "groupID", groupID, "userID", userID) + defer func() { + if err == nil { + log.ZDebug(ctx, "GroupLocalCache GetGroupInfo return", "value", val) + } else { + log.ZError(ctx, "GroupLocalCache GetGroupInfo return", err) + } + }() + return localcache.AnyValue[*sdkws.GroupMemberFullInfo](g.local.Get(ctx, cachekey.GetGroupMemberInfoKey(groupID, userID), func(ctx context.Context) (any, error) { + log.ZDebug(ctx, "GroupLocalCache GetGroupInfo rpc", "groupID", groupID, "userID", userID) + return g.client.GetGroupMemberCache(ctx, groupID, userID) + })) +} + +func (g *GroupLocalCache) GetGroupInfo(ctx context.Context, groupID string) (val *sdkws.GroupInfo, err error) { + log.ZDebug(ctx, "GroupLocalCache GetGroupInfo req", "groupID", groupID) + defer func() { + if err == nil { + log.ZDebug(ctx, "GroupLocalCache GetGroupInfo return", "value", val) + } else { + log.ZError(ctx, "GroupLocalCache GetGroupInfo return", err) + } + }() + return localcache.AnyValue[*sdkws.GroupInfo](g.local.Get(ctx, cachekey.GetGroupInfoKey(groupID), func(ctx context.Context) (any, error) { + log.ZDebug(ctx, "GroupLocalCache GetGroupInfo rpc", "groupID", groupID) + return g.client.GetGroupInfoCache(ctx, groupID) + })) +} + +func (g *GroupLocalCache) GetGroupMemberIDs(ctx context.Context, groupID string) ([]string, error) { + res, err := g.getGroupMemberIDs(ctx, groupID) + if err != nil { + return nil, err + } + return res.List, nil +} + +func (g *GroupLocalCache) GetGroupMemberIDMap(ctx context.Context, groupID string) (map[string]struct{}, error) { + res, err := g.getGroupMemberIDs(ctx, groupID) + if err != nil { + return nil, err + } + return res.Map, nil +} + +func (g *GroupLocalCache) GetGroupInfos(ctx context.Context, groupIDs []string) ([]*sdkws.GroupInfo, error) { + groupInfos := make([]*sdkws.GroupInfo, 0, len(groupIDs)) + for _, groupID := range groupIDs { + groupInfo, err := g.GetGroupInfo(ctx, groupID) + if err != nil { + if errs.ErrRecordNotFound.Is(err) { + continue + } + return nil, err + } + groupInfos = append(groupInfos, groupInfo) + } + return groupInfos, nil +} + +func (g *GroupLocalCache) GetGroupMembers(ctx context.Context, groupID string, userIDs []string) ([]*sdkws.GroupMemberFullInfo, error) { + members := 
make([]*sdkws.GroupMemberFullInfo, 0, len(userIDs)) + for _, userID := range userIDs { + member, err := g.GetGroupMember(ctx, groupID, userID) + if err != nil { + if errs.ErrRecordNotFound.Is(err) { + continue + } + return nil, err + } + members = append(members, member) + } + return members, nil +} + +func (g *GroupLocalCache) GetGroupMemberInfoMap(ctx context.Context, groupID string, userIDs []string) (map[string]*sdkws.GroupMemberFullInfo, error) { + members := make(map[string]*sdkws.GroupMemberFullInfo) + for _, userID := range userIDs { + member, err := g.GetGroupMember(ctx, groupID, userID) + if err != nil { + if errs.ErrRecordNotFound.Is(err) { + continue + } + return nil, err + } + members[userID] = member + } + return members, nil +} diff --git a/pkg/rpccache/subscriber.go b/pkg/rpccache/subscriber.go new file mode 100644 index 0000000000..571ff6d2db --- /dev/null +++ b/pkg/rpccache/subscriber.go @@ -0,0 +1,23 @@ +package rpccache + +import ( + "context" + "encoding/json" + "github.com/OpenIMSDK/tools/log" + "github.com/redis/go-redis/v9" +) + +func subscriberRedisDeleteCache(ctx context.Context, client redis.UniversalClient, channel string, del func(ctx context.Context, key ...string)) { + for message := range client.Subscribe(ctx, channel).Channel() { + log.ZDebug(ctx, "subscriberRedisDeleteCache", "channel", channel, "payload", message.Payload) + var keys []string + if err := json.Unmarshal([]byte(message.Payload), &keys); err != nil { + log.ZError(ctx, "subscriberRedisDeleteCache json.Unmarshal error", err) + continue + } + if len(keys) == 0 { + continue + } + del(ctx, keys...) + } +} diff --git a/pkg/rpccache/user.go b/pkg/rpccache/user.go new file mode 100644 index 0000000000..7d6cd5c7e3 --- /dev/null +++ b/pkg/rpccache/user.go @@ -0,0 +1,97 @@ +package rpccache + +import ( + "context" + "github.com/OpenIMSDK/protocol/sdkws" + "github.com/OpenIMSDK/tools/errs" + "github.com/OpenIMSDK/tools/log" + "github.com/openimsdk/localcache" + "github.com/openimsdk/open-im-server/v3/pkg/common/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/redis/go-redis/v9" +) + +func NewUserLocalCache(client rpcclient.UserRpcClient, cli redis.UniversalClient) *UserLocalCache { + lc := config.Config.LocalCache.User + log.ZDebug(context.Background(), "UserLocalCache", "topic", lc.Topic, "slotNum", lc.SlotNum, "slotSize", lc.SlotSize, "enable", lc.Enable()) + x := &UserLocalCache{ + client: client, + local: localcache.New[any]( + localcache.WithLocalSlotNum(lc.SlotNum), + localcache.WithLocalSlotSize(lc.SlotSize), + localcache.WithLinkSlotNum(lc.SlotNum), + localcache.WithLocalSuccessTTL(lc.Success()), + localcache.WithLocalFailedTTL(lc.Failed()), + ), + } + if lc.Enable() { + go subscriberRedisDeleteCache(context.Background(), cli, lc.Topic, x.local.DelLocal) + } + return x +} + +type UserLocalCache struct { + client rpcclient.UserRpcClient + local localcache.Cache[any] +} + +func (u *UserLocalCache) GetUserInfo(ctx context.Context, userID string) (val *sdkws.UserInfo, err error) { + log.ZDebug(ctx, "UserLocalCache GetUserInfo req", "userID", userID) + defer func() { + if err == nil { + log.ZDebug(ctx, "UserLocalCache GetUserInfo return", "value", val) + } else { + log.ZError(ctx, "UserLocalCache GetUserInfo return", err) + } + }() + return localcache.AnyValue[*sdkws.UserInfo](u.local.Get(ctx, cachekey.GetUserInfoKey(userID), func(ctx context.Context) (any, error) { + log.ZDebug(ctx, "UserLocalCache 
GetUserInfo rpc", "userID", userID) + return u.client.GetUserInfo(ctx, userID) + })) +} + +func (u *UserLocalCache) GetUserGlobalMsgRecvOpt(ctx context.Context, userID string) (val int32, err error) { + log.ZDebug(ctx, "UserLocalCache GetUserGlobalMsgRecvOpt req", "userID", userID) + defer func() { + if err == nil { + log.ZDebug(ctx, "UserLocalCache GetUserGlobalMsgRecvOpt return", "value", val) + } else { + log.ZError(ctx, "UserLocalCache GetUserGlobalMsgRecvOpt return", err) + } + }() + return localcache.AnyValue[int32](u.local.Get(ctx, cachekey.GetUserGlobalRecvMsgOptKey(userID), func(ctx context.Context) (any, error) { + log.ZDebug(ctx, "UserLocalCache GetUserGlobalMsgRecvOpt rpc", "userID", userID) + return u.client.GetUserGlobalMsgRecvOpt(ctx, userID) + })) +} + +func (u *UserLocalCache) GetUsersInfo(ctx context.Context, userIDs []string) ([]*sdkws.UserInfo, error) { + users := make([]*sdkws.UserInfo, 0, len(userIDs)) + for _, userID := range userIDs { + user, err := u.GetUserInfo(ctx, userID) + if err != nil { + if errs.ErrRecordNotFound.Is(err) { + continue + } + return nil, err + } + users = append(users, user) + } + return users, nil +} + +func (u *UserLocalCache) GetUsersInfoMap(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error) { + users := make(map[string]*sdkws.UserInfo, len(userIDs)) + for _, userID := range userIDs { + user, err := u.GetUserInfo(ctx, userID) + if err != nil { + if errs.ErrRecordNotFound.Is(err) { + continue + } + return nil, err + } + users[userID] = user + } + return users, nil +} diff --git a/pkg/rpcclient/conversation.go b/pkg/rpcclient/conversation.go index 3ba8dd8c0f..53332beac6 100644 --- a/pkg/rpcclient/conversation.go +++ b/pkg/rpcclient/conversation.go @@ -114,6 +114,14 @@ func (c *ConversationRpcClient) GetConversationsByConversationID(ctx context.Con return resp.Conversations, nil } +func (c *ConversationRpcClient) GetConversationOfflinePushUserIDs(ctx context.Context, conversationID string, userIDs []string) ([]string, error) { + resp, err := c.Client.GetConversationOfflinePushUserIDs(ctx, &pbconversation.GetConversationOfflinePushUserIDsReq{ConversationID: conversationID, UserIDs: userIDs}) + if err != nil { + return nil, err + } + return resp.UserIDs, nil +} + func (c *ConversationRpcClient) GetConversations( ctx context.Context, ownerUserID string, diff --git a/pkg/rpcclient/friend.go b/pkg/rpcclient/friend.go index b84db40d4d..7158ed5692 100644 --- a/pkg/rpcclient/friend.go +++ b/pkg/rpcclient/friend.go @@ -80,7 +80,7 @@ func (f *FriendRpcClient) GetFriendIDs(ctx context.Context, ownerUserID string) return resp.FriendIDs, nil } -func (b *FriendRpcClient) IsBlocked(ctx context.Context, possibleBlackUserID, userID string) (bool, error) { +func (b *FriendRpcClient) IsBlack(ctx context.Context, possibleBlackUserID, userID string) (bool, error) { r, err := b.Client.IsBlack(ctx, &friend.IsBlackReq{UserID1: possibleBlackUserID, UserID2: userID}) if err != nil { return false, err diff --git a/pkg/rpcclient/group.go b/pkg/rpcclient/group.go index bf0efe60ca..98c8387e5c 100644 --- a/pkg/rpcclient/group.go +++ b/pkg/rpcclient/group.go @@ -18,8 +18,6 @@ import ( "context" "strings" - "google.golang.org/grpc" - "github.com/OpenIMSDK/protocol/constant" "github.com/OpenIMSDK/protocol/group" "github.com/OpenIMSDK/protocol/sdkws" @@ -31,9 +29,7 @@ import ( ) type Group struct { - conn grpc.ClientConnInterface Client group.GroupClient - discov discoveryregistry.SvcDiscoveryRegistry } func NewGroup(discov 
discoveryregistry.SvcDiscoveryRegistry) *Group { @@ -42,7 +38,7 @@ func NewGroup(discov discoveryregistry.SvcDiscoveryRegistry) *Group { panic(err) } client := group.NewGroupClient(conn) - return &Group{discov: discov, conn: conn, Client: client} + return &Group{Client: client} } type GroupRpcClient Group diff --git a/pkg/util/flag/flag.go b/pkg/util/flag/flag.go index 7bbacf4444..0a8e527ab7 100644 --- a/pkg/util/flag/flag.go +++ b/pkg/util/flag/flag.go @@ -1,7 +1,21 @@ +// Copyright © 2024 OpenIM. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package flag import ( - goFlag "flag" + "flag" "log" "strings" @@ -29,7 +43,7 @@ func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedNam // InitFlags normalizes, parses, then logs the command line flags. func InitFlags() { pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goFlag.CommandLine) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) } // PrintFlags logs the flags in the flagset. diff --git a/pkg/util/genutil/genutil.go b/pkg/util/genutil/genutil.go index 36da89bbbe..0948a7c49b 100644 --- a/pkg/util/genutil/genutil.go +++ b/pkg/util/genutil/genutil.go @@ -1,3 +1,17 @@ +// Copyright © 2024 OpenIM. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package genutil import ( diff --git a/pkg/util/genutil/genutil_test.go b/pkg/util/genutil/genutil_test.go index 0ee85df276..050d140407 100644 --- a/pkg/util/genutil/genutil_test.go +++ b/pkg/util/genutil/genutil_test.go @@ -1,3 +1,17 @@ +// Copyright © 2024 OpenIM. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package genutil import ( diff --git a/scripts/advertise.sh b/scripts/advertise.sh index 9c8c284ad6..3effc4f2bd 100755 --- a/scripts/advertise.sh +++ b/scripts/advertise.sh @@ -23,7 +23,7 @@ trap 'openim::util::onCtrlC' INT print_with_delay() { text="$1" delay="$2" - + for i in $(seq 0 $((${#text}-1))); do printf "${text:$i:1}" sleep $delay @@ -34,7 +34,7 @@ print_with_delay() { print_progress() { total="$1" delay="$2" - + printf "[" for i in $(seq 1 $total); do printf "#" @@ -44,14 +44,14 @@ print_progress() { } function openim_logo() { - # Set text color to cyan for header and URL - echo -e "\033[0;36m" + # Set text color to cyan for header and URL + echo -e "\033[0;36m" + + # Display fancy ASCII Art logo + # look http://patorjk.com/software/taag/#p=display&h=1&v=1&f=Doh&t=OpenIM + print_with_delay ' + - # Display fancy ASCII Art logo - # look http://patorjk.com/software/taag/#p=display&h=1&v=1&f=Doh&t=OpenIM - print_with_delay ' - - OOOOOOOOO IIIIIIIIIIMMMMMMMM MMMMMMMM OO:::::::::OO I::::::::IM:::::::M M:::::::M OO:::::::::::::OO I::::::::IM::::::::M M::::::::M @@ -68,45 +68,45 @@ O:::::::OOO:::::::O p:::::ppppp:::::::pe::::::::e n::::n n::::nII: OO:::::::::::::OO p::::::::::::::::p e::::::::eeeeeeee n::::n n::::nI::::::::IM::::::M M::::::M OO:::::::::OO p::::::::::::::pp ee:::::::::::::e n::::n n::::nI::::::::IM::::::M M::::::M OOOOOOOOO p::::::pppppppp eeeeeeeeeeeeee nnnnnn nnnnnnIIIIIIIIIIMMMMMMMM MMMMMMMM - p:::::p - p:::::p - p:::::::p - p:::::::p - p:::::::p - ppppppppp - - ' 0.0001 - - # Display product URL - print_with_delay "Discover more and contribute at: https://github.com/openimsdk/open-im-server" 0.01 - - # Reset text color back to normal - echo -e "\033[0m" - - # Set text color to green for product description - echo -e "\033[1;32m" - - print_with_delay "Open-IM-Server: Reinventing Instant Messaging" 0.01 - print_progress 50 0.02 - - print_with_delay "Open-IM-Server is not just a product; it's a revolution. It's about bringing the power of seamless," 0.01 - print_with_delay "real-time messaging to your fingertips. And it's about joining a global community of developers, dedicated to pushing the boundaries of what's possible." 0.01 - - print_progress 50 0.02 - - # Reset text color back to normal - echo -e "\033[0m" - - # Set text color to yellow for the Slack link - echo -e "\033[1;33m" - - print_with_delay "Join our developer community on Slack: https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q" 0.01 - - # Reset text color back to normal - echo -e "\033[0m" + p:::::p + p:::::p + p:::::::p + p:::::::p + p:::::::p + ppppppppp + + ' 0.0001 + + # Display product URL + print_with_delay "Discover more and contribute at: https://github.com/openimsdk/open-im-server" 0.01 + + # Reset text color back to normal + echo -e "\033[0m" + + # Set text color to green for product description + echo -e "\033[1;32m" + + print_with_delay "Open-IM-Server: Reinventing Instant Messaging" 0.01 + print_progress 50 0.02 + + print_with_delay "Open-IM-Server is not just a product; it's a revolution. It's about bringing the power of seamless," 0.01 + print_with_delay "real-time messaging to your fingertips. And it's about joining a global community of developers, dedicated to pushing the boundaries of what's possible." 
0.01 + + print_progress 50 0.02 + + # Reset text color back to normal + echo -e "\033[0m" + + # Set text color to yellow for the Slack link + echo -e "\033[1;33m" + + print_with_delay "Join our developer community on Slack: https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q" 0.01 + + # Reset text color back to normal + echo -e "\033[0m" } function main() { - openim_logo + openim_logo } main "$@" diff --git a/scripts/bash_beautify.py b/scripts/bash_beautify.py new file mode 100755 index 0000000000..54c6fa0add --- /dev/null +++ b/scripts/bash_beautify.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +#************************************************************************** +# Copyright (C) 2011, Paul Lutus * +# * +# This program is free software; you can redistribute it and/or modify * +# it under the terms of the GNU General Public License as published by * +# the Free Software Foundation; either version 2 of the License, or * +# (at your option) any later version. * +# * +# This program is distributed in the hope that it will be useful, * +# but WITHOUT ANY WARRANTY; without even the implied warranty of * +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * +# GNU General Public License for more details. * +# * +# You should have received a copy of the GNU General Public License * +# along with this program; if not, write to the * +# Free Software Foundation, Inc., * +# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * +#************************************************************************** + +import re +import sys + +PVERSION = '1.0' + + +class BeautifyBash: + + def __init__(self): + self.tab_str = ' ' + self.tab_size = 2 + + def read_file(self, fp): + with open(fp) as f: + return f.read() + + def write_file(self, fp, data): + with open(fp, 'w') as f: + f.write(data) + + def beautify_string(self, data, path=''): + tab = 0 + case_stack = [] + in_here_doc = False + defer_ext_quote = False + in_ext_quote = False + ext_quote_string = '' + here_string = '' + output = [] + line = 1 + for record in re.split('\n', data): + record = record.rstrip() + stripped_record = record.strip() + + # collapse multiple quotes between ' ... ' + test_record = re.sub(r'\'.*?\'', '', stripped_record) + # collapse multiple quotes between " ... " + test_record = re.sub(r'".*?"', '', test_record) + # collapse multiple quotes between ` ... ` + test_record = re.sub(r'`.*?`', '', test_record) + # collapse multiple quotes between \` ... 
' (weird case) + test_record = re.sub(r'\\`.*?\'', '', test_record) + # strip out any escaped single characters + test_record = re.sub(r'\\.', '', test_record) + # remove '#' comments + test_record = re.sub(r'(\A|\s)(#.*)', '', test_record, 1) + if(not in_here_doc): + if(re.search('<<-?', test_record)): + here_string = re.sub( + '.*<<-?\s*[\'|"]?([_|\w]+)[\'|"]?.*', '\\1', stripped_record, 1) + in_here_doc = (len(here_string) > 0) + if(in_here_doc): # pass on with no changes + output.append(record) + # now test for here-doc termination string + if(re.search(here_string, test_record) and not re.search('<<', test_record)): + in_here_doc = False + else: # not in here doc + if(in_ext_quote): + if(re.search(ext_quote_string, test_record)): + # provide line after quotes + test_record = re.sub( + '.*%s(.*)' % ext_quote_string, '\\1', test_record, 1) + in_ext_quote = False + else: # not in ext quote + if(re.search(r'(\A|\s)(\'|")', test_record)): + # apply only after this line has been processed + defer_ext_quote = True + ext_quote_string = re.sub( + '.*([\'"]).*', '\\1', test_record, 1) + # provide line before quote + test_record = re.sub( + '(.*)%s.*' % ext_quote_string, '\\1', test_record, 1) + if(in_ext_quote): + # pass on unchanged + output.append(record) + else: # not in ext quote + inc = len(re.findall( + '(\s|\A|;)(case|then|do)(;|\Z|\s)', test_record)) + inc += len(re.findall('(\{|\(|\[)', test_record)) + outc = len(re.findall( + '(\s|\A|;)(esac|fi|done|elif)(;|\)|\||\Z|\s)', test_record)) + outc += len(re.findall('(\}|\)|\])', test_record)) + if(re.search(r'\besac\b', test_record)): + if(len(case_stack) == 0): + sys.stderr.write( + 'File %s: error: "esac" before "case" in line %d.\n' % ( + path, line) + ) + else: + outc += case_stack.pop() + # sepcial handling for bad syntax within case ... 
esac + if(len(case_stack) > 0): + if(re.search('\A[^(]*\)', test_record)): + # avoid overcount + outc -= 2 + case_stack[-1] += 1 + if(re.search(';;', test_record)): + outc += 1 + case_stack[-1] -= 1 + # an ad-hoc solution for the "else" keyword + else_case = ( + 0, -1)[re.search('^(else)', test_record) != None] + net = inc - outc + tab += min(net, 0) + extab = tab + else_case + extab = max(0, extab) + output.append( + (self.tab_str * self.tab_size * extab) + stripped_record) + tab += max(net, 0) + if(defer_ext_quote): + in_ext_quote = True + defer_ext_quote = False + if(re.search(r'\bcase\b', test_record)): + case_stack.append(0) + line += 1 + error = (tab != 0) + if(error): + sys.stderr.write( + 'File %s: error: indent/outdent mismatch: %d.\n' % (path, tab)) + return '\n'.join(output), error + + def beautify_file(self, path): + error = False + if(path == '-'): + data = sys.stdin.read() + result, error = self.beautify_string(data, '(stdin)') + sys.stdout.write(result) + else: # named file + data = self.read_file(path) + result, error = self.beautify_string(data, path) + if(data != result): + # make a backup copy + self.write_file(path + '~', data) + self.write_file(path, result) + return error + + def main(self): + error = False + sys.argv.pop(0) + if(len(sys.argv) < 1): + sys.stderr.write( + 'usage: shell script filenames or \"-\" for stdin.\n') + else: + for path in sys.argv: + error |= self.beautify_file(path) + sys.exit((0, 1)[error]) + +# if not called as a module +if(__name__ == '__main__'): + BeautifyBash().main() + diff --git a/scripts/build-all-service.sh b/scripts/build-all-service.sh index 4b5c0613ad..b5578fca63 100755 --- a/scripts/build-all-service.sh +++ b/scripts/build-all-service.sh @@ -31,7 +31,7 @@ source "${OPENIM_ROOT}/scripts/lib/init.sh" # CPU core number pushd "${OPENIM_ROOT}/tools/ncpu" >/dev/null - cpu_count=$(go run .) +cpu_count=$(go run .) popd >/dev/null openim::color::echo ${GREEN_PREFIX} "======> cpu_count=$cpu_count" @@ -42,7 +42,7 @@ compile_count=$((cpu_count / 2)) # For help output ARGHELP="" if [[ "$#" -gt 0 ]]; then - ARGHELP="'$*'" + ARGHELP="'$*'" fi openim::color::echo $COLOR_CYAN "NOTE: $0 has been replaced by 'make multiarch' or 'make build'" @@ -61,15 +61,15 @@ echo " ./scripts/build-all-service.sh BINS=openim-api V=1 DEBUG=1" echo if [ -z "$*" ]; then - openim::log::info "no args, build all service" - make --no-print-directory -C "${OPENIM_ROOT}" -j$compile_count build + openim::log::info "no args, build all service" + make --no-print-directory -C "${OPENIM_ROOT}" -j$compile_count build else - openim::log::info "build service: $*" - make --no-print-directory -C "${OPENIM_ROOT}" -j$compile_count build "$*" + openim::log::info "build service: $*" + make --no-print-directory -C "${OPENIM_ROOT}" -j$compile_count build "$*" fi if [ $? -eq 0 ]; then - openim::log::success "all service build success, run 'make start' or './scripts/start-all.sh'" + openim::log::success "all service build success, run 'make start' or './scripts/start-all.sh'" else - openim::log::error "make build Error, script exits" + openim::log::error "make build Error, script exits" fi diff --git a/scripts/check-all.sh b/scripts/check-all.sh index 72a34a5e5a..1f6b740e61 100755 --- a/scripts/check-all.sh +++ b/scripts/check-all.sh @@ -14,10 +14,10 @@ # limitations under the License. # This script is check openim service is running normally -# +# # Usage: `scripts/check-all.sh`. # Encapsulated as: `make check`. 
-# READ: https://github.com/openimsdk/open-im-server/tree/main/scripts/install/environment.sh +# READ: https://github.com/openimsdk/open-im-server/tree/main/scripts/install/environment.sh set -o errexit set -o nounset @@ -34,19 +34,19 @@ openim::log::status "Check all dependent service ports" # Elegant printing function # Elegant printing function print_services_and_ports() { - local service_names=("$@") - local half_length=$((${#service_names[@]} / 2)) - local service_ports=("${service_names[@]:half_length}") - - echo "+-------------------------+----------+" - echo "| Service Name | Port |" - echo "+-------------------------+----------+" - - for ((index=0; index < half_length; index++)); do - printf "| %-23s | %-8s |\n" "${service_names[$index]}" "${service_ports[$index]}" - done - - echo "+-------------------------+----------+" + local service_names=("$@") + local half_length=$((${#service_names[@]} / 2)) + local service_ports=("${service_names[@]:half_length}") + + echo "+-------------------------+----------+" + echo "| Service Name | Port |" + echo "+-------------------------+----------+" + + for ((index=0; index < half_length; index++)); do + printf "| %-23s | %-8s |\n" "${service_names[$index]}" "${service_ports[$index]}" + done + + echo "+-------------------------+----------+" } # Assuming OPENIM_SERVER_NAME_TARGETS and OPENIM_SERVER_PORT_TARGETS are defined @@ -67,10 +67,10 @@ set +e # Later, after discarding Docker, the Docker keyword is unreliable, and Kubepods is used if grep -qE 'docker|kubepods' /proc/1/cgroup || [ -f /.dockerenv ]; then - openim::color::echo ${COLOR_CYAN} "Environment in the interior of the container" + openim::color::echo ${COLOR_CYAN} "Environment in the interior of the container" else - openim::color::echo ${COLOR_CYAN} "The environment is outside the container" - openim::util::check_ports ${OPENIM_DEPENDENCY_PORT_LISTARIES[@]} || return 0 + openim::color::echo ${COLOR_CYAN} "The environment is outside the container" + openim::util::check_ports ${OPENIM_DEPENDENCY_PORT_LISTARIES[@]} || return 0 fi if [[ $? -ne 0 ]]; then diff --git a/scripts/cherry-pick.sh b/scripts/cherry-pick.sh index 5f13ef0e4c..8a1f8dd794 100755 --- a/scripts/cherry-pick.sh +++ b/scripts/cherry-pick.sh @@ -118,7 +118,7 @@ function return_to_kansas { openim::log::status "Aborting in-progress git am." git am --abort >/dev/null 2>&1 || true fi - + # return to the starting branch and delete the PR text file if [[ -z "${DRY_RUN}" ]]; then echo @@ -137,7 +137,7 @@ function make-a-pr() { rel="$(basename "${BRANCH}")" echo openim::log::status "Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}" - + local numandtitle numandtitle=$(printf '%s\n' "${SUBJECTS[@]}") prtext=$(cat <&2 - exit 1 - fi - done - - if [[ "${conflicts}" != "true" ]]; then - echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'" +curl -o "/tmp/${pull}.patch" -sSL "https://github.com/${MAIN_REPO_ORG}/${MAIN_REPO_NAME}/pull/${pull}.patch" +echo +openim::log::status "About to attempt cherry pick of PR. To reattempt:" +echo " $ git am -3 /tmp/${pull}.patch" +echo +git am -3 "/tmp/${pull}.patch" || { + conflicts=false + while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \ + || [[ -e "${REBASEMAGIC}" ]]; do + conflicts=true # <-- We should have detected conflicts once + echo + openim::log::status "Conflicts detected:" + echo + (git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?" 
+ echo + openim::log::status "Please resolve the conflicts in another window (and remember to 'git add / git am --continue')" + read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r + echo + if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then + echo "Aborting." >&2 exit 1 fi - } + done + + if [[ "${conflicts}" != "true" ]]; then + echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'" + exit 1 + fi +} - # set the subject - subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //') - SUBJECTS+=("#${pull}: ${subject}") +# set the subject +subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //') +SUBJECTS+=("#${pull}: ${subject}") - # remove the patch file from /tmp - rm -f "/tmp/${pull}.patch" +# remove the patch file from /tmp +rm -f "/tmp/${pull}.patch" done gitamcleanup=false # Re-generate docs (if needed) if [[ -n "${REGENERATE_DOCS}" ]]; then +echo +echo "Regenerating docs..." +if ! scripts/generate-docs.sh; then echo - echo "Regenerating docs..." - if ! scripts/generate-docs.sh; then - echo - echo "scripts/gendoc.sh FAILED to complete." - exit 1 - fi + echo "scripts/gendoc.sh FAILED to complete." + exit 1 +fi fi if [[ -n "${DRY_RUN}" ]]; then - openim::log::error "!!! Skipping git push and PR creation because you set DRY_RUN." - echo "To return to the branch you were in when you invoked this script:" - echo - echo " git checkout ${STARTINGBRANCH}" - echo - echo "To delete this branch:" - echo - echo " git branch -D ${NEWBRANCHUNIQ}" - exit 0 +openim::log::error "!!! Skipping git push and PR creation because you set DRY_RUN." +echo "To return to the branch you were in when you invoked this script:" +echo +echo " git checkout ${STARTINGBRANCH}" +echo +echo "To delete this branch:" +echo +echo " git branch -D ${NEWBRANCHUNIQ}" +exit 0 fi if git remote -v | grep ^"${FORK_REMOTE}" | grep "${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"; then - echo "!!! You have ${FORK_REMOTE} configured as your ${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git" - echo "This isn't normal. Leaving you with push instructions:" - echo - openim::log::status "First manually push the branch this script created:" - echo - echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}" - echo - echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)." - echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values." - echo - make-a-pr - cleanbranch="" - exit 0 +echo "!!! You have ${FORK_REMOTE} configured as your ${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git" +echo "This isn't normal. Leaving you with push instructions:" +echo +openim::log::status "First manually push the branch this script created:" +echo +echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}" +echo +echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)." +echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values." +echo +make-a-pr +cleanbranch="" +exit 0 fi echo @@ -248,8 +248,8 @@ echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}" echo read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then - echo "Aborting." >&2 - exit 1 +echo "Aborting." 
>&2 +exit 1 fi git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}" diff --git a/scripts/common.sh b/scripts/common.sh index 882641ae97..d67389d56a 100755 --- a/scripts/common.sh +++ b/scripts/common.sh @@ -87,28 +87,28 @@ readonly OPENIM_CONTAINER_RSYNC_PORT=8730 # # $1 - server architecture openim::build::get_docker_wrapped_binaries() { - local arch=$1 - local debian_base_version=v2.1.0 - local debian_iptables_version=v12.1.0 - ### If you change any of these lists, please also update DOCKERIZED_BINARIES - ### in build/BUILD. And openim::golang::server_image_targets - - local targets=( - "openim-api,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-cmdutils,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-crontask,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-msggateway,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-msgtransfer,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-push,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-rpc-auth,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-rpc-conversation,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-rpc-friend,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-rpc-group,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-rpc-msg,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-rpc-third,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - "openim-rpc-user,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" - ) - echo "${targets[@]}" +local arch=$1 +local debian_base_version=v2.1.0 +local debian_iptables_version=v12.1.0 +### If you change any of these lists, please also update DOCKERIZED_BINARIES +### in build/BUILD. 
And openim::golang::server_image_targets + +local targets=( + "openim-api,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-cmdutils,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-crontask,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-msggateway,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-msgtransfer,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-push,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-rpc-auth,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-rpc-conversation,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-rpc-friend,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-rpc-group,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-rpc-msg,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-rpc-third,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" + "openim-rpc-user,${OPENIM_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}" +) +echo "${targets[@]}" } # --------------------------------------------------------------------------- @@ -133,170 +133,170 @@ openim::build::get_docker_wrapped_binaries() { # DOCKER_MOUNT_ARGS # LOCAL_OUTPUT_BUILD_CONTEXT function openim::build::verify_prereqs() { - local -r require_docker=${1:-true} - openim::log::status "Verifying Prerequisites...." - openim::build::ensure_tar || return 1 - openim::build::ensure_rsync || return 1 - if ${require_docker}; then - openim::build::ensure_docker_in_path || return 1 - openim::util::ensure_docker_daemon_connectivity || return 1 - - if (( OPENIM_VERBOSE > 6 )); then - openim::log::status "Docker Version:" - "${DOCKER[@]}" version | openim::log::info_from_stdin - fi +local -r require_docker=${1:-true} +openim::log::status "Verifying Prerequisites...." 
+openim::build::ensure_tar || return 1 +openim::build::ensure_rsync || return 1 +if ${require_docker}; then + openim::build::ensure_docker_in_path || return 1 + openim::util::ensure_docker_daemon_connectivity || return 1 + + if (( OPENIM_VERBOSE > 6 )); then + openim::log::status "Docker Version:" + "${DOCKER[@]}" version | openim::log::info_from_stdin fi - - OPENIM_GIT_BRANCH=$(git symbolic-ref --short -q HEAD 2>/dev/null || true) - OPENIM_ROOT_HASH=$(openim::build::short_hash "${HOSTNAME:-}:"${OPENIM_ROOT}":${OPENIM_GIT_BRANCH}") - OPENIM_BUILD_IMAGE_TAG_BASE="build-${OPENIM_ROOT_HASH}" - #OPENIM_BUILD_IMAGE_TAG="${OPENIM_BUILD_IMAGE_TAG_BASE}-${OPENIM_BUILD_IMAGE_VERSION}" - #OPENIM_BUILD_IMAGE="${OPENIM_BUILD_IMAGE_REPO}:${OPENIM_BUILD_IMAGE_TAG}" - OPENIM_BUILD_CONTAINER_NAME_BASE="openim-build-${OPENIM_ROOT_HASH}" - #OPENIM_BUILD_CONTAINER_NAME="${OPENIM_BUILD_CONTAINER_NAME_BASE}-${OPENIM_BUILD_IMAGE_VERSION}" - OPENIM_RSYNC_CONTAINER_NAME_BASE="openim-rsync-${OPENIM_ROOT_HASH}" - #OPENIM_RSYNC_CONTAINER_NAME="${OPENIM_RSYNC_CONTAINER_NAME_BASE}-${OPENIM_BUILD_IMAGE_VERSION}" - OPENIM_DATA_CONTAINER_NAME_BASE="openim-build-data-${OPENIM_ROOT_HASH}" - #OPENIM_DATA_CONTAINER_NAME="${OPENIM_DATA_CONTAINER_NAME_BASE}-${OPENIM_BUILD_IMAGE_VERSION}" - #DOCKER_MOUNT_ARGS=(--volumes-from "${OPENIM_DATA_CONTAINER_NAME}") - #LOCAL_OUTPUT_BUILD_CONTEXT="${LOCAL_OUTPUT_IMAGE_STAGING}/${OPENIM_BUILD_IMAGE}" - - openim::version::get_version_vars - #openim::version::save_version_vars "${OPENIM_ROOT}/.dockerized-openim-version-defs" +fi + +OPENIM_GIT_BRANCH=$(git symbolic-ref --short -q HEAD 2>/dev/null || true) +OPENIM_ROOT_HASH=$(openim::build::short_hash "${HOSTNAME:-}:${OPENIM_ROOT}:${OPENIM_GIT_BRANCH}") +OPENIM_BUILD_IMAGE_TAG_BASE="build-${OPENIM_ROOT_HASH}" +#OPENIM_BUILD_IMAGE_TAG="${OPENIM_BUILD_IMAGE_TAG_BASE}-${OPENIM_BUILD_IMAGE_VERSION}" +#OPENIM_BUILD_IMAGE="${OPENIM_BUILD_IMAGE_REPO}:${OPENIM_BUILD_IMAGE_TAG}" +OPENIM_BUILD_CONTAINER_NAME_BASE="openim-build-${OPENIM_ROOT_HASH}" +#OPENIM_BUILD_CONTAINER_NAME="${OPENIM_BUILD_CONTAINER_NAME_BASE}-${OPENIM_BUILD_IMAGE_VERSION}" +OPENIM_RSYNC_CONTAINER_NAME_BASE="openim-rsync-${OPENIM_ROOT_HASH}" +#OPENIM_RSYNC_CONTAINER_NAME="${OPENIM_RSYNC_CONTAINER_NAME_BASE}-${OPENIM_BUILD_IMAGE_VERSION}" +OPENIM_DATA_CONTAINER_NAME_BASE="openim-build-data-${OPENIM_ROOT_HASH}" +#OPENIM_DATA_CONTAINER_NAME="${OPENIM_DATA_CONTAINER_NAME_BASE}-${OPENIM_BUILD_IMAGE_VERSION}" +#DOCKER_MOUNT_ARGS=(--volumes-from "${OPENIM_DATA_CONTAINER_NAME}") +#LOCAL_OUTPUT_BUILD_CONTEXT="${LOCAL_OUTPUT_IMAGE_STAGING}/${OPENIM_BUILD_IMAGE}" + +openim::version::get_version_vars +#openim::version::save_version_vars "${OPENIM_ROOT}/.dockerized-openim-version-defs" } # --------------------------------------------------------------------------- # Utility functions function openim::build::docker_available_on_osx() { - if [[ -z "${DOCKER_HOST}" ]]; then - if [[ -S "/var/run/docker.sock" ]]; then - openim::log::status "Using Docker for MacOS" - return 0 - fi - - openim::log::status "No docker host is set. Checking options for setting one..." - if [[ -z "$(which docker-machine)" ]]; then - openim::log::status "It looks like you're running Mac OS X, yet neither Docker for Mac nor docker-machine can be found." - openim::log::status "See: https://docs.docker.com/engine/installation/mac/ for installation instructions." 
- return 1 +if [[ -z "${DOCKER_HOST}" ]]; then + if [[ -S "/var/run/docker.sock" ]]; then + openim::log::status "Using Docker for MacOS" + return 0 + fi + + openim::log::status "No docker host is set. Checking options for setting one..." + if [[ -z "$(which docker-machine)" ]]; then + openim::log::status "It looks like you're running Mac OS X, yet neither Docker for Mac nor docker-machine can be found." + openim::log::status "See: https://docs.docker.com/engine/installation/mac/ for installation instructions." + return 1 elif [[ -n "$(which docker-machine)" ]]; then - openim::build::prepare_docker_machine - fi + openim::build::prepare_docker_machine fi +fi } function openim::build::prepare_docker_machine() { - openim::log::status "docker-machine was found." - - local available_memory_bytes - available_memory_bytes=$(sysctl -n hw.memsize 2>/dev/null) - - local bytes_in_mb=1048576 - - # Give virtualbox 1/2 the system memory. Its necessary to divide by 2, instead - # of multiple by .5, because bash can only multiply by ints. - local memory_divisor=2 - - local virtualbox_memory_mb=$(( available_memory_bytes / (bytes_in_mb * memory_divisor) )) - - docker-machine inspect "${DOCKER_MACHINE_NAME}" &> /dev/null || { - openim::log::status "Creating a machine to build OPENIM" - docker-machine create --driver "${DOCKER_MACHINE_DRIVER}" \ - --virtualbox-memory "${virtualbox_memory_mb}" \ - --engine-env HTTP_PROXY="${OPENIMRNETES_HTTP_PROXY:-}" \ - --engine-env HTTPS_PROXY="${OPENIMRNETES_HTTPS_PROXY:-}" \ - --engine-env NO_PROXY="${OPENIMRNETES_NO_PROXY:-127.0.0.1}" \ - "${DOCKER_MACHINE_NAME}" > /dev/null || { - openim::log::error "Something went wrong creating a machine." - openim::log::error "Try the following: " - openim::log::error "docker-machine create -d ${DOCKER_MACHINE_DRIVER} --virtualbox-memory ${virtualbox_memory_mb} ${DOCKER_MACHINE_NAME}" - return 1 - } +openim::log::status "docker-machine was found." + +local available_memory_bytes +available_memory_bytes=$(sysctl -n hw.memsize 2>/dev/null) + +local bytes_in_mb=1048576 + +# Give virtualbox 1/2 the system memory. Its necessary to divide by 2, instead +# of multiple by .5, because bash can only multiply by ints. +local memory_divisor=2 + +local virtualbox_memory_mb=$(( available_memory_bytes / (bytes_in_mb * memory_divisor) )) + +docker-machine inspect "${DOCKER_MACHINE_NAME}" &> /dev/null || { + openim::log::status "Creating a machine to build OPENIM" + docker-machine create --driver "${DOCKER_MACHINE_DRIVER}" \ + --virtualbox-memory "${virtualbox_memory_mb}" \ + --engine-env HTTP_PROXY="${OPENIMRNETES_HTTP_PROXY:-}" \ + --engine-env HTTPS_PROXY="${OPENIMRNETES_HTTPS_PROXY:-}" \ + --engine-env NO_PROXY="${OPENIMRNETES_NO_PROXY:-127.0.0.1}" \ + "${DOCKER_MACHINE_NAME}" > /dev/null || { + openim::log::error "Something went wrong creating a machine." + openim::log::error "Try the following: " + openim::log::error "docker-machine create -d ${DOCKER_MACHINE_DRIVER} --virtualbox-memory ${virtualbox_memory_mb} ${DOCKER_MACHINE_NAME}" + return 1 } - docker-machine start "${DOCKER_MACHINE_NAME}" &> /dev/null - # it takes `docker-machine env` a few seconds to work if the machine was just started - local docker_machine_out - while ! 
docker_machine_out=$(docker-machine env "${DOCKER_MACHINE_NAME}" 2>&1); do - if [[ ${docker_machine_out} =~ "Error checking TLS connection" ]]; then - echo "${docker_machine_out}" - docker-machine regenerate-certs "${DOCKER_MACHINE_NAME}" - else - sleep 1 - fi - done - eval "$(docker-machine env "${DOCKER_MACHINE_NAME}")" - openim::log::status "A Docker host using docker-machine named '${DOCKER_MACHINE_NAME}' is ready to go!" - return 0 +} +docker-machine start "${DOCKER_MACHINE_NAME}" &> /dev/null +# it takes `docker-machine env` a few seconds to work if the machine was just started +local docker_machine_out +while ! docker_machine_out=$(docker-machine env "${DOCKER_MACHINE_NAME}" 2>&1); do + if [[ ${docker_machine_out} =~ "Error checking TLS connection" ]]; then + echo "${docker_machine_out}" + docker-machine regenerate-certs "${DOCKER_MACHINE_NAME}" + else + sleep 1 + fi +done +eval "$(docker-machine env "${DOCKER_MACHINE_NAME}")" +openim::log::status "A Docker host using docker-machine named '${DOCKER_MACHINE_NAME}' is ready to go!" +return 0 } function openim::build::is_gnu_sed() { - [[ $(sed --version 2>&1) == *GNU* ]] +[[ $(sed --version 2>&1) == *GNU* ]] } function openim::build::ensure_rsync() { - if [[ -z "$(which rsync)" ]]; then - openim::log::error "Can't find 'rsync' in PATH, please fix and retry." - return 1 - fi +if [[ -z "$(which rsync)" ]]; then + openim::log::error "Can't find 'rsync' in PATH, please fix and retry." + return 1 +fi } function openim::build::update_dockerfile() { - if openim::build::is_gnu_sed; then - sed_opts=(-i) - else - sed_opts=(-i '') - fi - sed "${sed_opts[@]}" "s/OPENIM_BUILD_IMAGE_CROSS_TAG/${OPENIM_BUILD_IMAGE_CROSS_TAG}/" "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" +if openim::build::is_gnu_sed; then + sed_opts=(-i) +else + sed_opts=(-i '') +fi +sed "${sed_opts[@]}" "s/OPENIM_BUILD_IMAGE_CROSS_TAG/${OPENIM_BUILD_IMAGE_CROSS_TAG}/" "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" } function openim::build::set_proxy() { - if [[ -n "${OPENIMRNETES_HTTPS_PROXY:-}" ]]; then - echo "ENV https_proxy $OPENIMRNETES_HTTPS_PROXY" >> "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" - fi - if [[ -n "${OPENIMRNETES_HTTP_PROXY:-}" ]]; then - echo "ENV http_proxy $OPENIMRNETES_HTTP_PROXY" >> "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" - fi - if [[ -n "${OPENIMRNETES_NO_PROXY:-}" ]]; then - echo "ENV no_proxy $OPENIMRNETES_NO_PROXY" >> "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" - fi +if [[ -n "${OPENIMRNETES_HTTPS_PROXY:-}" ]]; then + echo "ENV https_proxy $OPENIMRNETES_HTTPS_PROXY" >> "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" +fi +if [[ -n "${OPENIMRNETES_HTTP_PROXY:-}" ]]; then + echo "ENV http_proxy $OPENIMRNETES_HTTP_PROXY" >> "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" +fi +if [[ -n "${OPENIMRNETES_NO_PROXY:-}" ]]; then + echo "ENV no_proxy $OPENIMRNETES_NO_PROXY" >> "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" +fi } function openim::build::ensure_docker_in_path() { - if [[ -z "$(which docker)" ]]; then - openim::log::error "Can't find 'docker' in PATH, please fix and retry." - openim::log::error "See https://docs.docker.com/installation/#installation for installation instructions." - return 1 - fi +if [[ -z "$(which docker)" ]]; then + openim::log::error "Can't find 'docker' in PATH, please fix and retry." + openim::log::error "See https://docs.docker.com/installation/#installation for installation instructions." + return 1 +fi } function openim::build::ensure_tar() { - if [[ -n "${TAR:-}" ]]; then - return - fi - - # Find gnu tar if it is available, bomb out if not. 
- TAR=tar - if which gtar &>/dev/null; then - TAR=gtar - else - if which gnutar &>/dev/null; then - TAR=gnutar - fi - fi - if ! "${TAR}" --version | grep -q GNU; then - echo " !!! Cannot find GNU tar. Build on Linux or install GNU tar" - echo " on Mac OS X (brew install gnu-tar)." - return 1 +if [[ -n "${TAR:-}" ]]; then + return +fi + +# Find gnu tar if it is available, bomb out if not. +TAR=tar +if which gtar &>/dev/null; then + TAR=gtar +else + if which gnutar &>/dev/null; then + TAR=gnutar fi +fi +if ! "${TAR}" --version | grep -q GNU; then + echo " !!! Cannot find GNU tar. Build on Linux or install GNU tar" + echo " on Mac OS X (brew install gnu-tar)." + return 1 +fi } function openim::build::has_docker() { - which docker &> /dev/null +which docker &> /dev/null } function openim::build::has_ip() { - which ip &> /dev/null && ip -Version | grep 'iproute2' &> /dev/null +which ip &> /dev/null && ip -Version | grep 'iproute2' &> /dev/null } # Detect if a specific image exists @@ -304,12 +304,12 @@ function openim::build::has_ip() { # $1 - image repo name # $2 - image tag function openim::build::docker_image_exists() { - [[ -n $1 && -n $2 ]] || { - openim::log::error "Internal error. Image not specified in docker_image_exists." - exit 2 - } +[[ -n $1 && -n $2 ]] || { + openim::log::error "Internal error. Image not specified in docker_image_exists." + exit 2 +} - [[ $("${DOCKER[@]}" images -q "${1}:${2}") ]] +[[ $("${DOCKER[@]}" images -q "${1}:${2}") ]] } # Delete all images that match a tag prefix except for the "current" version @@ -318,21 +318,21 @@ function openim::build::docker_image_exists() { # $2: The tag base. We consider any image that matches $2* # $3: The current image not to delete if provided function openim::build::docker_delete_old_images() { - # In Docker 1.12, we can replace this with - # docker images "$1" --format "{{.Tag}}" - for tag in $("${DOCKER[@]}" images "${1}" | tail -n +2 | awk '{print $2}') ; do - if [[ "${tag}" != "${2}"* ]] ; then - V=3 openim::log::status "Keeping image ${1}:${tag}" - continue - fi - - if [[ -z "${3:-}" || "${tag}" != "${3}" ]] ; then - V=2 openim::log::status "Deleting image ${1}:${tag}" - "${DOCKER[@]}" rmi "${1}:${tag}" >/dev/null - else - V=3 openim::log::status "Keeping image ${1}:${tag}" - fi - done +# In Docker 1.12, we can replace this with +# docker images "$1" --format "{{.Tag}}" +for tag in $("${DOCKER[@]}" images "${1}" | tail -n +2 | awk '{print $2}') ; do + if [[ "${tag}" != "${2}"* ]] ; then + V=3 openim::log::status "Keeping image ${1}:${tag}" + continue + fi + + if [[ -z "${3:-}" || "${tag}" != "${3}" ]] ; then + V=2 openim::log::status "Deleting image ${1}:${tag}" + "${DOCKER[@]}" rmi "${1}:${tag}" >/dev/null + else + V=3 openim::log::status "Keeping image ${1}:${tag}" + fi +done } # Stop and delete all containers that match a pattern @@ -340,36 +340,36 @@ function openim::build::docker_delete_old_images() { # $1: The base container prefix # $2: The current container to keep, if provided function openim::build::docker_delete_old_containers() { - # In Docker 1.12 we can replace this line with - # docker ps -a --format="{{.Names}}" - for container in $("${DOCKER[@]}" ps -a | tail -n +2 | awk '{print $NF}') ; do - if [[ "${container}" != "${1}"* ]] ; then - V=3 openim::log::status "Keeping container ${container}" - continue - fi - if [[ -z "${2:-}" || "${container}" != "${2}" ]] ; then - V=2 openim::log::status "Deleting container ${container}" - openim::build::destroy_container "${container}" - else - V=3 openim::log::status 
"Keeping container ${container}" - fi - done +# In Docker 1.12 we can replace this line with +# docker ps -a --format="{{.Names}}" +for container in $("${DOCKER[@]}" ps -a | tail -n +2 | awk '{print $NF}') ; do + if [[ "${container}" != "${1}"* ]] ; then + V=3 openim::log::status "Keeping container ${container}" + continue + fi + if [[ -z "${2:-}" || "${container}" != "${2}" ]] ; then + V=2 openim::log::status "Deleting container ${container}" + openim::build::destroy_container "${container}" + else + V=3 openim::log::status "Keeping container ${container}" + fi +done } # Takes $1 and computes a short has for it. Useful for unique tag generation function openim::build::short_hash() { - [[ $# -eq 1 ]] || { - openim::log::error "Internal error. No data based to short_hash." - exit 2 - } +[[ $# -eq 1 ]] || { + openim::log::error "Internal error. No data based to short_hash." + exit 2 +} - local short_hash - if which md5 >/dev/null 2>&1; then - short_hash=$(md5 -q -s "$1") - else - short_hash=$(echo -n "$1" | md5sum) - fi - echo "${short_hash:0:10}" +local short_hash +if which md5 >/dev/null 2>&1; then + short_hash=$(md5 -q -s "$1") +else + short_hash=$(echo -n "$1" | md5sum) +fi +echo "${short_hash:0:10}" } # Pedantically kill, wait-on and remove a container. The -f -v options @@ -377,15 +377,15 @@ function openim::build::short_hash() { # container, wait to ensure it's stopped, then try the remove. This is # a workaround for bug https://github.com/docker/docker/issues/3968. function openim::build::destroy_container() { - "${DOCKER[@]}" kill "$1" >/dev/null 2>&1 || true - if [[ $("${DOCKER[@]}" version --format '{{.Server.Version}}') = 17.06.0* ]]; then - # Workaround https://github.com/moby/moby/issues/33948. - # TODO: remove when 17.06.0 is not relevant anymore - DOCKER_API_VERSION=v1.29 "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true - else - "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true - fi - "${DOCKER[@]}" rm -f -v "$1" >/dev/null 2>&1 || true +"${DOCKER[@]}" kill "$1" >/dev/null 2>&1 || true +if [[ $("${DOCKER[@]}" version --format '{{.Server.Version}}') = 17.06.0* ]]; then + # Workaround https://github.com/moby/moby/issues/33948. 
+ # TODO: remove when 17.06.0 is not relevant anymore + DOCKER_API_VERSION=v1.29 "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true +else + "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true +fi +"${DOCKER[@]}" rm -f -v "$1" >/dev/null 2>&1 || true } # --------------------------------------------------------------------------- @@ -393,47 +393,47 @@ function openim::build::destroy_container() { function openim::build::clean() { - if openim::build::has_docker ; then - openim::build::docker_delete_old_containers "${OPENIM_BUILD_CONTAINER_NAME_BASE}" - openim::build::docker_delete_old_containers "${OPENIM_RSYNC_CONTAINER_NAME_BASE}" - openim::build::docker_delete_old_containers "${OPENIM_DATA_CONTAINER_NAME_BASE}" - openim::build::docker_delete_old_images "${OPENIM_BUILD_IMAGE_REPO}" "${OPENIM_BUILD_IMAGE_TAG_BASE}" - - V=2 openim::log::status "Cleaning all untagged docker images" - "${DOCKER[@]}" rmi "$("${DOCKER[@]}" images -q --filter 'dangling=true')" 2> /dev/null || true - fi - - if [[ -d "${LOCAL_OUTPUT_ROOT}" ]]; then - openim::log::status "Removing _output directory" - rm -rf "${LOCAL_OUTPUT_ROOT}" - fi +if openim::build::has_docker ; then + openim::build::docker_delete_old_containers "${OPENIM_BUILD_CONTAINER_NAME_BASE}" + openim::build::docker_delete_old_containers "${OPENIM_RSYNC_CONTAINER_NAME_BASE}" + openim::build::docker_delete_old_containers "${OPENIM_DATA_CONTAINER_NAME_BASE}" + openim::build::docker_delete_old_images "${OPENIM_BUILD_IMAGE_REPO}" "${OPENIM_BUILD_IMAGE_TAG_BASE}" + + V=2 openim::log::status "Cleaning all untagged docker images" + "${DOCKER[@]}" rmi "$("${DOCKER[@]}" images -q --filter 'dangling=true')" 2> /dev/null || true +fi + +if [[ -d "${LOCAL_OUTPUT_ROOT}" ]]; then + openim::log::status "Removing _output directory" + rm -rf "${LOCAL_OUTPUT_ROOT}" +fi } # Set up the context directory for the openim-build image and build it. function openim::build::build_image() { - mkdir -p "${LOCAL_OUTPUT_BUILD_CONTEXT}" - # Make sure the context directory owned by the right user for syncing sources to container. - chown -R "${USER_ID}":"${GROUP_ID}" "${LOCAL_OUTPUT_BUILD_CONTEXT}" +mkdir -p "${LOCAL_OUTPUT_BUILD_CONTEXT}" +# Make sure the context directory owned by the right user for syncing sources to container. 
+chown -R "${USER_ID}":"${GROUP_ID}" "${LOCAL_OUTPUT_BUILD_CONTEXT}" - cp /etc/localtime "${LOCAL_OUTPUT_BUILD_CONTEXT}/" +cp /etc/localtime "${LOCAL_OUTPUT_BUILD_CONTEXT}/" - cp "${OPENIM_ROOT}/build/build-image/Dockerfile" "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" - cp "${OPENIM_ROOT}/build/build-image/rsyncd.sh" "${LOCAL_OUTPUT_BUILD_CONTEXT}/" - dd if=/dev/urandom bs=512 count=1 2>/dev/null | LC_ALL=C tr -dc 'A-Za-z0-9' | dd bs=32 count=1 2>/dev/null > "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password" - chmod go= "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password" +cp "${OPENIM_ROOT}/build/build-image/Dockerfile" "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile" +cp "${OPENIM_ROOT}/build/build-image/rsyncd.sh" "${LOCAL_OUTPUT_BUILD_CONTEXT}/" +dd if=/dev/urandom bs=512 count=1 2>/dev/null | LC_ALL=C tr -dc 'A-Za-z0-9' | dd bs=32 count=1 2>/dev/null > "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password" +chmod go= "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password" - openim::build::update_dockerfile - openim::build::set_proxy - openim::build::docker_build "${OPENIM_BUILD_IMAGE}" "${LOCAL_OUTPUT_BUILD_CONTEXT}" 'false' +openim::build::update_dockerfile +openim::build::set_proxy +openim::build::docker_build "${OPENIM_BUILD_IMAGE}" "${LOCAL_OUTPUT_BUILD_CONTEXT}" 'false' - # Clean up old versions of everything - openim::build::docker_delete_old_containers "${OPENIM_BUILD_CONTAINER_NAME_BASE}" "${OPENIM_BUILD_CONTAINER_NAME}" - openim::build::docker_delete_old_containers "${OPENIM_RSYNC_CONTAINER_NAME_BASE}" "${OPENIM_RSYNC_CONTAINER_NAME}" - openim::build::docker_delete_old_containers "${OPENIM_DATA_CONTAINER_NAME_BASE}" "${OPENIM_DATA_CONTAINER_NAME}" - openim::build::docker_delete_old_images "${OPENIM_BUILD_IMAGE_REPO}" "${OPENIM_BUILD_IMAGE_TAG_BASE}" "${OPENIM_BUILD_IMAGE_TAG}" +# Clean up old versions of everything +openim::build::docker_delete_old_containers "${OPENIM_BUILD_CONTAINER_NAME_BASE}" "${OPENIM_BUILD_CONTAINER_NAME}" +openim::build::docker_delete_old_containers "${OPENIM_RSYNC_CONTAINER_NAME_BASE}" "${OPENIM_RSYNC_CONTAINER_NAME}" +openim::build::docker_delete_old_containers "${OPENIM_DATA_CONTAINER_NAME_BASE}" "${OPENIM_DATA_CONTAINER_NAME}" +openim::build::docker_delete_old_images "${OPENIM_BUILD_IMAGE_REPO}" "${OPENIM_BUILD_IMAGE_TAG_BASE}" "${OPENIM_BUILD_IMAGE_TAG}" - openim::build::ensure_data_container - openim::build::sync_to_container +openim::build::ensure_data_container +openim::build::sync_to_container } # Build a docker image from a Dockerfile. @@ -441,14 +441,14 @@ function openim::build::build_image() { # $2 is the location of the "context" directory, with the Dockerfile at the root. 
# $3 is the value to set the --pull flag for docker build; true by default function openim::build::docker_build() { - local -r image=$1 - local -r context_dir=$2 - local -r pull="${3:-true}" - local -ra build_cmd=("${DOCKER[@]}" build -t "${image}" "--pull=${pull}" "${context_dir}") - - openim::log::status "Building Docker image ${image}" - local docker_output - docker_output=$("${build_cmd[@]}" 2>&1) || { +local -r image=$1 +local -r context_dir=$2 +local -r pull="${3:-true}" +local -ra build_cmd=("${DOCKER[@]}" build -t "${image}" "--pull=${pull}" "${context_dir}") + +openim::log::status "Building Docker image ${image}" +local docker_output +docker_output=$("${build_cmd[@]}" 2>&1) || { cat <&2 +++ Docker build command failed for ${image} @@ -459,61 +459,61 @@ To retry manually, run: ${build_cmd[*]} EOF - return 1 - } + return 1 +} } function openim::build::ensure_data_container() { - # If the data container exists AND exited successfully, we can use it. - # Otherwise nuke it and start over. - local ret=0 - local code=0 - - code=$(docker inspect \ - -f '{{.State.ExitCode}}' \ - "${OPENIM_DATA_CONTAINER_NAME}" 2>/dev/null) || ret=$? - if [[ "${ret}" == 0 && "${code}" != 0 ]]; then - openim::build::destroy_container "${OPENIM_DATA_CONTAINER_NAME}" - ret=1 - fi - if [[ "${ret}" != 0 ]]; then - openim::log::status "Creating data container ${OPENIM_DATA_CONTAINER_NAME}" - # We have to ensure the directory exists, or else the docker run will - # create it as root. - mkdir -p "${LOCAL_OUTPUT_GOPATH}" - # We want this to run as root to be able to chown, so non-root users can - # later use the result as a data container. This run both creates the data - # container and chowns the GOPATH. - # - # The data container creates volumes for all of the directories that store - # intermediates for the Go build. This enables incremental builds across - # Docker sessions. The *_cgo paths are re-compiled versions of the go std - # libraries for true static building. - local -ra docker_cmd=( - "${DOCKER[@]}" run - --volume "${REMOTE_ROOT}" # white-out the whole output dir - --volume /usr/local/go/pkg/linux_386_cgo - --volume /usr/local/go/pkg/linux_amd64_cgo - --volume /usr/local/go/pkg/linux_arm_cgo - --volume /usr/local/go/pkg/linux_arm64_cgo - --volume /usr/local/go/pkg/linux_ppc64le_cgo - --volume /usr/local/go/pkg/darwin_amd64_cgo - --volume /usr/local/go/pkg/darwin_386_cgo - --volume /usr/local/go/pkg/windows_amd64_cgo - --volume /usr/local/go/pkg/windows_386_cgo - --name "${OPENIM_DATA_CONTAINER_NAME}" - --hostname "${HOSTNAME}" - "${OPENIM_BUILD_IMAGE}" - chown -R "${USER_ID}":"${GROUP_ID}" - "${REMOTE_ROOT}" - /usr/local/go/pkg/ - ) - "${docker_cmd[@]}" - fi +# If the data container exists AND exited successfully, we can use it. +# Otherwise nuke it and start over. +local ret=0 +local code=0 + +code=$(docker inspect \ + -f '{{.State.ExitCode}}' \ +"${OPENIM_DATA_CONTAINER_NAME}" 2>/dev/null) || ret=$? +if [[ "${ret}" == 0 && "${code}" != 0 ]]; then + openim::build::destroy_container "${OPENIM_DATA_CONTAINER_NAME}" + ret=1 +fi +if [[ "${ret}" != 0 ]]; then + openim::log::status "Creating data container ${OPENIM_DATA_CONTAINER_NAME}" + # We have to ensure the directory exists, or else the docker run will + # create it as root. + mkdir -p "${LOCAL_OUTPUT_GOPATH}" + # We want this to run as root to be able to chown, so non-root users can + # later use the result as a data container. This run both creates the data + # container and chowns the GOPATH. 
+ # + # The data container creates volumes for all of the directories that store + # intermediates for the Go build. This enables incremental builds across + # Docker sessions. The *_cgo paths are re-compiled versions of the go std + # libraries for true static building. + local -ra docker_cmd=( + "${DOCKER[@]}" run + --volume "${REMOTE_ROOT}" # white-out the whole output dir + --volume /usr/local/go/pkg/linux_386_cgo + --volume /usr/local/go/pkg/linux_amd64_cgo + --volume /usr/local/go/pkg/linux_arm_cgo + --volume /usr/local/go/pkg/linux_arm64_cgo + --volume /usr/local/go/pkg/linux_ppc64le_cgo + --volume /usr/local/go/pkg/darwin_amd64_cgo + --volume /usr/local/go/pkg/darwin_386_cgo + --volume /usr/local/go/pkg/windows_amd64_cgo + --volume /usr/local/go/pkg/windows_386_cgo + --name "${OPENIM_DATA_CONTAINER_NAME}" + --hostname "${HOSTNAME}" + "${OPENIM_BUILD_IMAGE}" + chown -R "${USER_ID}":"${GROUP_ID}" + "${REMOTE_ROOT}" + /usr/local/go/pkg/ + ) + "${docker_cmd[@]}" +fi } # Build all openim commands. function openim::build::build_command() { - openim::log::status "Running build command..." - make -C "${OPENIM_ROOT}" multiarch +openim::log::status "Running build command..." +make -C "${OPENIM_ROOT}" multiarch } diff --git a/scripts/coverage.sh b/scripts/coverage.sh index ae52836714..e5cef0b5dd 100755 --- a/scripts/coverage.sh +++ b/scripts/coverage.sh @@ -19,11 +19,11 @@ echo "mode: atomic" > coverage.txt for d in $(find ./* -maxdepth 10 -type d); do - if ls $d/*.go &> /dev/null; then - go test -coverprofile=profile.out -covermode=atomic $d - if [ -f profile.out ]; then - cat profile.out | grep -v "mode: " >> /tmp/coverage.txt - rm profile.out - fi + if ls $d/*.go &> /dev/null; then + go test -coverprofile=profile.out -covermode=atomic $d + if [ -f profile.out ]; then + cat profile.out | grep -v "mode: " >> /tmp/coverage.txt + rm profile.out fi + fi done diff --git a/scripts/demo.sh b/scripts/demo.sh index 5f8a2023a3..4b877b9ed3 100755 --- a/scripts/demo.sh +++ b/scripts/demo.sh @@ -15,16 +15,16 @@ if ! command -v pv &> /dev/null then - echo "pv not found, installing..." - if [ -e /etc/debian_version ]; then - sudo apt-get update - sudo apt-get install -y pv + echo "pv not found, installing..." + if [ -e /etc/debian_version ]; then + sudo apt-get update + sudo apt-get install -y pv elif [ -e /etc/redhat-release ]; then - sudo yum install -y pv - else - echo "Unsupported OS, please install pv manually." - exit 1 - fi + sudo yum install -y pv + else + echo "Unsupported OS, please install pv manually." + exit 1 + fi fi readonly t_reset=$(tput sgr0) @@ -42,8 +42,8 @@ openim::util::ensure-bash-version trap 'openim::util::onCtrlC' INT function openim::util::onCtrlC() { - echo -e "\n${t_reset}Ctrl+C Press it. It's exiting openim make init..." - exit 0 + echo -e "\n${t_reset}Ctrl+C Press it. It's exiting openim make init..." 
+ exit 0 } openim::util::desc "========> Welcome to the OpenIM Demo" diff --git a/scripts/docker-check-service.sh b/scripts/docker-check-service.sh index adf3834366..30ca89b5a9 100755 --- a/scripts/docker-check-service.sh +++ b/scripts/docker-check-service.sh @@ -22,61 +22,61 @@ cd "$OPENIM_ROOT" openim::util::check_docker_and_compose_versions progress() { - local _main_pid="$1" - local _length=20 - local _ratio=1 - local _colors=("31" "32" "33" "34" "35" "36" "37") - local _wave=("▁" "▂" "▃" "▄" "▅" "▆" "▇" "█" "▇" "▆" "▅" "▄" "▃" "▂") - - while pgrep -P "$_main_pid" &> /dev/null; do - local _mark='>' - local _progress_bar= - for ((i = 1; i <= _length; i++)); do - if ((i > _ratio)); then - _mark='-' - fi - _progress_bar="${_progress_bar}${_mark}" - done - - local _color_idx=$((_ratio % ${#_colors[@]})) - local _color_prefix="\033[${_colors[_color_idx]}m" - local _reset_suffix="\033[0m" - - local _wave_idx=$((_ratio % ${#_wave[@]})) - local _wave_progress=${_wave[_wave_idx]} - - printf "Progress: ${_color_prefix}${_progress_bar}${_reset_suffix} ${_wave_progress} Countdown: %2ds \r" "$_countdown" - ((_ratio++)) - ((_ratio > _length)) && _ratio=1 - sleep 0.1 + local _main_pid="$1" + local _length=20 + local _ratio=1 + local _colors=("31" "32" "33" "34" "35" "36" "37") + local _wave=("▁" "▂" "▃" "▄" "▅" "▆" "▇" "█" "▇" "▆" "▅" "▄" "▃" "▂") + + while pgrep -P "$_main_pid" &> /dev/null; do + local _mark='>' + local _progress_bar= + for ((i = 1; i <= _length; i++)); do + if ((i > _ratio)); then + _mark='-' + fi + _progress_bar="${_progress_bar}${_mark}" done + + local _color_idx=$((_ratio % ${#_colors[@]})) + local _color_prefix="\033[${_colors[_color_idx]}m" + local _reset_suffix="\033[0m" + + local _wave_idx=$((_ratio % ${#_wave[@]})) + local _wave_progress=${_wave[_wave_idx]} + + printf "Progress: ${_color_prefix}${_progress_bar}${_reset_suffix} ${_wave_progress} Countdown: %2ds \r" "$_countdown" + ((_ratio++)) + ((_ratio > _length)) && _ratio=1 + sleep 0.1 + done } countdown() { - local _duration="$1" - - for ((i = _duration; i >= 1; i--)); do - printf "\rCountdown: %2ds \r" "$i" - sleep 1 - done - printf "\rCountdown: %2ds \r" "$_duration" + local _duration="$1" + + for ((i = _duration; i >= 1; i--)); do + printf "\rCountdown: %2ds \r" "$i" + sleep 1 + done + printf "\rCountdown: %2ds \r" "$_duration" } do_sth() { - echo "++++++++++++++++++++++++" - progress $$ & - local _progress_pid=$! - local _countdown=30 - - countdown "$_countdown" & - local _countdown_pid=$! - - sleep 30 - - kill "$_progress_pid" "$_countdown_pid" - - "${SCRIPTS_ROOT}/check-all.sh" - echo -e "${PURPLE_PREFIX}=========> Check docker-compose status ${COLOR_SUFFIX} \n" + echo "++++++++++++++++++++++++" + progress $$ & + local _progress_pid=$! + local _countdown=30 + + countdown "$_countdown" & + local _countdown_pid=$! + + sleep 30 + + kill "$_progress_pid" "$_countdown_pid" + + "${SCRIPTS_ROOT}/check-all.sh" + echo -e "${PURPLE_PREFIX}=========> Check docker-compose status ${COLOR_SUFFIX} \n" } set -e diff --git a/scripts/ensure-tag.sh b/scripts/ensure-tag.sh index 2847342f44..5fedf70192 100755 --- a/scripts/ensure-tag.sh +++ b/scripts/ensure-tag.sh @@ -18,9 +18,9 @@ OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
version="${VERSION}" if [ "${version}" == "" ];then - version=v`${OPENIM_ROOT}/_output/tools/gsemver bump` + version=v$(${OPENIM_ROOT}/_output/tools/gsemver bump) fi -if [ -z "`git tag -l ${version}`" ];then +if [ -z "$(git tag -l ${version})" ];then git tag -a -m "release version ${version}" ${version} fi diff --git a/scripts/gen-swagger-docs.sh b/scripts/gen-swagger-docs.sh index ccf5eaeaac..68410e79c5 100755 --- a/scripts/gen-swagger-docs.sh +++ b/scripts/gen-swagger-docs.sh @@ -67,7 +67,7 @@ echo -e "=== any\nRepresents an untyped JSON map - see the description of the fi asciidoctor definitions.adoc asciidoctor paths.adoc -cp ${OPENIM_OUTPUT_TMP}/definitions.html ${OPENIM_OUTPUT_TMP}/_output/ -cp ${OPENIM_OUTPUT_TMP}/paths.html ${OPENIM_OUTPUT_TMP}/_output/operations.html +cp "$OPENIM_OUTPUT_TMP/definitions.html" "$OPENIM_OUTPUT_TMP/_output/" +cp "$OPENIM_OUTPUT_TMP/paths.html" "$OPENIM_OUTPUT_TMP/_output/operations.html" success "SUCCESS" \ No newline at end of file diff --git a/scripts/genconfig.sh b/scripts/genconfig.sh index 659e8f4be6..498b0b9089 100755 --- a/scripts/genconfig.sh +++ b/scripts/genconfig.sh @@ -25,12 +25,12 @@ OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "${OPENIM_ROOT}/scripts/lib/init.sh" if [ $# -ne 2 ];then - openim::log::error "Usage: scripts/genconfig.sh scripts/environment.sh configs/openim-api.yaml" - exit 1 + openim::log::error "Usage: scripts/genconfig.sh scripts/environment.sh configs/config.yaml" + exit 1 fi if [ -z "${OPENIM_IP}" ]; then - openim::util::require-dig + openim::util::require-dig fi source "${env_file}" @@ -40,15 +40,15 @@ declare -A envs set +u for env in $(sed -n 's/^[^#].*${\(.*\)}.*/\1/p' ${template_file}) do - if [ -z "$(eval echo \$${env})" ];then - openim::log::error "environment variable '${env}' not set" - missing=true - fi + if [ -z "$(eval echo \$${env})" ];then + openim::log::error "environment variable '${env}' not set" + missing=true + fi done if [ "${missing}" ];then - openim::log::error 'You may run `source scripts/environment.sh` to set these environment' - exit 1 + openim::log::error "You may run 'source scripts/environment.sh' to set these environment" + exit 1 fi eval "cat << EOF diff --git a/scripts/gendoc.sh b/scripts/gendoc.sh index c948fcdf96..ece090190e 100755 --- a/scripts/gendoc.sh +++ b/scripts/gendoc.sh @@ -14,43 +14,43 @@ # limitations under the License. DEFAULT_DIRS=( - "pkg" - "internal/pkg" + "pkg" + "internal/pkg" ) BASE_URL="github.com/openimsdk/open-im-server" usage() { - echo "Usage: $0 [OPTIONS]" - echo - echo "This script iterates over directories and generates doc.go if necessary." - echo "By default, it processes 'pkg' and 'internal/pkg' directories." - echo - echo "Options:" - echo " -d DIRS, --dirs DIRS Specify the directories to be processed, separated by commas. E.g., 'pkg,internal/pkg'." - echo " -u URL, --url URL Set the base URL for the import path. Default is '$BASE_URL'." - echo " -h, --help Show this help message." - echo + echo "Usage: $0 [OPTIONS]" + echo + echo "This script iterates over directories and generates doc.go if necessary." + echo "By default, it processes 'pkg' and 'internal/pkg' directories." + echo + echo "Options:" + echo " -d DIRS, --dirs DIRS Specify the directories to be processed, separated by commas. E.g., 'pkg,internal/pkg'." + echo " -u URL, --url URL Set the base URL for the import path. Default is '$BASE_URL'." + echo " -h, --help Show this help message." 
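genconfig.sh is the small engine behind most of the config generation in this patch: it sources an environment file, checks that every variable referenced by the template is set, and then expands the template to stdout. A hedged usage sketch (the exact environment file and template paths vary across callers; these follow the install scripts later in this diff):

  # Render the main server config; every ${VAR} referenced in the template must be set.
  ./scripts/genconfig.sh scripts/install/environment.sh deployments/templates/config.yaml > config/config.yaml

  # On a missing variable the script aborts with "environment variable 'X' not set"; as the
  # error message above suggests, sourcing the environment file first is the usual fix:
  source scripts/install/environment.sh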
+ echo } process_dir() { - local dir=$1 - local base_url=$2 - - for d in $(find $dir -type d); do - if [ ! -f $d/doc.go ]; then - if ls $d/*.go > /dev/null 2>&1; then - echo $d/doc.go - echo "package $(basename $d) // import \"$base_url/$d\"" > $d/doc.go - fi - fi - done + local dir=$1 + local base_url=$2 + + for d in $(find $dir -type d); do + if [ ! -f $d/doc.go ]; then + if ls $d/*.go > /dev/null 2>&1; then + echo $d/doc.go + echo "package $(basename $d) // import \"$base_url/$d\"" > $d/doc.go + fi + fi + done } while [[ $# -gt 0 ]]; do - key="$1" - - case $key in - -d|--dirs) + key="$1" + + case $key in + -d|--dirs) IFS=',' read -ra DIRS <<< "$2" shift # shift past argument shift # shift past value diff --git a/scripts/githooks/commit-msg.sh b/scripts/githooks/commit-msg.sh index efff13fd03..d2d96645bc 100644 --- a/scripts/githooks/commit-msg.sh +++ b/scripts/githooks/commit-msg.sh @@ -34,15 +34,15 @@ RED="\e[31m" ENDCOLOR="\e[0m" printMessage() { - printf "${YELLOW}OpenIM : $1${ENDCOLOR}\n" + printf "${YELLOW}OpenIM : $1${ENDCOLOR}\n" } printSuccess() { - printf "${GREEN}OpenIM : $1${ENDCOLOR}\n" + printf "${GREEN}OpenIM : $1${ENDCOLOR}\n" } printError() { - printf "${RED}OpenIM : $1${ENDCOLOR}\n" + printf "${RED}OpenIM : $1${ENDCOLOR}\n" } printMessage "Running the OpenIM commit-msg hook." @@ -50,9 +50,9 @@ printMessage "Running the OpenIM commit-msg hook." # This example catches duplicate Signed-off-by lines. test "" = "$(grep '^Signed-off-by: ' "$1" | - sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { - echo >&2 Duplicate Signed-off-by lines. - exit 1 +sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { +echo >&2 Duplicate Signed-off-by lines. +exit 1 } # TODO: go-gitlint dir set @@ -60,21 +60,21 @@ OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. GITLINT_DIR="$OPENIM_ROOT/_output/tools/go-gitlint" $GITLINT_DIR \ - --msg-file=$1 \ - --subject-regex="^(build|chore|ci|docs|feat|feature|fix|perf|refactor|revert|style|bot|test)(.*)?:\s?.*" \ - --subject-maxlen=150 \ - --subject-minlen=10 \ - --body-regex=".*" \ - --max-parents=1 +--msg-file=$1 \ +--subject-regex="^(build|chore|ci|docs|feat|feature|fix|perf|refactor|revert|style|bot|test)(.*)?:\s?.*" \ +--subject-maxlen=150 \ +--subject-minlen=10 \ +--body-regex=".*" \ +--max-parents=1 if [ $? -ne 0 ] then - if ! command -v $GITLINT_DIR &>/dev/null; then - printError "$GITLINT_DIR not found. Please run 'make tools' OR 'make tools.verify.go-gitlint' make verto install it." - fi - printError "Please fix your commit message to match kubecub coding standards" - printError "https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694#file-githook-md" - exit 1 +if ! command -v $GITLINT_DIR &>/dev/null; then + printError "$GITLINT_DIR not found. Please run 'make tools' OR 'make tools.verify.go-gitlint' make verto install it." +fi +printError "Please fix your commit message to match kubecub coding standards" +printError "https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694#file-githook-md" +exit 1 fi ### Add Sign-off-by line to the end of the commit message @@ -88,5 +88,5 @@ SIGNED_OFF_BY_EXISTS=$? 
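Putting the commit-msg hook together: the subject line must match the type-prefix regex, stay between 10 and 150 characters, duplicate Signed-off-by trailers are rejected, and a missing trailer is appended automatically (NAME and EMAIL are resolved earlier in the hook). An illustrative commit that passes these checks, assuming go-gitlint has been installed via 'make tools':

  git commit -m "fix: reject unset environment variables in genconfig" \
             -m "Fail fast instead of rendering a half-filled config.yaml."

  # Message as stored after the hook runs:
  #   fix: reject unset environment variables in genconfig
  #
  #   Fail fast instead of rendering a half-filled config.yaml.
  #
  #   Signed-off-by: Your Name <you@example.com>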
# Add "Signed-off-by" line if it doesn't exist if [ $SIGNED_OFF_BY_EXISTS -ne 0 ]; then - echo -e "\nSigned-off-by: $NAME <$EMAIL>" >> "$1" +echo -e "\nSigned-off-by: $NAME <$EMAIL>" >> "$1" fi \ No newline at end of file diff --git a/scripts/githooks/pre-commit.sh b/scripts/githooks/pre-commit.sh index 7fd21593c1..cc756c9ad1 100644 --- a/scripts/githooks/pre-commit.sh +++ b/scripts/githooks/pre-commit.sh @@ -34,15 +34,15 @@ RED="\e[31m" ENDCOLOR="\e[0m" printMessage() { - printf "${YELLOW}openim : $1${ENDCOLOR}\n" + printf "${YELLOW}openim : $1${ENDCOLOR}\n" } printSuccess() { - printf "${GREEN}openim : $1${ENDCOLOR}\n" + printf "${GREEN}openim : $1${ENDCOLOR}\n" } printError() { - printf "${RED}openim : $1${ENDCOLOR}\n" + printf "${RED}openim : $1${ENDCOLOR}\n" } printMessage "Running local openim pre-commit hook." @@ -55,9 +55,9 @@ limit=${GIT_FILE_SIZE_LIMIT:-2000000} # Default 2MB limitInMB=$(( $limit / 1000000 )) function file_too_large(){ - filename=$0 - filesize=$(( $1 / 2**20 )) - + filename=$0 + filesize=$(( $1 / 2**20 )) + cat < /dev/null 2>&1 then - against=HEAD + against=HEAD else - against="$empty_tree" + against="$empty_tree" fi # Set split so that for loop below can handle spaces in file names by splitting on line breaks @@ -104,7 +104,7 @@ fi if [[ ! $local_branch =~ $valid_branch_regex ]] then - printError "There is something wrong with your branch name. Branch names in this project must adhere to this contract: $valid_branch_regex. + printError "There is something wrong with your branch name. Branch names in this project must adhere to this contract: $valid_branch_regex. Your commit will be rejected. You should rename your branch to a valid name(feat/name OR bug/name) and try again." printError "For more on this, read on: https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694" exit 1 diff --git a/scripts/githooks/pre-push.sh b/scripts/githooks/pre-push.sh index e341cf4f2f..9bd9389158 100644 --- a/scripts/githooks/pre-push.sh +++ b/scripts/githooks/pre-push.sh @@ -25,20 +25,20 @@ local_branch="$(git rev-parse --abbrev-ref HEAD)" valid_branch_regex="^(main|master|develop|release(-[a-zA-Z0-9._-]+)?)$|(feature|feat|openim|hotfix|test|bug|ci|cicd|style|)\/[a-z0-9._-]+$|^HEAD$" printMessage() { - printf "${YELLOW}OpenIM : $1${ENDCOLOR}\n" + printf "${YELLOW}OpenIM : $1${ENDCOLOR}\n" } printSuccess() { - printf "${GREEN}OpenIM : $1${ENDCOLOR}\n" + printf "${GREEN}OpenIM : $1${ENDCOLOR}\n" } printError() { - printf "${RED}OpenIM : $1${ENDCOLOR}\n" + printf "${RED}OpenIM : $1${ENDCOLOR}\n" } printMessage "Running local OpenIM pre-push hook." -if [[ `git status --porcelain` ]]; then +if [[ $(git status --porcelain) ]]; then printError "This scripts needs to run against committed code only. Please commit or stash you changes." exit 1 fi @@ -101,7 +101,7 @@ print_color "Deleted Files: ${deleted_files}" "${BACKGROUND_GREEN}" if [[ ! $local_branch =~ $valid_branch_regex ]] then - printError "There is something wrong with your branch name. Branch names in this project must adhere to this contract: $valid_branch_regex. + printError "There is something wrong with your branch name. Branch names in this project must adhere to this contract: $valid_branch_regex. Your commit will be rejected. You should rename your branch to a valid name(feat/name OR fix/name) and try again." 
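The pre-push hook (like the pre-commit hook before it) gates on valid_branch_regex; the pre-push version is quoted above. Long-lived branches (main, master, develop, release with an optional suffix) are accepted as-is, while working branches need a recognised prefix plus a lowercase slug. A few illustrative checks, runnable with valid_branch_regex set as above:

  for b in feat/config-verification bug/genconfig-unset-vars release-v3.5 Feature/ConfigCheck my_fix_branch; do
    [[ $b =~ $valid_branch_regex ]] && echo "accepted: $b" || echo "rejected: $b"
  done
  # accepted: feat/config-verification
  # accepted: bug/genconfig-unset-vars
  # accepted: release-v3.5
  # rejected: Feature/ConfigCheck
  # rejected: my_fix_branch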
printError "For more on this, read on: https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694" exit 1 diff --git a/scripts/init-config.sh b/scripts/init-config.sh index c75fc55023..82eefbb54d 100755 --- a/scripts/init-config.sh +++ b/scripts/init-config.sh @@ -31,7 +31,7 @@ readonly ENV_FILE=${ENV_FILE:-"${OPENIM_ROOT}/scripts/install/environment.sh"} # Templates for configuration files declare -A TEMPLATES=( ["${OPENIM_ROOT}/deployments/templates/env-template.yaml"]="${OPENIM_ROOT}/.env" - ["${OPENIM_ROOT}/deployments/templates/openim.yaml"]="${OPENIM_ROOT}/config/config.yaml" + ["${OPENIM_ROOT}/deployments/templates/config.yaml"]="${OPENIM_ROOT}/config/config.yaml" ["${OPENIM_ROOT}/deployments/templates/prometheus.yml"]="${OPENIM_ROOT}/config/prometheus.yml" ["${OPENIM_ROOT}/deployments/templates/alertmanager.yml"]="${OPENIM_ROOT}/config/alertmanager.yml" ) @@ -39,7 +39,7 @@ declare -A TEMPLATES=( # Templates for example files declare -A EXAMPLES=( ["${OPENIM_ROOT}/deployments/templates/env-template.yaml"]="${OPENIM_ROOT}/config/templates/env.template" - ["${OPENIM_ROOT}/deployments/templates/openim.yaml"]="${OPENIM_ROOT}/config/templates/config.yaml.template" + ["${OPENIM_ROOT}/deployments/templates/config.yaml"]="${OPENIM_ROOT}/config/templates/config.yaml.template" ["${OPENIM_ROOT}/deployments/templates/prometheus.yml"]="${OPENIM_ROOT}/config/templates/prometheus.yml.template" ["${OPENIM_ROOT}/deployments/templates/alertmanager.yml"]="${OPENIM_ROOT}/config/templates/alertmanager.yml.template" ) @@ -84,7 +84,7 @@ generate_config_files() { local output_file="${TEMPLATES[$template]}" process_file "$template" "$output_file" true done - + # Handle COPY_TEMPLATES array for template in "${!COPY_TEMPLATES[@]}"; do local output_file="${COPY_TEMPLATES[$template]}" @@ -95,22 +95,25 @@ generate_config_files() { # Function to generate example files generate_example_files() { env_cmd="env -i" + + env_vars["OPENIM_IP"]="127.0.0.1" + env_vars["LOG_STORAGE_LOCATION"]="../../" + for var in "${!env_vars[@]}"; do - env_cmd+=" $var='${env_vars[$var]}'" + env_cmd+=" $var='${env_vars[$var]}'" done - + # Processing EXAMPLES array for template in "${!EXAMPLES[@]}"; do local example_file="${EXAMPLES[$template]}" process_file "$template" "$example_file" true done - + # Processing COPY_EXAMPLES array for template in "${!COPY_EXAMPLES[@]}"; do local example_file="${COPY_EXAMPLES[$template]}" process_file "$template" "$example_file" false done - } # Function to process a single file, either by generating or copying @@ -118,11 +121,11 @@ process_file() { local template=$1 local output_file=$2 local use_genconfig=$3 - + if [[ -f "${output_file}" ]]; then if [[ "${FORCE_OVERWRITE}" == true ]]; then openim::log::info "Force overwriting ${output_file}." - elif [[ "${SKIP_EXISTING}" == true ]]; then + elif [[ "${SKIP_EXISTING}" == true ]]; then openim::log::info "Skipping generation of ${output_file} as it already exists." return else @@ -139,7 +142,7 @@ process_file() { openim::log::info "Generating ${output_file} as it does not exist." fi fi - + if [[ "$use_genconfig" == true ]]; then openim::log::info "⌚ Working with template file: ${template} to generate ${output_file}..." if [[ ! 
-f "${OPENIM_ROOT}/scripts/genconfig.sh" ]]; then @@ -147,15 +150,15 @@ process_file() { exit 1 fi if [[ -n "${env_cmd}" ]]; then - eval "$env_cmd ${OPENIM_ROOT}/scripts/genconfig.sh '${ENV_FILE}' '${template}' > '${output_file}'" || { + eval "$env_cmd ${OPENIM_ROOT}/scripts/genconfig.sh '${ENV_FILE}' '${template}' > '${output_file}'" || { openim::log::error "Error processing template file ${template}" exit 1 - } + } else - "${OPENIM_ROOT}/scripts/genconfig.sh" "${ENV_FILE}" "${template}" > "${output_file}" || { + "${OPENIM_ROOT}/scripts/genconfig.sh" "${ENV_FILE}" "${template}" > "${output_file}" || { openim::log::error "Error processing template file ${template}" exit 1 - } + } fi else openim::log::info "📋 Copying ${template} to ${output_file}..." @@ -164,7 +167,7 @@ process_file() { exit 1 } fi - + sleep 0.5 } @@ -181,7 +184,6 @@ clean_config_files() { # Function to clean example files clean_example_files() { - # 合并 EXAMPLES 和 COPY_EXAMPLES 数组 local all_examples=("${EXAMPLES[@]}" "${COPY_EXAMPLES[@]}") for example_file in "${all_examples[@]}"; do @@ -197,32 +199,32 @@ while [[ $# -gt 0 ]]; do -h|--help) show_help exit 0 - ;; + ;; --force) FORCE_OVERWRITE=true shift - ;; + ;; --skip) SKIP_EXISTING=true shift - ;; + ;; --examples) GENERATE_EXAMPLES=true shift - ;; + ;; --clean-config) CLEAN_CONFIG=true shift - ;; + ;; --clean-examples) CLEAN_EXAMPLES=true shift - ;; + ;; *) echo "Unknown option: $1" show_help exit 1 - ;; + ;; esac done diff --git a/scripts/init-env.sh b/scripts/init-env.sh index ca0c471add..75b871b088 100755 --- a/scripts/init-env.sh +++ b/scripts/init-env.sh @@ -25,9 +25,9 @@ source "${OPENIM_ROOT}/scripts/install/common.sh" openim::log::info "\n# Begin Install OpenIM Config" for file in "${OPENIM_SERVER_TARGETS[@]}"; do - VARNAME="$(echo $file | tr '[:lower:]' '[:upper:]' | tr '.' '_' | tr '-' '_')" - VARVALUE="$OPENIM_OUTPUT_HOSTBIN/$file" - # /etc/profile.d/openim-env.sh - echo "export $VARNAME=$VARVALUE" > /etc/profile.d/openim-env.sh - source /etc/profile.d/openim-env.sh + VARNAME="$(echo $file | tr '[:lower:]' '[:upper:]' | tr '.' '_' | tr '-' '_')" + VARVALUE="$OPENIM_OUTPUT_HOSTBIN/$file" + # /etc/profile.d/openim-env.sh + echo "export $VARNAME=$VARVALUE" > /etc/profile.d/openim-env.sh + source /etc/profile.d/openim-env.sh done diff --git a/scripts/init-githooks.sh b/scripts/init-githooks.sh index 399054bb82..4ee470742e 100755 --- a/scripts/init-githooks.sh +++ b/scripts/init-githooks.sh @@ -39,62 +39,62 @@ OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. HOOKS_DIR="${OPENIM_ROOT}/.git/hooks" help_info() { - echo "Usage: $0 [options]" - echo - echo "This script helps to manage git hooks." - echo - echo "Options:" - echo " -h, --help Show this help message and exit." - echo " -d, --delete Delete the hooks that have been added." - echo " By default, it will prompt to enable git hooks." + echo "Usage: $0 [options]" + echo + echo "This script helps to manage git hooks." + echo + echo "Options:" + echo " -h, --help Show this help message and exit." + echo " -d, --delete Delete the hooks that have been added." + echo " By default, it will prompt to enable git hooks." } delete_hooks() { - for file in ${OPENIM_ROOT}/scripts/githooks/*.sh; do - hook_name=$(basename "$file" .sh) # This removes the .sh extension - rm -f "$HOOKS_DIR/$hook_name" - done - echo "Git hooks have been deleted." 
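Taken as a whole, init-config.sh is the single entry point that turns the files under deployments/templates/ into the runtime configuration (.env, config/config.yaml, prometheus.yml, alertmanager.yml) and, optionally, the *.template examples under config/templates/. A hedged sketch of the flags parsed further down in this script:

  ./scripts/init-config.sh                  # default run, driven by the flag handling below
  ./scripts/init-config.sh --skip           # leave files that already exist untouched
  ./scripts/init-config.sh --force          # regenerate even if the target file exists
  ./scripts/init-config.sh --examples       # also render the example templates
                                            # (examples pin OPENIM_IP=127.0.0.1 and LOG_STORAGE_LOCATION=../../)
  ./scripts/init-config.sh --clean-config --clean-examples   # remove the generated files again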
+ for file in "${OPENIM_ROOT}"/scripts/githooks/*.sh; do + hook_name=$(basename "$file" .sh) # This removes the .sh extension + rm -f "$HOOKS_DIR/$hook_name" + done + echo "Git hooks have been deleted." } enable_hooks() { - echo "Would you like to:" - echo "1) Enable git hooks mode" - echo "2) Delete existing git hooks" - echo "Please select a number (or any other key to exit):" - read -r choice - - case "$choice" in - 1) - for file in ${OPENIM_ROOT}/scripts/githooks/*.sh; do - hook_name=$(basename "$file" .sh) # This removes the .sh extension - cp -f "$file" "$HOOKS_DIR/$hook_name" - done - - chmod +x $HOOKS_DIR/* - - echo "Git hooks mode has been enabled." - echo "With git hooks enabled, every time you perform a git action (e.g. git commit), the corresponding hooks script will be triggered automatically." - echo "This means that if the size of the file you're committing exceeds the set limit (e.g. 42MB), the commit will be rejected." - ;; - 2) - delete_hooks - ;; - *) - echo "Exiting without making changes." - ;; - esac + echo "Would you like to:" + echo "1) Enable git hooks mode" + echo "2) Delete existing git hooks" + echo "Please select a number (or any other key to exit):" + read -r choice + + case "$choice" in + 1) + for file in ${OPENIM_ROOT}/scripts/githooks/*.sh; do + hook_name=$(basename "$file" .sh) # This removes the .sh extension + cp -f "$file" "$HOOKS_DIR/$hook_name" + done + + chmod +x $HOOKS_DIR/* + + echo "Git hooks mode has been enabled." + echo "With git hooks enabled, every time you perform a git action (e.g. git commit), the corresponding hooks script will be triggered automatically." + echo "This means that if the size of the file you're committing exceeds the set limit (e.g. 42MB), the commit will be rejected." + ;; + 2) + delete_hooks + ;; + *) + echo "Exiting without making changes." + ;; + esac } case "$1" in - -h|--help) - help_info - ;; - -d|--delete) - delete_hooks - ;; - *) - enable_hooks - ;; + -h|--help) + help_info + ;; + -d|--delete) + delete_hooks + ;; + *) + enable_hooks + ;; esac diff --git a/scripts/install-im-server.sh b/scripts/install-im-server.sh index 9afbb97c91..9588032d7b 100755 --- a/scripts/install-im-server.sh +++ b/scripts/install-im-server.sh @@ -49,10 +49,10 @@ DOCKER_COMPOSE_COMMAND= # Check if docker-compose command is available openim::util::check_docker_and_compose_versions if command -v docker compose &> /dev/null; then - openim::log::info "docker compose command is available" - DOCKER_COMPOSE_COMMAND="docker compose" + openim::log::info "docker compose command is available" + DOCKER_COMPOSE_COMMAND="docker compose" else - DOCKER_COMPOSE_COMMAND="docker-compose" + DOCKER_COMPOSE_COMMAND="docker-compose" fi export SERVER_IMAGE_VERSION @@ -67,12 +67,12 @@ ${DOCKER_COMPOSE_COMMAND} up -d # Function to check container status check_containers() { - if ! ${DOCKER_COMPOSE_COMMAND} ps | grep -q 'Up'; then - echo "Error: One or more docker containers failed to start." - ${DOCKER_COMPOSE_COMMAND} logs - return 1 - fi - return 0 + if ! ${DOCKER_COMPOSE_COMMAND} ps | grep -q 'Up'; then + echo "Error: One or more docker containers failed to start." + ${DOCKER_COMPOSE_COMMAND} logs + return 1 + fi + return 0 } # Wait for a short period to allow containers to initialize diff --git a/scripts/install/dependency.sh b/scripts/install/dependency.sh index 78995bcf96..e7c7eb426f 100755 --- a/scripts/install/dependency.sh +++ b/scripts/install/dependency.sh @@ -24,66 +24,66 @@ OPENIM_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. 
&& pwd -P) # Start MongoDB service docker run -d \ - --name mongo \ - -p 37017:27017 \ - -v "${DATA_DIR}/components/mongodb/data/db:/data/db" \ - -v "${DATA_DIR}/components/mongodb/data/logs:/data/logs" \ - -v "${DATA_DIR}/components/mongodb/data/conf:/etc/mongo" \ - -v "./scripts/mongo-init.sh:/docker-entrypoint-initdb.d/mongo-init.sh:ro" \ - -e TZ=Asia/Shanghai \ - -e wiredTigerCacheSizeGB=1 \ - -e MONGO_INITDB_ROOT_USERNAME=${OPENIM_USER} \ - -e MONGO_INITDB_ROOT_PASSWORD=${PASSWORD} \ - -e MONGO_INITDB_DATABASE=openIM \ - -e MONGO_OPENIM_USERNAME=${OPENIM_USER} \ - -e MONGO_OPENIM_PASSWORD=${PASSWORD} \ - --restart always \ - mongo:6.0.2 --wiredTigerCacheSizeGB 1 --auth +--name mongo \ +-p 37017:27017 \ +-v "${DATA_DIR}/components/mongodb/data/db:/data/db" \ +-v "${DATA_DIR}/components/mongodb/data/logs:/data/logs" \ +-v "${DATA_DIR}/components/mongodb/data/conf:/etc/mongo" \ +-v "./scripts/mongo-init.sh:/docker-entrypoint-initdb.d/mongo-init.sh:ro" \ +-e TZ=Asia/Shanghai \ +-e wiredTigerCacheSizeGB=1 \ +-e MONGO_INITDB_ROOT_USERNAME=${OPENIM_USER} \ +-e MONGO_INITDB_ROOT_PASSWORD=${PASSWORD} \ +-e MONGO_INITDB_DATABASE=openim_v3 \ +-e MONGO_OPENIM_USERNAME=${OPENIM_USER} \ +-e MONGO_OPENIM_PASSWORD=${PASSWORD} \ +--restart always \ +mongo:6.0.2 --wiredTigerCacheSizeGB 1 --auth # Start Redis service docker run -d \ - --name redis \ - -p 16379:6379 \ - -v "${DATA_DIR}/components/redis/data:/data" \ - -v "${DATA_DIR}/components/redis/config/redis.conf:/usr/local/redis/config/redis.conf" \ - -e TZ=Asia/Shanghai \ - --sysctl net.core.somaxconn=1024 \ - --restart always \ - redis:7.0.0 redis-server --requirepass ${PASSWORD} --appendonly yes +--name redis \ +-p 16379:6379 \ +-v "${DATA_DIR}/components/redis/data:/data" \ +-v "${DATA_DIR}/components/redis/config/redis.conf:/usr/local/redis/config/redis.conf" \ +-e TZ=Asia/Shanghai \ +--sysctl net.core.somaxconn=1024 \ +--restart always \ +redis:7.0.0 redis-server --requirepass ${PASSWORD} --appendonly yes # Start Zookeeper service docker run -d \ - --name zookeeper \ - -p 2181:2181 \ - -v "/etc/localtime:/etc/localtime" \ - -e TZ=Asia/Shanghai \ - --restart always \ - wurstmeister/zookeeper +--name zookeeper \ +-p 2181:2181 \ +-v "/etc/localtime:/etc/localtime" \ +-e TZ=Asia/Shanghai \ +--restart always \ +wurstmeister/zookeeper # Start Kafka service docker run -d \ - --name kafka \ - -p 9092:9092 \ - -e TZ=Asia/Shanghai \ - -e KAFKA_BROKER_ID=0 \ - -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \ - -e KAFKA_CREATE_TOPICS="latestMsgToRedis:8:1,msgToPush:8:1,offlineMsgToMongoMysql:8:1" \ - -e KAFKA_ADVERTISED_LISTENERS="INSIDE://127.0.0.1:9092,OUTSIDE://103.116.45.174:9092" \ - -e KAFKA_LISTENERS="INSIDE://:9092,OUTSIDE://:9093" \ - -e KAFKA_LISTENER_SECURITY_PROTOCOL_MAP="INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT" \ - -e KAFKA_INTER_BROKER_LISTENER_NAME=INSIDE \ - --restart always \ - --link zookeeper \ - wurstmeister/kafka +--name kafka \ +-p 9092:9092 \ +-e TZ=Asia/Shanghai \ +-e KAFKA_BROKER_ID=0 \ +-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \ +-e KAFKA_CREATE_TOPICS="latestMsgToRedis:8:1,msgToPush:8:1,offlineMsgToMongoMysql:8:1" \ +-e KAFKA_ADVERTISED_LISTENERS="INSIDE://127.0.0.1:9092,OUTSIDE://103.116.45.174:9092" \ +-e KAFKA_LISTENERS="INSIDE://:9092,OUTSIDE://:9093" \ +-e KAFKA_LISTENER_SECURITY_PROTOCOL_MAP="INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT" \ +-e KAFKA_INTER_BROKER_LISTENER_NAME=INSIDE \ +--restart always \ +--link zookeeper \ +wurstmeister/kafka # Start MinIO service docker run -d \ - --name minio \ - -p 10005:9000 \ - -p 9090:9090 \ - -v 
"/mnt/data:/data" \ - -v "/mnt/config:/root/.minio" \ - -e MINIO_ROOT_USER=${OPENIM_USER} \ - -e MINIO_ROOT_PASSWORD=${PASSWORD} \ - --restart always \ - minio/minio server /data --console-address ':9090' +--name minio \ +-p 10005:9000 \ +-p 9090:9090 \ +-v "/mnt/data:/data" \ +-v "/mnt/config:/root/.minio" \ +-e MINIO_ROOT_USER=${OPENIM_USER} \ +-e MINIO_ROOT_PASSWORD=${PASSWORD} \ +--restart always \ +minio/minio server /data --console-address ':9090' diff --git a/scripts/install/environment.sh b/scripts/install/environment.sh index 41a70c64d8..aeb4fcc364 100755 --- a/scripts/install/environment.sh +++ b/scripts/install/environment.sh @@ -28,7 +28,7 @@ source "${OPENIM_ROOT}/scripts/lib/init.sh" #TODO: Access to the OPENIM_IP networks outside, or you want to use the OPENIM_IP network # OPENIM_IP=127.0.0.1 if [ -z "${OPENIM_IP}" ]; then - OPENIM_IP=$(openim::util::get_server_ip) + OPENIM_IP=$(openim::util::get_server_ip) fi # config.gateway custom bridge modes @@ -37,9 +37,9 @@ fi # fi function def() { - local var_name="$1" - local default_value="${2:-}" - eval "readonly $var_name=\"\${$var_name:-$(printf '%q' "$default_value")}\"" + local var_name="$1" + local default_value="${2:-}" + eval "readonly $var_name=\"\${$var_name:-$(printf '%q' "$default_value")}\"" } # OpenIM Docker Compose 数据存储的默认路径 @@ -52,7 +52,7 @@ def "OPENIM_USER" "root" readonly PASSWORD=${PASSWORD:-'openIM123'} # 设置统一的数据库名称,方便管理 -def "DATABASE_NAME" "openIM_v3" +def "DATABASE_NAME" "openim_v3" # Linux系统 openim 用户 def "LINUX_USERNAME" "openim" @@ -89,8 +89,8 @@ SUBNET=$(echo $DOCKER_BRIDGE_SUBNET | cut -d '/' -f 2) LAST_OCTET=$(echo $IP_PREFIX | cut -d '.' -f 4) generate_ip() { - local NEW_IP="$(echo $IP_PREFIX | cut -d '.' -f 1-3).$((LAST_OCTET++))" - echo $NEW_IP + local NEW_IP="$(echo $IP_PREFIX | cut -d '.' -f 1-3).$((LAST_OCTET++))" + echo $NEW_IP } LAST_OCTET=$((LAST_OCTET + 1)) DOCKER_BRIDGE_GATEWAY=$(generate_ip) diff --git a/scripts/install/install-protobuf.sh b/scripts/install/install-protobuf.sh index 33ceaeb0df..838b390b50 100755 --- a/scripts/install/install-protobuf.sh +++ b/scripts/install/install-protobuf.sh @@ -21,17 +21,17 @@ # This tool is customized to meet the specific needs of OpenIM and resides in its separate repository. # It can be downloaded from the following link: # https://github.com/OpenIMSDK/Open-IM-Protoc/releases/tag/v1.0.0 -# +# # About the tool: # https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/protoc-tools.md # Download link (Windows): https://github.com/OpenIMSDK/Open-IM-Protoc/releases/download/v1.0.0/windows.zip # Download link (Linux): https://github.com/OpenIMSDK/Open-IM-Protoc/releases/download/v1.0.0/linux.zip -# +# # Installation steps (taking Windows as an example): # 1. Visit the above link and download the version suitable for Windows. # 2. Extract the downloaded file. # 3. Add the extracted tool to your PATH environment variable so that it can be run directly from the command line. -# +# # Note: The specific installation and usage instructions may vary based on the tool's actual implementation. It's advised to refer to official documentation. # -------------------------------------------------------------- @@ -40,79 +40,79 @@ DOWNLOAD_DIR="/tmp/openim-protoc" INSTALL_DIR="/usr/local/bin" function help_message { - echo "Usage: ./install-protobuf.sh [option]" - echo "Options:" - echo "-i, --install Install the OpenIM Protoc tool." - echo "-u, --uninstall Uninstall the OpenIM Protoc tool." - echo "-r, --reinstall Reinstall the OpenIM Protoc tool." 
- echo "-c, --check Check if the OpenIM Protoc tool is installed." - echo "-h, --help Display this help message." + echo "Usage: ./install-protobuf.sh [option]" + echo "Options:" + echo "-i, --install Install the OpenIM Protoc tool." + echo "-u, --uninstall Uninstall the OpenIM Protoc tool." + echo "-r, --reinstall Reinstall the OpenIM Protoc tool." + echo "-c, --check Check if the OpenIM Protoc tool is installed." + echo "-h, --help Display this help message." } function install_protobuf { - echo "Installing OpenIM Protoc tool..." - - # Create temporary directory and download the zip file - mkdir -p $DOWNLOAD_DIR - wget $PROTOC_DOWNLOAD_URL -O $DOWNLOAD_DIR/linux.zip - - # Unzip the file - unzip -o $DOWNLOAD_DIR/linux.zip -d $DOWNLOAD_DIR - - # Move binaries to the install directory and make them executable - sudo cp $DOWNLOAD_DIR/linux/protoc $INSTALL_DIR/ - sudo cp $DOWNLOAD_DIR/linux/protoc-gen-go $INSTALL_DIR/ - sudo chmod +x $INSTALL_DIR/protoc - sudo chmod +x $INSTALL_DIR/protoc-gen-go - - # Clean up - rm -rf $DOWNLOAD_DIR - - echo "OpenIM Protoc tool installed successfully!" + echo "Installing OpenIM Protoc tool..." + + # Create temporary directory and download the zip file + mkdir -p $DOWNLOAD_DIR + wget $PROTOC_DOWNLOAD_URL -O $DOWNLOAD_DIR/linux.zip + + # Unzip the file + unzip -o $DOWNLOAD_DIR/linux.zip -d $DOWNLOAD_DIR + + # Move binaries to the install directory and make them executable + sudo cp $DOWNLOAD_DIR/linux/protoc $INSTALL_DIR/ + sudo cp $DOWNLOAD_DIR/linux/protoc-gen-go $INSTALL_DIR/ + sudo chmod +x $INSTALL_DIR/protoc + sudo chmod +x $INSTALL_DIR/protoc-gen-go + + # Clean up + rm -rf $DOWNLOAD_DIR + + echo "OpenIM Protoc tool installed successfully!" } function uninstall_protobuf { - echo "Uninstalling OpenIM Protoc tool..." - - # Removing binaries from the install directory - sudo rm -f $INSTALL_DIR/protoc - sudo rm -f $INSTALL_DIR/protoc-gen-go - - echo "OpenIM Protoc tool uninstalled successfully!" + echo "Uninstalling OpenIM Protoc tool..." + + # Removing binaries from the install directory + sudo rm -f $INSTALL_DIR/protoc + sudo rm -f $INSTALL_DIR/protoc-gen-go + + echo "OpenIM Protoc tool uninstalled successfully!" } function reinstall_protobuf { - echo "Reinstalling OpenIM Protoc tool..." - uninstall_protobuf - install_protobuf + echo "Reinstalling OpenIM Protoc tool..." + uninstall_protobuf + install_protobuf } function check_protobuf { - echo "Checking for OpenIM Protoc tool installation..." - - which protoc > /dev/null 2>&1 - if [ $? -eq 0 ]; then - echo "OpenIM Protoc tool is installed." - else - echo "OpenIM Protoc tool is not installed." - fi + echo "Checking for OpenIM Protoc tool installation..." + + which protoc > /dev/null 2>&1 + if [ $? -eq 0 ]; then + echo "OpenIM Protoc tool is installed." + else + echo "OpenIM Protoc tool is not installed." 
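The flag handling just below mirrors the help text above, so the usual flow is a single install followed by the built-in check. An illustrative session (the bundled protoc and protoc-gen-go land in /usr/local/bin, hence the sudo calls inside the script):

  ./scripts/install/install-protobuf.sh --install   # download the release zip, copy protoc and protoc-gen-go
  ./scripts/install/install-protobuf.sh --check     # prints "OpenIM Protoc tool is installed."
  which protoc protoc-gen-go                        # both should resolve under /usr/local/bin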
+ fi } while [ "$1" != "" ]; do - case $1 in - -i | --install ) install_protobuf - ;; - -u | --uninstall ) uninstall_protobuf - ;; - -r | --reinstall ) reinstall_protobuf - ;; - -c | --check ) check_protobuf - ;; - -h | --help ) help_message - exit - ;; - * ) help_message - exit 1 - esac - shift + case $1 in + -i | --install ) install_protobuf + ;; + -u | --uninstall ) uninstall_protobuf + ;; + -r | --reinstall ) reinstall_protobuf + ;; + -c | --check ) check_protobuf + ;; + -h | --help ) help_message + exit + ;; + * ) help_message + exit 1 + esac + shift done diff --git a/scripts/install/install.sh b/scripts/install/install.sh index b88fe90831..d5ec5b7f77 100755 --- a/scripts/install/install.sh +++ b/scripts/install/install.sh @@ -14,38 +14,38 @@ # limitations under the License. # # OpenIM Server Installation Script -# +# # Description: -# This script is designed to handle the installation, Is a deployment solution +# This script is designed to handle the installation, Is a deployment solution # that uses the Linux systen extension. uninstallation, and # status checking of OpenIM components on the server. OpenIM is a presumed # communication or messaging platform based on the context. -# +# # Usage: -# To utilize this script, you need to invoke it with specific commands +# To utilize this script, you need to invoke it with specific commands # and options as detailed below. -# +# # Commands: -# -i, --install : Use this command to initiate the installation of all +# -i, --install : Use this command to initiate the installation of all # OpenIM components. -# -u, --uninstall : Use this command to uninstall or remove all +# -u, --uninstall : Use this command to uninstall or remove all # OpenIM components from the server. -# -s, --status : This command can be used to check and report the +# -s, --status : This command can be used to check and report the # current operational status of the installed OpenIM components. # -h, --help : For any assistance or to view the available commands, # use this command to display the help menu. -# +# # Example Usage: # To install all OpenIM components: -# ./scripts/install/install.sh -i -# or -# ./scripts/install/install.sh --install -# +# ./scripts/install/install.sh -i +# or +# ./scripts/install/install.sh --install +# # Note: # Ensure you have the necessary privileges to execute installation or -# uninstallation operations. It's generally recommended to take a backup +# uninstallation operations. It's generally recommended to take a backup # before making major changes. -# +# ############################################################################### OPENIM_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. && pwd -P) @@ -57,99 +57,99 @@ ${OPENIM_ROOT}/scripts/install/test.sh # Detailed help function function openim::install::show_help() { - echo "OpenIM Installer" - echo "Usage: $0 [options]" - echo "" - echo "Commands:" - echo " -i, --install Install all OpenIM components." - echo " -u, --uninstall Remove all OpenIM components." - echo " -s, --status Check the current status of OpenIM components." - echo " -h, --help Show this help menu." - echo "" - echo "Example: " - echo " $0 -i Will install all OpenIM components." - echo " $0 --install Same as above." + echo "OpenIM Installer" + echo "Usage: $0 [options]" + echo "" + echo "Commands:" + echo " -i, --install Install all OpenIM components." + echo " -u, --uninstall Remove all OpenIM components." + echo " -s, --status Check the current status of OpenIM components." 
+ echo " -h, --help Show this help menu." + echo "" + echo "Example: " + echo " $0 -i Will install all OpenIM components." + echo " $0 --install Same as above." } function openim::install::install_openim() { - openim::common::sudo "mkdir -p ${OPENIM_DATA_DIR} ${OPENIM_INSTALL_DIR} ${OPENIM_CONFIG_DIR} ${OPENIM_LOG_DIR}" - openim::log::info "check openim dependency" - openim::common::sudo "cp -r ${OPENIM_ROOT}/config/* ${OPENIM_CONFIG_DIR}/" - - ${OPENIM_ROOT}/scripts/genconfig.sh ${ENV_FILE} ${OPENIM_ROOT}/deployments/templates/openim.yaml > ${OPENIM_CONFIG_DIR}/config.yaml - ${OPENIM_ROOT}/scripts/genconfig.sh ${ENV_FILE} ${OPENIM_ROOT}/deployments/templates/prometheus.yml > ${OPENIM_CONFIG_DIR}/prometheus.yml - - openim::util::check_ports ${OPENIM_DEPENDENCY_PORT_LISTARIES[@]} - - ${OPENIM_ROOT}/scripts/install/openim-msggateway.sh openim::msggateway::install || return 1 - ${OPENIM_ROOT}/scripts/install/openim-msgtransfer.sh openim::msgtransfer::install || return 1 - ${OPENIM_ROOT}/scripts/install/openim-push.sh openim::push::install || return 1 - ${OPENIM_ROOT}/scripts/install/openim-crontask.sh openim::crontask::install || return 1 - ${OPENIM_ROOT}/scripts/install/openim-rpc.sh openim::rpc::install || return 1 - ${OPENIM_ROOT}/scripts/install/openim-api.sh openim::api::install || return 1 - - openim::common::sudo "cp -r ${OPENIM_ROOT}/deployments/templates/openim.target /etc/systemd/system/openim.target" - openim::common::sudo "systemctl daemon-reload" - openim::common::sudo "systemctl restart openim.target" - openim::common::sudo "systemctl enable openim.target" - openim::log::success "openim install success" + openim::common::sudo "mkdir -p ${OPENIM_DATA_DIR} ${OPENIM_INSTALL_DIR} ${OPENIM_CONFIG_DIR} ${OPENIM_LOG_DIR}" + openim::log::info "check openim dependency" + openim::common::sudo "cp -r ${OPENIM_ROOT}/config/* ${OPENIM_CONFIG_DIR}/" + + ${OPENIM_ROOT}/scripts/genconfig.sh ${ENV_FILE} ${OPENIM_ROOT}/deployments/templates/config.yaml > ${OPENIM_CONFIG_DIR}/config.yaml + ${OPENIM_ROOT}/scripts/genconfig.sh ${ENV_FILE} ${OPENIM_ROOT}/deployments/templates/prometheus.yml > ${OPENIM_CONFIG_DIR}/prometheus.yml + + openim::util::check_ports ${OPENIM_DEPENDENCY_PORT_LISTARIES[@]} + + ${OPENIM_ROOT}/scripts/install/openim-msggateway.sh openim::msggateway::install || return 1 + ${OPENIM_ROOT}/scripts/install/openim-msgtransfer.sh openim::msgtransfer::install || return 1 + ${OPENIM_ROOT}/scripts/install/openim-push.sh openim::push::install || return 1 + ${OPENIM_ROOT}/scripts/install/openim-crontask.sh openim::crontask::install || return 1 + ${OPENIM_ROOT}/scripts/install/openim-rpc.sh openim::rpc::install || return 1 + ${OPENIM_ROOT}/scripts/install/openim-api.sh openim::api::install || return 1 + + openim::common::sudo "cp -r ${OPENIM_ROOT}/deployments/templates/openim.target /etc/systemd/system/openim.target" + openim::common::sudo "systemctl daemon-reload" + openim::common::sudo "systemctl restart openim.target" + openim::common::sudo "systemctl enable openim.target" + openim::log::success "openim install success" } function openim::uninstall::uninstall_openim() { - openim::log::info "uninstall openim" - - ${OPENIM_ROOT}/scripts/install/openim-msggateway.sh openim::msggateway::uninstall || return 1 - ${OPENIM_ROOT}/scripts/install/openim-msgtransfer.sh openim::msgtransfer::uninstall || return 1 - ${OPENIM_ROOT}/scripts/install/openim-push.sh openim::push::uninstall || return 1 - ${OPENIM_ROOT}/scripts/install/openim-crontask.sh openim::crontask::uninstall || return 1 - 
${OPENIM_ROOT}/scripts/install/openim-rpc.sh openim::rpc::uninstall || return 1 - ${OPENIM_ROOT}/scripts/install/openim-api.sh openim::api::uninstall || return 1 - - set +o errexit - openim::common::sudo "systemctl stop openim.target" - openim::common::sudo "systemctl disable openim.target" - openim::common::sudo "rm -f /etc/systemd/system/openim.target" - set -o errexit - openim::log::success "openim uninstall success" + openim::log::info "uninstall openim" + + ${OPENIM_ROOT}/scripts/install/openim-msggateway.sh openim::msggateway::uninstall || return 1 + ${OPENIM_ROOT}/scripts/install/openim-msgtransfer.sh openim::msgtransfer::uninstall || return 1 + ${OPENIM_ROOT}/scripts/install/openim-push.sh openim::push::uninstall || return 1 + ${OPENIM_ROOT}/scripts/install/openim-crontask.sh openim::crontask::uninstall || return 1 + ${OPENIM_ROOT}/scripts/install/openim-rpc.sh openim::rpc::uninstall || return 1 + ${OPENIM_ROOT}/scripts/install/openim-api.sh openim::api::uninstall || return 1 + + set +o errexit + openim::common::sudo "systemctl stop openim.target" + openim::common::sudo "systemctl disable openim.target" + openim::common::sudo "rm -f /etc/systemd/system/openim.target" + set -o errexit + openim::log::success "openim uninstall success" } function openim::install::status() { - openim::log::info "check openim status" - - ${OPENIM_ROOT}/scripts/install/openim-msggateway.sh openim::msggateway::status || return 1 - ${OPENIM_ROOT}/scripts/install/openim-msgtransfer.sh openim::msgtransfer::status || return 1 - ${OPENIM_ROOT}/scripts/install/openim-push.sh openim::push::status || return 1 - ${OPENIM_ROOT}/scripts/install/openim-crontask.sh openim::crontask::status || return 1 - ${OPENIM_ROOT}/scripts/install/openim-rpc.sh openim::rpc::status || return 1 - ${OPENIM_ROOT}/scripts/install/openim-api.sh openim::api::status || return 1 - - openim::log::success "openim status success" + openim::log::info "check openim status" + + ${OPENIM_ROOT}/scripts/install/openim-msggateway.sh openim::msggateway::status || return 1 + ${OPENIM_ROOT}/scripts/install/openim-msgtransfer.sh openim::msgtransfer::status || return 1 + ${OPENIM_ROOT}/scripts/install/openim-push.sh openim::push::status || return 1 + ${OPENIM_ROOT}/scripts/install/openim-crontask.sh openim::crontask::status || return 1 + ${OPENIM_ROOT}/scripts/install/openim-rpc.sh openim::rpc::status || return 1 + ${OPENIM_ROOT}/scripts/install/openim-api.sh openim::api::status || return 1 + + openim::log::success "openim status success" } # If no arguments are provided, show help if [[ $# -eq 0 ]]; then - openim::install::show_help - exit 0 + openim::install::show_help + exit 0 fi # Argument parsing to call functions based on user input while (( "$#" )); do - case "$1" in - -i|--install) - openim::install::install_openim - shift - ;; - -u|--uninstall) - openim::uninstall::uninstall_openim - shift - ;; - -s|--status) - openim::install::status - shift - ;; - -h|--help|*) - openim::install::show_help - exit 0 - ;; - esac + case "$1" in + -i|--install) + openim::install::install_openim + shift + ;; + -u|--uninstall) + openim::uninstall::uninstall_openim + shift + ;; + -s|--status) + openim::install::status + shift + ;; + -h|--help|*) + openim::install::show_help + exit 0 + ;; + esac done \ No newline at end of file diff --git a/scripts/install/openim-api.sh b/scripts/install/openim-api.sh index 9f66d0ba06..2c3c19afb5 100755 --- a/scripts/install/openim-api.sh +++ b/scripts/install/openim-api.sh @@ -34,55 +34,55 @@ readonly OPENIM_API_SERVICE_TARGETS=( 
readonly OPENIM_API_SERVICE_LISTARIES=("${OPENIM_API_SERVICE_TARGETS[@]##*/}") function openim::api::start() { - echo "++ OPENIM_API_SERVICE_LISTARIES: ${OPENIM_API_SERVICE_LISTARIES[@]}" - echo "++ OPENIM_API_PORT_LISTARIES: ${OPENIM_API_PORT_LISTARIES[@]}" - echo "++ OpenIM API config path: ${OPENIM_API_CONFIG}" - - openim::log::info "Starting ${SERVER_NAME} ..." - - printf "+------------------------+--------------+\n" - printf "| Service Name | Port |\n" - printf "+------------------------+--------------+\n" - - length=${#OPENIM_API_SERVICE_LISTARIES[@]} - - for ((i=0; i<$length; i++)); do + echo "++ OPENIM_API_SERVICE_LISTARIES: ${OPENIM_API_SERVICE_LISTARIES[@]}" + echo "++ OPENIM_API_PORT_LISTARIES: ${OPENIM_API_PORT_LISTARIES[@]}" + echo "++ OpenIM API config path: ${OPENIM_API_CONFIG}" + + openim::log::info "Starting ${SERVER_NAME} ..." + + printf "+------------------------+--------------+\n" + printf "| Service Name | Port |\n" + printf "+------------------------+--------------+\n" + + length=${#OPENIM_API_SERVICE_LISTARIES[@]} + + for ((i=0; i<$length; i++)); do printf "| %-22s | %6s |\n" "${OPENIM_API_SERVICE_LISTARIES[$i]}" "${OPENIM_API_PORT_LISTARIES[$i]}" printf "+------------------------+--------------+\n" - done - # start all api services - for ((i = 0; i < ${#OPENIM_API_SERVICE_LISTARIES[*]}; i++)); do + done + # start all api services + for ((i = 0; i < ${#OPENIM_API_SERVICE_LISTARIES[*]}; i++)); do openim::util::stop_services_on_ports ${OPENIM_API_PORT_LISTARIES[$i]} openim::log::info "OpenIM ${OPENIM_API_SERVICE_LISTARIES[$i]} config path: ${OPENIM_API_CONFIG}" - + # Get the service and Prometheus ports. OPENIM_API_SERVICE_PORTS=( $(openim::util::list-to-string ${OPENIM_API_PORT_LISTARIES[$i]}) ) - + # TODO Only one port is supported. An error occurs on multiple ports if [ ${#OPENIM_API_SERVICE_PORTS[@]} -ne 1 ]; then - openim::log::error_exit "Set only one port for ${OPENIM_API_SERVICE_LISTARIES[$i]} service." + openim::log::error_exit "Set only one port for ${OPENIM_API_SERVICE_LISTARIES[$i]} service." fi - + for ((j = 0; j < ${#OPENIM_API_SERVICE_PORTS[@]}; j++)); do - openim::log::info "Starting ${OPENIM_API_SERVICE_LISTARIES[$i]} service, port: ${OPENIM_API_SERVICE_PORTS[j]}, binary root: ${OPENIM_OUTPUT_HOSTBIN}/${OPENIM_API_SERVICE_LISTARIES[$i]}" - openim::api::start_service "${OPENIM_API_SERVICE_LISTARIES[$i]}" "${OPENIM_API_PORT_LISTARIES[j]}" - sleep 2 - done + openim::log::info "Starting ${OPENIM_API_SERVICE_LISTARIES[$i]} service, port: ${OPENIM_API_SERVICE_PORTS[j]}, binary root: ${OPENIM_OUTPUT_HOSTBIN}/${OPENIM_API_SERVICE_LISTARIES[$i]}" + openim::api::start_service "${OPENIM_API_SERVICE_LISTARIES[$i]}" "${OPENIM_API_PORT_LISTARIES[j]}" + sleep 2 done - - OPENIM_API_PORT_STRINGARIES=( $(openim::util::list-to-string ${OPENIM_API_PORT_LISTARIES[@]}) ) - openim::util::check_ports ${OPENIM_API_PORT_STRINGARIES[@]} + done + + OPENIM_API_PORT_STRINGARIES=( $(openim::util::list-to-string ${OPENIM_API_PORT_LISTARIES[@]}) ) + openim::util::check_ports ${OPENIM_API_PORT_STRINGARIES[@]} } function openim::api::start_service() { local binary_name="$1" local service_port="$2" local prometheus_port="$3" - + local cmd="${OPENIM_OUTPUT_HOSTBIN}/${binary_name} --port ${service_port} -c ${OPENIM_API_CONFIG}" - + nohup ${cmd} >> "${LOG_FILE}" 2>&1 & - + if [ $? -ne 0 ]; then openim::log::error_exit "Failed to start ${binary_name} on port ${service_port}." 
fi @@ -100,61 +100,61 @@ EOF # install openim-api function openim::api::install() { - openim::log::info "Installing ${SERVER_NAME} ..." - - pushd "${OPENIM_ROOT}" - - # 1. Build openim-api - make build BINS=${SERVER_NAME} - openim::common::sudo "cp -r ${OPENIM_OUTPUT_HOSTBIN}/${SERVER_NAME} ${OPENIM_INSTALL_DIR}/${SERVER_NAME}" - openim::log::status "${SERVER_NAME} binary: ${OPENIM_INSTALL_DIR}/${SERVER_NAME}/${SERVER_NAME}" - - # 2. Generate and install the openim-api configuration file (config) - openim::log::status "${SERVER_NAME} config file: ${OPENIM_CONFIG_DIR}/config.yaml" - - # 3. Create and install the ${SERVER_NAME} systemd unit file - echo ${LINUX_PASSWORD} | sudo -S bash -c \ - "SERVER_NAME=${SERVER_NAME} ./scripts/genconfig.sh ${ENV_FILE} deployments/templates/openim.service > ${SYSTEM_FILE_PATH}" - openim::log::status "${SERVER_NAME} systemd file: ${SYSTEM_FILE_PATH}" - - # 4. Start the openim-api service - openim::common::sudo "systemctl daemon-reload" - openim::common::sudo "systemctl restart ${SERVER_NAME}" - openim::common::sudo "systemctl enable ${SERVER_NAME}" - openim::api::status || return 1 - openim::api::info - - openim::log::info "install ${SERVER_NAME} successfully" - popd + openim::log::info "Installing ${SERVER_NAME} ..." + + pushd "${OPENIM_ROOT}" + + # 1. Build openim-api + make build BINS=${SERVER_NAME} + openim::common::sudo "cp -r ${OPENIM_OUTPUT_HOSTBIN}/${SERVER_NAME} ${OPENIM_INSTALL_DIR}/${SERVER_NAME}" + openim::log::status "${SERVER_NAME} binary: ${OPENIM_INSTALL_DIR}/${SERVER_NAME}/${SERVER_NAME}" + + # 2. Generate and install the openim-api configuration file (config) + openim::log::status "${SERVER_NAME} config file: ${OPENIM_CONFIG_DIR}/config.yaml" + + # 3. Create and install the ${SERVER_NAME} systemd unit file + echo ${LINUX_PASSWORD} | sudo -S bash -c \ + "SERVER_NAME=${SERVER_NAME} ./scripts/genconfig.sh ${ENV_FILE} deployments/templates/openim.service > ${SYSTEM_FILE_PATH}" + openim::log::status "${SERVER_NAME} systemd file: ${SYSTEM_FILE_PATH}" + + # 4. Start the openim-api service + openim::common::sudo "systemctl daemon-reload" + openim::common::sudo "systemctl restart ${SERVER_NAME}" + openim::common::sudo "systemctl enable ${SERVER_NAME}" + openim::api::status || return 1 + openim::api::info + + openim::log::info "install ${SERVER_NAME} successfully" + popd } # Unload function openim::api::uninstall() { - openim::log::info "Uninstalling ${SERVER_NAME} ..." - - set +o errexit - openim::common::sudo "systemctl stop ${SERVER_NAME}" - openim::common::sudo "systemctl disable ${SERVER_NAME}" - openim::common::sudo "rm -f ${OPENIM_INSTALL_DIR}/${SERVER_NAME}" - openim::common::sudo "rm -f ${OPENIM_CONFIG_DIR}/${SERVER_NAME}.yaml" - openim::common::sudo "rm -f /etc/systemd/system/${SERVER_NAME}.service" - set -o errexit - openim::log::info "uninstall ${SERVER_NAME} successfully" + openim::log::info "Uninstalling ${SERVER_NAME} ..." + + set +o errexit + openim::common::sudo "systemctl stop ${SERVER_NAME}" + openim::common::sudo "systemctl disable ${SERVER_NAME}" + openim::common::sudo "rm -f ${OPENIM_INSTALL_DIR}/${SERVER_NAME}" + openim::common::sudo "rm -f ${OPENIM_CONFIG_DIR}/${SERVER_NAME}.yaml" + openim::common::sudo "rm -f /etc/systemd/system/${SERVER_NAME}.service" + set -o errexit + openim::log::info "uninstall ${SERVER_NAME} successfully" } # Status Check function openim::api::status() { - openim::log::info "Checking ${SERVER_NAME} status ..." - - # Check the running status of the ${SERVER_NAME}. 
If active (running) is displayed, the ${SERVER_NAME} is started successfully. - systemctl status ${SERVER_NAME}|grep -q 'active' || { - openim::log::error "${SERVER_NAME} failed to start, maybe not installed properly" - return 1 - } - - openim::util::check_ports ${OPENIM_API_PORT_LISTARIES[@]} + openim::log::info "Checking ${SERVER_NAME} status ..." + + # Check the running status of the ${SERVER_NAME}. If active (running) is displayed, the ${SERVER_NAME} is started successfully. + systemctl status ${SERVER_NAME}|grep -q 'active' || { + openim::log::error "${SERVER_NAME} failed to start, maybe not installed properly" + return 1 + } + + openim::util::check_ports ${OPENIM_API_PORT_LISTARIES[@]} } if [[ "$*" =~ openim::api:: ]];then - eval $* + eval $* fi diff --git a/scripts/install/openim-crontask.sh b/scripts/install/openim-crontask.sh index 26dc1a47f8..cc9e686ffe 100755 --- a/scripts/install/openim-crontask.sh +++ b/scripts/install/openim-crontask.sh @@ -13,12 +13,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# +# # OpenIM CronTask Control Script -# +# # Description: # This script provides a control interface for the OpenIM CronTask service within a Linux environment. It supports two installation methods: installation via function calls to systemctl, and direct installation through background processes. -# +# # Features: # 1. Robust error handling leveraging Bash built-ins such as 'errexit', 'nounset', and 'pipefail'. # 2. Capability to source common utility functions and configurations, ensuring environmental consistency. @@ -30,13 +30,13 @@ # 1. Direct Script Execution: # This will start the OpenIM CronTask directly through a background process. # Example: ./openim-crontask.sh openim::crontask::start -# +# # 2. Controlling through Functions for systemctl operations: # Specific operations like installation, uninstallation, and status check can be executed by passing the respective function name as an argument to the script. # Example: ./openim-crontask.sh openim::crontask::install -# +# # Note: Ensure that the appropriate permissions and environmental variables are set prior to script execution. -# +# OPENIM_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. && pwd -P) [[ -z ${COMMON_SOURCED} ]] && source "${OPENIM_ROOT}"/scripts/install/common.sh @@ -44,14 +44,14 @@ OPENIM_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. 
&& pwd -P) SERVER_NAME="openim-crontask" function openim::crontask::start() { - openim::log::info "Start OpenIM Cron, binary root: ${SERVER_NAME}" - openim::log::status "Start OpenIM Cron, path: ${OPENIM_CRONTASK_BINARY}" - - openim::util::stop_services_with_name ${OPENIM_CRONTASK_BINARY} - - openim::log::status "start cron_task process, path: ${OPENIM_CRONTASK_BINARY}" - nohup ${OPENIM_CRONTASK_BINARY} -c ${OPENIM_PUSH_CONFIG} >> ${LOG_FILE} 2>&1 & - openim::util::check_process_names ${SERVER_NAME} + openim::log::info "Start OpenIM Cron, binary root: ${SERVER_NAME}" + openim::log::status "Start OpenIM Cron, path: ${OPENIM_CRONTASK_BINARY}" + + openim::util::stop_services_with_name ${OPENIM_CRONTASK_BINARY} + + openim::log::status "start cron_task process, path: ${OPENIM_CRONTASK_BINARY}" + nohup ${OPENIM_CRONTASK_BINARY} -c ${OPENIM_PUSH_CONFIG} >> ${LOG_FILE} 2>&1 & + openim::util::check_process_names ${SERVER_NAME} } ###################################### Linux Systemd ###################################### @@ -67,28 +67,28 @@ EOF # install openim-crontask function openim::crontask::install() { pushd "${OPENIM_ROOT}" - + # 1. Build openim-crontask make build BINS=${SERVER_NAME} - + openim::common::sudo "cp -r ${OPENIM_OUTPUT_HOSTBIN}/${SERVER_NAME} ${OPENIM_INSTALL_DIR}/${SERVER_NAME}" openim::log::status "${SERVER_NAME} binary: ${OPENIM_INSTALL_DIR}/${SERVER_NAME}/${SERVER_NAME}" - + # 2. Generate and install the openim-crontask configuration file (openim-crontask.yaml) openim::log::status "${SERVER_NAME} config file: ${OPENIM_CONFIG_DIR}/config.yaml" - + # 3. Create and install the ${SERVER_NAME} systemd unit file echo ${LINUX_PASSWORD} | sudo -S bash -c \ - "SERVER_NAME=${SERVER_NAME} ./scripts/genconfig.sh ${ENV_FILE} deployments/templates/openim.service > ${SYSTEM_FILE_PATH}" + "SERVER_NAME=${SERVER_NAME} ./scripts/genconfig.sh ${ENV_FILE} deployments/templates/openim.service > ${SYSTEM_FILE_PATH}" openim::log::status "${SERVER_NAME} systemd file: ${SYSTEM_FILE_PATH}" - + # 4. Start the openim-crontask service openim::common::sudo "systemctl daemon-reload" openim::common::sudo "systemctl restart ${SERVER_NAME}" openim::common::sudo "systemctl enable ${SERVER_NAME}" openim::crontask::status || return 1 openim::crontask::info - + openim::log::info "install ${SERVER_NAME} successfully" popd } diff --git a/scripts/install/openim-man.sh b/scripts/install/openim-man.sh index f23c54a20b..fac5cebea4 100755 --- a/scripts/install/openim-man.sh +++ b/scripts/install/openim-man.sh @@ -17,7 +17,7 @@ # # Description: # This script manages the man pages for the OpenIM software suite. -# It provides facilities to install, uninstall, and verify the +# It provides facilities to install, uninstall, and verify the # installation status of the man pages related to OpenIM components. # # Usage: @@ -26,15 +26,15 @@ # ./openim-man.sh openim::man::status - Check installation status # # Dependencies: -# - Assumes there's a common.sh in "${OPENIM_ROOT}/scripts/install/" +# - Assumes there's a common.sh in "${OPENIM_ROOT}/scripts/install/" # containing shared functions and variables. -# - Relies on the script "${OPENIM_ROOT}/scripts/update-generated-docs.sh" +# - Relies on the script "${OPENIM_ROOT}/scripts/update-generated-docs.sh" # to generate the man pages. # # Notes: -# - This script must be run with appropriate permissions to modify the +# - This script must be run with appropriate permissions to modify the # system man directories. 
-# - Always ensure you're in the script's directory or provide the correct +# - Always ensure you're in the script's directory or provide the correct # path when executing. ################################################################################ @@ -54,43 +54,43 @@ EOF # Install the man pages for openim function openim::man::install() { - # Navigate to the openim root directory - pushd "${OPENIM_ROOT}" > /dev/null - - # Generate man pages for each component - "${OPENIM_ROOT}/scripts/update-generated-docs.sh" - openim::common::sudo "cp docs/man/man1/* /usr/share/man/man1/" - - # Verify installation status - if openim::man::status; then - openim::log::info "Installed openim-server man page successfully" - openim::man::info - fi - - # Return to the original directory - popd > /dev/null + # Navigate to the openim root directory + pushd "${OPENIM_ROOT}" > /dev/null + + # Generate man pages for each component + "${OPENIM_ROOT}/scripts/update-generated-docs.sh" + openim::common::sudo "cp docs/man/man1/* /usr/share/man/man1/" + + # Verify installation status + if openim::man::status; then + openim::log::info "Installed openim-server man page successfully" + openim::man::info + fi + + # Return to the original directory + popd > /dev/null } # Uninstall the man pages for openim function openim::man::uninstall() { - # Turn off exit-on-error temporarily to handle non-existing files gracefully - set +o errexit - openim::common::sudo "rm -f /usr/share/man/man1/openim-*" - set -o errexit - - openim::log::info "Uninstalled openim man pages successfully" + # Turn off exit-on-error temporarily to handle non-existing files gracefully + set +o errexit + openim::common::sudo "rm -f /usr/share/man/man1/openim-*" + set -o errexit + + openim::log::info "Uninstalled openim man pages successfully" } # Check the installation status of the man pages function openim::man::status() { - if ! ls /usr/share/man/man1/openim-* &> /dev/null; then - openim::log::error "OpenIM man files not found. Perhaps they were not installed correctly." - return 1 - fi - return 0 + if ! ls /usr/share/man/man1/openim-* &> /dev/null; then + openim::log::error "OpenIM man files not found. Perhaps they were not installed correctly." 
+ return 1 + fi + return 0 } # Execute the appropriate function based on the given arguments if [[ "$*" =~ openim::man:: ]]; then - eval "$*" + eval "$*" fi diff --git a/scripts/install/openim-msggateway.sh b/scripts/install/openim-msggateway.sh index 2b2a84b12a..d9fec4928c 100755 --- a/scripts/install/openim-msggateway.sh +++ b/scripts/install/openim-msggateway.sh @@ -26,19 +26,19 @@ openim::util::set_max_fd 200000 SERVER_NAME="openim-msggateway" function openim::msggateway::start() { - openim::log::info "Start OpenIM Msggateway, binary root: ${SERVER_NAME}" - openim::log::status "Start OpenIM Msggateway, path: ${OPENIM_MSGGATEWAY_BINARY}" - - openim::util::stop_services_with_name ${OPENIM_MSGGATEWAY_BINARY} - - # OpenIM message gateway service port - OPENIM_MESSAGE_GATEWAY_PORTS=$(openim::util::list-to-string ${OPENIM_MESSAGE_GATEWAY_PORT} ) + openim::log::info "Start OpenIM Msggateway, binary root: ${SERVER_NAME}" + openim::log::status "Start OpenIM Msggateway, path: ${OPENIM_MSGGATEWAY_BINARY}" + + openim::util::stop_services_with_name ${OPENIM_MSGGATEWAY_BINARY} + + # OpenIM message gateway service port + OPENIM_MESSAGE_GATEWAY_PORTS=$(openim::util::list-to-string ${OPENIM_MESSAGE_GATEWAY_PORT} ) read -a OPENIM_MSGGATEWAY_PORTS_ARRAY <<< ${OPENIM_MESSAGE_GATEWAY_PORTS} openim::util::stop_services_on_ports ${OPENIM_MSGGATEWAY_PORTS_ARRAY[*]} # OpenIM WS port OPENIM_WS_PORTS=$(openim::util::list-to-string ${OPENIM_WS_PORT} ) read -a OPENIM_WS_PORTS_ARRAY <<< ${OPENIM_WS_PORTS} - + # Message Gateway Prometheus port of the service MSG_GATEWAY_PROM_PORTS=$(openim::util::list-to-string ${MSG_GATEWAY_PROM_PORT} ) read -a MSG_GATEWAY_PROM_PORTS_ARRAY <<< ${MSG_GATEWAY_PROM_PORTS} @@ -123,7 +123,7 @@ function openim::msggateway::status() { # Check the running status of the ${SERVER_NAME}. If active (running) is displayed, the ${SERVER_NAME} is started successfully. 
systemctl status ${SERVER_NAME}|grep -q 'active' || { openim::log::error "${SERVER_NAME} failed to start, maybe not installed properly" - + return 1 } diff --git a/scripts/install/openim-msgtransfer.sh b/scripts/install/openim-msgtransfer.sh index f6039637c9..1cead3a9af 100755 --- a/scripts/install/openim-msgtransfer.sh +++ b/scripts/install/openim-msgtransfer.sh @@ -28,59 +28,59 @@ openim::util::set_max_fd 200000 SERVER_NAME="openim-msgtransfer" function openim::msgtransfer::start() { - openim::log::info "Start OpenIM Msggateway, binary root: ${SERVER_NAME}" - openim::log::status "Start OpenIM Msggateway, path: ${OPENIM_MSGTRANSFER_BINARY}" - - openim::util::stop_services_with_name ${OPENIM_MSGTRANSFER_BINARY} - - # Message Transfer Prometheus port list - MSG_TRANSFER_PROM_PORTS=(openim::util::list-to-string ${MSG_TRANSFER_PROM_PORT} ) - - openim::log::status "OpenIM Prometheus ports: ${MSG_TRANSFER_PROM_PORTS[*]}" - - openim::log::status "OpenIM Msggateway config path: ${OPENIM_MSGTRANSFER_CONFIG}" - - openim::log::info "openim maggateway num: ${OPENIM_MSGGATEWAY_NUM}" - - if [ "${OPENIM_MSGGATEWAY_NUM}" -lt 1 ]; then + openim::log::info "Start OpenIM Msggateway, binary root: ${SERVER_NAME}" + openim::log::status "Start OpenIM Msggateway, path: ${OPENIM_MSGTRANSFER_BINARY}" + + openim::util::stop_services_with_name ${OPENIM_MSGTRANSFER_BINARY} + + # Message Transfer Prometheus port list + MSG_TRANSFER_PROM_PORTS=(openim::util::list-to-string ${MSG_TRANSFER_PROM_PORT} ) + + openim::log::status "OpenIM Prometheus ports: ${MSG_TRANSFER_PROM_PORTS[*]}" + + openim::log::status "OpenIM Msggateway config path: ${OPENIM_MSGTRANSFER_CONFIG}" + + openim::log::info "openim maggateway num: ${OPENIM_MSGGATEWAY_NUM}" + + if [ "${OPENIM_MSGGATEWAY_NUM}" -lt 1 ]; then opeim::log::error_exit "OPENIM_MSGGATEWAY_NUM must be greater than 0" - fi - - if [ ${OPENIM_MSGGATEWAY_NUM} -ne $((${#MSG_TRANSFER_PROM_PORTS[@]} - 1)) ]; then + fi + + if [ ${OPENIM_MSGGATEWAY_NUM} -ne $((${#MSG_TRANSFER_PROM_PORTS[@]} - 1)) ]; then openim::log::error_exit "OPENIM_MSGGATEWAY_NUM must be equal to the number of MSG_TRANSFER_PROM_PORTS" + fi + + for (( i=0; i<$OPENIM_MSGGATEWAY_NUM; i++ )) do + openim::log::info "prometheus port: ${MSG_TRANSFER_PROM_PORTS[$i]}" + PROMETHEUS_PORT_OPTION="" + if [[ -n "${OPENIM_PROMETHEUS_PORTS[$i]}" ]]; then + PROMETHEUS_PORT_OPTION="--prometheus_port ${OPENIM_PROMETHEUS_PORTS[$i]}" fi - - for (( i=0; i<$OPENIM_MSGGATEWAY_NUM; i++ )) do - openim::log::info "prometheus port: ${MSG_TRANSFER_PROM_PORTS[$i]}" - PROMETHEUS_PORT_OPTION="" - if [[ -n "${OPENIM_PROMETHEUS_PORTS[$i]}" ]]; then - PROMETHEUS_PORT_OPTION="--prometheus_port ${OPENIM_PROMETHEUS_PORTS[$i]}" - fi - nohup ${OPENIM_MSGTRANSFER_BINARY} ${PROMETHEUS_PORT_OPTION} -c ${OPENIM_MSGTRANSFER_CONFIG} -n ${i}>> ${LOG_FILE} 2>&1 & - done - - openim::util::check_process_names "${OPENIM_OUTPUT_HOSTBIN}/${SERVER_NAME}" + nohup ${OPENIM_MSGTRANSFER_BINARY} ${PROMETHEUS_PORT_OPTION} -c ${OPENIM_MSGTRANSFER_CONFIG} -n ${i}>> ${LOG_FILE} 2>&1 & + done + + openim::util::check_process_names "${OPENIM_OUTPUT_HOSTBIN}/${SERVER_NAME}" } function openim::msgtransfer::check() { - PIDS=$(pgrep -f "${OPENIM_OUTPUT_HOSTBIN}/openim-msgtransfer") - - NUM_PROCESSES=$(echo "$PIDS" | wc -l) - - if [ "$NUM_PROCESSES" -eq "$OPENIM_MSGGATEWAY_NUM" ]; then - openim::log::info "Found $OPENIM_MSGGATEWAY_NUM processes named $OPENIM_OUTPUT_HOSTBIN" - for PID in $PIDS; do - if [[ "$OSTYPE" == "linux-gnu"* ]]; then - ps -p $PID -o pid,cmd - elif [[ "$OSTYPE" == 
"darwin"* ]]; then - ps -p $PID -o pid,comm - else - openim::log::error "Unsupported OS type: $OSTYPE" - fi - done - else - openim::log::error_exit "Expected $OPENIM_MSGGATEWAY_NUM openim msgtransfer processes, but found $NUM_PROCESSES msgtransfer processes." - fi + PIDS=$(pgrep -f "${OPENIM_OUTPUT_HOSTBIN}/openim-msgtransfer") + + NUM_PROCESSES=$(echo "$PIDS" | wc -l) + + if [ "$NUM_PROCESSES" -eq "$OPENIM_MSGGATEWAY_NUM" ]; then + openim::log::info "Found $OPENIM_MSGGATEWAY_NUM processes named $OPENIM_OUTPUT_HOSTBIN" + for PID in $PIDS; do + if [[ "$OSTYPE" == "linux-gnu"* ]]; then + ps -p $PID -o pid,cmd + elif [[ "$OSTYPE" == "darwin"* ]]; then + ps -p $PID -o pid,comm + else + openim::log::error "Unsupported OS type: $OSTYPE" + fi + done + else + openim::log::error_exit "Expected $OPENIM_MSGGATEWAY_NUM openim msgtransfer processes, but found $NUM_PROCESSES msgtransfer processes." + fi } ###################################### Linux Systemd ###################################### @@ -96,30 +96,30 @@ EOF # install openim-msgtransfer function openim::msgtransfer::install() { pushd "${OPENIM_ROOT}" - + # 1. Build openim-msgtransfer make build BINS=${SERVER_NAME} - + openim::common::sudo "cp -r ${OPENIM_OUTPUT_HOSTBIN}/${SERVER_NAME} ${OPENIM_INSTALL_DIR}/${SERVER_NAME}" openim::log::status "${SERVER_NAME} binary: ${OPENIM_INSTALL_DIR}/${SERVER_NAME}/${SERVER_NAME}" - + openim::log::status "${SERVER_NAME} binary: ${OPENIM_INSTALL_DIR}/bin/${SERVER_NAME}" - + # 2. Generate and install the openim-msgtransfer configuration file (openim-msgtransfer.yaml) # nono - + # 3. Create and install the ${SERVER_NAME} systemd unit file echo ${LINUX_PASSWORD} | sudo -S bash -c \ - "SERVER_NAME=${SERVER_NAME} ./scripts/genconfig.sh ${ENV_FILE} deployments/templates/openim.service > ${SYSTEM_FILE_PATH}" + "SERVER_NAME=${SERVER_NAME} ./scripts/genconfig.sh ${ENV_FILE} deployments/templates/openim.service > ${SYSTEM_FILE_PATH}" openim::log::status "${SERVER_NAME} systemd file: ${SYSTEM_FILE_PATH}" - + # 4. Start the openim-msgtransfer service openim::common::sudo "systemctl daemon-reload" openim::common::sudo "systemctl restart ${SERVER_NAME}" openim::common::sudo "systemctl enable ${SERVER_NAME}" openim::msgtransfer::status || return 1 openim::msgtransfer::info - + openim::log::info "install ${SERVER_NAME} successfully" popd } diff --git a/scripts/install/openim-push.sh b/scripts/install/openim-push.sh index c17b80e673..d43743e4f9 100755 --- a/scripts/install/openim-push.sh +++ b/scripts/install/openim-push.sh @@ -14,10 +14,10 @@ # limitations under the License. # # OpenIM Push Control Script -# +# # Description: # This script provides a control interface for the OpenIM Push service within a Linux environment. It supports two installation methods: installation via function calls to systemctl, and direct installation through background processes. -# +# # Features: # 1. Robust error handling leveraging Bash built-ins such as 'errexit', 'nounset', and 'pipefail'. # 2. Capability to source common utility functions and configurations, ensuring environmental consistency. @@ -29,7 +29,7 @@ # 1. Direct Script Execution: # This will start the OpenIM push directly through a background process. # Example: ./openim-push.sh -# +# # 2. Controlling through Functions for systemctl operations: # Specific operations like installation, uninstallation, and status check can be executed by passing the respective function name as an argument to the script. 
# Example: ./openim-push.sh openim::push::install @@ -39,7 +39,7 @@ # export OPENIM_PUSH_PORT="9090 9091 9092" # # Note: Ensure that the appropriate permissions and environmental variables are set prior to script execution. -# +# set -o errexit set +o nounset set -o pipefail @@ -50,30 +50,30 @@ OPENIM_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. && pwd -P) SERVER_NAME="openim-push" function openim::push::start() { - openim::log::status "Start OpenIM Push, binary root: ${SERVER_NAME}" - openim::log::info "Start OpenIM Push, path: ${OPENIM_PUSH_BINARY}" - - openim::log::status "prepare start push process, path: ${OPENIM_PUSH_BINARY}" - openim::log::status "prepare start push process, port: ${OPENIM_PUSH_PORT}, prometheus port: ${PUSH_PROM_PORT}" - - OPENIM_PUSH_PORTS_ARRAY=$(openim::util::list-to-string ${OPENIM_PUSH_PORT} ) - PUSH_PROM_PORTS_ARRAY=$(openim::util::list-to-string ${PUSH_PROM_PORT} ) - - openim::util::stop_services_with_name ${SERVER_NAME} - - openim::log::status "push port list: ${OPENIM_PUSH_PORTS_ARRAY[@]}" - openim::log::status "prometheus port list: ${PUSH_PROM_PORTS_ARRAY[@]}" - - if [ ${#OPENIM_PUSH_PORTS_ARRAY[@]} -ne ${#PUSH_PROM_PORTS_ARRAY[@]} ]; then - openim::log::error_exit "The length of the two port lists is different!" - fi - - for (( i=0; i<${#OPENIM_PUSH_PORTS_ARRAY[@]}; i++ )); do - openim::log::info "start push process, port: ${OPENIM_PUSH_PORTS_ARRAY[$i]}, prometheus port: ${PUSH_PROM_PORTS_ARRAY[$i]}" - nohup ${OPENIM_PUSH_BINARY} --port ${OPENIM_PUSH_PORTS_ARRAY[$i]} -c ${OPENIM_PUSH_CONFIG} --prometheus_port ${PUSH_PROM_PORTS_ARRAY[$i]} >> ${LOG_FILE} 2>&1 & - done - - openim::util::check_process_names ${SERVER_NAME} + openim::log::status "Start OpenIM Push, binary root: ${SERVER_NAME}" + openim::log::info "Start OpenIM Push, path: ${OPENIM_PUSH_BINARY}" + + openim::log::status "prepare start push process, path: ${OPENIM_PUSH_BINARY}" + openim::log::status "prepare start push process, port: ${OPENIM_PUSH_PORT}, prometheus port: ${PUSH_PROM_PORT}" + + OPENIM_PUSH_PORTS_ARRAY=$(openim::util::list-to-string ${OPENIM_PUSH_PORT} ) + PUSH_PROM_PORTS_ARRAY=$(openim::util::list-to-string ${PUSH_PROM_PORT} ) + + openim::util::stop_services_with_name ${SERVER_NAME} + + openim::log::status "push port list: ${OPENIM_PUSH_PORTS_ARRAY[@]}" + openim::log::status "prometheus port list: ${PUSH_PROM_PORTS_ARRAY[@]}" + + if [ ${#OPENIM_PUSH_PORTS_ARRAY[@]} -ne ${#PUSH_PROM_PORTS_ARRAY[@]} ]; then + openim::log::error_exit "The length of the two port lists is different!" + fi + + for (( i=0; i<${#OPENIM_PUSH_PORTS_ARRAY[@]}; i++ )); do + openim::log::info "start push process, port: ${OPENIM_PUSH_PORTS_ARRAY[$i]}, prometheus port: ${PUSH_PROM_PORTS_ARRAY[$i]}" + nohup ${OPENIM_PUSH_BINARY} --port ${OPENIM_PUSH_PORTS_ARRAY[$i]} -c ${OPENIM_PUSH_CONFIG} --prometheus_port ${PUSH_PROM_PORTS_ARRAY[$i]} >> ${LOG_FILE} 2>&1 & + done + + openim::util::check_process_names ${SERVER_NAME} } ###################################### Linux Systemd ###################################### @@ -89,27 +89,27 @@ EOF # install openim-push function openim::push::install() { pushd "${OPENIM_ROOT}" - + # 1. Build openim-push make build BINS=${SERVER_NAME} openim::common::sudo "cp -r ${OPENIM_OUTPUT_HOSTBIN}/${SERVER_NAME} ${OPENIM_INSTALL_DIR}/${SERVER_NAME}" openim::log::status "${SERVER_NAME} binary: ${OPENIM_INSTALL_DIR}/${SERVER_NAME}/${SERVER_NAME}" - + # 2. 
Generate and install the openim-push configuration file (config) openim::log::status "${SERVER_NAME} config file: ${OPENIM_CONFIG_DIR}/config.yaml" - + # 3. Create and install the ${SERVER_NAME} systemd unit file echo ${LINUX_PASSWORD} | sudo -S bash -c \ - "SERVER_NAME=${SERVER_NAME} ./scripts/genconfig.sh ${ENV_FILE} deployments/templates/openim.service > ${SYSTEM_FILE_PATH}" + "SERVER_NAME=${SERVER_NAME} ./scripts/genconfig.sh ${ENV_FILE} deployments/templates/openim.service > ${SYSTEM_FILE_PATH}" openim::log::status "${SERVER_NAME} systemd file: ${SYSTEM_FILE_PATH}" - + # 4. Start the openim-push service openim::common::sudo "systemctl daemon-reload" openim::common::sudo "systemctl restart ${SERVER_NAME}" openim::common::sudo "systemctl enable ${SERVER_NAME}" openim::push::status || return 1 openim::push::info - + openim::log::info "install ${SERVER_NAME} successfully" popd } @@ -133,7 +133,7 @@ function openim::push::status() { openim::log::error "${SERVER_NAME} failed to start, maybe not installed properly" return 1 } - + # The listening port is hardcode in the configuration file if echo | telnet ${OPENIM_MSGGATEWAY_HOST} ${OPENIM_PUSH_PORT} 2>&1|grep refused &>/dev/null;then # Assuming a different port for push openim::log::error "cannot access health check port, ${SERVER_NAME} maybe not startup" diff --git a/scripts/install/openim-rpc.sh b/scripts/install/openim-rpc.sh index db1526d6a2..966eef928d 100755 --- a/scripts/install/openim-rpc.sh +++ b/scripts/install/openim-rpc.sh @@ -15,10 +15,10 @@ # limitations under the License. # # OpenIM RPC Service Control Script -# +# # Description: # This script provides a control interface for the OpenIM RPC service within a Linux environment. It offers functionalities to start multiple RPC services, each denoted by their respective names under openim::rpc::service_name. -# +# # Features: # 1. Robust error handling using Bash built-ins like 'errexit', 'nounset', and 'pipefail'. # 2. The capability to source common utility functions and configurations to ensure uniform environmental settings. @@ -125,7 +125,7 @@ function openim::rpc::start() { openim::util::stop_services_on_ports ${OPENIM_RPC_PORT_LISTARIES[$i]} openim::log::info "OpenIM ${OPENIM_RPC_SERVICE_LISTARIES[$i]} config path: ${OPENIM_RPC_CONFIG}" - + # Get the service and Prometheus ports. OPENIM_RPC_SERVICE_PORTS=( $(openim::util::list-to-string ${OPENIM_RPC_PORT_LISTARIES[$i]}) ) read -a OPENIM_RPC_SERVICE_PORTS_ARRAY <<< ${OPENIM_RPC_SERVICE_PORTS} @@ -139,7 +139,7 @@ function openim::rpc::start() { done done - sleep 1 + sleep 5 openim::util::check_ports ${OPENIM_RPC_PORT_TARGETS[@]} # openim::util::check_ports ${OPENIM_RPC_PROM_PORT_TARGETS[@]} diff --git a/scripts/install/openim-tools.sh b/scripts/install/openim-tools.sh index 385df64f52..ac60a5f45e 100755 --- a/scripts/install/openim-tools.sh +++ b/scripts/install/openim-tools.sh @@ -18,9 +18,9 @@ # # Description: # This script is responsible for managing the lifecycle of OpenIM tools, which include starting, stopping, -# and handling pre and post operations. It's designed to be modular and extensible, ensuring that the +# and handling pre and post operations. It's designed to be modular and extensible, ensuring that the # individual operations can be managed separately, and integrated seamlessly with Linux systemd. -# +# # Features: # 1. Robust error handling using Bash built-ins like 'errexit', 'nounset', and 'pipefail'. # 2. 
The capability to source common utility functions and configurations to ensure uniform environmental settings. @@ -104,12 +104,7 @@ function openim::tools::start_service() { cmd="${cmd} --prometheus_port ${prometheus_port}" fi openim::log::status "Starting ${binary_name}..." - # Later, after discarding Docker, the Docker keyword is unreliable, and Kubepods is used - if grep -qE 'docker|kubepods' /proc/1/cgroup || [ -f /.dockerenv ]; then - ${cmd} >> "${LOG_FILE}" 2>&1 - else - ${cmd} | tee -a "${LOG_FILE}" - fi + ${cmd} | tee -a "${LOG_FILE}" } function openim::tools::start() { diff --git a/scripts/install/test.sh b/scripts/install/test.sh index 93a39f2985..4a78e45040 100755 --- a/scripts/install/test.sh +++ b/scripts/install/test.sh @@ -15,19 +15,19 @@ # limitations under the License. # # OpenIM RPC Service Test Control Script -# +# # This control script is designed to conduct various tests on the OpenIM RPC services. # It includes functions to perform smoke tests, API tests, and comprehensive service tests. # The script is intended to be used in a Linux environment with appropriate permissions and # environmental variables set. -# +# # It provides robust error handling and logging to facilitate debugging and service monitoring. # Functions within the script can be called directly or passed as arguments to perform # systematic testing, ensuring the integrity of the RPC services. -# +# # Test Functions: # - openim::test::smoke: Runs basic tests to ensure the fundamental functionality of the service. -# - openim::test::api: Executes a series of API tests covering authentication, user, friend, +# - openim::test::api: Executes a series of API tests covering authentication, user, friend, # group, and message functionalities. # - openim::test::test: Performs a complete test suite, invoking utility checks and all defined # test cases, and reports on their success. @@ -40,78 +40,80 @@ IAM_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. # API Server API Address:Port INSECURE_OPENIMAPI="http://${OPENIM_API_HOST}:${API_OPENIM_PORT}" INSECURE_OPENIMAUTO=${OPENIM_RPC_AUTH_HOST}:${OPENIM_AUTH_PORT} -CCURL="curl -f -s -XPOST" # Create -UCURL="curl -f -s -XPUT" # Update -RCURL="curl -f -s -XGET" # Retrieve +CCURL="curl -f -s -XPOST" # Create +UCURL="curl -f -s -XPUT" # Update +RCURL="curl -f -s -XGET" # Retrieve DCURL="curl -f -s -XDELETE" # Delete openim::test::check_error() { - local response=$1 - local err_code=$(echo "$response" | jq '.errCode') - openim::log::status "Response from user registration: $response" - if [[ "$err_code" != "0" ]]; then - openim::log::error_exit "Error occurred: $response, You can read the error code in the API documentation https://docs.openim.io/restapi/errcode" - else - openim::log::success "Operation was successful." - fi + local response=$1 + local err_code=$(echo "$response" | jq '.errCode') + openim::log::status "Response from user registration: $response" + if [[ "$err_code" != "0" ]]; then + openim::log::error_exit "Error occurred: $response, You can read the error code in the API documentation https://docs.openim.io/restapi/errcode" + else + openim::log::success "Operation was successful." + fi } # The `openim::test::auth` function serves as a test suite for authentication-related operations. function openim::test::auth() { - # 1. Retrieve and set the authentication token. - openim::test::get_token - - # 2. Force logout the test user from a specific platform. - openim::test::force_logout - - # Log the completion of the auth test suite. 
- openim::log::success "Auth test suite completed successfully." + # 1. Retrieve and set the authentication token. + openim::test::get_token + + # 2. Force logout the test user from a specific platform. + openim::test::force_logout + + # Log the completion of the auth test suite. + openim::log::success "Auth test suite completed successfully." } #################################### Auth Module #################################### # Define a function to get a token for a specific user openim::test::get_token() { - local user_id="${1:-openIM123456}" # Default user ID if not provided - token_response=$(${CCURL} "${OperationID}" "${Header}" ${INSECURE_OPENIMAPI}/auth/user_token \ - -d'{"secret": "'"$SECRET"'","platformID": 1,"userID": "'$user_id'"}') - token=$(echo $token_response | grep -Po 'token[" :]+\K[^"]+') - echo "$token" + local user_id="${1:-openIM123456}" # Default user ID if not provided + token_response=$( + ${CCURL} "${OperationID}" "${Header}" ${INSECURE_OPENIMAPI}/auth/user_token \ + -d'{"secret": "'"$SECRET"'","platformID": 1,"userID": "'$user_id'"}' + ) + token=$(echo $token_response | grep -Po 'token[" :]+\K[^"]+') + echo "$token" } - Header="-HContent-Type: application/json" OperationID="-HoperationID: 1646445464564" Token="-Htoken: $(openim::test::get_token)" # Forces a user to log out from the specified platform by user ID. openim::test::force_logout() { - local request_body=$(cat < /dev/null && ss -Version | grep 'iproute2' &> /dev/null; then port_check_command="ss" - elif command -v netstat &>/dev/null; then + elif command -v netstat &>/dev/null; then port_check_command="netstat" else openim::log::usage "unable to identify if chat is bound to port ${CHAT_PORT}. unable to find ss or netstat utilities." @@ -46,24 +46,24 @@ openim::chat::validate() { openim::log::usage "$(${port_check_command} -nat | grep "LISTEN" | grep "[\.:]${CHAT_PORT:?}")" exit 1 fi - + # need set the env of "CHAT_UNSUPPORTED_ARCH" on unstable arch. arch=$(uname -m) if [[ $arch =~ arm* ]]; then - export CHAT_UNSUPPORTED_ARCH=arm + export CHAT_UNSUPPORTED_ARCH=arm fi # validate installed version is at least equal to minimum version=$(chat --version | grep Version | head -n 1 | cut -d " " -f 3) if [[ $(openim::chat::version "${CHAT_VERSION}") -gt $(openim::chat::version "${version}") ]]; then - export PATH="${OPENIM_ROOT}"/third_party/chat:${PATH} - hash chat - echo "${PATH}" - version=$(chat --version | grep Version | head -n 1 | cut -d " " -f 3) - if [[ $(openim::chat::version "${CHAT_VERSION}") -gt $(openim::chat::version "${version}") ]]; then - openim::log::usage "chat version ${CHAT_VERSION} or greater required." - openim::log::info "You can use 'scripts/install-chat.sh' to install a copy in third_party/." - exit 1 - fi + export PATH="${OPENIM_ROOT}"/third_party/chat:${PATH} + hash chat + echo "${PATH}" + version=$(chat --version | grep Version | head -n 1 | cut -d " " -f 3) + if [[ $(openim::chat::version "${CHAT_VERSION}") -gt $(openim::chat::version "${version}") ]]; then + openim::log::usage "chat version ${CHAT_VERSION} or greater required." + openim::log::info "You can use 'scripts/install-chat.sh' to install a copy in third_party/." 
+ exit 1 + fi fi } @@ -74,7 +74,7 @@ openim::chat::version() { openim::chat::start() { # validate before running openim::chat::validate - + # Start chat CHAT_DIR=${CHAT_DIR:-$(mktemp -d 2>/dev/null || mktemp -d -t test-chat.XXXXXX)} if [[ -d "${ARTIFACTS:-}" ]]; then @@ -85,7 +85,7 @@ openim::chat::start() { openim::log::info "chat --advertise-client-urls ${OPENIM_INTEGRATION_CHAT_URL} --data-dir ${CHAT_DIR} --listen-client-urls http://${CHAT_HOST}:${CHAT_PORT} --log-level=${CHAT_LOGLEVEL} 2> \"${CHAT_LOGFILE}\" >/dev/null" chat --advertise-client-urls "${OPENIM_INTEGRATION_CHAT_URL}" --data-dir "${CHAT_DIR}" --listen-client-urls "${OPENIM_INTEGRATION_CHAT_URL}" --log-level="${CHAT_LOGLEVEL}" 2> "${CHAT_LOGFILE}" >/dev/null & CHAT_PID=$! - + echo "Waiting for chat to come up." openim::util::wait_for_url "${OPENIM_INTEGRATION_CHAT_URL}/health" "chat: " 0.25 80 curl -fs -X POST "${OPENIM_INTEGRATION_CHAT_URL}/v3/kv/put" -d '{"key": "X3Rlc3Q=", "value": ""}' @@ -108,7 +108,7 @@ openim::chat::start_scraping() { } openim::chat::scrape() { - curl -s -S "${OPENIM_INTEGRATION_CHAT_URL}/metrics" > "${CHAT_SCRAPE_DIR}/next" && mv "${CHAT_SCRAPE_DIR}/next" "${CHAT_SCRAPE_DIR}/$(date +%s).scrape" + curl -s -S "${OPENIM_INTEGRATION_CHAT_URL}/metrics" > "${CHAT_SCRAPE_DIR}/next" && mv "${CHAT_SCRAPE_DIR}/next" "${CHAT_SCRAPE_DIR}/$(date +%s).scrape" } openim::chat::stop() { @@ -144,17 +144,17 @@ openim::chat::install() { ( local os local arch - + os=$(openim::util::host_os) arch=$(openim::util::host_arch) - + cd "${OPENIM_ROOT}/third_party" || return 1 if [[ $(readlink chat) == chat-v${CHAT_VERSION}-${os}-* ]]; then openim::log::info "chat v${CHAT_VERSION} already installed. To use:" openim::log::info "export PATH=\"$(pwd)/chat:\${PATH}\"" return #already installed fi - + if [[ ${os} == "darwin" ]]; then download_file="chat-v${CHAT_VERSION}-${os}-${arch}.zip" url="https://github.com/chat-io/chat/releases/download/v${CHAT_VERSION}/${download_file}" @@ -162,7 +162,7 @@ openim::chat::install() { unzip -o "${download_file}" ln -fns "chat-v${CHAT_VERSION}-${os}-${arch}" chat rm "${download_file}" - elif [[ ${os} == "linux" ]]; then + elif [[ ${os} == "linux" ]]; then url="https://github.com/coreos/chat/releases/download/v${CHAT_VERSION}/chat-v${CHAT_VERSION}-${os}-${arch}.tar.gz" download_file="chat-v${CHAT_VERSION}-${os}-${arch}.tar.gz" openim::util::download_file "${url}" "${download_file}" diff --git a/scripts/lib/color.sh b/scripts/lib/color.sh index 4d69c17712..744fccf5ad 100755 --- a/scripts/lib/color.sh +++ b/scripts/lib/color.sh @@ -21,24 +21,24 @@ # shellcheck disable=SC2034 if [ -z "${COLOR_OPEN+x}" ]; then - COLOR_OPEN=1 + COLOR_OPEN=1 fi # Function for colored echo openim::color::echo() { - COLOR=$1 - [ $COLOR_OPEN -eq 1 ] && echo -e "${COLOR} $(date '+%Y-%m-%d %H:%M:%S') $@ ${COLOR_SUFFIX}" - shift + COLOR=$1 + [ $COLOR_OPEN -eq 1 ] && echo -e "${COLOR} $(date '+%Y-%m-%d %H:%M:%S') $@ ${COLOR_SUFFIX}" + shift } # Define color variables -# --- Feature --- +# --- Feature --- COLOR_NORMAL='\033[0m';COLOR_BOLD='\033[1m';COLOR_DIM='\033[2m';COLOR_UNDER='\033[4m'; COLOR_ITALIC='\033[3m';COLOR_NOITALIC='\033[23m';COLOR_BLINK='\033[5m'; COLOR_REVERSE='\033[7m';COLOR_CONCEAL='\033[8m';COLOR_NOBOLD='\033[22m'; COLOR_NOUNDER='\033[24m';COLOR_NOBLINK='\033[25m'; -# --- Front color --- +# --- Front color --- COLOR_BLACK='\033[30m'; COLOR_RED='\033[31m'; COLOR_GREEN='\033[32m'; @@ -48,13 +48,13 @@ COLOR_MAGENTA='\033[35m'; COLOR_CYAN='\033[36m'; COLOR_WHITE='\033[37m'; -# --- background color --- +# --- 
background color --- COLOR_BBLACK='\033[40m';COLOR_BRED='\033[41m'; COLOR_BGREEN='\033[42m';COLOR_BYELLOW='\033[43m'; COLOR_BBLUE='\033[44m';COLOR_BMAGENTA='\033[45m'; COLOR_BCYAN='\033[46m';COLOR_BWHITE='\033[47m'; -# --- Color definitions --- +# --- Color definitions --- # Color definitions COLOR_SUFFIX="\033[0m" # End all colors and special effects BLACK_PREFIX="\033[30m" # Black prefix @@ -86,54 +86,54 @@ openim::color::print_color() { # test functions openim::color::test() { - echo "Starting the color tests..." - - echo "Testing normal echo without color" - openim::color::echo $COLOR_NORMAL "This is a normal text" - - echo "Testing bold echo" - openim::color::echo $COLOR_BOLD "This is bold text" - - echo "Testing dim echo" - openim::color::echo $COLOR_DIM "This is dim text" - - echo "Testing underlined echo" - openim::color::echo $COLOR_UNDER "This is underlined text" - - echo "Testing italic echo" - openim::color::echo $COLOR_ITALIC "This is italic text" - - echo "Testing red color" - openim::color::echo $COLOR_RED "This is red text" - - echo "Testing green color" - openim::color::echo $COLOR_GREEN "This is green text" - - echo "Testing yellow color" - openim::color::echo $COLOR_YELLOW "This is yellow text" - - echo "Testing blue color" - openim::color::echo $COLOR_BLUE "This is blue text" - - echo "Testing magenta color" - openim::color::echo $COLOR_MAGENTA "This is magenta text" - - echo "Testing cyan color" - openim::color::echo $COLOR_CYAN "This is cyan text" - - echo "Testing black background" - openim::color::echo $COLOR_BBLACK "This is text with black background" - - echo "Testing red background" - openim::color::echo $COLOR_BRED "This is text with red background" - - echo "Testing green background" - openim::color::echo $COLOR_BGREEN "This is text with green background" - - echo "Testing blue background" - openim::color::echo $COLOR_BBLUE "This is text with blue background" - - echo "All tests completed!" + echo "Starting the color tests..." + + echo "Testing normal echo without color" + openim::color::echo $COLOR_NORMAL "This is a normal text" + + echo "Testing bold echo" + openim::color::echo $COLOR_BOLD "This is bold text" + + echo "Testing dim echo" + openim::color::echo $COLOR_DIM "This is dim text" + + echo "Testing underlined echo" + openim::color::echo $COLOR_UNDER "This is underlined text" + + echo "Testing italic echo" + openim::color::echo $COLOR_ITALIC "This is italic text" + + echo "Testing red color" + openim::color::echo $COLOR_RED "This is red text" + + echo "Testing green color" + openim::color::echo $COLOR_GREEN "This is green text" + + echo "Testing yellow color" + openim::color::echo $COLOR_YELLOW "This is yellow text" + + echo "Testing blue color" + openim::color::echo $COLOR_BLUE "This is blue text" + + echo "Testing magenta color" + openim::color::echo $COLOR_MAGENTA "This is magenta text" + + echo "Testing cyan color" + openim::color::echo $COLOR_CYAN "This is cyan text" + + echo "Testing black background" + openim::color::echo $COLOR_BBLACK "This is text with black background" + + echo "Testing red background" + openim::color::echo $COLOR_BRED "This is text with red background" + + echo "Testing green background" + openim::color::echo $COLOR_BGREEN "This is text with green background" + + echo "Testing blue background" + openim::color::echo $COLOR_BBLUE "This is text with blue background" + + echo "All tests completed!" 
} # openim::color::test diff --git a/scripts/lib/logging.sh b/scripts/lib/logging.sh index 8e7d5b09a7..c520850e8f 100755 --- a/scripts/lib/logging.sh +++ b/scripts/lib/logging.sh @@ -21,24 +21,24 @@ ENABLE_LOGGING="${ENABLE_LOGGING:-true}" # If OPENIM_OUTPUT is not set, set it to the default value if [ -z "${OPENIM_OUTPUT+x}" ]; then - OPENIM_OUTPUT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../_output" && pwd -P)" + OPENIM_OUTPUT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../_output" && pwd -P)" fi # Set the log file path LOG_FILE="${OPENIM_OUTPUT}/logs/openim_$(date '+%Y%m%d').log" if [[ ! -d "${OPENIM_OUTPUT}/logs" ]]; then - mkdir -p "${OPENIM_OUTPUT}/logs" - touch "$LOG_FILE" + mkdir -p "${OPENIM_OUTPUT}/logs" + touch "$LOG_FILE" fi # Define the logging function function echo_log() { - if $ENABLE_LOGGING; then - echo -e "$@" | tee -a "${LOG_FILE}" - else - echo -e "$@" - fi + if $ENABLE_LOGGING; then + echo -e "$@" | tee -a "${LOG_FILE}" + else + echo -e "$@" + fi } # MAX_LOG_SIZE=10485760 # 10MB @@ -50,11 +50,11 @@ function echo_log() { # Borrowed from https://gist.github.com/ahendrix/7030300 openim::log::errexit() { local err="${PIPESTATUS[*]}" - + # If the shell we are in doesn't have errexit set (common in subshells) then # don't dump stacks. set +o | grep -qe "-o errexit" || return - + set +o xtrace local code="${1:-1}" # Print out the stack trace described by $function_stack @@ -73,7 +73,7 @@ openim::log::install_errexit() { # trap ERR to provide an error handler whenever a command exits nonzero this # is a more verbose version of set -o errexit trap 'openim::log::errexit' ERR - + # setting errtrace allows our ERR trap handler to be propagated to functions, # expansions and subshells set -o errtrace @@ -110,7 +110,7 @@ openim::log::error_exit() { local code="${2:-1}" local stack_skip="${3:-0}" stack_skip=$((stack_skip + 1)) - + if [[ ${OPENIM_VERBOSE} -ge 4 ]]; then local source_file=${BASH_SOURCE[${stack_skip}]} local source_line=${BASH_LINENO[$((stack_skip - 1))]} @@ -118,18 +118,18 @@ openim::log::error_exit() { [[ -z ${1-} ]] || { echo_log " ${1}" >&2 } - + openim::log::stack ${stack_skip} - + echo_log "Exiting with status ${code}" >&2 fi - + exit "${code}" } # Log an error but keep going. Don't dump the stack or exit. openim::log::error() { - timestamp=$(date +"[%m%d %H:%M:%S]") + timestamp=$(date +"[%Y-%m-%d %H:%M:%S %Z]") echo_log "!!! 
${timestamp} ${1-}" >&2 shift for message; do @@ -152,7 +152,7 @@ openim::log::usage_from_stdin() { while read -r line; do messages+=("${line}") done - + openim::log::usage "${messages[@]}" } @@ -162,7 +162,7 @@ openim::log::info() { if [[ ${OPENIM_VERBOSE} < ${V} ]]; then return fi - + for message; do echo_log "${message}" done @@ -181,7 +181,7 @@ openim::log::info_from_stdin() { while read -r line; do messages+=("${line}") done - + openim::log::info "${messages[@]}" } @@ -191,8 +191,8 @@ openim::log::status() { if [[ ${OPENIM_VERBOSE} < ${V} ]]; then return fi - - timestamp=$(date +"[%m%d %H:%M:%S]") + + timestamp=$(date +"[%Y-%m-%d %H:%M:%S %Z]") echo_log "+++ ${timestamp} ${1}" shift for message; do @@ -203,20 +203,20 @@ openim::log::status() { openim::log::success() { local V="${V:-0}" if [[ ${OPENIM_VERBOSE} < ${V} ]]; then - return + return fi timestamp=$(date +"%m%d %H:%M:%S") echo_log -e "${COLOR_GREEN}[success ${timestamp}] ${COLOR_SUFFIX}==> " "$@" } function openim::log::test_log() { - echo_log "test log" - openim::log::info "openim::log::info" - openim::log::progress "openim::log::progress" - openim::log::status "openim::log::status" - openim::log::success "openim::log::success" - openim::log::error "openim::log::error" - openim::log::error_exit "openim::log::error_exit" + echo_log "test log" + openim::log::info "openim::log::info" + openim::log::progress "openim::log::progress" + openim::log::status "openim::log::status" + openim::log::success "openim::log::success" + openim::log::error "openim::log::error" + openim::log::error_exit "openim::log::error_exit" } # openim::log::test_log \ No newline at end of file diff --git a/scripts/lib/release.sh b/scripts/lib/release.sh index 16f2cd97ab..2a525f12c2 100755 --- a/scripts/lib/release.sh +++ b/scripts/lib/release.sh @@ -46,11 +46,11 @@ OPENIM_BUILD_CONFORMANCE=${OPENIM_BUILD_CONFORMANCE:-y} OPENIM_BUILD_PULL_LATEST_IMAGES=${OPENIM_BUILD_PULL_LATEST_IMAGES:-y} if [ -z "${OPENIM_ROOT}" ]; then - OPENIM_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" + OPENIM_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" fi if [ -z "${TOOLS_DIR}" ]; then - TOOLS_DIR="${OPENIM_ROOT}/_output/tools" + TOOLS_DIR="${OPENIM_ROOT}/_output/tools" fi # Validate a ci version @@ -77,10 +77,10 @@ function openim::release::parse_and_validate_ci_version() { openim::log::error "Invalid ci version: '${version}', must match regex ${version_regex}" return 1 } - + # The VERSION variables are used when this file is sourced, hence # the shellcheck SC2034 'appears unused' warning is to be ignored. - + # shellcheck disable=SC2034 VERSION_MAJOR="${BASH_REMATCH[1]}" # shellcheck disable=SC2034 @@ -115,7 +115,7 @@ function openim::release::package_tarballs() { openim::release::package_openim_manifests_tarball & openim::release::package_server_tarballs & openim::util::wait-for-jobs || { openim::log::error "previous tarball phase failed"; return 1; } - + openim::release::package_final_tarball & # _final depends on some of the previous phases openim::util::wait-for-jobs || { openim::log::error "previous tarball phase failed"; return 1; } } @@ -143,24 +143,24 @@ function openim::release::package_src_tarball() { git archive -o "${src_tarball}" HEAD else find "${OPENIM_ROOT}" -mindepth 1 -maxdepth 1 \ - ! 
\( \ - \( -path "${OPENIM_ROOT}"/_\* -o \ - -path "${OPENIM_ROOT}"/.git\* -o \ - -path "${OPENIM_ROOT}"/.github\* -o \ - -path "${OPENIM_ROOT}"/components\* -o \ - -path "${OPENIM_ROOT}"/logs\* -o \ - -path "${OPENIM_ROOT}"/.gitignore\* -o \ - -path "${OPENIM_ROOT}"/.gsemver.yml\* -o \ - -path "${OPENIM_ROOT}"/.config\* -o \ - -path "${OPENIM_ROOT}"/.chglog\* -o \ - -path "${OPENIM_ROOT}"/.gitlint -o \ - -path "${OPENIM_ROOT}"/.golangci.yml -o \ - -path "${OPENIM_ROOT}"/build/goreleaser.yaml -o \ - -path "${OPENIM_ROOT}"/.note.md -o \ - -path "${OPENIM_ROOT}"/.todo.md \ - \) -prune \ - \) -print0 \ - | "${TAR}" czf "${src_tarball}" --transform "s|${OPENIM_ROOT#/*}|openim|" --null -T - + ! \( \ + \( -path "${OPENIM_ROOT}"/_\* -o \ + -path "${OPENIM_ROOT}"/.git\* -o \ + -path "${OPENIM_ROOT}"/.github\* -o \ + -path "${OPENIM_ROOT}"/components\* -o \ + -path "${OPENIM_ROOT}"/logs\* -o \ + -path "${OPENIM_ROOT}"/.gitignore\* -o \ + -path "${OPENIM_ROOT}"/.gsemver.yml\* -o \ + -path "${OPENIM_ROOT}"/.config\* -o \ + -path "${OPENIM_ROOT}"/.chglog\* -o \ + -path "${OPENIM_ROOT}"/.gitlint -o \ + -path "${OPENIM_ROOT}"/.golangci.yml -o \ + -path "${OPENIM_ROOT}"/build/goreleaser.yaml -o \ + -path "${OPENIM_ROOT}"/.note.md -o \ + -path "${OPENIM_ROOT}"/.todo.md \ + \) -prune \ + \) -print0 \ + | "${TAR}" czf "${src_tarball}" --transform "s|${OPENIM_ROOT#/*}|openim|" --null -T - fi } @@ -168,7 +168,7 @@ function openim::release::package_src_tarball() { function openim::release::package_server_tarballs() { # Find all of the built client binaries local long_platforms=("${LOCAL_OUTPUT_BINPATH}"/*/*) - + if [[ -n ${OPENIM_BUILD_PLATFORMS-} ]]; then read -ra long_platforms <<< "${OPENIM_BUILD_PLATFORMS}" fi @@ -636,7 +636,7 @@ function openim::release::github_release() { for file in ${RELEASE_TARS}/*.tar.gz; do if [[ -f "$file" ]]; then filename=$(basename "$file") - openim::log::info "Update file ${filename} to release vertion ${OPENIM_GIT_VERSION}" + openim::log::info "Update file ${filename} to release vertion ${OPENIM_GIT_VERSION}" ${TOOLS_DIR}/github-release upload \ --user ${OPENIM_GITHUB_ORG} \ --repo ${OPENIM_GITHUB_REPO} \ diff --git a/scripts/lib/util.sh b/scripts/lib/util.sh index f15a263468..a40668d70e 100755 --- a/scripts/lib/util.sh +++ b/scripts/lib/util.sh @@ -30,27 +30,27 @@ function openim:util::setup_ssh_key_copy() { local hosts_file="$1" local username="${2:-root}" local password="${3:-123}" - + local sshkey_file=~/.ssh/id_rsa.pub - - # check sshkey file + + # check sshkey file if [[ ! -e $sshkey_file ]]; then expect -c " spawn ssh-keygen -t rsa expect \"Enter*\" { send \"\n\"; exp_continue; } " fi - + # get hosts list local hosts=$(awk '/^[^#]/ {print $1}' "${hosts_file}") - + ssh_key_copy() { local target=$1 - + # delete history sed -i "/$target/d" ~/.ssh/known_hosts - - # copy key + + # copy key expect -c " set timeout 100 spawn ssh-copy-id $username@$target @@ -62,14 +62,14 @@ function openim:util::setup_ssh_key_copy() { expect eof " } - + # auto sshkey pair for host in $hosts; do if ! 
ping -i 0.2 -c 3 -W 1 "$host" > /dev/null 2>&1; then echo "[ERROR]: Can't connect $host" continue fi - + local host_entry=$(awk "/$host/"'{print $1, $2}' /etc/hosts) if [[ $host_entry ]]; then local hostaddr=$(echo "$host_entry" | awk '{print $1}') @@ -102,7 +102,7 @@ openim::util::array_contains() { for element; do if [[ "${element}" == "${search}" ]]; then return 0 - fi + fi done return 1 } @@ -113,12 +113,12 @@ openim::util::wait_for_url() { local wait=${3:-1} local times=${4:-30} local maxtime=${5:-1} - + command -v curl >/dev/null || { openim::log::usage "curl must be installed" exit 1 } - + local i for i in $(seq 1 "${times}"); do local out @@ -156,20 +156,20 @@ openim::util::trap_add() { local trap_add_cmd trap_add_cmd=$1 shift - + for trap_add_name in "$@"; do local existing_cmd local new_cmd - + # Grab the currently defined trap commands for this trap existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}') - + if [[ -z "${existing_cmd}" ]]; then new_cmd="${trap_add_cmd}" else new_cmd="${trap_add_cmd};${existing_cmd}" fi - + # Assign the test. Disable the shellcheck warning telling that trap # commands should be single quoted to avoid evaluating them at this # point instead evaluating them at run time. The logic of adding new @@ -200,14 +200,14 @@ openim::util::host_os() { case "$(uname -s)" in Darwin) host_os=darwin - ;; + ;; Linux) host_os=linux - ;; + ;; *) openim::log::error "Unsupported host OS. Must be Linux or Mac OS X." exit 1 - ;; + ;; esac echo "${host_os}" } @@ -217,70 +217,70 @@ openim::util::host_arch() { case "$(uname -m)" in x86_64*) host_arch=amd64 - ;; + ;; i?86_64*) host_arch=amd64 - ;; + ;; amd64*) host_arch=amd64 - ;; + ;; aarch64*) host_arch=arm64 - ;; + ;; arm64*) host_arch=arm64 - ;; + ;; arm*) host_arch=arm - ;; + ;; i?86*) host_arch=x86 - ;; + ;; s390x*) host_arch=s390x - ;; + ;; ppc64le*) host_arch=ppc64le - ;; + ;; *) openim::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." exit 1 - ;; + ;; esac echo "${host_arch}" } # Define a bash function to check the versions of Docker and Docker Compose openim::util::check_docker_and_compose_versions() { - # Define the required versions of Docker and Docker Compose - required_docker_version="20.10.0" - required_compose_version="2.0" - - # Get the currently installed Docker version - installed_docker_version=$(docker --version | awk '{print $3}' | sed 's/,//') - - # Check if the installed Docker version matches the required version - if [[ "$installed_docker_version" < "$required_docker_version" ]]; then - echo "Docker version mismatch. Installed: $installed_docker_version, Required: $required_docker_version" - return 1 - fi - - # Check if the docker compose sub-command is available - if ! docker compose version &> /dev/null; then - echo "Docker does not support the docker compose sub-command" - echo "You need to upgrade Docker to the right version" - return 1 - fi - - # Get the currently installed Docker Compose version - installed_compose_version=$(docker compose version --short) - - # Check if the installed Docker Compose version matches the required version - if [[ "$installed_compose_version" < "$required_compose_version" ]]; then - echo "Docker Compose version mismatch. 
Installed: $installed_compose_version, Required: $required_compose_version" - return 1 - fi - + # Define the required versions of Docker and Docker Compose + required_docker_version="20.10.0" + required_compose_version="2.0" + + # Get the currently installed Docker version + installed_docker_version=$(docker --version | awk '{print $3}' | sed 's/,//') + + # Check if the installed Docker version matches the required version + if [[ "$installed_docker_version" < "$required_docker_version" ]]; then + echo "Docker version mismatch. Installed: $installed_docker_version, Required: $required_docker_version" + return 1 + fi + + # Check if the docker compose sub-command is available + if ! docker compose version &> /dev/null; then + echo "Docker does not support the docker compose sub-command" + echo "You need to upgrade Docker to the right version" + return 1 + fi + + # Get the currently installed Docker Compose version + installed_compose_version=$(docker compose version --short) + + # Check if the installed Docker Compose version matches the required version + if [[ "$installed_compose_version" < "$required_compose_version" ]]; then + echo "Docker Compose version mismatch. Installed: $installed_compose_version, Required: $required_compose_version" + return 1 + fi + } @@ -292,80 +292,80 @@ openim::util::check_docker_and_compose_versions() { # openim::util::check_ports 8080 8081 8082 # The function returns a status of 1 if any of the processes is not running. openim::util::check_ports() { - # An array to collect ports of processes that are not running. - local not_started=() - - # An array to collect information about processes that are running. - local started=() - - openim::log::info "Checking ports: $*" - # Iterate over each given port. - for port in "$@"; do - # Initialize variables - # Check the OS and use the appropriate command - if [[ "$OSTYPE" == "linux-gnu"* ]]; then - if command -v ss > /dev/null 2>&1; then - info=$(ss -ltnp | grep ":$port" || true) - else - info=$(netstat -ltnp | grep ":$port" || true) - fi - elif [[ "$OSTYPE" == "darwin"* ]]; then - # For macOS, use lsof - info=$(lsof -P -i:"$port" | grep "LISTEN" || true) - fi - - # Check if any process is using the port - if [[ -z $info ]]; then - not_started+=($port) - else - if [[ "$OSTYPE" == "linux-gnu"* ]]; then - # Extract relevant details for Linux: Process Name, PID, and FD. - details=$(echo $info | sed -n 's/.*users:(("\([^"]*\)",pid=\([^,]*\),fd=\([^)]*\))).*/\1 \2 \3/p') - command=$(echo $details | awk '{print $1}') - pid=$(echo $details | awk '{print $2}') - fd=$(echo $details | awk '{print $3}') - elif [[ "$OSTYPE" == "darwin"* ]]; then - # Handle extraction for macOS - pid=$(echo $info | awk '{print $2}' | cut -d'/' -f1) - command=$(ps -p $pid -o comm= | xargs basename) - fd=$(echo $info | awk '{print $4}' | cut -d'/' -f1) - fi - - # Get the start time of the process using the PID - if [[ -z $pid ]]; then - start_time="N/A" - else - start_time=$(ps -p $pid -o lstart=) - fi - - started+=("Port $port - Command: $command, PID: $pid, FD: $fd, Started: $start_time") - fi - done - - # Print information about ports whose processes are not running. - if [[ ${#not_started[@]} -ne 0 ]]; then - openim::log::info "\n### Not started ports:" - for port in "${not_started[@]}"; do - openim::log::error "Port $port is not started." - done + # An array to collect ports of processes that are not running. + local not_started=() + + # An array to collect information about processes that are running. 
+ local started=() + + openim::log::info "Checking ports: $*" + # Iterate over each given port. + for port in "$@"; do + # Initialize variables + # Check the OS and use the appropriate command + if [[ "$OSTYPE" == "linux-gnu"* ]]; then + if command -v ss > /dev/null 2>&1; then + info=$(ss -ltnp | grep ":$port" || true) + else + info=$(netstat -ltnp | grep ":$port" || true) + fi + elif [[ "$OSTYPE" == "darwin"* ]]; then + # For macOS, use lsof + info=$(lsof -P -i:"$port" | grep "LISTEN" || true) fi - - # Print information about ports whose processes are running. - if [[ ${#started[@]} -ne 0 ]]; then - openim::log::info "\n### Started ports:" - for info in "${started[@]}"; do - openim::log::info "$info" - done - fi - - # If any of the processes is not running, return a status of 1. - if [[ ${#not_started[@]} -ne 0 ]]; then - echo "++++ OpenIM Log >> cat ${LOG_FILE}" - return 1 + + # Check if any process is using the port + if [[ -z $info ]]; then + not_started+=($port) else - openim::log::success "All specified processes are running." - return 0 + if [[ "$OSTYPE" == "linux-gnu"* ]]; then + # Extract relevant details for Linux: Process Name, PID, and FD. + details=$(echo $info | sed -n 's/.*users:(("\([^"]*\)",pid=\([^,]*\),fd=\([^)]*\))).*/\1 \2 \3/p') + command=$(echo $details | awk '{print $1}') + pid=$(echo $details | awk '{print $2}') + fd=$(echo $details | awk '{print $3}') + elif [[ "$OSTYPE" == "darwin"* ]]; then + # Handle extraction for macOS + pid=$(echo $info | awk '{print $2}' | cut -d'/' -f1) + command=$(ps -p $pid -o comm= | xargs basename) + fd=$(echo $info | awk '{print $4}' | cut -d'/' -f1) + fi + + # Get the start time of the process using the PID + if [[ -z $pid ]]; then + start_time="N/A" + else + start_time=$(ps -p $pid -o lstart=) + fi + + started+=("Port $port - Command: $command, PID: $pid, FD: $fd, Started: $start_time") fi + done + + # Print information about ports whose processes are not running. + if [[ ${#not_started[@]} -ne 0 ]]; then + openim::log::info "\n### Not started ports:" + for port in "${not_started[@]}"; do + openim::log::error "Port $port is not started." + done + fi + + # Print information about ports whose processes are running. + if [[ ${#started[@]} -ne 0 ]]; then + openim::log::info "\n### Started ports:" + for info in "${started[@]}"; do + openim::log::info "$info" + done + fi + + # If any of the processes is not running, return a status of 1. + if [[ ${#not_started[@]} -ne 0 ]]; then + echo "++++ OpenIM Log >> cat ${LOG_FILE}" + return 1 + else + openim::log::success "All specified processes are running." + return 0 + fi } # set +o errexit @@ -381,75 +381,75 @@ openim::util::check_ports() { # openim::util::check_process_names nginx mysql redis # The function returns a status of 1 if any of the processes is not running. 
openim::util::check_process_names() { - # Function to get the port of a process - get_port() { - local pid=$1 - if [[ "$OSTYPE" == "linux-gnu"* ]]; then - # Linux - ss -ltnp 2>/dev/null | grep $pid | awk '{print $4}' | cut -d ':' -f2 - elif [[ "$OSTYPE" == "darwin"* ]]; then - # macOS - lsof -nP -iTCP -sTCP:LISTEN -a -p $pid | awk 'NR>1 {print $9}' | sed 's/.*://' - else - echo "Unsupported OS" - return 1 - fi - } - - # Arrays to collect details of processes - local not_started=() - local started=() - - openim::log::info "Checking processes: $*" - # Iterate over each given process name - for process_name in "$@"; do - # Use `pgrep` to find process IDs related to the given process name - local pids=($(pgrep -f $process_name)) - - # Check if any process IDs were found - if [[ ${#pids[@]} -eq 0 ]]; then - not_started+=($process_name) - else - # If there are PIDs, loop through each one - for pid in "${pids[@]}"; do - local command=$(ps -p $pid -o cmd=) - local start_time=$(ps -p $pid -o lstart=) - local port=$(get_port $pid) - - # Check if port information was found for the PID - if [[ -z $port ]]; then - port="N/A" - fi - - started+=("Process $process_name - Command: $command, PID: $pid, Port: $port, Start time: $start_time") - done - fi - done - - # Print information - if [[ ${#not_started[@]} -ne 0 ]]; then - openim::log::info "Not started processes:" - for process_name in "${not_started[@]}"; do - openim::log::error "Process $process_name is not started." - done - fi - - if [[ ${#started[@]} -ne 0 ]]; then - echo - openim::log::info "Started processes:" - for info in "${started[@]}"; do - openim::log::info "$info" - done + # Function to get the port of a process + get_port() { + local pid=$1 + if [[ "$OSTYPE" == "linux-gnu"* ]]; then + # Linux + ss -ltnp 2>/dev/null | grep $pid | awk '{print $4}' | cut -d ':' -f2 + elif [[ "$OSTYPE" == "darwin"* ]]; then + # macOS + lsof -nP -iTCP -sTCP:LISTEN -a -p $pid | awk 'NR>1 {print $9}' | sed 's/.*://' + else + echo "Unsupported OS" + return 1 fi - - # Return status - if [[ ${#not_started[@]} -ne 0 ]]; then - echo "++++ OpenIM Log >> cat ${LOG_FILE}" - return 1 + } + + # Arrays to collect details of processes + local not_started=() + local started=() + + openim::log::info "Checking processes: $*" + # Iterate over each given process name + for process_name in "$@"; do + # Use `pgrep` to find process IDs related to the given process name + local pids=($(pgrep -f $process_name)) + + # Check if any process IDs were found + if [[ ${#pids[@]} -eq 0 ]]; then + not_started+=($process_name) else - openim::log::success "All processes are running." - return 0 + # If there are PIDs, loop through each one + for pid in "${pids[@]}"; do + local command=$(ps -p $pid -o cmd=) + local start_time=$(ps -p $pid -o lstart=) + local port=$(get_port $pid) + + # Check if port information was found for the PID + if [[ -z $port ]]; then + port="N/A" + fi + + started+=("Process $process_name - Command: $command, PID: $pid, Port: $port, Start time: $start_time") + done fi + done + + # Print information + if [[ ${#not_started[@]} -ne 0 ]]; then + openim::log::info "Not started processes:" + for process_name in "${not_started[@]}"; do + openim::log::error "Process $process_name is not started." 
+ done + fi + + if [[ ${#started[@]} -ne 0 ]]; then + echo + openim::log::info "Started processes:" + for info in "${started[@]}"; do + openim::log::info "$info" + done + fi + + # Return status + if [[ ${#not_started[@]} -ne 0 ]]; then + echo "++++ OpenIM Log >> cat ${LOG_FILE}" + return 1 + else + openim::log::success "All processes are running." + return 0 + fi } # openim::util::check_process_names docker-pr @@ -462,30 +462,30 @@ openim::util::check_process_names() { # openim::util::stop_services_on_ports 8080 8081 8082 # The function returns a status of 1 if any service couldn't be stopped. openim::util::stop_services_on_ports() { - # An array to collect ports of processes that couldn't be stopped. - local not_stopped=() - - # An array to collect information about processes that were stopped. - local stopped=() - - openim::log::info "Stopping services on ports: $*" - # Iterate over each given port. - for port in "$@"; do - # Use the `lsof` command to find process information related to the given port. - info=$(lsof -i :$port -n -P | grep LISTEN || true) - - # If there's process information, it means the process associated with the port is running. - if [[ -n $info ]]; then - # Extract the Process ID. - while read -r line; do - local pid=$(echo $line | awk '{print $2}') - - # Try to stop the service by killing its process. - if kill -TERM $pid; then - stopped+=($port) - else - not_stopped+=($port) - fi + # An array to collect ports of processes that couldn't be stopped. + local not_stopped=() + + # An array to collect information about processes that were stopped. + local stopped=() + + openim::log::info "Stopping services on ports: $*" + # Iterate over each given port. + for port in "$@"; do + # Use the `lsof` command to find process information related to the given port. + info=$(lsof -i :$port -n -P | grep LISTEN || true) + + # If there's process information, it means the process associated with the port is running. + if [[ -n $info ]]; then + # Extract the Process ID. + while read -r line; do + local pid=$(echo $line | awk '{print $2}') + + # Try to stop the service by killing its process. + if kill -TERM $pid; then + stopped+=($port) + else + not_stopped+=($port) + fi done <<< "$info" fi done @@ -519,7 +519,7 @@ openim::util::stop_services_on_ports() { # nc -l -p 12345 # nc -l -p 123456 # ps -ef | grep "nc -l" -# openim::util::stop_services_on_ports 1234 12345 +# openim::util::stop_services_on_ports 1234 12345 # The `openim::util::stop_services_with_name` function stops services with specified names. @@ -1086,7 +1086,7 @@ function openim::util::ensure-install-nginx { exit 1 fi - for port in 80 + for port in "80" do if echo |telnet 127.0.0.1 $port 2>&1|grep refused &>/dev/null;then exit 1 @@ -1263,7 +1263,7 @@ function openim:util::setup_ssh_key_copy() { local sshkey_file=~/.ssh/id_rsa.pub - # check sshkey file + # check sshkey file if [[ ! 
-e $sshkey_file ]]; then expect -c " spawn ssh-keygen -t rsa @@ -1280,7 +1280,7 @@ function openim:util::setup_ssh_key_copy() { # delete history sed -i "/$target/d" ~/.ssh/known_hosts - # copy key + # copy key expect -c " set timeout 100 spawn ssh-copy-id $username@$target @@ -1571,7 +1571,7 @@ openim::util::check_ports() { else start_time=$(ps -p $pid -o lstart=) fi - + started+=("Port $port - Command: $command, PID: $pid, FD: $fd, Started: $start_time") fi done @@ -1639,7 +1639,7 @@ openim::util::check_process_names() { for process_name in "$@"; do # Use `pgrep` to find process IDs related to the given process name local pids=($(pgrep -f $process_name)) - + # Check if any process IDs were found if [[ ${#pids[@]} -eq 0 ]]; then not_started+=($process_name) @@ -1713,7 +1713,7 @@ openim::util::stop_services_on_ports() { # Extract the Process ID. while read -r line; do local pid=$(echo $line | awk '{print $2}') - + # Try to stop the service by killing its process. if kill -TERM $pid; then stopped+=($port) @@ -1753,7 +1753,7 @@ openim::util::stop_services_on_ports() { # nc -l -p 12345 # nc -l -p 123456 # ps -ef | grep "nc -l" -# openim::util::stop_services_on_ports 1234 12345 +# openim::util::stop_services_on_ports 1234 12345 # The `openim::util::stop_services_with_name` function stops services with specified names. @@ -2320,7 +2320,7 @@ function openim::util::ensure-install-nginx { exit 1 fi - for port in 80 + for port in "80" do if echo |telnet 127.0.0.1 $port 2>&1|grep refused &>/dev/null;then exit 1 @@ -2467,7 +2467,7 @@ function openim::util::desc() { } function openim::util:run::prompt() { - echo -n "$yellow\$ $reset" + echo -n "${yellow}\$ ${reset}" } started="" @@ -2488,7 +2488,7 @@ function openim::util::run() { if [ -n "$DEMO_RUN_FAST" ]; then rate=1000 fi - echo "$green$1$reset" | pv -qL $rate + echo "${green}$1${reset}" | pv -qL "$rate" if [ -n "$DEMO_RUN_FAST" ]; then sleep 0.5 fi @@ -2516,7 +2516,7 @@ function openim::util::run::relative() { # This function retrieves the IP address of the current server. # It primarily uses the `curl` command to fetch the public IP address from ifconfig.me. -# If curl or the service is not available, it falls back +# If curl or the service is not available, it falls back # to the internal IP address provided by the hostname command. # TODO: If a delay is found, the delay needs to be addressed function openim::util::get_server_ip() { @@ -2524,7 +2524,7 @@ function openim::util::get_server_ip() { if command -v curl &> /dev/null; then # Try to retrieve the public IP address using curl and ifconfig.me IP=$(dig TXT +short o-o.myaddr.l.google.com @ns1.google.com | sed 's/"//g' | tr -d '\n') - + # Check if IP retrieval was successful if [[ -z "$IP" ]]; then # If not, get the internal IP address @@ -2534,7 +2534,7 @@ function openim::util::get_server_ip() { # If curl is not available, get the internal IP address IP=$(ip addr show | grep 'inet ' | grep -v 127.0.0.1 | awk '{print $2}' | cut -d'/' -f1 | head -n 1) fi - + # Return the fetched IP address echo "$IP" } @@ -2580,7 +2580,7 @@ function openim::util::set_max_fd() { if [ "$desired_fd" = "maximum" ] || [ "$desired_fd" = "max" ]; then desired_fd="$max_fd_limit" fi - + # Check if desired_fd is less than or equal to max_fd_limit. if [ "$desired_fd" -le "$max_fd_limit" ]; then ulimit -n "$desired_fd" @@ -2696,7 +2696,7 @@ function openim::util::run::relative() { # This function retrieves the IP address of the current server. 
# It primarily uses the `curl` command to fetch the public IP address from ifconfig.me. -# If curl or the service is not available, it falls back +# If curl or the service is not available, it falls back # to the internal IP address provided by the hostname command. # TODO: If a delay is found, the delay needs to be addressed function openim::util::get_server_ip() { @@ -2704,7 +2704,7 @@ function openim::util::get_server_ip() { if command -v curl &> /dev/null; then # Try to retrieve the public IP address using curl and ifconfig.me IP=$(dig TXT +short o-o.myaddr.l.google.com @ns1.google.com | sed 's/"//g' | tr -d '\n') - + # Check if IP retrieval was successful if [[ -z "$IP" ]]; then # If not, get the internal IP address @@ -2714,7 +2714,7 @@ function openim::util::get_server_ip() { # If curl is not available, get the internal IP address IP=$(ip addr show | grep 'inet ' | grep -v 127.0.0.1 | awk '{print $2}' | cut -d'/' -f1 | head -n 1) fi - + # Return the fetched IP address echo "$IP" } @@ -2760,7 +2760,7 @@ function openim::util::set_max_fd() { if [ "$desired_fd" = "maximum" ] || [ "$desired_fd" = "max" ]; then desired_fd="$max_fd_limit" fi - + # Check if desired_fd is less than or equal to max_fd_limit. if [ "$desired_fd" -le "$max_fd_limit" ]; then ulimit -n "$desired_fd" diff --git a/scripts/lib/version.sh b/scripts/lib/version.sh index 04eb89b091..cb47136fb2 100755 --- a/scripts/lib/version.sh +++ b/scripts/lib/version.sh @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - + # ----------------------------------------------------------------------------- # Version management helpers. These functions help to set, save and load the # following variables: @@ -35,7 +35,7 @@ openim::version::get_version_vars() { openim::version::load_version_vars "${OPENIM_GIT_VERSION_FILE}" return fi - + # If the iamrnetes source was exported through git archive, then # we likely don't have a git tree, but these magic values may be filled in. # shellcheck disable=SC2016,SC2050 @@ -48,12 +48,12 @@ openim::version::get_version_vars() { # something like 'HEAD -> release-1.8, tag: v1.8.3' where then 'tag: ' # can be extracted from it. if [[ '$Format:%D$' =~ tag:\ (v[^ ,]+) ]]; then - OPENIM_GIT_VERSION="${BASH_REMATCH[1]}" + OPENIM_GIT_VERSION="${BASH_REMATCH[1]}" fi fi - + local git=(git --work-tree "${OPENIM_ROOT}") - + if [[ -n ${OPENIM_GIT_COMMIT-} ]] || OPENIM_GIT_COMMIT=$("${git[@]}" rev-parse "HEAD^{commit}" 2>/dev/null); then if [[ -z ${OPENIM_GIT_TREE_STATE-} ]]; then # Check if the tree is dirty. default to dirty @@ -63,7 +63,7 @@ openim::version::get_version_vars() { OPENIM_GIT_TREE_STATE="dirty" fi fi - + # Use git describe to find the version based on tags. 
if [[ -n ${OPENIM_GIT_VERSION-} ]] || OPENIM_GIT_VERSION=$("${git[@]}" describe --tags --always --match='v*' 2>/dev/null); then # This translates the "git describe" to an actual semver.org @@ -81,7 +81,7 @@ openim::version::get_version_vars() { # shellcheck disable=SC2001 # We have distance to subversion (v1.1.0-subversion-1-gCommitHash) OPENIM_GIT_VERSION=$(echo "${OPENIM_GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/") - elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then + elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then # shellcheck disable=SC2001 # We have distance to base tag (v1.1.0-1-gCommitHash) OPENIM_GIT_VERSION=$(echo "${OPENIM_GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/") @@ -94,7 +94,7 @@ openim::version::get_version_vars() { #OPENIM_GIT_VERSION+="-dirty" : fi - + # Try to match the "git describe" output to a regex to try to extract # the "major" and "minor" versions and whether this is the exact tagged # version or whether the tree is between two tagged versions. @@ -105,12 +105,12 @@ openim::version::get_version_vars() { OPENIM_GIT_MINOR+="+" fi fi - + # If OPENIM_GIT_VERSION is not a valid Semantic Version, then refuse to build. if ! [[ "${OPENIM_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then - echo "OPENIM_GIT_VERSION should be a valid Semantic Version. Current value: ${OPENIM_GIT_VERSION}" - echo "Please see more details here: https://semver.org" - exit 1 + echo "OPENIM_GIT_VERSION should be a valid Semantic Version. Current value: ${OPENIM_GIT_VERSION}" + echo "Please see more details here: https://semver.org" + exit 1 fi fi fi @@ -123,7 +123,7 @@ openim::version::save_version_vars() { echo "!!! Internal error. No file specified in openim::version::save_version_vars" return 1 } - + cat <"${version_file}" OPENIM_GIT_COMMIT='${OPENIM_GIT_COMMIT-}' OPENIM_GIT_TREE_STATE='${OPENIM_GIT_TREE_STATE-}' @@ -140,6 +140,6 @@ openim::version::load_version_vars() { echo "!!! Internal error. No file specified in openim::version::load_version_vars" return 1 } - + source "${version_file}" } diff --git a/scripts/make-rules/common.mk b/scripts/make-rules/common.mk index 5fda9b927c..f8537b6cab 100644 --- a/scripts/make-rules/common.mk +++ b/scripts/make-rules/common.mk @@ -126,7 +126,7 @@ APIROOT=$(ROOT_DIR)/pkg/proto # Linux command settings # TODO: Whether you need to join utils? -FIND := find . ! -path './utils/*' ! -path './vendor/*' ! -path './third_party/*' +FIND := find . ! -path './utils/*' ! -path './vendor/*' ! -path './third_party/*' ! -path './components/*' ! -path './logs/*' XARGS := xargs -r --no-run-if-empty # Linux command settings-CODE DIRS Copyright diff --git a/scripts/make-rules/golang.mk b/scripts/make-rules/golang.mk index 44918d01c3..915639b61c 100644 --- a/scripts/make-rules/golang.mk +++ b/scripts/make-rules/golang.mk @@ -244,7 +244,7 @@ go.imports: tools.verify.goimports ## go.verify: execute all verity scripts. .PHONY: go.verify -go.verify: +go.verify: tools.verify.misspell @echo "Starting verification..." 
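The sed and validation work above is easiest to follow with a concrete value. A sketch of the two-dash case, `v<tag>-<distance>-g<hash>`, using a made-up describe string; the 14-character hash abbreviation is what the sed pattern expects:

```bash
#!/usr/bin/env bash
# The "-g<hash>" suffix from `git describe` becomes SemVer build metadata ("+<hash>").
describe="v3.5.0-12-gabcdef01234567"   # hypothetical `git describe --tags` output
version=$(echo "${describe}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/")
echo "${version}"   # v3.5.0-12+abcdef01234567

# The same validation gate the script applies before allowing a build.
if [[ "${version}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then
  echo "valid SemVer: major=${BASH_REMATCH[1]} minor=${BASH_REMATCH[2]}"
fi
```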
@scripts_list=$$(find $(ROOT_DIR)/scripts -type f -name 'verify-*' | sort); \ for script in $$scripts_list; do \ diff --git a/scripts/make-rules/tools.mk b/scripts/make-rules/tools.mk index 5f076d6e71..5d39258eae 100644 --- a/scripts/make-rules/tools.mk +++ b/scripts/make-rules/tools.mk @@ -217,6 +217,11 @@ install.depth: install.go-callvis: @$(GO) install github.com/ofabry/go-callvis@latest +## install.misspell +.PHONY: install.misspell +install.misspell: + @$(GO) install github.com/client9/misspell/cmd/misspell@latest + ## install.gothanks: Install gothanks, used to thank go dependencies .PHONY: install.gothanks install.gothanks: diff --git a/scripts/release.sh b/scripts/release.sh index 042a6c3a0d..4984b21663 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -14,9 +14,9 @@ # limitations under the License. # Description: -# This script automates the process of building and releasing OpenIM, +# This script automates the process of building and releasing OpenIM, # including tasks like setting up the environment, verifying prerequisites, -# building commands, packaging tarballs, uploading tarballs, creating GitHub +# building commands, packaging tarballs, uploading tarballs, creating GitHub # releases, and generating changelogs. # # Usage: @@ -35,12 +35,12 @@ # This script can also be executed via the 'make release' command as an alternative. # # Dependencies: -# This script depends on external scripts found in the 'scripts' directory and -# assumes the presence of necessary tools and permissions for building and +# This script depends on external scripts found in the 'scripts' directory and +# assumes the presence of necessary tools and permissions for building and # releasing software. # # Note: -# The script uses standard bash script practices with error handling, +# The script uses standard bash script practices with error handling, # and it defaults to executing all steps if no specific option is provided. # # Build a OpenIM release. 
This will build the binaries, create the Docker @@ -58,17 +58,17 @@ OPENIM_RELEASE_RUN_TESTS=${OPENIM_RELEASE_RUN_TESTS-y} # Function to show help message show_help() { - echo "Usage: $(basename $0) [options]" - echo "Options:" - echo " -h, --help Show this help message" - echo " -se, --setup-env Execute setup environment" - echo " -vp, --verify-prereqs Execute prerequisites verification" - echo " -bc, --build-command Execute build command" - echo " -bi, --build-image Execute build image (default: not executed)" - echo " -pt, --package-tarballs Execute package tarballs" - echo " -ut, --upload-tarballs Execute upload tarballs" - echo " -gr, --github-release Execute GitHub release" - echo " -gc, --generate-changelog Execute generate changelog" + echo "Usage: $(basename $0) [options]" + echo "Options:" + echo " -h, --help Show this help message" + echo " -se, --setup-env Execute setup environment" + echo " -vp, --verify-prereqs Execute prerequisites verification" + echo " -bc, --build-command Execute build command" + echo " -bi, --build-image Execute build image (default: not executed)" + echo " -pt, --package-tarballs Execute package tarballs" + echo " -ut, --upload-tarballs Execute upload tarballs" + echo " -gr, --github-release Execute GitHub release" + echo " -gc, --generate-changelog Execute generate changelog" } # Initialize all actions to false @@ -83,57 +83,57 @@ perform_generate_changelog=false # Process command-line arguments while getopts "hsevpbciptutgrgc-" opt; do - case "${opt}" in - h) show_help; exit 0 ;; - se) perform_setup_env=true ;; - vp) perform_verify_prereqs=true ;; - bc) perform_build_command=true ;; - bi) perform_build_image=true ;; # Handling new option - pt) perform_package_tarballs=true ;; - ut) perform_upload_tarballs=true ;; - gr) perform_github_release=true ;; - gc) perform_generate_changelog=true ;; - --) case "${OPTARG}" in - help) show_help; exit 0 ;; - setup-env) perform_setup_env=true ;; - verify-prereqs) perform_verify_prereqs=true ;; - build-command) perform_build_command=true ;; - build-image) perform_build_image=true ;; # Handling new long option - package-tarballs) perform_package_tarballs=true ;; - upload-tarballs) perform_upload_tarballs=true ;; - github-release) perform_github_release=true ;; - generate-changelog) perform_generate_changelog=true ;; - *) echo "Invalid option: --${OPTARG}"; show_help; exit 1 ;; - esac ;; - *) show_help; exit 1 ;; - esac + case "${opt}" in + h) show_help; exit 0 ;; + se) perform_setup_env=true ;; + vp) perform_verify_prereqs=true ;; + bc) perform_build_command=true ;; + bi) perform_build_image=true ;; # Handling new option + pt) perform_package_tarballs=true ;; + ut) perform_upload_tarballs=true ;; + gr) perform_github_release=true ;; + gc) perform_generate_changelog=true ;; + --) case "${OPTARG}" in + help) show_help; exit 0 ;; + setup-env) perform_setup_env=true ;; + verify-prereqs) perform_verify_prereqs=true ;; + build-command) perform_build_command=true ;; + build-image) perform_build_image=true ;; # Handling new long option + package-tarballs) perform_package_tarballs=true ;; + upload-tarballs) perform_upload_tarballs=true ;; + github-release) perform_github_release=true ;; + generate-changelog) perform_generate_changelog=true ;; + *) echo "Invalid option: --${OPTARG}"; show_help; exit 1 ;; + esac ;; + *) show_help; exit 1 ;; + esac done # Enable all actions by default if no options are provided if [ "$#" -eq 0 ]; then - perform_setup_env=true - perform_verify_prereqs=true - perform_build_command=true - 
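The option loop above is only being re-indented, but it is worth showing how `--long-option` flags commonly reach a `case` on `${OPTARG}`: the `-:` optstring idiom makes getopts hand back `-` as the option character and the remaining text as its argument. This is the general pattern, not necessarily the exact parsing release.sh performs, and the flag names below are illustrative:

```bash
#!/usr/bin/env bash
# Generic getopts idiom for mixing short (-h) and long (--help) options.
perform_setup_env=false

while getopts "h-:" opt; do
  case "${opt}" in
    h) echo "Usage: $(basename "$0") [options]"; exit 0 ;;
    -)  # long option: getopts leaves the text after "--" in OPTARG
      case "${OPTARG}" in
        help)      echo "Usage: $(basename "$0") [options]"; exit 0 ;;
        setup-env) perform_setup_env=true ;;
        *)         echo "Invalid option: --${OPTARG}" >&2; exit 1 ;;
      esac ;;
    *) exit 1 ;;
  esac
done
shift $((OPTIND - 1))

echo "setup-env: ${perform_setup_env}"
```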
perform_package_tarballs=true - perform_upload_tarballs=true - perform_github_release=true - perform_generate_changelog=true - # TODO: Not enabling build_image by default - # perform_build_image=true + perform_setup_env=true + perform_verify_prereqs=true + perform_build_command=true + perform_package_tarballs=true + perform_upload_tarballs=true + perform_github_release=true + perform_generate_changelog=true + # TODO: Not enabling build_image by default + # perform_build_image=true fi # Function to perform actions perform_action() { - local flag=$1 - local message=$2 - local command=$3 - - if [ "$flag" == true ]; then - openim::log::info "## $message..." - if ! $command; then - openim::log::errexit "Error in $message" - fi + local flag=$1 + local message=$2 + local command=$3 + + if [ "$flag" == true ]; then + openim::log::info "## $message..." + if ! $command; then + openim::log::errexit "Error in $message" fi + fi } echo "Starting script execution..." diff --git a/scripts/start-all.sh b/scripts/start-all.sh index b1f9b865ba..5f34cbdbee 100755 --- a/scripts/start-all.sh +++ b/scripts/start-all.sh @@ -39,34 +39,34 @@ openim::log::install_errexit # Function to execute the scripts. function execute_scripts() { - for script_path in "${OPENIM_SERVER_SCRIPT_START_LIST[@]}"; do - # Extract the script name without extension for argument generation. - script_name_with_prefix=$(basename "$script_path" .sh) - - # Remove the "openim-" prefix. - script_name=${script_name_with_prefix#openim-} - - # Construct the argument based on the script name. - arg="openim::${script_name}::start" - - # Check if the script file exists and is executable. - if [[ -x "$script_path" ]]; then - openim::log::status "Starting script: ${script_path##*/}" # Log the script name. - - # Execute the script with the constructed argument. - "$script_path" "$arg" - - # Check if the script executed successfully. - if [[ $? -eq 0 ]]; then - openim::log::info "${script_path##*/} executed successfully." - else - openim::log::errexit "Error executing ${script_path##*/}." - fi - else - openim::log::errexit "Script ${script_path##*/} is missing or not executable." - fi - done - sleep 0.5 + for script_path in "${OPENIM_SERVER_SCRIPT_START_LIST[@]}"; do + # Extract the script name without extension for argument generation. + script_name_with_prefix=$(basename "$script_path" .sh) + + # Remove the "openim-" prefix. + script_name=${script_name_with_prefix#openim-} + + # Construct the argument based on the script name. + arg="openim::${script_name}::start" + + # Check if the script file exists and is executable. + if [[ -x "$script_path" ]]; then + openim::log::status "Starting script: ${script_path##*/}" # Log the script name. + + # Execute the script with the constructed argument. + "$script_path" "$arg" + + # Check if the script executed successfully. + if [[ $? -eq 0 ]]; then + openim::log::info "${script_path##*/} executed successfully." + else + openim::log::errexit "Error executing ${script_path##*/}." + fi + else + openim::log::errexit "Script ${script_path##*/} is missing or not executable." + fi + done + sleep 0.5 } diff --git a/scripts/stop-all.sh b/scripts/stop-all.sh index 692d2d1e4d..1d2eddd78b 100755 --- a/scripts/stop-all.sh +++ b/scripts/stop-all.sh @@ -14,7 +14,7 @@ # limitations under the License. # This script is stop all openim service -# +# # Usage: `scripts/stop.sh`. # Encapsulated as: `make stop`. @@ -38,8 +38,8 @@ openim::util::stop_services_with_name "${OPENIM_OUTPUT_HOSTBIN}" echo -n "Stopping services 15 seconds." 
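The execute_scripts loop above relies on a naming convention rather than configuration: each start script's filename determines the function argument it is invoked with. A minimal sketch of that derivation (the path is hypothetical):

```bash
#!/usr/bin/env bash
# A script named scripts/openim-api.sh is invoked with the argument "openim::api::start".
script_path="./scripts/openim-api.sh"                     # hypothetical path

script_name_with_prefix=$(basename "$script_path" .sh)    # openim-api
script_name=${script_name_with_prefix#openim-}            # api
arg="openim::${script_name}::start"

echo "would run: ${script_path} ${arg}"
```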
for i in {1..15}; do - echo -n "." - sleep 1 + echo -n "." + sleep 1 done echo -e "\nServices stopped." diff --git a/scripts/update-generated-docs.sh b/scripts/update-generated-docs.sh index 93f0f86c16..c37c4a1f9e 100755 --- a/scripts/update-generated-docs.sh +++ b/scripts/update-generated-docs.sh @@ -28,10 +28,10 @@ source "${OPENIM_ROOT}/scripts/lib/init.sh" openim::golang::setup_env BINS=( - gendocs - genopenimdocs - genman - genyaml + gendocs + genopenimdocs + genman + genyaml ) make -C "${OPENIM_ROOT}" WHAT="${BINS[*]}" diff --git a/scripts/verify-shellcheck.sh b/scripts/verify-shellcheck.sh index de860115e7..8a5ad7321e 100755 --- a/scripts/verify-shellcheck.sh +++ b/scripts/verify-shellcheck.sh @@ -43,6 +43,54 @@ disabled=( 1091 # this lint prefers command -v to which, they are not the same 2230 + # Error SC2155 indicates that you should separate variable declaration and assignment to avoid masking the return value of the command. + # In Bash scripts, when you declare and assign a local variable at the same time a command is executed, you only get the output of the command, but not the exit status (return value) of the command. # + 2155 + # ShellCheck issues SC2086 warnings when you refer to a variable in a script but don't put it in double quotes.This can lead to unexpected behavior when scripts encounter Spaces, + # newlines, and wildcards in file names or other data. + 2086 + 2206 + + # TODO: 需要修复,然后开启 + 2034 + 2048 + 2148 + 2059 + 2214 + 2145 + 2128 + 2550 + 2046 + 2181 + 1102 + 2045 + 2068 + 2145 + 2207 + 2231 + 2013 + 2154 + 2120 + 1083 + 2001 + 2012 + 2016 + 2164 + 2223 + 2166 + 2119 + 2162 + 2295 + 2002 + 2004 + 2202 + 2178 + 2064 + 2260 + 2043 + 2178 + 2044 + 2153 ) # comma separate for passing to shellcheck join_by() { @@ -65,14 +113,18 @@ cd "${OPENIM_ROOT}" # forked should be linted and fixed. all_shell_scripts=() while IFS=$'\n' read -r script; - do git check-ignore -q "$script" || all_shell_scripts+=("$script"); -done < <(find . -name "*.sh" \ +do git check-ignore -q "$script" || all_shell_scripts+=("$script"); + done < <(find . -name "*.sh" \ -not \( \ - -path ./_\* -o \ - -path ./.git\* -o \ - -path ./vendor\* -o \ - \( -path ./third_party\* -a -not -path ./third_party/forked\* \) \ - \)) + -path ./_\* -o \ + -path ./.git\* -o \ + -path ./Godeps\* -o \ + -path ./_output\* -o \ + -path ./components\* -o \ + -path ./logs\* -o \ + -path ./vendor\* -o \ + \( -path ./third_party\* -a -not -path ./third_party/forked\* \) \ +\) -print 2>/dev/null) # detect if the host machine has the required shellcheck version installed # if so, we will use that instead. @@ -113,8 +165,8 @@ if ${HAVE_SHELLCHECK}; then else openim::log::info "Using shellcheck ${SHELLCHECK_VERSION} docker image." "${DOCKER}" run \ - --rm -v "${OPENIM_ROOT}:"${OPENIM_ROOT}"" -w "${OPENIM_ROOT}" \ - "${SHELLCHECK_IMAGE}" \ + --rm -v "${OPENIM_ROOT}:${OPENIM_ROOT}" -w "${OPENIM_ROOT}" \ + "${SHELLCHECK_IMAGE}" \ shellcheck "${SHELLCHECK_OPTIONS[@]}" "${all_shell_scripts[@]}" >&2 || res=$? fi diff --git a/scripts/verify-spelling.sh b/scripts/verify-spelling.sh index 487c68cdee..2c02dccf7d 100755 --- a/scripts/verify-spelling.sh +++ b/scripts/verify-spelling.sh @@ -25,17 +25,8 @@ OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. export OPENIM_ROOT source "${OPENIM_ROOT}/scripts/lib/init.sh" -# Ensure that we find the binaries we build before anything else. 
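The scripts/verify-spelling.sh change that begins here stops installing misspell on the fly and instead calls a prebuilt binary under `_output/tools/`, matching the new `install.misspell` Make target above. A hedged sketch of the same flow driven by hand; how the Makefile actually routes the binary into `_output/tools` is not shown in the hunk, so the explicit GOBIN below is an assumption:

```bash
#!/usr/bin/env bash
# Install misspell into the repo-local tools directory, then run it over tracked files.
OPENIM_ROOT="${OPENIM_ROOT:-$(pwd)}"

GOBIN="${OPENIM_ROOT}/_output/tools" go install github.com/client9/misspell/cmd/misspell@latest

# Same invocation shape as verify-spelling.sh: ignore a few known false positives and
# treat findings as errors. (The script additionally filters paths listed in
# scripts/.spelling_failures, omitted here for brevity.)
git ls-files | xargs "${OPENIM_ROOT}/_output/tools/misspell" -i "Creater,creater,ect" -error -o stderr
```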
-export GOBIN="${KUBE_OUTPUT_BINPATH}" -PATH="${GOBIN}:${PATH}" - -# Install tools we need -pushd "${OPENIM_ROOT}/tools" >/dev/null - GO111MODULE=on go install github.com/client9/misspell/cmd/misspell -popd >/dev/null - # Spell checking # All the skipping files are defined in scripts/.spelling_failures skipping_file="${OPENIM_ROOT}/scripts/.spelling_failures" failing_packages=$(sed "s| | -e |g" "${skipping_file}") -git ls-files | grep -v -e "${failing_packages}" | xargs misspell -i "Creater,creater,ect" -error -o stderr +git ls-files | grep -v -e "${failing_packages}" | xargs "$OPENIM_ROOT/_output/tools/misspell" -i "Creater,creater,ect" -error -o stderr diff --git a/scripts/verify-typecheck.sh b/scripts/verify-typecheck.sh index a0b8181354..62fca40491 100755 --- a/scripts/verify-typecheck.sh +++ b/scripts/verify-typecheck.sh @@ -33,7 +33,7 @@ cd "${OPENIM_ROOT}" ret=0 TYPECHECK_SERIAL="${TYPECHECK_SERIAL:-false}" scripts/run-in-gopath.sh \ - go run test/typecheck/typecheck.go "$@" "--serial=$TYPECHECK_SERIAL" || ret=$? +go run test/typecheck/typecheck.go "$@" "--serial=$TYPECHECK_SERIAL" || ret=$? if [[ $ret -ne 0 ]]; then openim::log::error "Type Check has failed. This may cause cross platform build failures." >&2 openim::log::error "Please see https://github.com/openimsdk/open-im-server/tree/main/test/typecheck for more information." >&2 diff --git a/scripts/wait-for-it.sh b/scripts/wait-for-it.sh index 99a36affe2..c05b856782 100755 --- a/scripts/wait-for-it.sh +++ b/scripts/wait-for-it.sh @@ -30,119 +30,119 @@ Usage: Timeout in seconds, zero for no timeout -- COMMAND ARGS Execute command with args after the test finishes USAGE - exit 1 + exit 1 } wait_for() { - if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then - echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + else + echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout" + fi + WAITFORIT_start_ts=$(date +%s) + while : + do + if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then + nc -z $WAITFORIT_HOST $WAITFORIT_PORT + WAITFORIT_result=$? else - echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout" + (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 + WAITFORIT_result=$? fi - WAITFORIT_start_ts=$(date +%s) - while : - do - if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then - nc -z $WAITFORIT_HOST $WAITFORIT_PORT - WAITFORIT_result=$? - else - (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 - WAITFORIT_result=$? 
- fi - if [[ $WAITFORIT_result -eq 0 ]]; then - WAITFORIT_end_ts=$(date +%s) - echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" - break - fi - sleep 1 - done - return $WAITFORIT_result + if [[ $WAITFORIT_result -eq 0 ]]; then + WAITFORIT_end_ts=$(date +%s) + echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" + break + fi + sleep 1 + done + return $WAITFORIT_result } wait_for_wrapper() { - # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 - if [[ $WAITFORIT_QUIET -eq 1 ]]; then - timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & - else - timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & - fi - WAITFORIT_PID=$! - trap "kill -INT -$WAITFORIT_PID" INT - wait $WAITFORIT_PID - WAITFORIT_RESULT=$? - if [[ $WAITFORIT_RESULT -ne 0 ]]; then - echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" - fi - return $WAITFORIT_RESULT + # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 + if [[ $WAITFORIT_QUIET -eq 1 ]]; then + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + else + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + fi + WAITFORIT_PID=$! + trap "kill -INT -$WAITFORIT_PID" INT + wait $WAITFORIT_PID + WAITFORIT_RESULT=$? 
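The wait_for function re-indented above boils down to a one-second retry loop around a TCP probe. A standalone sketch of the non-busybox path, using bash's `/dev/tcp` the same way the script does (host and port are placeholders; on busybox the script switches to `nc -z`):

```bash
#!/usr/bin/env bash
# Retry a TCP connect once per second until the target answers.
host="127.0.0.1"   # placeholder target
port="13306"       # placeholder port

until (echo -n > "/dev/tcp/${host}/${port}") >/dev/null 2>&1; do
  echo "waiting for ${host}:${port} ..." >&2
  sleep 1
done
echo "${host}:${port} is available"
```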
+ if [[ $WAITFORIT_RESULT -ne 0 ]]; then + echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + fi + return $WAITFORIT_RESULT } # process arguments while [[ $# -gt 0 ]] do - case "$1" in - *:* ) - WAITFORIT_hostport=(${1//:/ }) - WAITFORIT_HOST=${WAITFORIT_hostport[0]} - WAITFORIT_PORT=${WAITFORIT_hostport[1]} - shift 1 - ;; - --child) - WAITFORIT_CHILD=1 - shift 1 - ;; - -q | --quiet) - WAITFORIT_QUIET=1 - shift 1 - ;; - -s | --strict) - WAITFORIT_STRICT=1 - shift 1 - ;; - -h) - WAITFORIT_HOST="$2" - if [[ $WAITFORIT_HOST == "" ]]; then break; fi - shift 2 - ;; - --host=*) - WAITFORIT_HOST="${1#*=}" - shift 1 - ;; - -p) - WAITFORIT_PORT="$2" - if [[ $WAITFORIT_PORT == "" ]]; then break; fi - shift 2 - ;; - --port=*) - WAITFORIT_PORT="${1#*=}" - shift 1 - ;; - -t) - WAITFORIT_TIMEOUT="$2" - if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi - shift 2 - ;; - --timeout=*) - WAITFORIT_TIMEOUT="${1#*=}" - shift 1 - ;; - --) - shift - WAITFORIT_CLI=("$@") - break - ;; - --help) - usage - ;; - *) - echoerr "Unknown argument: $1" - usage - ;; - esac + case "$1" in + *:* ) + WAITFORIT_hostport=(${1//:/ }) + WAITFORIT_HOST=${WAITFORIT_hostport[0]} + WAITFORIT_PORT=${WAITFORIT_hostport[1]} + shift 1 + ;; + --child) + WAITFORIT_CHILD=1 + shift 1 + ;; + -q | --quiet) + WAITFORIT_QUIET=1 + shift 1 + ;; + -s | --strict) + WAITFORIT_STRICT=1 + shift 1 + ;; + -h) + WAITFORIT_HOST="$2" + if [[ $WAITFORIT_HOST == "" ]]; then break; fi + shift 2 + ;; + --host=*) + WAITFORIT_HOST="${1#*=}" + shift 1 + ;; + -p) + WAITFORIT_PORT="$2" + if [[ $WAITFORIT_PORT == "" ]]; then break; fi + shift 2 + ;; + --port=*) + WAITFORIT_PORT="${1#*=}" + shift 1 + ;; + -t) + WAITFORIT_TIMEOUT="$2" + if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi + shift 2 + ;; + --timeout=*) + WAITFORIT_TIMEOUT="${1#*=}" + shift 1 + ;; + --) + shift + WAITFORIT_CLI=("$@") + break + ;; + --help) + usage + ;; + *) + echoerr "Unknown argument: $1" + usage + ;; + esac done if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then - echoerr "Error: you need to provide a host and port to test." - usage + echoerr "Error: you need to provide a host and port to test." + usage fi WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15} @@ -156,36 +156,36 @@ WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlin WAITFORIT_BUSYTIMEFLAG="" if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then - WAITFORIT_ISBUSY=1 - # Check if busybox timeout uses -t flag - # (recent Alpine versions don't support -t anymore) - if timeout &>/dev/stdout | grep -q -e '-t '; then - WAITFORIT_BUSYTIMEFLAG="-t" - fi + WAITFORIT_ISBUSY=1 + # Check if busybox timeout uses -t flag + # (recent Alpine versions don't support -t anymore) + if timeout &>/dev/stdout | grep -q -e '-t '; then + WAITFORIT_BUSYTIMEFLAG="-t" + fi else - WAITFORIT_ISBUSY=0 + WAITFORIT_ISBUSY=0 fi if [[ $WAITFORIT_CHILD -gt 0 ]]; then + wait_for + WAITFORIT_RESULT=$? + exit $WAITFORIT_RESULT +else + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + wait_for_wrapper + WAITFORIT_RESULT=$? + else wait_for WAITFORIT_RESULT=$? - exit $WAITFORIT_RESULT -else - if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then - wait_for_wrapper - WAITFORIT_RESULT=$? - else - wait_for - WAITFORIT_RESULT=$? 
- fi + fi fi if [[ $WAITFORIT_CLI != "" ]]; then - if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then - echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" - exit $WAITFORIT_RESULT - fi - exec "${WAITFORIT_CLI[@]}" -else + if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then + echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" exit $WAITFORIT_RESULT + fi + exec "${WAITFORIT_CLI[@]}" +else + exit $WAITFORIT_RESULT fi diff --git a/test/typecheck/README.md b/test/typecheck/README.md index 6ba462ec94..e5b76d4c69 100644 --- a/test/typecheck/README.md +++ b/test/typecheck/README.md @@ -1,27 +1,52 @@ -# OpenIM Typecheck +# OpenIM Typecheck: Cross-Platform Source Code Type Checking for Go -OpenIM Typecheck 为所有 Go 构建平台进行跨平台源代码类型检查。 +## Introduction -## 优点 +OpenIM Typecheck is a robust tool designed for cross-platform source code type checking across all Go build platforms. This utility leverages Go’s built-in parsing and type-check libraries (`go/parser` and `go/types`) to deliver efficient and reliable code analysis. -- **速度**:OpenIM 完整编译大约需要 3 分钟,而使用 Typecheck 只需数秒。 -- **资源消耗**:与需要 >40GB 的 RAM 不同,Typecheck 只需 <8GB 的 RAM。 +## Advantages -## 实现 +- **Speed**: A complete compilation with OpenIM can take approximately 3 minutes. In contrast, OpenIM Typecheck achieves this in mere seconds, significantly enhancing productivity. +- **Resource Efficiency**: Unlike the typical requirement of over 40GB of RAM for standard processes, Typecheck operates effectively with less than 8GB of RAM. This reduction in resource consumption makes it highly suitable for a variety of systems, reducing overheads and facilitating smoother operations. -OpenIM Typecheck 使用 Go 内置的解析和类型检查库 (`go/parser` 和 `go/types`)。然而,这些库并不是 go 编译器所使用的。偶尔会出现不匹配的情况,但总的来说,它们是相当接近的。 +## Implementation -## 错误处理 +OpenIM Typecheck employs Go's native parsing and type-checking libraries (`go/parser` and `go/types`). However, it's important to note that these libraries aren't identical to those used by the Go compiler. While occasional mismatches may occur, these libraries generally provide close approximations to the compiler's functionality, offering a reliable basis for type checking. -如果错误不会阻止构建,可以忽略。 +## Error Handling -**`go/types` 报告的错误,但 `go build` 不会**: -- **真正的错误**(根据规范): - - 应尽量修复。如果无法修复或正在进行中(例如,已被外部引用的代码),则可以忽略。 - - 例如:闭包中的未使用变量 -- **不真实的错误**: - - 应忽略并在适当的情况下向上游报告。 - - 例如:staging 和 generated 类型之间的类型检查不匹配 +Typecheck's approach to error handling is pragmatic, focusing on practicality and build continuity. -**`go build` 报告的错误,但我们不会**: -- CGo 错误,包括语法和链接器错误。 +**Errors reported by `go/types` but not by `go build`**: +- **Actual Errors** (as per the specification): + - These should ideally be rectified. If rectification is not feasible, such as in cases of ongoing work or external dependencies in the code, these errors can be overlooked. + - Example: Unused variables within a closure. +- **False Positives**: + - These errors should be ignored and, where appropriate, reported upstream for resolution. + - Example: Type mismatches between staging and generated types. + +**Errors reported by `go build` but not by us**: +- CGo-related errors, including both syntax and linker issues, are outside our scope. 
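Before the patch moves on to the typecheck README: two typical invocations of scripts/wait-for-it.sh, matching the options parsed above (addresses and ports are illustrative):

```bash
# Wait up to 30 seconds for a MySQL port, then run a follow-up command.
./scripts/wait-for-it.sh 127.0.0.1:13306 -t 30 -- echo "mysql is reachable"

# Long-option form: no timeout, strict mode (skip the command if the wait failed).
./scripts/wait-for-it.sh --host=127.0.0.1 --port=16379 --timeout=0 --strict -- echo "redis is reachable"
```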
+ +## Usage + +### Locally + +To run Typecheck locally, simply use the following command: + +```bash +make verify +``` + +### Continuous Integration (CI) + +In CI environments, Typecheck can be integrated into the workflow as follows: + +```yaml +- name: Typecheck + run: make verify +``` + +This streamlined process facilitates efficient error detection and resolution, ensuring a robust and reliable build pipeline. + +More to learn about typecheck [share blog](https://nsddd.top/posts/concurrent-type-checking-and-cross-platform-development-in-go/) \ No newline at end of file diff --git a/test/typecheck/typecheck.go b/test/typecheck/typecheck.go index 8f353881b7..975ce988da 100644 --- a/test/typecheck/typecheck.go +++ b/test/typecheck/typecheck.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// do a fast type check of kubernetes code, for all platforms. +// do a fast type check of openim code, for all platforms. package main import ( @@ -47,14 +47,12 @@ var ( crossPlatforms = []string{ "linux/amd64", "windows/386", "darwin/amd64", "darwin/arm64", - "linux/386", + "linux/386", "linux/arm", "windows/amd64", "linux/arm64", "linux/ppc64le", "linux/s390x", "windows/arm64", } - // "linux/arm", - // directories we always ignore standardIgnoreDirs = []string{ // Staging code is symlinked from vendor/k8s.io, and uses import @@ -62,17 +60,14 @@ var ( // inside of staging/, but works when typechecked as part of vendor/. "staging", "components", + "logs", // OS-specific vendor code tends to be imported by OS-specific // packages. We recursively typecheck imported vendored packages for // each OS, but don't typecheck everything for every OS. "vendor", + "test", "_output", - "OpenIMSKD/tools", - // This is a weird one. /testdata/ is *mostly* ignored by Go, - // and this translates to kubernetes/vendor not working. - // edit/record.go doesn't compile without gopkg.in/yaml.v2 - // in $GOSRC/$GOROOT (both typecheck and the shell script). - "pkg/kubectl/cmd/testdata/edit", + "*/mw/rpc_server_interceptor.go", // Tools we use for maintaining the code base but not necessarily // ship as part of the release "sopenim::golang::setup_env:tools/yamlfmt/yamlfmt.go:tools", diff --git a/test/wrktest.sh b/test/wrktest.sh index a97ebf0439..10a41121f5 100755 --- a/test/wrktest.sh +++ b/test/wrktest.sh @@ -34,7 +34,7 @@ openim_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd -P)" wrkdir="${openim_root}/_output/wrk" jobname="openim-api" duration="300s" -threads=$((3 * `grep -c processor /proc/cpuinfo`)) +threads=$((3 * $(grep -c processor /proc/cpuinfo))) source "${openim_root}/scripts/lib/color.sh" @@ -122,7 +122,7 @@ if (s ~ "s") { # Remove existing data file function openim::wrk::prepare() { - rm -f ${wrkdir}/${datfile} + rm -f "${wrkdir}"/"${datfile}" } # Plot according to gunplot data file @@ -216,7 +216,7 @@ openim::wrk::start_performance_test() { do wrkcmd="${cmd} -c ${c} $1" echo "Running wrk command: ${wrkcmd}" - result=`eval ${wrkcmd}` + result=$(eval "${wrkcmd}") openim::wrk::convert_plot_data "${result}" done @@ -241,9 +241,10 @@ while getopts "hd:n:" opt;do esac done -shift $(($OPTIND-1)) +shift $((OPTIND-1)) + +mkdir -p "${wrkdir}" -mkdir -p ${wrkdir} case $1 in "diff") if [ "$#" -lt 3 ];then @@ -255,7 +256,7 @@ case $1 in t2=$(basename $3|sed 's/.dat//g') # 对比图中粉色线条名称 join $2 $3 > /tmp/plot_diff.dat - openim::wrk::plot_diff `basename $2` `basename $3` + openim::wrk::plot_diff "$(basename "$2")" "$(basename "$3")" exit 0 ;; *) diff --git a/tools/data-conversion/README.md b/tools/data-conversion/README.md index 22126e4d86..71387af7fe 100644 --- a/tools/data-conversion/README.md +++ b/tools/data-conversion/README.md @@ -31,7 +31,7 @@ var ( usernameV3 = "root" passwordV3 = "openIM123" addrV3 = "127.0.0.1:13306" - databaseV3 = "openIM_v3" + databaseV3 = "openim_v3" ) ``` diff --git a/tools/data-conversion/openim/cmd/conversion-mysql/conversion-mysql.go b/tools/data-conversion/openim/cmd/conversion-mysql/conversion-mysql.go index 8992e12c4d..8a951e16f9 100644 --- a/tools/data-conversion/openim/cmd/conversion-mysql/conversion-mysql.go +++ b/tools/data-conversion/openim/cmd/conversion-mysql/conversion-mysql.go @@ -38,7 +38,7 @@ func main() { usernameV3 = "root" // v3版本mysql用户名 passwordV3 = "openIM123" // v3版本mysql密码 addrV3 = "127.0.0.1:13306" // v3版本mysql地址 - databaseV3 = "openIM_v3" // v3版本mysql数据库名字 + databaseV3 = "openim_v3" // v3版本mysql数据库名字 ) var concurrency = 1 // 并发数量 diff --git a/tools/data-conversion/openim/common/config.go b/tools/data-conversion/openim/common/config.go index e2bd14a051..e993038d13 100644 --- a/tools/data-conversion/openim/common/config.go +++ b/tools/data-conversion/openim/common/config.go @@ -44,7 +44,7 @@ const ( UsernameV3 = "root" PasswordV3 = "openIM123" IpV3 = "43.134.63.160:13306" - DatabaseV3 = "openIM_v3" + DatabaseV3 = "openim_v3" ) // V3 chat. diff --git a/tools/data-conversion/openim/mysql/cmd.go b/tools/data-conversion/openim/mysql/cmd.go index f5a23facb5..ab3857fba7 100644 --- a/tools/data-conversion/openim/mysql/cmd.go +++ b/tools/data-conversion/openim/mysql/cmd.go @@ -38,7 +38,7 @@ func Cmd() { usernameV3 = "root" passwordV3 = "openIM123" addrV3 = "203.56.175.233:13306" - databaseV3 = "openIM_v3" + databaseV3 = "openim_v3" ) log.SetFlags(log.LstdFlags | log.Llongfile) dsnV2 := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local", usernameV2, passwordV2, addrV2, databaseV2) diff --git a/tools/formitychecker/checker/checker.go b/tools/formitychecker/checker/checker.go index 187b509bc1..7a16433589 100644 --- a/tools/formitychecker/checker/checker.go +++ b/tools/formitychecker/checker/checker.go @@ -1,3 +1,17 @@ +// Copyright © 2024 OpenIM. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
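A small aside on the wrktest.sh hunk above: the thread count moves from backticks to `$(...)`. Both forms compute three worker threads per CPU; the command-substitution form nests and quotes more predictably. A sketch:

```bash
#!/usr/bin/env bash
# Three wrk threads per CPU, as wrktest.sh computes it after the change above.
threads=$((3 * $(grep -c processor /proc/cpuinfo)))

# Equivalent on systems with GNU coreutils:
threads=$((3 * $(nproc)))

echo "wrk would be launched with -t ${threads}"
```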
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package checker import ( diff --git a/tools/formitychecker/config/config.go b/tools/formitychecker/config/config.go index 95bcee3469..0c4f6a16b8 100644 --- a/tools/formitychecker/config/config.go +++ b/tools/formitychecker/config/config.go @@ -1,3 +1,17 @@ +// Copyright © 2024 OpenIM. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package config import ( diff --git a/tools/formitychecker/formitychecker.go b/tools/formitychecker/formitychecker.go index d078009b66..2bedbfb32d 100644 --- a/tools/formitychecker/formitychecker.go +++ b/tools/formitychecker/formitychecker.go @@ -1,3 +1,17 @@ +// Copyright © 2024 OpenIM. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package main import ( diff --git a/tools/imctl/.gitignore b/tools/imctl/.gitignore index a2e773394e..72ff17ca90 100644 --- a/tools/imctl/.gitignore +++ b/tools/imctl/.gitignore @@ -36,20 +36,6 @@ config/config.yaml .env ./.env -### OpenIM deploy ### -deploy/openim_demo -deploy/openim-api -deploy/openim-rpc-msg_gateway -deploy/openim-msgtransfer -deploy/openim-push -deploy/openim_timer_task -deploy/openim-rpc-user -deploy/openim-rpc-friend -deploy/openim-rpc-group -deploy/openim-rpc-msg -deploy/openim-rpc-auth -deploy/Open-IM-SDK-Core - # files used by the developer .idea.md .todo.md diff --git a/tools/imctl/imctl.go b/tools/imctl/imctl.go index 1a501cafc1..91161326ea 100644 --- a/tools/imctl/imctl.go +++ b/tools/imctl/imctl.go @@ -1,3 +1,17 @@ +// Copyright © 2024 OpenIM. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package main import "fmt"
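The patch closes by adding the standard Apache-2.0 header to the formitychecker and imctl entry points. A quick way to list any tracked Go files still missing it; this is a convenience sketch, not the repository's own verification tooling:

```bash
#!/usr/bin/env bash
# List tracked Go files whose opening lines lack the Apache-2.0 header.
while IFS= read -r f; do
  head -n 5 "$f" | grep -q "Apache License" || echo "missing header: $f"
done < <(git ls-files '*.go')
```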