diff --git a/.github/workflows/cla-assistant.yml b/.github/workflows/cla-assistant.yml index 71bdb67992..7d44b05eb4 100644 --- a/.github/workflows/cla-assistant.yml +++ b/.github/workflows/cla-assistant.yml @@ -33,8 +33,8 @@ jobs: remote-repository-name: cla create-file-commit-message: 'Creating file for storing CLA Signatures' # signed-commit-message: '$contributorName has signed the CLA in $owner/$repo#$pullRequestNo' - custom-notsigned-prcomment: '💕 Thank you for your contribution and please kindly read and sign our [CLA Docs](https://github.com/OpenIM-Robot/cla/blob/main/README.md)' - custom-pr-sign-comment: 'The signature to be committed in order to sign the CLA' - custom-allsigned-prcomment: '🤖 All Contributors have signed the [CLA](https://github.com/OpenIM-Robot/cla/blob/main/README.md).
The signed information is recorded [🤖here](https://github.com/openim-sigs/cla/tree/main/signatures/cla.json)' + custom-notsigned-prcomment: '💕 Thank you for your contribution and please kindly read and sign our CLA. [CLA Docs](https://github.com/OpenIM-Robot/cla/blob/main/README.md)' + custom-pr-sign-comment: 'I have read the CLA Document and I hereby sign the CLA' + custom-allsigned-prcomment: '🤖 All Contributors have signed the [CLA](https://github.com/OpenIM-Robot/cla/blob/main/README.md).
The signed information is recorded [**here**](https://github.com/OpenIM-Robot/cla/blob/main/signatures/cla.json)' #lock-pullrequest-aftermerge: false - if you don't want this bot to automatically lock the pull request after merging (default - true) #use-dco-flag: true - If you are using DCO instead of CLA diff --git a/.github/workflows/go-build-test.yml b/.github/workflows/go-build-test.yml index 14546f4f45..5341c919d6 100644 --- a/.github/workflows/go-build-test.yml +++ b/.github/workflows/go-build-test.yml @@ -7,6 +7,9 @@ on: pull_request: branches: - main + paths-ignore: + - '**/*.md' + workflow_dispatch: jobs: diff --git a/.github/workflows/help-comment-issue.yml b/.github/workflows/help-comment-issue.yml index c4e72ffc67..b1cc621828 100644 --- a/.github/workflows/help-comment-issue.yml +++ b/.github/workflows/help-comment-issue.yml @@ -29,7 +29,7 @@ jobs: uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ github.event.issue.number }} - token: ${{ secrets.BOT_GITHUB_TOKEN }} + token: ${{ secrets.BOT_TOKEN }} body: | This issue is available for anyone to work on. **Make sure to reference this issue in your pull request.** :sparkles: Thank you for your contribution! :sparkles: [Join slack 🤖](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q) to connect and communicate with our developers. diff --git a/.github/workflows/issue-translator.yml b/.github/workflows/issue-translator.yml new file mode 100644 index 0000000000..6a8528ae62 --- /dev/null +++ b/.github/workflows/issue-translator.yml @@ -0,0 +1,19 @@ +name: 'issue-translator' +on: + issue_comment: + types: [created] + issues: + types: [opened] + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: usthe/issues-translate-action@v2.7 + with: + BOT_GITHUB_TOKEN: ${{ secrets.BOT_TOKEN }} + IS_MODIFY_TITLE: true + # not required, default false. Decide whether to modify the issue title + # if true, the robot account @Issues-translate-bot must have modification permissions, invite @Issues-translate-bot to your project or use your custom bot. + CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically. 👯👭🏻🧑‍🤝‍🧑👫🧑🏿‍🤝‍🧑🏻👩🏾‍🤝‍👨🏿👬🏿 + # not required. Customize the translation robot prefix message.
\ No newline at end of file diff --git a/config/discovery.yml b/config/discovery.yml index 3d96ff9b66..78a36f3d1f 100644 --- a/config/discovery.yml +++ b/config/discovery.yml @@ -1,4 +1,4 @@ -enable: "etcd" +enable: etcd etcd: rootDirectory: openim address: [ localhost:12379 ] diff --git a/config/grafana-template/Demo.json b/config/grafana-template/Demo.json index c4668917f4..ea17d2c0ab 100644 --- a/config/grafana-template/Demo.json +++ b/config/grafana-template/Demo.json @@ -54,7 +54,7 @@ "liveNow": false, "panels": [ { - "collapsed": true, + "collapsed": false, "gridPos": { "h": 1, "w": 24, @@ -62,1120 +62,1251 @@ "y": 0 }, "id": 35, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "panels": [], + "title": "Server", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Is the service up.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" }, - "description": "Is the service up.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "stepBefore", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 2, - "pointSize": 9, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bool_on_off" + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 6, - "y": 1 - }, - "id": 1, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "insertNulls": false, + "lineInterpolation": "stepBefore", + "lineStyle": { + "fill": "solid" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" + "lineWidth": 2, + "pointSize": 9, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "code", - "exemplar": false, - "expr": "up", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "$legendName", - "range": true, - "refId": "A" - } - ], - "title": "UP", - "type": "timeseries" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bool_on_off" }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 6, + "y": 1 + }, + "id": 1, + "options": { + "legend": { + 
"calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "This metric represents the number of online users and login users within the time frame.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "online users" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#37bbff", - "mode": "fixed", - "seriesBy": "last" - } - } - ] - } - ] + "editorMode": "code", + "exemplar": false, + "expr": "up", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "$legendName", + "range": true, + "refId": "A" + } + ], + "title": "UP", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of online users and login users within the time frame.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" }, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 12 - }, - "id": 37, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "online_user_num", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "online users", - "range": true, - "refId": "A" + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "increase(user_login_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "login num", - "range": true, - "refId": "B" + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } - ], - "title": "Login Information", - "type": "timeseries" 
- }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" }, - "description": "This metric represents the number of register users within the time frame.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "unit": "none" - }, - "overrides": [ { - "matcher": { - "id": "byName", - "options": "register users" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#7437ff", - "mode": "fixed", - "seriesBy": "last" - } - } - ] + "color": "red", + "value": 80 } ] }, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 12 - }, - "id": 59, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "online users" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#37bbff", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 12 + }, + "id": 37, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "user_register_total", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "register users", - "range": true, - "refId": "A" - } - ], - "title": "Register num", - "type": "timeseries" + "editorMode": "code", + "exemplar": false, + "expr": "online_user_num", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "online users", + "range": true, + "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "This metric represents the number of chat msg success.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": 
"linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] + "editorMode": "code", + "expr": "increase(user_login_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "login num", + "range": true, + "refId": "B" + } + ], + "title": "Login Information", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of register users within the time frame.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "unit": "none" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "register users" }, - "overrides": [] + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#7437ff", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 12 + }, + "id": 59, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 23 + "editorMode": "code", + "exemplar": false, + "expr": "user_register_total", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "register users", + "range": true, + "refId": "A" + } + ], + "title": "Register num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of chat msg success.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" }, - "id": 38, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" + "insertNulls": false, + 
"lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(single_chat_msg_process_success_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "single msgs", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "code", - "expr": "increase(group_chat_msg_process_success_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "group msgs", - "range": true, - "refId": "B" - } - ], - "title": "Chat Msg Success Num", - "type": "timeseries" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 23 + }, + "id": 38, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "This metric represents the number of chat msg failed .", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" + "editorMode": "code", + "exemplar": false, + "expr": "increase(single_chat_msg_process_success_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "single msgs", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "increase(group_chat_msg_process_success_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "group msgs", + "range": true, + "refId": "B" + } + ], + "title": "Chat Msg Success Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of chat msg failed .", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + 
"barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [ + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ { - "matcher": { - "id": "byName", - "options": "single msgs" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#ff00dc", - "mode": "fixed", - "seriesBy": "last" - } - } - ] + "color": "green", + "value": null }, { - "matcher": { - "id": "byName", - "options": "group msgs" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#0cffef", - "mode": "fixed" - } - } - ] + "color": "red", + "value": 80 } ] }, - "gridPos": { - "h": 10, - "w": 12, - "x": 12, - "y": 23 - }, - "id": 39, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "single msgs" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff00dc", + "mode": "fixed", + "seriesBy": "last" + } + } + ] }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(single_chat_msg_process_failed_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "single msgs", - "range": true, - "refId": "A" + { + "matcher": { + "id": "byName", + "options": "group msgs" }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "increase(group_chat_msg_process_failed_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "group msgs", - "range": true, - "refId": "B" - } - ], - "title": "Chat Msg Failed Num", - "type": "timeseries" + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#0cffef", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 23 + }, + "id": 39, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "This metric represents the number of msg failed offline pushed.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - 
"thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "failed msgs" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "dark-red", - "mode": "fixed", - "seriesBy": "last" - } - } - ] - } - ] - }, - "gridPos": { - "h": 11, - "w": 6, - "x": 4, - "y": 33 - }, - "id": 42, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(msg_offline_push_failed_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "failed msgs", - "range": true, - "refId": "A" - } - ], - "title": "Msg Offline Push Failed Num", - "type": "timeseries" + "editorMode": "code", + "exemplar": false, + "expr": "increase(single_chat_msg_process_failed_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "single msgs", + "range": true, + "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "This metric represents the number of failed set seq.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" + "editorMode": "code", + "expr": "increase(group_chat_msg_process_failed_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "group msgs", + "range": true, + "refId": "B" + } + ], + "title": "Chat Msg Failed Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of msg failed offline pushed.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [ + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + 
"group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ { - "matcher": { - "id": "byName", - "options": "failed msgs" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "semi-dark-green", - "mode": "fixed", - "seriesBy": "last" - } - } - ] + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 } ] }, - "gridPos": { - "h": 11, - "w": 6, - "x": 14, - "y": 33 - }, - "id": 43, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "failed msgs" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(seq_set_failed_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "failed addr: {{instance}}", - "range": true, - "refId": "A" - } - ], - "title": "Seq Set Failed Num", - "type": "timeseries" + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 8, + "x": 0, + "y": 33 + }, + "id": 42, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "This metric represents the number of successfully inserted messages.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "editorMode": "code", + "exemplar": false, + "expr": "increase(msg_offline_push_failed_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "addr:{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Msg Offline Push Failed Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of failed set seq.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": 
false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "failed msgs" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed", + "seriesBy": "last" } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 8, + "x": 8, + "y": 33 + }, + "id": 43, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "increase(seq_set_failed_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "addr: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Seq Set Failed Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of messages that take a long time to send.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "failed msgs" }, - "overrides": [] + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 8, + "x": 16, + "y": 33 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 44 + "editorMode": "code", + "exemplar": false, + "expr": "msg_long_time_push_total", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "addr:{{instance}}", + "range": true, + "refId": "A" + } + ], + 
"title": "Long Time Send Msg Total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of successfully inserted messages.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" }, - "id": 44, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(msg_insert_redis_success_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "redis: {{instance}}", - "range": true, - "refId": "A" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 44 + }, + "id": 44, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "increase(msg_insert_redis_success_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "redis: {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "increase(msg_insert_mongo_success_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "mongo: {{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Msg Success Insert Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of failed insertion messages.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "increase(msg_insert_mongo_success_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "mongo: {{instance}}", - "range": true, - "refId": "B" + "insertNulls": 
false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } - ], - "title": "Msg Success Insert Num", - "type": "timeseries" + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 44 + }, + "id": 45, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "This metric represents the number of failed insertion messages.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 12, - "y": 44 - }, - "id": 45, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } + "editorMode": "code", + "exemplar": false, + "expr": "increase(msg_insert_redis_failed_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "redis: {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(msg_insert_redis_failed_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "redis: {{instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "increase(msg_insert_mongo_failed_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "mongo: {{instance}}", - "range": true, - "refId": "B" - } - ], - "title": "Msg Failed Insert Num", - "type": "timeseries" + "editorMode": "code", + "expr": "increase(msg_insert_mongo_failed_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "mongo: {{instance}}", + "range": true, + "refId": "B" } ], - "title": "Server", - "type": "row" + "title": "Msg 
Failed Insert Num", + "type": "timeseries" }, { "collapsed": true, @@ -1183,7 +1314,7 @@ "h": 1, "w": 24, "x": 0, - "y": 1 + "y": 54 }, "id": 22, "panels": [ @@ -1973,7 +2104,7 @@ "h": 1, "w": 24, "x": 0, - "y": 2 + "y": 55 }, "id": 28, "panels": [ @@ -2827,7 +2958,7 @@ "h": 1, "w": 24, "x": 0, - "y": 3 + "y": 56 }, "id": 25, "panels": [ @@ -3377,849 +3508,848 @@ "type": "row" }, { - "collapsed": false, + "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 4 + "y": 57 }, "id": 6, - "panels": [], - "title": "Process", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "This metric represents the proportion of CPU runtime within 1 second. It is calculated as the average CPU runtime over 1 minute.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 5 - }, - "id": 5, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.3.7", - "targets": [ + "panels": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "exemplar": false, - "expr": "label_replace(\r\n rate(process_cpu_seconds_total{job=~\"$rpcNameFilter\"}[1m])*100,\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "{{job}}: {{instance}}", - "range": true, - "refId": "A" - } - ], - "title": "CPU Usage Percentage", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "This metric represents the proportion of CPU runtime within 1 second. It is calculated as the average CPU runtime over 1 minute.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "description": "This metric represents the proportion of CPU runtime within 1 second. 
It is calculated as the average CPU runtime over 1 minute.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 5 }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "thresholdsStyle": { - "mode": "off" + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" } }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "pluginVersion": "10.3.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 5 - }, - "id": 4, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n rate(process_cpu_seconds_total{job=~\"$rpcNameFilter\"}[1m])*100,\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "CPU Usage Percentage", + "type": "timeseries" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.3.7", - "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "exemplar": false, - "expr": "label_replace(\r\n rate(process_cpu_seconds_total{job!~\"$rpcNameFilter\"}[1m])*100,\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "{{job}}: {{instance}}", - "range": true, - "refId": "A" - } - ], - "title": "CPU Usage Percentage", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "This metric represents 
the number of open file descriptors.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" + "description": "This metric represents the proportion of CPU runtime within 1 second. It is calculated as the average CPU runtime over 1 minute.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "thresholdsStyle": { - "mode": "off" + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" } }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "pluginVersion": "10.3.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 16 - }, - "id": 7, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n rate(process_cpu_seconds_total{job!~\"$rpcNameFilter\"}[1m])*100,\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "CPU Usage Percentage", + "type": "timeseries" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.3.7", - "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "exemplar": false, - "expr": "label_replace(\r\n process_open_fds{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "{{job}}: {{instance}}", - "range": true, - "refId": "A" - } - ], - 
"title": "Open File Descriptors", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "This metric represents the number of open file descriptors.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "description": "This metric represents the number of open file descriptors.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 7, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "10.3.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n process_open_fds{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Open File Descriptors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of open file descriptors.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + 
} + ] + }, + "unit": "none" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "thresholdsStyle": { - "mode": "off" + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" } }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "pluginVersion": "10.3.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 16 - }, - "id": 8, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n process_open_fds{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Open File Descriptors", + "type": "timeseries" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.3.7", - "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "exemplar": false, - "expr": "label_replace(\r\n process_open_fds{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "{{job}}: {{instance}}", - "range": true, - "refId": "A" - } - ], - "title": "Open File Descriptors", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "This metric represents the number of process virtual memory bytes.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" + "description": "This metric represents the number of process virtual memory bytes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 27 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "thresholdsStyle": { - "mode": "off" + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" } }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "pluginVersion": "10.3.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n process_virtual_memory_bytes{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Virtual Memory bytes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of process virtual memory bytes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 27 - }, - "id": 9, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.3.7", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "unit": "bytes" + }, + "overrides": [] }, - "editorMode": "code", - "exemplar": false, - "expr": "label_replace(\r\n process_virtual_memory_bytes{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "{{job}}: {{instance}}", - "range": true, - "refId": "A" - } - ], - "title": "Virtual Memory bytes", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "This metric represents the number of process virtual memory bytes.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 27 }, - "custom": { 
- "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "thresholdsStyle": { - "mode": "off" + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" } }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "pluginVersion": "10.3.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 27 - }, - "id": 10, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n process_virtual_memory_bytes{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Virtual Memory bytes", + "type": "timeseries" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.3.7", - "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "exemplar": false, - "expr": "label_replace(\r\n process_virtual_memory_bytes{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "{{job}}: {{instance}}", - "range": true, - "refId": "A" - } - ], - "title": "Virtual Memory bytes", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "This metric represents the number of process resident memory bytes.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" + "description": "This metric represents the number of process resident memory bytes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": 
false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 38 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "thresholdsStyle": { - "mode": "off" + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" } }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "pluginVersion": "10.3.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 38 - }, - "id": 11, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n process_resident_memory_bytes{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Resident Memory bytes", + "type": "timeseries" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.3.7", - "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "exemplar": false, - "expr": "label_replace(\r\n process_resident_memory_bytes{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "{{job}}: {{instance}}", - "range": true, - "refId": "A" - } - ], - "title": "Resident Memory bytes", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "This metric represents the number of process resident memory bytes.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" + "description": "This metric represents the number of process resident memory bytes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + 
"gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 38 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "thresholdsStyle": { - "mode": "off" + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" } }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "pluginVersion": "10.3.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 38 - }, - "id": 12, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.3.7", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "label_replace(\r\n process_resident_memory_bytes{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "{{job}}: {{instance}}", - "range": true, - "refId": "A" + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n process_resident_memory_bytes{job!~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Resident Memory bytes", + "type": "timeseries" } ], - "title": "Resident Memory bytes", - "type": "timeseries" + "title": "Process", + "type": "row" }, { "collapsed": true, @@ -4227,7 +4357,7 @@ "h": 1, "w": 24, "x": 0, - "y": 49 + "y": 58 }, "id": 3, "panels": [ @@ -5441,6 +5571,6 @@ "timezone": "", "title": "Demo", "uid": "a506d250-b606-4702-86a7-ac6aa1d069a1", - "version": 23, + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/config/kafka.yml b/config/kafka.yml index d412e1be06..fd06ae2bb4 100644 --- a/config/kafka.yml +++ b/config/kafka.yml @@ -3,34 +3,38 @@ username: '' # Password for authentication password: '' # Producer acknowledgment settings -producerAck: "" +producerAck: # Compression type to use (e.g., none, gzip, snappy) -compressType: "none" +compressType: none # List of Kafka broker addresses address: [ localhost:19094 ] # Kafka topic for Redis integration -toRedisTopic: "toRedis" +toRedisTopic: toRedis # Kafka topic for MongoDB integration -toMongoTopic: "toMongo" +toMongoTopic: toMongo # Kafka topic for 
push notifications -toPushTopic: "toPush" +toPushTopic: toPush +# Kafka topic for offline push notifications +toOfflinePushTopic: toOfflinePush # Consumer group ID for Redis topic toRedisGroupID: redis # Consumer group ID for MongoDB topic toMongoGroupID: mongo # Consumer group ID for push notifications topic toPushGroupID: push +# Consumer group ID for offline push notifications topic +toOfflinePushGroupID: offlinePush # TLS (Transport Layer Security) configuration tls: # Enable or disable TLS enableTLS: false # CA certificate file path - caCrt: "" + caCrt: # Client certificate file path - clientCrt: "" + clientCrt: # Client key file path - clientKey: "" + clientKey: # Client key password - clientKeyPwd: "" + clientKeyPwd: # Whether to skip TLS verification (not recommended for production) insecureSkipVerify: false diff --git a/config/minio.yml b/config/minio.yml index 11a9ace354..ad1a32a8c2 100644 --- a/config/minio.yml +++ b/config/minio.yml @@ -1,15 +1,15 @@ # Name of the bucket in MinIO -bucket: "openim" +bucket: openim # Access key ID for MinIO authentication -accessKeyID: "root" +accessKeyID: root # Secret access key for MinIO authentication -secretAccessKey: "openIM123" +secretAccessKey: openIM123 # Session token for MinIO authentication (optional) -sessionToken: '' +sessionToken: # Internal address of the MinIO server -internalAddress: "localhost:10005" +internalAddress: localhost:10005 # External address of the MinIO server, accessible from outside. Supports both HTTP and HTTPS using a domain name -externalAddress: "http://external_ip:10005" +externalAddress: http://external_ip:10005 # Flag to enable or disable public read access to the bucket publicRead: false diff --git a/config/mongodb.yml b/config/mongodb.yml index 98f5694e45..78f85992c9 100644 --- a/config/mongodb.yml +++ b/config/mongodb.yml @@ -1,5 +1,5 @@ # URI for database connection, leave empty if using address and credential settings directly -uri: '' +uri: # List of MongoDB server addresses address: [ localhost:37017 ] # Name of the database diff --git a/config/notification.yml b/config/notification.yml index 278376c244..85ca91af18 100644 --- a/config/notification.yml +++ b/config/notification.yml @@ -28,11 +28,11 @@ groupCreated: # Enables or disables offline push notifications. enable: false # Title for the notification when a group is created. - title: "create group title" + title: create group title # Description for the notification. - desc: "create group desc" + desc: create group desc # Additional information for the notification. 
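Note: a recurring change across these config files is dropping the quotes around string values. Plain YAML scalars, including ones containing spaces, still decode as strings, so the parsed configuration is unchanged. A small sketch using gopkg.in/yaml.v3 (already a direct dependency in go.mod); the offlinePush struct here is an illustrative stand-in, not the project's actual type:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// offlinePush is a hypothetical struct mirroring the offlinePush blocks in notification.yml.
type offlinePush struct {
	Enable bool   `yaml:"enable"`
	Title  string `yaml:"title"`
	Desc   string `yaml:"desc"`
	Ext    string `yaml:"ext"`
}

func main() {
	// Unquoted plain scalars such as `create group title` decode as strings,
	// so removing the quotes does not change the parsed values.
	data := []byte("enable: false\ntitle: create group title\ndesc: create group desc\next: create group ext\n")
	var p offlinePush
	if err := yaml.Unmarshal(data, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Enable:false Title:create group title Desc:create group desc Ext:create group ext}
}
```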
- ext: "create group ext" + ext: create group ext groupInfoSet: isSendMsg: false @@ -40,9 +40,9 @@ groupInfoSet: unreadCount: false offlinePush: enable: false - title: "groupInfoSet title" - desc: "groupInfoSet desc" - ext: "groupInfoSet ext" + title: groupInfoSet title + desc: groupInfoSet desc + ext: groupInfoSet ext joinGroupApplication: @@ -51,9 +51,9 @@ joinGroupApplication: unreadCount: false offlinePush: enable: false - title: "joinGroupApplication title" - desc: "joinGroupApplication desc" - ext: "joinGroupApplication ext" + title: joinGroupApplication title + desc: joinGroupApplication desc + ext: joinGroupApplication ext memberQuit: isSendMsg: true @@ -61,9 +61,9 @@ memberQuit: unreadCount: false offlinePush: enable: false - title: "memberQuit title" - desc: "memberQuit desc" - ext: "memberQuit ext" + title: memberQuit title + desc: memberQuit desc + ext: memberQuit ext groupApplicationAccepted: isSendMsg: false @@ -71,9 +71,9 @@ groupApplicationAccepted: unreadCount: false offlinePush: enable: false - title: "groupApplicationAccepted title" - desc: "groupApplicationAccepted desc" - ext: "groupApplicationAccepted ext" + title: groupApplicationAccepted title + desc: groupApplicationAccepted desc + ext: groupApplicationAccepted ext groupApplicationRejected: isSendMsg: false @@ -81,9 +81,9 @@ groupApplicationRejected: unreadCount: false offlinePush: enable: false - title: "groupApplicationRejected title" - desc: "groupApplicationRejected desc" - ext: "groupApplicationRejected ext" + title: groupApplicationRejected title + desc: groupApplicationRejected desc + ext: groupApplicationRejected ext groupOwnerTransferred: @@ -92,9 +92,9 @@ groupOwnerTransferred: unreadCount: false offlinePush: enable: false - title: "groupOwnerTransferred title" - desc: "groupOwnerTransferred desc" - ext: "groupOwnerTransferred ext" + title: groupOwnerTransferred title + desc: groupOwnerTransferred desc + ext: groupOwnerTransferred ext memberKicked: isSendMsg: true @@ -102,9 +102,9 @@ memberKicked: unreadCount: false offlinePush: enable: false - title: "memberKicked title" - desc: "memberKicked desc" - ext: "memberKicked ext" + title: memberKicked title + desc: memberKicked desc + ext: memberKicked ext memberInvited: isSendMsg: true @@ -112,9 +112,9 @@ memberInvited: unreadCount: false offlinePush: enable: false - title: "memberInvited title" - desc: "memberInvited desc" - ext: "memberInvited ext" + title: memberInvited title + desc: memberInvited desc + ext: memberInvited ext memberEnter: isSendMsg: true @@ -122,9 +122,9 @@ memberEnter: unreadCount: false offlinePush: enable: false - title: "memberEnter title" - desc: "memberEnter desc" - ext: "memberEnter ext" + title: memberEnter title + desc: memberEnter desc + ext: memberEnter ext groupDismissed: isSendMsg: true @@ -132,9 +132,9 @@ groupDismissed: unreadCount: false offlinePush: enable: false - title: "groupDismissed title" - desc: "groupDismissed desc" - ext: "groupDismissed ext" + title: groupDismissed title + desc: groupDismissed desc + ext: groupDismissed ext groupMuted: isSendMsg: true @@ -142,9 +142,9 @@ groupMuted: unreadCount: false offlinePush: enable: false - title: "groupMuted title" - desc: "groupMuted desc" - ext: "groupMuted ext" + title: groupMuted title + desc: groupMuted desc + ext: groupMuted ext groupCancelMuted: isSendMsg: true @@ -152,11 +152,11 @@ groupCancelMuted: unreadCount: false offlinePush: enable: false - title: "groupCancelMuted title" - desc: "groupCancelMuted desc" - ext: "groupCancelMuted ext" + title: groupCancelMuted 
title + desc: groupCancelMuted desc + ext: groupCancelMuted ext defaultTips: - tips: "group Cancel Muted" + tips: group Cancel Muted groupMemberMuted: @@ -165,9 +165,9 @@ groupMemberMuted: unreadCount: false offlinePush: enable: false - title: "groupMemberMuted title" - desc: "groupMemberMuted desc" - ext: "groupMemberMuted ext" + title: groupMemberMuted title + desc: groupMemberMuted desc + ext: groupMemberMuted ext groupMemberCancelMuted: isSendMsg: true @@ -175,9 +175,9 @@ groupMemberCancelMuted: unreadCount: false offlinePush: enable: false - title: "groupMemberCancelMuted title" - desc: "groupMemberCancelMuted desc" - ext: "groupMemberCancelMuted ext" + title: groupMemberCancelMuted title + desc: groupMemberCancelMuted desc + ext: groupMemberCancelMuted ext groupMemberInfoSet: isSendMsg: false @@ -185,9 +185,9 @@ groupMemberInfoSet: unreadCount: false offlinePush: enable: false - title: "groupMemberInfoSet title" - desc: "groupMemberInfoSet desc" - ext: "groupMemberInfoSet ext" + title: groupMemberInfoSet title + desc: groupMemberInfoSet desc + ext: groupMemberInfoSet ext groupInfoSetAnnouncement: isSendMsg: true @@ -195,9 +195,9 @@ groupInfoSetAnnouncement: unreadCount: false offlinePush: enable: false - title: "groupInfoSetAnnouncement title" - desc: "groupInfoSetAnnouncement desc" - ext: "groupInfoSetAnnouncement ext" + title: groupInfoSetAnnouncement title + desc: groupInfoSetAnnouncement desc + ext: groupInfoSetAnnouncement ext groupInfoSetName: @@ -206,9 +206,9 @@ groupInfoSetName: unreadCount: false offlinePush: enable: false - title: "groupInfoSetName title" - desc: "groupInfoSetName desc" - ext: "groupInfoSetName ext" + title: groupInfoSetName title + desc: groupInfoSetName desc + ext: groupInfoSetName ext #############################friend################################# @@ -218,9 +218,9 @@ friendApplicationAdded: unreadCount: false offlinePush: enable: false - title: "Somebody applies to add you as a friend" - desc: "Somebody applies to add you as a friend" - ext: "Somebody applies to add you as a friend" + title: Somebody applies to add you as a friend + desc: Somebody applies to add you as a friend + ext: Somebody applies to add you as a friend friendApplicationApproved: isSendMsg: true @@ -228,9 +228,9 @@ friendApplicationApproved: unreadCount: false offlinePush: enable: true - title: "Someone applies to add your friend application" - desc: "Someone applies to add your friend application" - ext: "Someone applies to add your friend application" + title: Someone applies to add your friend application + desc: Someone applies to add your friend application + ext: Someone applies to add your friend application friendApplicationRejected: isSendMsg: false @@ -238,9 +238,9 @@ friendApplicationRejected: unreadCount: false offlinePush: enable: true - title: "Someone rejected your friend application" - desc: "Someone rejected your friend application" - ext: "Someone rejected your friend application" + title: Someone rejected your friend application + desc: Someone rejected your friend application + ext: Someone rejected your friend application friendAdded: isSendMsg: false @@ -248,9 +248,9 @@ friendAdded: unreadCount: false offlinePush: enable: true - title: "We have become friends" - desc: "We have become friends" - ext: "We have become friends" + title: We have become friends + desc: We have become friends + ext: We have become friends friendDeleted: isSendMsg: false @@ -258,9 +258,9 @@ friendDeleted: unreadCount: false offlinePush: enable: true - title: "deleted a friend" - 
desc: "deleted a friend" - ext: "deleted a friend" + title: deleted a friend + desc: deleted a friend + ext: deleted a friend friendRemarkSet: isSendMsg: false @@ -268,9 +268,9 @@ friendRemarkSet: unreadCount: false offlinePush: enable: true - title: "Your friend's profile has been changed" - desc: "Your friend's profile has been changed" - ext: "Your friend's profile has been changed" + title: Your friend's profile has been changed + desc: Your friend's profile has been changed + ext: Your friend's profile has been changed blackAdded: isSendMsg: false @@ -278,9 +278,9 @@ blackAdded: unreadCount: false offlinePush: enable: true - title: "blocked a user" - desc: "blocked a user" - ext: "blocked a user" + title: blocked a user + desc: blocked a user + ext: blocked a user blackDeleted: isSendMsg: false @@ -288,9 +288,9 @@ blackDeleted: unreadCount: false offlinePush: enable: true - title: "Remove a blocked user" - desc: "Remove a blocked user" - ext: "Remove a blocked user" + title: Remove a blocked user + desc: Remove a blocked user + ext: Remove a blocked user friendInfoUpdated: isSendMsg: false @@ -298,9 +298,9 @@ friendInfoUpdated: unreadCount: false offlinePush: enable: true - title: "friend info updated" - desc: "friend info updated" - ext: "friend info updated" + title: friend info updated + desc: friend info updated + ext: friend info updated #####################user######################### userInfoUpdated: @@ -309,9 +309,9 @@ userInfoUpdated: unreadCount: false offlinePush: enable: true - title: "Remove a blocked user" - desc: "Remove a blocked user" - ext: "Remove a blocked user" + title: Remove a blocked user + desc: Remove a blocked user + ext: Remove a blocked user userStatusChanged: isSendMsg: false @@ -319,9 +319,9 @@ userStatusChanged: unreadCount: false offlinePush: enable: false - title: "user status changed" - desc: "user status changed" - ext: "user status changed" + title: user status changed + desc: user status changed + ext: user status changed #####################conversation######################### conversationChanged: @@ -330,9 +330,9 @@ conversationChanged: unreadCount: false offlinePush: enable: true - title: "conversation changed" - desc: "conversation changed" - ext: "conversation changed" + title: conversation changed + desc: conversation changed + ext: conversation changed conversationSetPrivate: isSendMsg: true @@ -340,6 +340,6 @@ conversationSetPrivate: unreadCount: false offlinePush: enable: true - title: "burn after reading" - desc: "burn after reading" - ext: "burn after reading" + title: burn after reading + desc: burn after reading + ext: burn after reading diff --git a/config/openim-api.yml b/config/openim-api.yml index 78a688fcd6..4c38e1005b 100644 --- a/config/openim-api.yml +++ b/config/openim-api.yml @@ -3,11 +3,14 @@ api: listenIP: 0.0.0.0 # Listening ports; if multiple are configured, multiple instances will be launched, must be consistent with the number of prometheus.ports ports: [ 10002 ] + # API compression level; 0: default compression, 1: best compression, 2: best speed, -1: no compression + compressionLevel: 0 + prometheus: # Whether to enable prometheus enable: true # Prometheus listening ports, must match the number of api.ports - ports: [ 20113 ] + ports: [ 12002 ] # This address can be accessed via a browser grafanaURL: http://127.0.0.1:13000/ diff --git a/config/openim-crontask.yml b/config/openim-crontask.yml index 3839104a44..c05bd2485f 100644 --- a/config/openim-crontask.yml +++ b/config/openim-crontask.yml @@ -1,3 +1,3 @@ 
-cronExecuteTime: "0 2 * * *" +cronExecuteTime: 0 2 * * * retainChatRecords: 365 fileExpireTime: 90 diff --git a/config/openim-msggateway.yml b/config/openim-msggateway.yml index 0c92d83278..428f3ba476 100644 --- a/config/openim-msggateway.yml +++ b/config/openim-msggateway.yml @@ -1,14 +1,14 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10140 ] + ports: [ 10140, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 10148, 10149, 10150, 10151, 10152, 10153, 10154, 10155 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20112 ] + ports: [ 12140, 12141, 12142, 12143, 12144, 12145, 12146, 12147, 12148, 12149, 12150, 12151, 12152, 12153, 12154, 12155 ] # IP address that the RPC/WebSocket service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 @@ -25,6 +25,3 @@ longConnSvr: # 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time multiLoginPolicy: 1 - - - diff --git a/config/openim-msgtransfer.yml b/config/openim-msgtransfer.yml index 07a7dc1ab1..753ac10bc0 100644 --- a/config/openim-msgtransfer.yml +++ b/config/openim-msgtransfer.yml @@ -3,4 +3,4 @@ prometheus: enable: true # List of ports that Prometheus listens on; each port corresponds to an instance of monitoring. Ensure these are managed accordingly # Because four instances have been launched, four ports need to be specified - ports: [ 20108, 20109, 20110, 20111 ] + ports: [ 12020, 12021, 12022, 12023, 12024, 12025, 12026, 12027 ] diff --git a/config/openim-push.yml b/config/openim-push.yml index 9384008a04..6df2a62b7f 100644 --- a/config/openim-push.yml +++ b/config/openim-push.yml @@ -1,46 +1,41 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10170 ] + ports: [ 10170, 10171, 10172, 10173, 10174, 10175, 10176, 10177, 10178, 10179, 10180, 10181, 10182, 10183, 10184, 10185 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20107 ] + ports: [ 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12181, 12182, 12183, 12184, 12185 ] maxConcurrentWorkers: 3 -#"Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified." -enable: "geTui" +#Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified. 
+enable: geTui geTui: - pushUrl: "https://restapi.getui.com/v2/$appId" - masterSecret: '' - appKey: '' - intent: '' - channelID: '' - channelName: '' + pushUrl: https://restapi.getui.com/v2/$appId + masterSecret: + appKey: + intent: + channelID: + channelName: fcm: # Prioritize using file paths. If the file path is empty, use URL - filePath: "" # File path is concatenated with the parameters passed in through - c(`mage` default pass in `config/`) and filePath. - authURL: "" # Must start with https or http. + filePath: # File path is concatenated with the parameters passed in through - c(`mage` default pass in `config/`) and filePath. + authURL: # Must start with https or http. jpns: - appKey: '' - masterSecret: '' - pushURL: '' - pushIntent: '' + appKey: + masterSecret: + pushURL: + pushIntent: # iOS system push sound and badge count iosPush: - pushSound: "xxx" + pushSound: xxx badgeCount: true production: false - - - - - - +fullUserCache: true diff --git a/config/openim-rpc-auth.yml b/config/openim-rpc-auth.yml index 2d861cd5ab..496803e43b 100644 --- a/config/openim-rpc-auth.yml +++ b/config/openim-rpc-auth.yml @@ -1,18 +1,17 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10160 ] + ports: [ 10200 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20106 ] + ports: [ 12200 ] tokenPolicy: # Token validity period, in days expire: 90 - diff --git a/config/openim-rpc-conversation.yml b/config/openim-rpc-conversation.yml index a094bfac10..3581d7e19e 100644 --- a/config/openim-rpc-conversation.yml +++ b/config/openim-rpc-conversation.yml @@ -1,13 +1,13 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10180 ] + ports: [ 10220 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20105 ] + ports: [ 12220 ] diff --git a/config/openim-rpc-friend.yml b/config/openim-rpc-friend.yml index 7b829f971c..3022c09f32 100644 --- a/config/openim-rpc-friend.yml +++ b/config/openim-rpc-friend.yml @@ -1,13 +1,13 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. 
If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10120 ] + ports: [ 10240 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20104 ] + ports: [ 12240 ] diff --git a/config/openim-rpc-group.yml b/config/openim-rpc-group.yml index 78b44030e0..9a634d12ff 100644 --- a/config/openim-rpc-group.yml +++ b/config/openim-rpc-group.yml @@ -1,13 +1,16 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10150 ] + ports: [ 10260 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20103 ] + ports: [ 12260 ] + + +enableHistoryForNewMembers: true diff --git a/config/openim-rpc-msg.yml b/config/openim-rpc-msg.yml index 17ce26e9b2..82d6e2f539 100644 --- a/config/openim-rpc-msg.yml +++ b/config/openim-rpc-msg.yml @@ -1,20 +1,17 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10130 ] + ports: [ 10280 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20102 ] + ports: [ 12280 ] # Does sending messages require friend verification friendVerify: false - - - diff --git a/config/openim-rpc-third.yml b/config/openim-rpc-third.yml index 6fb60f47f8..d8f2d427f2 100644 --- a/config/openim-rpc-third.yml +++ b/config/openim-rpc-third.yml @@ -1,40 +1,40 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. 
These must match the number of configured prometheus ports - ports: [ 10190 ] + ports: [ 10300 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20101 ] + ports: [ 12300 ] object: # Use MinIO as object storage, or set to "cos", "oss", "kodo", "aws", while also configuring the corresponding settings - enable: "minio" + enable: minio cos: bucketURL: https://temp-1252357374.cos.ap-chengdu.myqcloud.com - secretID: '' - secretKey: '' - sessionToken: '' + secretID: + secretKey: + sessionToken: publicRead: false oss: - endpoint: "https://oss-cn-chengdu.aliyuncs.com" - bucket: "demo-9999999" - bucketURL: "https://demo-9999999.oss-cn-chengdu.aliyuncs.com" - accessKeyID: '' - accessKeySecret: '' - sessionToken: '' + endpoint: https://oss-cn-chengdu.aliyuncs.com + bucket: demo-9999999 + bucketURL: https://demo-9999999.oss-cn-chengdu.aliyuncs.com + accessKeyID: + accessKeySecret: + sessionToken: publicRead: false kodo: - endpoint: "http://s3.cn-south-1.qiniucs.com" - bucket: "kodo-bucket-test" - bucketURL: "http://kodo-bucket-test-oetobfb.qiniudns.com" - accessKeyID: '' - accessKeySecret: '' - sessionToken: '' - publicRead: false \ No newline at end of file + endpoint: http://s3.cn-south-1.qiniucs.com + bucket: kodo-bucket-test + bucketURL: http://kodo-bucket-test-oetobfb.qiniudns.com + accessKeyID: + accessKeySecret: + sessionToken: + publicRead: false diff --git a/config/openim-rpc-user.yml b/config/openim-rpc-user.yml index cbfb55b6c7..798105472c 100644 --- a/config/openim-rpc-user.yml +++ b/config/openim-rpc-user.yml @@ -1,17 +1,13 @@ rpc: # API or other RPCs can access this RPC through this IP; if left blank, the internal network IP is obtained by default - registerIP: '' + registerIP: # Listening IP; 0.0.0.0 means both internal and external IPs are listened to, if blank, the internal network IP is automatically obtained by default listenIP: 0.0.0.0 # Listening ports; if multiple are configured, multiple instances will be launched, and must be consistent with the number of prometheus.ports - ports: [ 10110 ] + ports: [ 10320 ] prometheus: # Whether to enable prometheus enable: true # Prometheus listening ports, must be consistent with the number of rpc.ports - ports: [ 20100 ] - - - - + ports: [ 12320 ] diff --git a/config/prometheus.yml b/config/prometheus.yml index 5db41679f4..4f0f7e32c0 100644 --- a/config/prometheus.yml +++ b/config/prometheus.yml @@ -8,76 +8,79 @@ global: alerting: alertmanagers: - static_configs: - - targets: ['internal_ip:19093'] + - targets: [internal_ip:19093] -# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +# Load rules once and periodically evaluate them according to the global evaluation_interval. rule_files: - - "instance-down-rules.yml" -# - "first_rules.yml" -# - "second_rules.yml" + - instance-down-rules.yml +# - first_rules.yml +# - second_rules.yml # A scrape configuration containing exactly one endpoint to scrape: # Here it's Prometheus itself. scrape_configs: - # The job name is added as a label "job='job_name'"" to any timeseries scraped from this config. + # The job name is added as a label "job=job_name" to any timeseries scraped from this config. 
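Note: each target in this scrape configuration is simply the /metrics endpoint a service exposes on its prometheus port. A minimal sketch with promhttp from client_golang (already in go.mod); the :12002 port is the openim-api value configured earlier in this diff, and the handler wiring itself is an assumption:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Serve the default registry on the port that prometheus.yml scrapes
	// for openim-api (12002 in this configuration).
	http.Handle("/metrics", promhttp.Handler())
	if err := http.ListenAndServe(":12002", nil); err != nil {
		panic(err)
	}
}
```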
# Monitored information captured by prometheus # prometheus fetches application services - - job_name: 'node_exporter' + - job_name: node_exporter static_configs: - - targets: [ 'internal_ip:20114' ] - - job_name: 'openimserver-openim-api' + - targets: [ internal_ip:20500 ] + - job_name: openimserver-openim-api static_configs: - - targets: [ 'internal_ip:20113' ] + - targets: [ internal_ip:12002 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-msggateway' + namespace: default + - job_name: openimserver-openim-msggateway static_configs: - - targets: [ 'internal_ip:20112' ] + - targets: [ internal_ip:12140 ] +# - targets: [ internal_ip:12140, internal_ip:12141, internal_ip:12142, internal_ip:12143, internal_ip:12144, internal_ip:12145, internal_ip:12146, internal_ip:12147, internal_ip:12148, internal_ip:12149, internal_ip:12150, internal_ip:12151, internal_ip:12152, internal_ip:12153, internal_ip:12154, internal_ip:12155 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-msgtransfer' + namespace: default + - job_name: openimserver-openim-msgtransfer static_configs: - - targets: [ 'internal_ip:20111', 'internal_ip:20110', 'internal_ip:20109', 'internal_ip:20108' ] + - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027 ] +# - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027, internal_ip:12028, internal_ip:12029, internal_ip:12030, internal_ip:12031, internal_ip:12032, internal_ip:12033, internal_ip:12034, internal_ip:12035 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-push' + namespace: default + - job_name: openimserver-openim-push static_configs: - - targets: [ 'internal_ip:20107' ] + - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177 ] +# - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177, internal_ip:12178, internal_ip:12179, internal_ip:12180, internal_ip:12181, internal_ip:12182, internal_ip:12183, internal_ip:12184, internal_ip:12185 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-auth' + namespace: default + - job_name: openimserver-openim-rpc-auth static_configs: - - targets: [ 'internal_ip:20106' ] + - targets: [ internal_ip:12200 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-conversation' + namespace: default + - job_name: openimserver-openim-rpc-conversation static_configs: - - targets: [ 'internal_ip:20105' ] + - targets: [ internal_ip:12220 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-friend' + namespace: default + - job_name: openimserver-openim-rpc-friend static_configs: - - targets: [ 'internal_ip:20104' ] + - targets: [ internal_ip:12240 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-group' + namespace: default + - job_name: openimserver-openim-rpc-group static_configs: - - targets: [ 'internal_ip:20103' ] + - targets: [ internal_ip:12260 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-msg' + namespace: default + - job_name: openimserver-openim-rpc-msg static_configs: - - targets: [ 'internal_ip:20102' ] + - targets: [ internal_ip:12280 ] labels: - 
namespace: 'default' - - job_name: 'openimserver-openim-rpc-third' + namespace: default + - job_name: openimserver-openim-rpc-third static_configs: - - targets: [ 'internal_ip:20101' ] + - targets: [ internal_ip:12300 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-user' + namespace: default + - job_name: openimserver-openim-rpc-user static_configs: - - targets: [ 'internal_ip:20100' ] + - targets: [ internal_ip:12320 ] labels: - namespace: 'default' \ No newline at end of file + namespace: default \ No newline at end of file diff --git a/config/redis.yml b/config/redis.yml index 87abed0e1c..2448bcb5c6 100644 --- a/config/redis.yml +++ b/config/redis.yml @@ -1,6 +1,7 @@ address: [ localhost:16379 ] -username: '' +username: password: openIM123 clusterMode: false db: 0 maxRetry: 10 +poolSize: 100 diff --git a/config/share.yml b/config/share.yml index fc97b6a1ff..4c5892615b 100644 --- a/config/share.yml +++ b/config/share.yml @@ -10,5 +10,5 @@ rpcRegisterName: conversation: conversation third: third -imAdminUserID: [ "imAdmin" ] +imAdminUserID: [ imAdmin ] diff --git a/config/webhooks.yml b/config/webhooks.yml index 11a85ba0c4..24fb2413a0 100644 --- a/config/webhooks.yml +++ b/config/webhooks.yml @@ -1,4 +1,4 @@ -url: "webhook://127.0.0.1:10008/callbackExample" +url: webhook://127.0.0.1:10008/callbackExample beforeSendSingleMsg: enable: false timeout: 5 @@ -130,6 +130,13 @@ beforeSetGroupInfo: enable: false timeout: 5 failedContinue: true +afterSetGroupInfoEX: + enable: false + timeout: 5 +beforeSetGroupInfoEX: + enable: false + timeout: 5 + failedContinue: true afterRevokeMsg: enable: false timeout: 5 diff --git a/docker-compose.yml b/docker-compose.yml index 8cc1f24b2f..512f951dbd 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -140,50 +140,49 @@ services: networks: - openim - prometheus: - image: ${PROMETHEUS_IMAGE} - container_name: prometheus - restart: always - volumes: - - ./config/prometheus.yml:/etc/prometheus/prometheus.yml - - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml - - ${DATA_DIR}/components/prometheus/data:/prometheus - command: - - '--config.file=/etc/prometheus/prometheus.yml' - - '--storage.tsdb.path=/prometheus' - ports: - - "19091:9090" - networks: - - openim - - alertmanager: - image: ${ALERTMANAGER_IMAGE} - container_name: alertmanager - restart: always - volumes: - - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml - - ./config/email.tmpl:/etc/alertmanager/email.tmpl - ports: - - "19093:9093" - networks: - - openim - - grafana: - image: ${GRAFANA_IMAGE} - container_name: grafana - user: root - restart: always - environment: - - GF_SECURITY_ALLOW_EMBEDDING=true - - GF_SESSION_COOKIE_SAMESITE=none - - GF_SESSION_COOKIE_SECURE=true - - GF_AUTH_ANONYMOUS_ENABLED=true - - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - ports: - - "13000:3000" - volumes: - - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana - networks: - - openim - +# prometheus: +# image: ${PROMETHEUS_IMAGE} +# container_name: prometheus +# restart: always +# volumes: +# - ./config/prometheus.yml:/etc/prometheus/prometheus.yml +# - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml +# - ${DATA_DIR}/components/prometheus/data:/prometheus +# command: +# - '--config.file=/etc/prometheus/prometheus.yml' +# - '--storage.tsdb.path=/prometheus' +# ports: +# - "19091:9090" +# networks: +# - openim +# +# alertmanager: +# image: ${ALERTMANAGER_IMAGE} +# container_name: alertmanager +# restart: always +# volumes: +# - 
./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml +# - ./config/email.tmpl:/etc/alertmanager/email.tmpl +# ports: +# - "19093:9093" +# networks: +# - openim +# +# grafana: +# image: ${GRAFANA_IMAGE} +# container_name: grafana +# user: root +# restart: always +# environment: +# - GF_SECURITY_ALLOW_EMBEDDING=true +# - GF_SESSION_COOKIE_SAMESITE=none +# - GF_SESSION_COOKIE_SECURE=true +# - GF_AUTH_ANONYMOUS_ENABLED=true +# - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin +# ports: +# - "13000:3000" +# volumes: +# - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana +# networks: +# - openim diff --git a/go.mod b/go.mod index fba1499fe1..7a11803020 100644 --- a/go.mod +++ b/go.mod @@ -6,21 +6,21 @@ require ( firebase.google.com/go v3.13.0+incompatible github.com/dtm-labs/rockscache v0.1.1 github.com/gin-gonic/gin v1.9.1 - github.com/go-playground/validator/v10 v10.18.0 + github.com/go-playground/validator/v10 v10.20.0 github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 github.com/gorilla/websocket v1.5.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/openimsdk/protocol v0.0.69 - github.com/openimsdk/tools v0.0.49-alpha.55 + github.com/openimsdk/protocol v0.0.72-alpha.18 + github.com/openimsdk/tools v0.0.50-alpha.12 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.18.0 github.com/stretchr/testify v1.9.0 go.mongodb.org/mongo-driver v1.14.0 google.golang.org/api v0.165.0 - google.golang.org/grpc v1.62.1 - google.golang.org/protobuf v1.33.0 + google.golang.org/grpc v1.66.2 + google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v3 v3.0.1 ) @@ -29,6 +29,7 @@ require github.com/google/uuid v1.6.0 require ( github.com/IBM/sarama v1.43.0 github.com/fatih/color v1.14.1 + github.com/gin-contrib/gzip v1.0.1 github.com/go-redis/redis v6.15.9+incompatible github.com/go-redis/redismock/v9 v9.2.0 github.com/hashicorp/golang-lru/v2 v2.0.7 @@ -41,13 +42,13 @@ require ( github.com/spf13/viper v1.18.2 github.com/stathat/consistent v1.0.0 go.uber.org/automaxprocs v1.5.3 - golang.org/x/sync v0.6.0 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 + golang.org/x/sync v0.8.0 ) require ( cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/firestore v1.14.0 // indirect cloud.google.com/go/iam v1.1.5 // indirect cloud.google.com/go/longrunning v0.5.4 // indirect @@ -72,11 +73,12 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 // indirect github.com/aws/smithy-go v1.17.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bytedance/sonic v1.9.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chai2010/webp v1.1.1 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/bytedance/sonic v1.11.6 // indirect + github.com/bytedance/sonic/loader v0.1.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/clbanning/mxj v1.8.4 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -117,7 +119,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kelindar/simd v1.1.2 // 
indirect github.com/klauspost/compress v1.17.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lestrrat-go/strftime v1.0.6 // indirect github.com/lithammer/shortuuid v3.0.0+incompatible // indirect @@ -132,7 +134,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect github.com/mozillazg/go-httpheader v0.4.0 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.5.0 // indirect @@ -169,18 +171,17 @@ require ( go.opentelemetry.io/otel/trace v1.23.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.3.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/arch v0.7.0 // indirect golang.org/x/image v0.15.0 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gorm.io/gorm v1.25.8 // indirect stathat.com/c/consistent v1.0.0 // indirect ) @@ -188,10 +189,10 @@ require ( require ( github.com/go-playground/locales v0.14.1 // indirect github.com/goccy/go-json v0.10.2 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/spf13/cobra v1.8.0 - github.com/ugorji/go/codec v1.2.11 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.27.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect ) diff --git a/go.sum b/go.sum index 1a8e1d76d8..dd4a05ca1c 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/firestore 
v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw= cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= @@ -65,23 +63,23 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= -github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= +github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= +github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/webp v1.1.1 h1:jTRmEccAJ4MGrhFOrPMpNGIJ/eybIgwKpcACsrTEapk= -github.com/chai2010/webp v1.1.1/go.mod h1:0XVwvZWdjjdxpUEIf7b9g9VkHFnInUSYujwqTLEuldU= -github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= @@ -123,6 +121,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/gin-contrib/gzip v1.0.1 h1:HQ8ENHODeLY7a4g1Au/46Z92bdGFl74OhxcZble9WJE= +github.com/gin-contrib/gzip v1.0.1/go.mod h1:njt428fdUNRvjuJf16tZMYZ2Yl+WQB53X5wmhDwXvC4= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= @@ -146,8 +146,8 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk= -github.com/go-playground/validator/v10 v10.18.0 h1:BvolUXjp4zuvkZ5YN5t7ebzbhlUtPsPm2S9NAZ5nl9U= -github.com/go-playground/validator/v10 v10.18.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= +github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redismock/v9 v9.2.0 h1:ZrMYQeKPECZPjOj5u9eyOjg8Nnb0BS9lkVIZ6IpsKLw= @@ -259,8 +259,9 @@ github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLA github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -290,8 +291,8 @@ github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3v github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty 
v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= @@ -321,12 +322,12 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y= github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= -github.com/openimsdk/protocol v0.0.69 h1:dVi8meSg8kmUzSH1XQab4MjihqKkkcCAmt1BYXPJuXo= -github.com/openimsdk/protocol v0.0.69/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8= -github.com/openimsdk/tools v0.0.49-alpha.55 h1:KPgC53oqiwZYssLKljhtXbWXifMlTj2SSQEusj4Uf4k= -github.com/openimsdk/tools v0.0.49-alpha.55/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/openimsdk/protocol v0.0.72-alpha.18 h1:EytTtgZuXMG1cgTlJryqXXSO1J3t3wrLIn3Os2PRBEE= +github.com/openimsdk/protocol v0.0.72-alpha.18/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8= +github.com/openimsdk/tools v0.0.50-alpha.12 h1:rV3BxgqN+F79vZvdoQ+97Eob8ScsRVEM8D+Wrcl23uo= +github.com/openimsdk/tools v0.0.50-alpha.12/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -410,8 +411,8 @@ github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= @@ -458,8 +459,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod 
h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -467,8 +468,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= @@ -495,19 +496,19 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -522,8 +523,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -536,8 +537,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -566,17 +567,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo= google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 
h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= +google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -588,8 +589,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -610,6 +611,7 @@ gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo= gorm.io/gorm v1.25.8/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= stathat.com/c/consistent v1.0.0 h1:ezyc51EGcRPJUxfHGSgJjWzJdj3NiMU9pNfLNGiXV0c= stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0= diff --git a/internal/api/group.go b/internal/api/group.go index bff0089748..14f50cacdd 100644 --- a/internal/api/group.go +++ b/internal/api/group.go @@ -35,6 +35,10 @@ func (o *GroupApi) SetGroupInfo(c *gin.Context) { 
a2r.Call(group.GroupClient.SetGroupInfo, o.Client, c) } +func (o *GroupApi) SetGroupInfoEX(c *gin.Context) { + a2r.Call(group.GroupClient.SetGroupInfoEX, o.Client, c) +} + func (o *GroupApi) JoinGroup(c *gin.Context) { a2r.Call(group.GroupClient.JoinGroup, o.Client, c) } diff --git a/internal/api/msg.go b/internal/api/msg.go index ba63fbb66f..bf7cb83a43 100644 --- a/internal/api/msg.go +++ b/internal/api/msg.go @@ -49,14 +49,14 @@ func NewMessageApi(msgRpcClient *rpcclient.Message, userRpcClient *rpcclient.Use userRpcClient: rpcclient.NewUserRpcClientByUser(userRpcClient), imAdminUserID: imAdminUserID} } -func (MessageApi) SetOptions(options map[string]bool, value bool) { +func (*MessageApi) SetOptions(options map[string]bool, value bool) { datautil.SetSwitchFromOptions(options, constant.IsHistory, value) datautil.SetSwitchFromOptions(options, constant.IsPersistent, value) datautil.SetSwitchFromOptions(options, constant.IsSenderSync, value) datautil.SetSwitchFromOptions(options, constant.IsConversationUpdate, value) } -func (m MessageApi) newUserSendMsgReq(_ *gin.Context, params *apistruct.SendMsg) *msg.SendMsgReq { +func (m *MessageApi) newUserSendMsgReq(_ *gin.Context, params *apistruct.SendMsg) *msg.SendMsgReq { var newContent string options := make(map[string]bool, 5) switch params.ContentType { @@ -231,7 +231,7 @@ func (m *MessageApi) SendMessage(c *gin.Context) { } // Set the status to successful if the message is sent. - var status int = constant.MsgSendSuccessed + var status = constant.MsgSendSuccessed // Attempt to update the message sending status in the system. _, err = m.Client.SetSendMsgStatus(c, &msg.SetSendMsgStatusReq{ diff --git a/internal/api/router.go b/internal/api/router.go index 0667c3e751..3817070b16 100644 --- a/internal/api/router.go +++ b/internal/api/router.go @@ -2,12 +2,17 @@ package api import ( "fmt" + "github.com/gin-contrib/gzip" + "github.com/gin-gonic/gin" "github.com/gin-gonic/gin/binding" "github.com/go-playground/validator/v10" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "net/http" + "strings" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" @@ -16,8 +21,13 @@ import ( "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mw" - "net/http" - "strings" +) + +const ( + NoCompression = -1 + DefaultCompression = 0 + BestCompression = 1 + BestSpeed = 2 ) func prommetricsGin() gin.HandlerFunc { @@ -52,7 +62,15 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En conversationRpc := rpcclient.NewConversation(disCov, config.Share.RpcRegisterName.Conversation) authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth) thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL) - + switch config.API.Api.CompressionLevel { + case NoCompression: + case DefaultCompression: + r.Use(gzip.Gzip(gzip.DefaultCompression)) + case BestCompression: + r.Use(gzip.Gzip(gzip.BestCompression)) + case BestSpeed: + r.Use(gzip.Gzip(gzip.BestSpeed)) + } r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc)) u := NewUserApi(*userRpc) m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID) @@ -112,6 +130,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En { 
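Note on the compression switch added to newGinRouter above: it maps a config-driven level onto the gin-contrib/gzip middleware. The following is a minimal, self-contained sketch of that pattern under the same numeric mapping introduced by this diff (-1 off, 0 default, 1 best compression, 2 best speed); the /ping handler and the port are illustrative only, not part of the project.

package main

import (
    "net/http"

    "github.com/gin-contrib/gzip"
    "github.com/gin-gonic/gin"
)

// Compression levels mirroring the constants introduced in internal/api/router.go.
const (
    NoCompression      = -1
    DefaultCompression = 0
    BestCompression    = 1
    BestSpeed          = 2
)

// applyGzip attaches the gzip middleware according to the configured level.
// NoCompression (or any unrecognized value) adds nothing, matching the empty case in the diff.
func applyGzip(r *gin.Engine, level int) {
    switch level {
    case DefaultCompression:
        r.Use(gzip.Gzip(gzip.DefaultCompression))
    case BestCompression:
        r.Use(gzip.Gzip(gzip.BestCompression))
    case BestSpeed:
        r.Use(gzip.Gzip(gzip.BestSpeed))
    }
}

func main() {
    r := gin.New()
    applyGzip(r, BestSpeed) // hypothetical level; in the server it comes from config.API.Api.CompressionLevel
    r.GET("/ping", func(c *gin.Context) {
        c.String(http.StatusOK, "pong")
    })
    _ = r.Run(":8080") // illustrative port
}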
groupRouterGroup.POST("/create_group", g.CreateGroup) groupRouterGroup.POST("/set_group_info", g.SetGroupInfo) + groupRouterGroup.POST("/set_group_info_ex", g.SetGroupInfoEX) groupRouterGroup.POST("/join_group", g.JoinGroup) groupRouterGroup.POST("/quit_group", g.QuitGroup) groupRouterGroup.POST("/group_application_response", g.ApplicationGroupResponse) diff --git a/internal/api/user.go b/internal/api/user.go index d48111b9eb..dba7cd3129 100644 --- a/internal/api/user.go +++ b/internal/api/user.go @@ -36,9 +36,11 @@ func (u *UserApi) UserRegister(c *gin.Context) { a2r.Call(user.UserClient.UserRegister, u.Client, c) } +// UpdateUserInfo is deprecated. Use UpdateUserInfoEx func (u *UserApi) UpdateUserInfo(c *gin.Context) { a2r.Call(user.UserClient.UpdateUserInfo, u.Client, c) } + func (u *UserApi) UpdateUserInfoEx(c *gin.Context) { a2r.Call(user.UserClient.UpdateUserInfoEx, u.Client, c) } diff --git a/internal/msggateway/client.go b/internal/msggateway/client.go index a4902570a6..bc06fa9507 100644 --- a/internal/msggateway/client.go +++ b/internal/msggateway/client.go @@ -22,6 +22,8 @@ import ( "sync/atomic" "time" + "google.golang.org/protobuf/proto" + "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/sdkws" @@ -30,7 +32,6 @@ import ( "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/utils/stringutil" - "google.golang.org/protobuf/proto" ) var ( @@ -220,6 +221,10 @@ func (c *Client) handleMessage(message []byte) error { resp, messageErr = c.longConnServer.SendSignalMessage(ctx, binaryReq) case WSPullMsgBySeqList: resp, messageErr = c.longConnServer.PullMessageBySeqList(ctx, binaryReq) + case WSPullMsg: + resp, messageErr = c.longConnServer.GetSeqMessage(ctx, binaryReq) + case WSGetConvMaxReadSeq: + resp, messageErr = c.longConnServer.GetConversationsHasReadAndMaxSeq(ctx, binaryReq) case WsLogoutMsg: resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq) case WsSetBackgroundStatus: @@ -271,11 +276,13 @@ func (c *Client) replyMessage(ctx context.Context, binaryReq *Req, err error, re ErrMsg: errResp.ErrMsg, Data: resp, } + t := time.Now() log.ZDebug(ctx, "gateway reply message", "resp", mReply.String()) err = c.writeBinaryMsg(mReply) if err != nil { log.ZWarn(ctx, "wireBinaryMsg replyMessage", err, "resp", mReply.String()) } + log.ZDebug(ctx, "wireBinaryMsg end", "time cost", time.Since(t)) if binaryReq.ReqIdentifier == WsLogoutMsg { return errs.New("user logout", "operationID", binaryReq.OperationID).Wrap() diff --git a/internal/msggateway/constant.go b/internal/msggateway/constant.go index dc5ad77861..584cebe1e1 100644 --- a/internal/msggateway/constant.go +++ b/internal/msggateway/constant.go @@ -39,6 +39,8 @@ const ( WSPullMsgBySeqList = 1002 WSSendMsg = 1003 WSSendSignalMsg = 1004 + WSPullMsg = 1005 + WSGetConvMaxReadSeq = 1006 WSPushMsg = 2001 WSKickOnlineMsg = 2002 WsLogoutMsg = 2003 diff --git a/internal/msggateway/init.go b/internal/msggateway/init.go index 44e79e4122..50da060976 100644 --- a/internal/msggateway/init.go +++ b/internal/msggateway/init.go @@ -58,7 +58,7 @@ func Start(ctx context.Context, index int, conf *Config) error { ) hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error { - longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges) + longServer.online, _ = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges) 
return nil }) diff --git a/internal/msggateway/message_handler.go b/internal/msggateway/message_handler.go index 8a11e6ab3c..4b78c10048 100644 --- a/internal/msggateway/message_handler.go +++ b/internal/msggateway/message_handler.go @@ -19,6 +19,8 @@ import ( "sync" "github.com/go-playground/validator/v10" + "google.golang.org/protobuf/proto" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/msg" @@ -27,7 +29,6 @@ import ( "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/utils/jsonutil" - "google.golang.org/protobuf/proto" ) type Req struct { @@ -94,6 +95,8 @@ type MessageHandler interface { SendMessage(context context.Context, data *Req) ([]byte, error) SendSignalMessage(context context.Context, data *Req) ([]byte, error) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) + GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error) + GetSeqMessage(context context.Context, data *Req) ([]byte, error) UserLogout(context context.Context, data *Req) ([]byte, error) SetUserDeviceBackground(context context.Context, data *Req) ([]byte, bool, error) } @@ -175,7 +178,7 @@ func (g GrpcHandler) SendSignalMessage(context context.Context, data *Req) ([]by func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) { req := sdkws.PullMessageBySeqsReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { - return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "PullMessageBySeqsReq") + return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "PullMessageBySeqsReq") } if err := g.validate.Struct(data); err != nil { return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "PullMessageBySeqsReq") @@ -191,6 +194,44 @@ func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([ return c, nil } +func (g GrpcHandler) GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error) { + req := msg.GetConversationsHasReadAndMaxSeqReq{} + if err := proto.Unmarshal(data.Data, &req); err != nil { + return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "GetConversationsHasReadAndMaxSeq") + } + if err := g.validate.Struct(data); err != nil { + return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetConversationsHasReadAndMaxSeq") + } + resp, err := g.msgRpcClient.GetConversationsHasReadAndMaxSeq(context, &req) + if err != nil { + return nil, err + } + c, err := proto.Marshal(resp) + if err != nil { + return nil, errs.WrapMsg(err, "error marshaling response", "action", "marshal", "dataType", "GetConversationsHasReadAndMaxSeq") + } + return c, nil +} + +func (g GrpcHandler) GetSeqMessage(context context.Context, data *Req) ([]byte, error) { + req := msg.GetSeqMessageReq{} + if err := proto.Unmarshal(data.Data, &req); err != nil { + return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "GetSeqMessage") + } + if err := g.validate.Struct(data); err != nil { + return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetSeqMessage") + } + resp, err := g.msgRpcClient.GetSeqMessage(context, &req) + if err != nil { + return nil, err + } + c, err := proto.Marshal(resp) + if err != nil { + return nil, errs.WrapMsg(err, "error 
marshaling response", "action", "marshal", "dataType", "GetSeqMessage") + } + return c, nil +} + func (g GrpcHandler) UserLogout(context context.Context, data *Req) ([]byte, error) { req := push.DelUserPushTokenReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { diff --git a/internal/msggateway/ws_server.go b/internal/msggateway/ws_server.go index 537b8c5f0a..81392897bf 100644 --- a/internal/msggateway/ws_server.go +++ b/internal/msggateway/ws_server.go @@ -265,7 +265,7 @@ func (ws *WsServer) registerClient(client *Client) { if clientOK { ws.clients.Set(client.UserID, client) // There is already a connection to the platform - log.ZInfo(client.ctx, "repeat login", "userID", client.UserID, "platformID", + log.ZDebug(client.ctx, "repeat login", "userID", client.UserID, "platformID", client.PlatformID, "old remote addr", getRemoteAdders(oldClients)) ws.onlineUserConnNum.Add(1) } else { @@ -275,7 +275,7 @@ func (ws *WsServer) registerClient(client *Client) { } wg := sync.WaitGroup{} - log.ZDebug(client.ctx, "ws.msgGatewayConfig.Discovery.Enable", ws.msgGatewayConfig.Discovery.Enable) + log.ZDebug(client.ctx, "ws.msgGatewayConfig.Discovery.Enable", "discoveryEnable", ws.msgGatewayConfig.Discovery.Enable) if ws.msgGatewayConfig.Discovery.Enable != "k8s" { wg.Add(1) @@ -293,7 +293,7 @@ func (ws *WsServer) registerClient(client *Client) { wg.Wait() - log.ZInfo( + log.ZDebug( client.ctx, "user online", "online user Num", @@ -360,7 +360,7 @@ func (ws *WsServer) unregisterClient(client *Client) { ws.onlineUserConnNum.Add(-1) ws.subscription.DelClient(client) //ws.SetUserOnlineStatus(client.ctx, client, constant.Offline) - log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num", + log.ZDebug(client.ctx, "user offline", "close reason", client.closedErr, "online user Num", ws.onlineUserNum.Load(), "online user conn Num", ws.onlineUserConnNum.Load(), ) diff --git a/internal/msgtransfer/init.go b/internal/msgtransfer/init.go index b4b2245eb0..7dc2ebeea0 100644 --- a/internal/msgtransfer/init.go +++ b/internal/msgtransfer/init.go @@ -16,20 +16,22 @@ package msgtransfer import ( "context" + "errors" "fmt" + "net/http" + "os" + "os/signal" + "syscall" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" "github.com/openimsdk/tools/db/mongoutil" "github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/tools/utils/datautil" - "net/http" - "os" - "os/signal" - "syscall" "github.com/openimsdk/open-im-server/v3/pkg/common/config" - kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" + discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/tools/errs" @@ -64,6 +66,7 @@ type Config struct { func Start(ctx context.Context, index int, config *Config) error { log.CInfo(ctx, "MSG-TRANSFER server is initializing", "prometheusPorts", config.MsgTransfer.Prometheus.Ports, "index", index) + mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) if err != nil { return err @@ -72,12 +75,13 @@ func Start(ctx context.Context, index int, config *Config) error { if err != nil { return err } - client, err := kdisc.NewDiscoveryRegister(&config.Discovery, &config.Share) + client, err := 
discRegister.NewDiscoveryRegister(&config.Discovery, &config.Share) if err != nil { return err } client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) + msgModel := redis.NewMsgCache(rdb) msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB()) if err != nil { @@ -93,20 +97,21 @@ func Start(ctx context.Context, index int, config *Config) error { return err } seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser) - msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig) + msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig) if err != nil { return err } conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation) groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) - historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgDatabase, &conversationRpcClient, &groupRpcClient) + historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgTransferDatabase, &conversationRpcClient, &groupRpcClient) if err != nil { return err } - historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgDatabase) + historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgTransferDatabase) if err != nil { return err } + msgTransfer := &MsgTransfer{ historyCH: historyCH, historyMongoCH: historyMongoCH, @@ -137,7 +142,7 @@ func (m *MsgTransfer) Start(index int, config *Config) error { return } - if err := prommetrics.TransferInit(prometheusPort); err != nil && err != http.ErrServerClosed { + if err := prommetrics.TransferInit(prometheusPort); err != nil && !errors.Is(err, http.ErrServerClosed) { netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort) netDone <- struct{}{} } diff --git a/internal/msgtransfer/online_history_msg_handler.go b/internal/msgtransfer/online_history_msg_handler.go index d671ec52a2..b0078649cb 100644 --- a/internal/msgtransfer/online_history_msg_handler.go +++ b/internal/msgtransfer/online_history_msg_handler.go @@ -16,6 +16,12 @@ package msgtransfer import ( "context" + "encoding/json" + "errors" + "strconv" + "strings" + "time" + "github.com/IBM/sarama" "github.com/go-redis/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/config" @@ -31,9 +37,6 @@ import ( "github.com/openimsdk/tools/mq/kafka" "github.com/openimsdk/tools/utils/stringutil" "google.golang.org/protobuf/proto" - "strconv" - "strings" - "time" ) const ( @@ -54,19 +57,19 @@ type OnlineHistoryRedisConsumerHandler struct { redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage] - msgDatabase controller.CommonMsgDatabase + msgTransferDatabase controller.MsgTransferDatabase conversationRpcClient *rpcclient.ConversationRpcClient groupRpcClient *rpcclient.GroupRpcClient } -func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.CommonMsgDatabase, +func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase, conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*OnlineHistoryRedisConsumerHandler, error) { historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, 
[]string{kafkaConf.ToRedisTopic}, false) if err != nil { return nil, err } var och OnlineHistoryRedisConsumerHandler - och.msgDatabase = database + och.msgTransferDatabase = database b := batcher.New[sarama.ConsumerMessage]( batcher.WithSize(size), @@ -88,6 +91,7 @@ func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database cont och.conversationRpcClient = conversationRpcClient och.groupRpcClient = groupRpcClient och.historyConsumerGroup = historyConsumerGroup + return &och, err } func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[sarama.ConsumerMessage]) { @@ -96,6 +100,7 @@ func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID ctx = withAggregationCtx(ctx, ctxMessages) log.ZInfo(ctx, "msg arrived channel", "channel id", channelID, "msgList length", len(ctxMessages), "key", val.Key()) + och.doSetReadSeq(ctx, ctxMessages) storageMsgList, notStorageMsgList, storageNotificationList, notStorageNotificationList := och.categorizeMessageLists(ctxMessages) @@ -109,6 +114,60 @@ func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID och.handleNotification(ctx, val.Key(), conversationIDNotification, storageNotificationList, notStorageNotificationList) } +func (och *OnlineHistoryRedisConsumerHandler) doSetReadSeq(ctx context.Context, msgs []*ContextMsg) { + type seqKey struct { + conversationID string + userID string + } + var readSeq map[seqKey]int64 + for _, msg := range msgs { + if msg.message.ContentType != constant.HasReadReceipt { + continue + } + var elem sdkws.NotificationElem + if err := json.Unmarshal(msg.message.Content, &elem); err != nil { + log.ZError(ctx, "handlerConversationRead Unmarshal NotificationElem msg err", err, "msg", msg) + continue + } + var tips sdkws.MarkAsReadTips + if err := json.Unmarshal([]byte(elem.Detail), &tips); err != nil { + log.ZError(ctx, "handlerConversationRead Unmarshal MarkAsReadTips msg err", err, "msg", msg) + continue + } + if len(tips.Seqs) > 0 { + for _, seq := range tips.Seqs { + if tips.HasReadSeq < seq { + tips.HasReadSeq = seq + } + } + clear(tips.Seqs) + tips.Seqs = nil + } + if tips.HasReadSeq < 0 { + continue + } + if readSeq == nil { + readSeq = make(map[seqKey]int64) + } + key := seqKey{ + conversationID: tips.ConversationID, + userID: tips.MarkAsReadUserID, + } + if readSeq[key] > tips.HasReadSeq { + continue + } + readSeq[key] = tips.HasReadSeq + } + if readSeq == nil { + return + } + for key, seq := range readSeq { + if err := och.msgTransferDatabase.SetHasReadSeqToDB(ctx, key.userID, key.conversationID, seq); err != nil { + log.ZError(ctx, "set read seq to db error", err, "userID", key.userID, "conversationID", key.conversationID, "seq", seq) + } + } +} + func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*sarama.ConsumerMessage) []*ContextMsg { var ctxMessages []*ContextMsg for i := 0; i < len(consumerMessages); i++ { @@ -179,6 +238,11 @@ func (och *OnlineHistoryRedisConsumerHandler) categorizeMessageLists(totalMsgs [ } func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) { + log.ZInfo(ctx, "handle storage msg") + for _, storageMsg := range storageList { + log.ZDebug(ctx, "handle storage msg", "msg", storageMsg.message.String()) + } + och.toPushTopic(ctx, key, conversationID, notStorageList) var storageMessageList []*sdkws.MsgData for _, msg := range storageList { @@ 
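The doSetReadSeq handler added above reduces a batch of read receipts to the highest read sequence per conversation and reader, so that only one SetHasReadSeqToDB write is issued per key. A condensed sketch of that aggregation with plain values; the Tip struct and the sample data are stand-ins for sdkws.MarkAsReadTips and the Kafka batch, not the real types.

package main

import "fmt"

// Tip is a stand-in for sdkws.MarkAsReadTips: who read what, up to which seq.
type Tip struct {
    ConversationID   string
    MarkAsReadUserID string
    HasReadSeq       int64
    Seqs             []int64
}

type seqKey struct {
    conversationID string
    userID         string
}

// aggregateReadSeqs keeps only the largest read seq per conversation/user pair,
// mirroring the reduction doSetReadSeq performs before writing to storage.
func aggregateReadSeqs(tips []Tip) map[seqKey]int64 {
    readSeq := make(map[seqKey]int64)
    for _, t := range tips {
        // Individual seqs may exceed HasReadSeq; take the maximum of both.
        for _, s := range t.Seqs {
            if t.HasReadSeq < s {
                t.HasReadSeq = s
            }
        }
        if t.HasReadSeq < 0 {
            continue
        }
        key := seqKey{t.ConversationID, t.MarkAsReadUserID}
        if readSeq[key] < t.HasReadSeq {
            readSeq[key] = t.HasReadSeq
        }
    }
    return readSeq
}

func main() {
    tips := []Tip{
        {ConversationID: "c1", MarkAsReadUserID: "u1", HasReadSeq: 3},
        {ConversationID: "c1", MarkAsReadUserID: "u1", Seqs: []int64{5, 7}},
        {ConversationID: "c2", MarkAsReadUserID: "u1", HasReadSeq: 2},
    }
    // One write per key would follow, e.g. SetHasReadSeqToDB(userID, conversationID, seq).
    fmt.Println(aggregateReadSeqs(tips))
}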
-186,21 +250,25 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key } if len(storageMessageList) > 0 { msg := storageMessageList[0] - lastSeq, isNewConversation, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) - if err != nil && errs.Unwrap(err) != redis.Nil { + lastSeq, isNewConversation, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) + if err != nil && !errors.Is(errs.Unwrap(err), redis.Nil) { log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList) return } + log.ZInfo(ctx, "BatchInsertChat2Cache end") + if isNewConversation { switch msg.SessionType { case constant.ReadGroupChatType: - log.ZInfo(ctx, "group chat first create conversation", "conversationID", + log.ZDebug(ctx, "group chat first create conversation", "conversationID", conversationID) userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID) if err != nil { log.ZWarn(ctx, "get group member ids error", err, "conversationID", conversationID) } else { + log.ZInfo(ctx, "GetGroupMemberIDs end") + if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx, msg.GroupID, userIDs); err != nil { log.ZWarn(ctx, "single chat first create conversation error", err, @@ -219,13 +287,16 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key } } - log.ZDebug(ctx, "success incr to next topic") - err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) + log.ZInfo(ctx, "success incr to next topic") + err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) if err != nil { log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID", conversationID, "storageList", storageMessageList, "lastSeq", lastSeq) } + log.ZInfo(ctx, "MsgToMongoMQ end") + och.toPushTopic(ctx, key, conversationID, storageList) + log.ZInfo(ctx, "toPushTopic end") } } @@ -237,14 +308,14 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con storageMessageList = append(storageMessageList, msg.message) } if len(storageMessageList) > 0 { - lastSeq, _, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) + lastSeq, _, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) if err != nil { log.ZError(ctx, "notification batch insert to redis error", err, "conversationID", conversationID, "storageList", storageMessageList) return } log.ZDebug(ctx, "success to next topic", "conversationID", conversationID) - err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) + err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) if err != nil { log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID", conversationID, "storageList", storageMessageList, "lastSeq", lastSeq) @@ -253,9 +324,10 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con } } -func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(_ context.Context, key, conversationID string, msgs []*ContextMsg) { +func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) { for _, v := range msgs { - och.msgDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message) + log.ZDebug(ctx, "push msg to topic", "msg", v.message.String()) + _, _, _ = 
och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message) } } @@ -280,7 +352,7 @@ func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSess func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group - log.ZInfo(context.Background(), "online new session msg come", "highWaterMarkOffset", + log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset", claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition()) och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) { session.MarkMessage(lastMessage, "") diff --git a/internal/msgtransfer/online_msg_to_mongo_handler.go b/internal/msgtransfer/online_msg_to_mongo_handler.go index e5651012c6..82002c26b9 100644 --- a/internal/msgtransfer/online_msg_to_mongo_handler.go +++ b/internal/msgtransfer/online_msg_to_mongo_handler.go @@ -29,10 +29,10 @@ import ( type OnlineHistoryMongoConsumerHandler struct { historyConsumerGroup *kafka.MConsumerGroup - msgDatabase controller.CommonMsgDatabase + msgTransferDatabase controller.MsgTransferDatabase } -func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.CommonMsgDatabase) (*OnlineHistoryMongoConsumerHandler, error) { +func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase) (*OnlineHistoryMongoConsumerHandler, error) { historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToMongoGroupID, []string{kafkaConf.ToMongoTopic}, true) if err != nil { return nil, err @@ -40,7 +40,7 @@ func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database cont mc := &OnlineHistoryMongoConsumerHandler{ historyConsumerGroup: historyConsumerGroup, - msgDatabase: database, + msgTransferDatabase: database, } return mc, nil } @@ -57,8 +57,8 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg) return } - log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String()) - err = mc.msgDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq) + log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String()) + err = mc.msgTransferDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq) if err != nil { log.ZError( ctx, @@ -77,7 +77,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont for _, msg := range msgFromMQ.MsgData { seqs = append(seqs, msg.Seq) } - err = mc.msgDatabase.DeleteMessagesFromCache(ctx, msgFromMQ.ConversationID, seqs) + err = mc.msgTransferDatabase.DeleteMessagesFromCache(ctx, msgFromMQ.ConversationID, seqs) if err != nil { log.ZError( ctx, @@ -91,13 +91,13 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont } } -func (OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } -func (OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } +func (*OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } +func (*OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim( sess sarama.ConsumerGroupSession, claim 
sarama.ConsumerGroupClaim, -) error { // a instance in the consumer group +) error { // an instance in the consumer group log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset", claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition()) for msg := range claim.Messages() { diff --git a/internal/push/a_test.go b/internal/push/a_test.go new file mode 100644 index 0000000000..8b2d864071 --- /dev/null +++ b/internal/push/a_test.go @@ -0,0 +1,29 @@ +package push + +import ( + "github.com/openimsdk/protocol/sdkws" + "testing" +) + +func TestName(t *testing.T) { + var c ConsumerHandler + c.readCh = make(chan *sdkws.MarkAsReadTips) + + go c.loopRead() + + go func() { + for i := 0; ; i++ { + seq := int64(i + 1) + if seq%3 == 0 { + seq = 1 + } + c.readCh <- &sdkws.MarkAsReadTips{ + ConversationID: "c100", + MarkAsReadUserID: "u100", + HasReadSeq: seq, + } + } + }() + + select {} +} diff --git a/internal/push/offlinepush/dummy/push.go b/internal/push/offlinepush/dummy/push.go index 028e7edd34..09831cabfa 100644 --- a/internal/push/offlinepush/dummy/push.go +++ b/internal/push/offlinepush/dummy/push.go @@ -17,6 +17,7 @@ package dummy import ( "context" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" + "github.com/openimsdk/tools/log" ) func NewClient() *Dummy { @@ -27,5 +28,6 @@ type Dummy struct { } func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error { + log.ZDebug(ctx, "dummy push") return nil } diff --git a/internal/push/offlinepush/getui/push.go b/internal/push/offlinepush/getui/push.go index 27b19e8fe0..e266f9c464 100644 --- a/internal/push/offlinepush/getui/push.go +++ b/internal/push/offlinepush/getui/push.go @@ -18,11 +18,11 @@ import ( "context" "crypto/sha256" "encoding/hex" - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "strconv" "sync" "time" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/tools/errs" @@ -91,6 +91,15 @@ func (g *Client) Push(ctx context.Context, userIDs []string, title, content stri for i, v := range s.GetSplitResult() { go func(index int, userIDs []string) { defer wg.Done() + for i := 0; i < len(userIDs); i += maxNum { + end := i + maxNum + if end > len(userIDs) { + end = len(userIDs) + } + if err = g.batchPush(ctx, token, userIDs[i:end], pushReq); err != nil { + log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq) + } + } if err = g.batchPush(ctx, token, userIDs, pushReq); err != nil { log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq) } diff --git a/internal/push/offlinepush_handler.go b/internal/push/offlinepush_handler.go new file mode 100644 index 0000000000..bf69aed3e2 --- /dev/null +++ b/internal/push/offlinepush_handler.go @@ -0,0 +1,122 @@ +package push + +import ( + "context" + + "github.com/IBM/sarama" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" + "github.com/openimsdk/protocol/constant" + pbpush "github.com/openimsdk/protocol/push" + "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/errs" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mq/kafka" + 
"github.com/openimsdk/tools/utils/jsonutil" + "google.golang.org/protobuf/proto" +) + +type OfflinePushConsumerHandler struct { + OfflinePushConsumerGroup *kafka.MConsumerGroup + offlinePusher offlinepush.OfflinePusher +} + +func NewOfflinePushConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher) (*OfflinePushConsumerHandler, error) { + var offlinePushConsumerHandler OfflinePushConsumerHandler + var err error + offlinePushConsumerHandler.offlinePusher = offlinePusher + offlinePushConsumerHandler.OfflinePushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToOfflineGroupID, + []string{config.KafkaConfig.ToOfflinePushTopic}, true) + if err != nil { + return nil, err + } + return &offlinePushConsumerHandler, nil +} + +func (*OfflinePushConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil } +func (*OfflinePushConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil } +func (o *OfflinePushConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + for msg := range claim.Messages() { + ctx := o.OfflinePushConsumerGroup.GetContextFromMsg(msg) + o.handleMsg2OfflinePush(ctx, msg.Value) + sess.MarkMessage(msg, "") + } + return nil +} + +func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context, msg []byte) { + offlinePushMsg := pbpush.PushMsgReq{} + if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil { + log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg)) + return + } + if offlinePushMsg.MsgData == nil || offlinePushMsg.UserIDs == nil { + log.ZError(ctx, "offline push msg is empty", errs.New("offlinePushMsg is empty"), "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData) + return + } + log.ZInfo(ctx, "receive to OfflinePush MQ", "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData) + + err := o.offlinePushMsg(ctx, offlinePushMsg.MsgData, offlinePushMsg.UserIDs) + if err != nil { + log.ZWarn(ctx, "offline push failed", err, "msg", offlinePushMsg.String()) + } +} + +func (o *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) { + type AtTextElem struct { + Text string `json:"text,omitempty"` + AtUserList []string `json:"atUserList,omitempty"` + IsAtSelf bool `json:"isAtSelf"` + } + + opts = &options.Opts{Signal: &options.Signal{}} + if msg.OfflinePushInfo != nil { + opts.IOSBadgeCount = msg.OfflinePushInfo.IOSBadgeCount + opts.IOSPushSound = msg.OfflinePushInfo.IOSPushSound + opts.Ex = msg.OfflinePushInfo.Ex + } + + if msg.OfflinePushInfo != nil { + title = msg.OfflinePushInfo.Title + content = msg.OfflinePushInfo.Desc + } + if title == "" { + switch msg.ContentType { + case constant.Text: + fallthrough + case constant.Picture: + fallthrough + case constant.Voice: + fallthrough + case constant.Video: + fallthrough + case constant.File: + title = constant.ContentType2PushContent[int64(msg.ContentType)] + case constant.AtText: + ac := AtTextElem{} + _ = jsonutil.JsonStringToStruct(string(msg.Content), &ac) + case constant.SignalingNotification: + title = constant.ContentType2PushContent[constant.SignalMsg] + default: + title = constant.ContentType2PushContent[constant.Common] + } + } + if content == "" { + content = title + } + return +} + +func (o *OfflinePushConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error { + title, content, opts, err := 
o.getOfflinePushInfos(msg) + if err != nil { + return err + } + err = o.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts) + if err != nil { + prommetrics.MsgOfflinePushFailedCounter.Inc() + return err + } + return nil +} diff --git a/internal/push/onlinepusher.go b/internal/push/onlinepusher.go index a61399fb6b..9521a84a07 100644 --- a/internal/push/onlinepusher.go +++ b/internal/push/onlinepusher.go @@ -19,20 +19,20 @@ type OnlinePusher interface { pushToUserIDs *[]string) []string } -type emptyOnlinePUsher struct{} +type emptyOnlinePusher struct{} -func newEmptyOnlinePUsher() *emptyOnlinePUsher { - return &emptyOnlinePUsher{} +func newEmptyOnlinePusher() *emptyOnlinePusher { + return &emptyOnlinePusher{} } -func (emptyOnlinePUsher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, +func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) { - log.ZWarn(ctx, "emptyOnlinePUsher GetConnsAndOnlinePush", nil) + log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil) return nil, nil } -func (u emptyOnlinePUsher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, +func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string { - log.ZWarn(ctx, "emptyOnlinePUsher GetOnlinePushFailedUserIDs", nil) + log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil) return nil } @@ -45,7 +45,7 @@ func NewOnlinePusher(disCov discovery.SvcDiscoveryRegistry, config *Config) Onli case "etcd": return NewDefaultAllNode(disCov, config) default: - return newEmptyOnlinePUsher() + return newEmptyOnlinePusher() } } diff --git a/internal/push/push.go b/internal/push/push.go index 1a04bbea26..850f91d22e 100644 --- a/internal/push/push.go +++ b/internal/push/push.go @@ -2,6 +2,7 @@ package push import ( "context" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" @@ -17,12 +18,12 @@ type pushServer struct { disCov discovery.SvcDiscoveryRegistry offlinePusher offlinepush.OfflinePusher pushCh *ConsumerHandler + offlinePushCh *OfflinePushConsumerHandler } type Config struct { RpcConfig config.Push RedisConfig config.Redis - MongodbConfig config.Mongo KafkaConfig config.Kafka NotificationConfig config.Notification Share config.Share @@ -55,18 +56,30 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg if err != nil { return err } - database := controller.NewPushDatabase(cacheModel) - consumer, err := NewConsumerHandler(config, offlinePusher, rdb, client) + database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig) + + consumer, err := NewConsumerHandler(config, database, offlinePusher, rdb, client) + if err != nil { + return err + } + + offlinePushConsumer, err := NewOfflinePushConsumerHandler(config, offlinePusher) if err != nil { return err } + pbpush.RegisterPushMsgServiceServer(server, &pushServer{ database: database, disCov: client, offlinePusher: offlinePusher, pushCh: consumer, + offlinePushCh: offlinePushConsumer, }) + go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer) + + go offlinePushConsumer.OfflinePushConsumerGroup.RegisterHandleAndConsumer(ctx, offlinePushConsumer) + return nil } diff --git 
a/internal/push/push_handler.go b/internal/push/push_handler.go index 249622a592..4ecf20de52 100644 --- a/internal/push/push_handler.go +++ b/internal/push/push_handler.go @@ -1,33 +1,20 @@ -// Copyright Β© 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package push import ( "context" "encoding/json" + "github.com/IBM/sarama" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/rpccache" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" "github.com/openimsdk/protocol/constant" - pbchat "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/msggateway" pbpush "github.com/openimsdk/protocol/push" "github.com/openimsdk/protocol/sdkws" @@ -40,12 +27,16 @@ import ( "github.com/openimsdk/tools/utils/timeutil" "github.com/redis/go-redis/v9" "google.golang.org/protobuf/proto" + "math/rand" + "strconv" + "time" ) type ConsumerHandler struct { pushConsumerGroup *kafka.MConsumerGroup offlinePusher offlinepush.OfflinePusher onlinePusher OnlinePusher + pushDatabase controller.PushDatabase onlineCache *rpccache.OnlineCache groupLocalCache *rpccache.GroupLocalCache conversationLocalCache *rpccache.ConversationLocalCache @@ -56,7 +47,7 @@ type ConsumerHandler struct { config *Config } -func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, +func NewConsumerHandler(config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) { var consumerHandler ConsumerHandler var err error @@ -65,7 +56,9 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, if err != nil { return nil, err } + userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) + consumerHandler.offlinePusher = offlinePusher consumerHandler.onlinePusher = NewOnlinePusher(client, config) consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) @@ -75,42 +68,45 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb) consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL) consumerHandler.config = config - consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil) + 
consumerHandler.pushDatabase = database + consumerHandler.onlineCache, err = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, config.RpcConfig.FullUserCache, nil) + if err != nil { + return nil, err + } return &consumerHandler, nil } func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) { - msgFromMQ := pbchat.PushMsgDataToMQ{} + msgFromMQ := pbpush.PushMsgReq{} if err := proto.Unmarshal(msg, &msgFromMQ); err != nil { log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg)) return } - pbData := &pbpush.PushMsgReq{ - MsgData: msgFromMQ.MsgData, - ConversationID: msgFromMQ.ConversationID, - } + sec := msgFromMQ.MsgData.SendTime / 1000 nowSec := timeutil.GetCurrentTimestampBySecond() - log.ZDebug(ctx, "push msg", "msg", pbData.String(), "sec", sec, "nowSec", nowSec) + if nowSec-sec > 10 { - return + prommetrics.MsgLoneTimePushCounter.Inc() + log.ZWarn(ctx, "it’s been a while since the message was sent", nil, "msg", msgFromMQ.String(), "sec", sec, "nowSec", nowSec, "nowSec-sec", nowSec-sec) } var err error + switch msgFromMQ.MsgData.SessionType { case constant.ReadGroupChatType: - err = c.Push2Group(ctx, pbData.MsgData.GroupID, pbData.MsgData) + err = c.Push2Group(ctx, msgFromMQ.MsgData.GroupID, msgFromMQ.MsgData) default: var pushUserIDList []string - isSenderSync := datautil.GetSwitchFromOptions(pbData.MsgData.Options, constant.IsSenderSync) - if !isSenderSync || pbData.MsgData.SendID == pbData.MsgData.RecvID { - pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID) + isSenderSync := datautil.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync) + if !isSenderSync || msgFromMQ.MsgData.SendID == msgFromMQ.MsgData.RecvID { + pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID) } else { - pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID, pbData.MsgData.SendID) + pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID, msgFromMQ.MsgData.SendID) } - err = c.Push2User(ctx, pushUserIDList, pbData.MsgData) + err = c.Push2User(ctx, pushUserIDList, msgFromMQ.MsgData) } if err != nil { - log.ZWarn(ctx, "push failed", err, "msg", pbData.String()) + log.ZWarn(ctx, "push failed", err, "msg", msgFromMQ.String()) } } @@ -119,6 +115,14 @@ func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil } func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil } func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + c.onlineCache.Lock.Lock() + for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver { + c.onlineCache.Cond.Wait() + } + c.onlineCache.Lock.Unlock() + ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10)) + log.ZInfo(ctx, "begin consume messages") + for msg := range claim.Messages() { ctx := c.pushConsumerGroup.GetContextFromMsg(msg) c.handleMs2PsChat(ctx, msg.Value) @@ -129,20 +133,27 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s // Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType. 
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) { - log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String()) + log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String()) + defer func(duration time.Time) { + t := time.Since(duration) + log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "msg", msg.String(), "time cost", t) + }(time.Now()) if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil { return err } + log.ZInfo(ctx, "webhookBeforeOnlinePush end") + wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs) if err != nil { return err } - log.ZDebug(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs) + log.ZInfo(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs) if !c.shouldPushOffline(ctx, msg) { return nil } + log.ZInfo(ctx, "shouldPushOffline end") for _, v := range wsResults { //message sender do not need offline push @@ -154,17 +165,17 @@ func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg * return nil } } - offlinePUshUserID := []string{msg.RecvID} + offlinePushUserID := []string{msg.RecvID} //receiver offline push if err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, - offlinePUshUserID, msg, nil); err != nil { + offlinePushUserID, msg, nil); err != nil { return err } - - err = c.offlinePushMsg(ctx, msg, offlinePUshUserID) + log.ZInfo(ctx, "webhookBeforeOfflinePush end") + err = c.offlinePushMsg(ctx, msg, offlinePushUserID) if err != nil { - log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePUshUserID", offlinePUshUserID, "msg", msg) + log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePushUserID", offlinePushUserID, "msg", msg) return nil } @@ -183,21 +194,11 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat } func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) { - var ( - onlineUserIDs []string - offlineUserIDs []string - ) - for _, userID := range pushToUserIDs { - online, err := c.onlineCache.GetUserOnline(ctx, userID) - if err != nil { - return nil, err - } - if online { - onlineUserIDs = append(onlineUserIDs, userID) - } else { - offlineUserIDs = append(offlineUserIDs, userID) - } + onlineUserIDs, offlineUserIDs, err := c.onlineCache.GetUsersOnline(ctx, pushToUserIDs) + if err != nil { + return nil, err } + log.ZDebug(ctx, "GetConnsAndOnlinePush online cache", "sendID", msg.SendID, "recvID", msg.RecvID, "groupID", msg.GroupID, "sessionType", msg.SessionType, "clientMsgID", msg.ClientMsgID, "serverMsgID", msg.ServerMsgID, "offlineUserIDs", offlineUserIDs, "onlineUserIDs", onlineUserIDs) var result []*msggateway.SingleMsgToUserResults if len(onlineUserIDs) > 0 { @@ -216,57 +217,70 @@ func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws. 
} func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) { - log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID) + log.ZInfo(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID) + defer func(duration time.Time) { + t := time.Since(duration) + log.ZInfo(ctx, "Get group msg from msg_transfer and push msg end", "msg", msg.String(), "groupID", groupID, "time cost", t) + }(time.Now()) var pushToUserIDs []string if err = c.webhookBeforeGroupOnlinePush(ctx, &c.config.WebhooksConfig.BeforeGroupOnlinePush, groupID, msg, &pushToUserIDs); err != nil { return err } + log.ZInfo(ctx, "webhookBeforeGroupOnlinePush end") err = c.groupMessagesHandler(ctx, groupID, &pushToUserIDs, msg) if err != nil { return err } + log.ZInfo(ctx, "groupMessagesHandler end") wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs) if err != nil { return err } - log.ZDebug(ctx, "group push result", "result", wsResults, "msg", msg) + log.ZInfo(ctx, "group push result", "result", wsResults, "msg", msg) if !c.shouldPushOffline(ctx, msg) { return nil } needOfflinePushUserIDs := c.onlinePusher.GetOnlinePushFailedUserIDs(ctx, msg, wsResults, &pushToUserIDs) - + log.ZInfo(ctx, "GetOnlinePushFailedUserIDs end") //filter some user, like don not disturb or don't need offline push etc. needOfflinePushUserIDs, err = c.filterGroupMessageOfflinePush(ctx, groupID, msg, needOfflinePushUserIDs) if err != nil { return err } + log.ZInfo(ctx, "filterGroupMessageOfflinePush end") + // Use offline push messaging if len(needOfflinePushUserIDs) > 0 { - var offlinePushUserIDs []string - err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs) - if err != nil { - return err - } - - if len(offlinePushUserIDs) > 0 { - needOfflinePushUserIDs = offlinePushUserIDs - } + c.asyncOfflinePush(ctx, needOfflinePushUserIDs, msg) + } - err = c.offlinePushMsg(ctx, msg, needOfflinePushUserIDs) - if err != nil { - log.ZWarn(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg) - return nil - } + return nil +} +func (c *ConsumerHandler) asyncOfflinePush(ctx context.Context, needOfflinePushUserIDs []string, msg *sdkws.MsgData) { + var offlinePushUserIDs []string + err := c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs) + if err != nil { + log.ZWarn(ctx, "webhookBeforeOfflinePush failed", err, "msg", msg) + return } - return nil + if len(offlinePushUserIDs) > 0 { + needOfflinePushUserIDs = offlinePushUserIDs + } + if err := c.pushDatabase.MsgToOfflinePushMQ(ctx, conversationutil.GenConversationUniqueKeyForSingle(msg.SendID, msg.RecvID), needOfflinePushUserIDs, msg); err != nil { + log.ZError(ctx, "Msg To OfflinePush MQ error", err, "needOfflinePushUserIDs", + needOfflinePushUserIDs, "msg", msg) + prommetrics.SingleChatMsgProcessFailedCounter.Inc() + return + } } + func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID string, pushToUserIDs *[]string, msg *sdkws.MsgData) (err error) { if len(*pushToUserIDs) == 0 { *pushToUserIDs, err = c.groupLocalCache.GetGroupMemberIDs(ctx, groupID) @@ -300,7 +314,7 @@ func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID stri if unmarshalNotificationElem(msg.Content, &tips) != nil { return err } - log.ZInfo(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", 
len(*pushToUserIDs), "list", pushToUserIDs) + log.ZDebug(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs) if len(c.config.Share.IMAdminUserID) > 0 { ctx = mcontext.WithOpUserIDContext(ctx, c.config.Share.IMAdminUserID[0]) } @@ -384,6 +398,7 @@ func (c *ConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, conten } return } + func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID) @@ -392,6 +407,7 @@ func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, } return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq) } + func unmarshalNotificationElem(bytes []byte, t any) error { var notification sdkws.NotificationElem if err := json.Unmarshal(bytes, &notification); err != nil { diff --git a/internal/rpc/conversation/conversaion.go b/internal/rpc/conversation/conversaion.go index 4cf20f919c..6f77164e38 100644 --- a/internal/rpc/conversation/conversaion.go +++ b/internal/rpc/conversation/conversaion.go @@ -221,11 +221,11 @@ func (c *conversationServer) SetConversation(ctx context.Context, req *pbconvers return resp, nil } -// nolint func (c *conversationServer) SetConversations(ctx context.Context, req *pbconversation.SetConversationsReq) (*pbconversation.SetConversationsResp, error) { if req.Conversation == nil { return nil, errs.ErrArgs.WrapMsg("conversation must not be nil") } + if req.Conversation.ConversationType == constant.WriteGroupChatType { groupInfo, err := c.groupRpcClient.GetGroupInfo(ctx, req.Conversation.GroupID) if err != nil { @@ -235,98 +235,141 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver return nil, servererrs.ErrDismissedAlready.WrapMsg("group dismissed") } } - var unequal int - var conv dbModel.Conversation - if len(req.UserIDs) == 1 { - cs, err := c.conversationDatabase.FindConversations(ctx, req.UserIDs[0], []string{req.Conversation.ConversationID}) + + conversationMap := make(map[string]*dbModel.Conversation) + var needUpdateUsersList []string + + for _, userID := range req.UserIDs { + conversationList, err := c.conversationDatabase.FindConversations(ctx, userID, []string{req.Conversation.ConversationID}) if err != nil { return nil, err } - if len(cs) == 0 { - return nil, errs.ErrRecordNotFound.WrapMsg("conversation not found") + if len(conversationList) != 0 { + conversationMap[userID] = conversationList[0] + } else { + needUpdateUsersList = append(needUpdateUsersList, userID) } - conv = *cs[0] } + var conversation dbModel.Conversation conversation.ConversationID = req.Conversation.ConversationID conversation.ConversationType = req.Conversation.ConversationType conversation.UserID = req.Conversation.UserID conversation.GroupID = req.Conversation.GroupID + m := make(map[string]any) - if req.Conversation.RecvMsgOpt != nil { - m["recv_msg_opt"] = req.Conversation.RecvMsgOpt.Value - if req.Conversation.RecvMsgOpt.Value != conv.RecvMsgOpt { - unequal++ + + setConversationFieldsFunc := func() { + if req.Conversation.RecvMsgOpt != nil { + m["recv_msg_opt"] = req.Conversation.RecvMsgOpt.Value } - } - if req.Conversation.AttachedInfo != nil { - m["attached_info"] = req.Conversation.AttachedInfo.Value - if req.Conversation.AttachedInfo.Value != conv.AttachedInfo { - unequal++ +
if req.Conversation.AttachedInfo != nil { + m["attached_info"] = req.Conversation.AttachedInfo.Value } - } - if req.Conversation.Ex != nil { - m["ex"] = req.Conversation.Ex.Value - if req.Conversation.Ex.Value != conv.Ex { - unequal++ + if req.Conversation.Ex != nil { + m["ex"] = req.Conversation.Ex.Value } - } - if req.Conversation.IsPinned != nil { - m["is_pinned"] = req.Conversation.IsPinned.Value - if req.Conversation.IsPinned.Value != conv.IsPinned { - unequal++ + if req.Conversation.IsPinned != nil { + m["is_pinned"] = req.Conversation.IsPinned.Value } - } - if req.Conversation.GroupAtType != nil { - m["group_at_type"] = req.Conversation.GroupAtType.Value - if req.Conversation.GroupAtType.Value != conv.GroupAtType { - unequal++ + if req.Conversation.GroupAtType != nil { + m["group_at_type"] = req.Conversation.GroupAtType.Value } - } - if req.Conversation.MsgDestructTime != nil { - m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value - if req.Conversation.MsgDestructTime.Value != conv.MsgDestructTime { - unequal++ + if req.Conversation.MsgDestructTime != nil { + m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value + } + if req.Conversation.MsgDestructTime != nil { + m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value + } + if req.Conversation.BurnDuration != nil { + m["burn_duration"] = req.Conversation.BurnDuration.Value } } - if req.Conversation.IsMsgDestruct != nil { - m["is_msg_destruct"] = req.Conversation.IsMsgDestruct.Value - if req.Conversation.IsMsgDestruct.Value != conv.IsMsgDestruct { - unequal++ + + // set need set field in conversation + setConversationFieldsFunc() + + for userID := range conversationMap { + unequal := len(m) + + if req.Conversation.RecvMsgOpt != nil { + if req.Conversation.RecvMsgOpt.Value == conversationMap[userID].RecvMsgOpt { + unequal-- + } + } + + if req.Conversation.AttachedInfo != nil { + if req.Conversation.AttachedInfo.Value == conversationMap[userID].AttachedInfo { + unequal-- + } + } + + if req.Conversation.Ex != nil { + if req.Conversation.Ex.Value == conversationMap[userID].Ex { + unequal-- + } + } + if req.Conversation.IsPinned != nil { + if req.Conversation.IsPinned.Value == conversationMap[userID].IsPinned { + unequal-- + } + } + + if req.Conversation.GroupAtType != nil { + if req.Conversation.GroupAtType.Value == conversationMap[userID].GroupAtType { + unequal-- + } + } + + if req.Conversation.MsgDestructTime != nil { + if req.Conversation.MsgDestructTime.Value == conversationMap[userID].MsgDestructTime { + unequal-- + } + } + + if req.Conversation.IsMsgDestruct != nil { + if req.Conversation.IsMsgDestruct.Value == conversationMap[userID].IsMsgDestruct { + unequal-- + } + } + + if req.Conversation.BurnDuration != nil { + if req.Conversation.BurnDuration.Value == conversationMap[userID].BurnDuration { + unequal-- + } + } + + if unequal > 0 { + needUpdateUsersList = append(needUpdateUsersList, userID) } } + if req.Conversation.IsPrivateChat != nil && req.Conversation.ConversationType != constant.ReadGroupChatType { var conversations []*dbModel.Conversation for _, ownerUserID := range req.UserIDs { - conversation2 := conversation - conversation2.OwnerUserID = ownerUserID - conversation2.IsPrivateChat = req.Conversation.IsPrivateChat.Value - conversations = append(conversations, &conversation2) + transConversation := conversation + transConversation.OwnerUserID = ownerUserID + transConversation.IsPrivateChat = req.Conversation.IsPrivateChat.Value + conversations = append(conversations, &transConversation) } 
if err := c.conversationDatabase.SyncPeerUserPrivateConversationTx(ctx, conversations); err != nil { return nil, err } + for _, userID := range req.UserIDs { c.conversationNotificationSender.ConversationSetPrivateNotification(ctx, userID, req.Conversation.UserID, req.Conversation.IsPrivateChat.Value, req.Conversation.ConversationID) } - } - - if req.Conversation.BurnDuration != nil { - m["burn_duration"] = req.Conversation.BurnDuration.Value - if req.Conversation.BurnDuration.Value != conv.BurnDuration { - unequal++ - } - } - - if err := c.conversationDatabase.SetUsersConversationFieldTx(ctx, req.UserIDs, &conversation, m); err != nil { - return nil, err - } + } else { + if len(m) != 0 && len(needUpdateUsersList) != 0 { + if err := c.conversationDatabase.SetUsersConversationFieldTx(ctx, needUpdateUsersList, &conversation, m); err != nil { + return nil, err + } - if unequal > 0 { - for _, v := range req.UserIDs { - c.conversationNotificationSender.ConversationChangeNotification(ctx, v, []string{req.Conversation.ConversationID}) + for _, v := range needUpdateUsersList { + c.conversationNotificationSender.ConversationChangeNotification(ctx, v, []string{req.Conversation.ConversationID}) + } } } @@ -392,6 +435,14 @@ func (c *conversationServer) SetConversationMaxSeq(ctx context.Context, req *pbc return &pbconversation.SetConversationMaxSeqResp{}, nil } +func (c *conversationServer) SetConversationMinSeq(ctx context.Context, req *pbconversation.SetConversationMinSeqReq) (*pbconversation.SetConversationMinSeqResp, error) { + if err := c.conversationDatabase.UpdateUsersConversationField(ctx, req.OwnerUserID, req.ConversationID, + map[string]any{"min_seq": req.MinSeq}); err != nil { + return nil, err + } + return &pbconversation.SetConversationMinSeqResp{}, nil +} + func (c *conversationServer) GetConversationIDs(ctx context.Context, req *pbconversation.GetConversationIDsReq) (*pbconversation.GetConversationIDsResp, error) { conversationIDs, err := c.conversationDatabase.GetConversationIDs(ctx, req.UserID) if err != nil { @@ -634,11 +685,11 @@ func (c *conversationServer) GetConversationsNeedDestructMsgs(ctx context.Contex conversationIDs, err := c.conversationDatabase.PageConversationIDs(ctx, pagination) if err != nil { - log.ZError(ctx, "PageConversationIDs failed", err, "pageNumber", pageNumber) + // log.ZError(ctx, "PageConversationIDs failed", err, "pageNumber", pageNumber) continue } - log.ZDebug(ctx, "PageConversationIDs success", "pageNumber", pageNumber, "conversationIDsNum", len(conversationIDs), "conversationIDs", conversationIDs) + // log.ZDebug(ctx, "PageConversationIDs success", "pageNumber", pageNumber, "conversationIDsNum", len(conversationIDs), "conversationIDs", conversationIDs) if len(conversationIDs) == 0 { continue } diff --git a/internal/rpc/group/callback.go b/internal/rpc/group/callback.go index f877aa64a8..5e3dc9b9c3 100644 --- a/internal/rpc/group/callback.go +++ b/internal/rpc/group/callback.go @@ -358,3 +358,74 @@ func (s *groupServer) webhookAfterSetGroupInfo(ctx context.Context, after *confi } s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoResp{}, after) } + +func (s *groupServer) webhookBeforeSetGroupInfoEX(ctx context.Context, before *config.BeforeConfig, req *group.SetGroupInfoEXReq) error { + return webhook.WithCondition(ctx, before, func(ctx context.Context) error { + cbReq := &callbackstruct.CallbackBeforeSetGroupInfoEXReq{ + CallbackCommand: callbackstruct.CallbackBeforeSetGroupInfoCommand, + GroupID: 
req.GroupInfoForSetEX.GroupID, + GroupName: req.GroupInfoForSetEX.GroupName, + Notification: req.GroupInfoForSetEX.Notification, + Introduction: req.GroupInfoForSetEX.Introduction, + FaceURL: req.GroupInfoForSetEX.FaceURL, + } + + if req.GroupInfoForSetEX.Ex != nil { + cbReq.Ex = req.GroupInfoForSetEX.Ex + } + log.ZDebug(ctx, "debug CallbackBeforeSetGroupInfoEX", "ex", cbReq.Ex) + + if req.GroupInfoForSetEX.NeedVerification != nil { + cbReq.NeedVerification = req.GroupInfoForSetEX.NeedVerification + } + if req.GroupInfoForSetEX.LookMemberInfo != nil { + cbReq.LookMemberInfo = req.GroupInfoForSetEX.LookMemberInfo + } + if req.GroupInfoForSetEX.ApplyMemberFriend != nil { + cbReq.ApplyMemberFriend = req.GroupInfoForSetEX.ApplyMemberFriend + } + + resp := &callbackstruct.CallbackBeforeSetGroupInfoEXResp{} + + if err := s.webhookClient.SyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, before); err != nil { + return err + } + + datautil.NotNilReplace(&req.GroupInfoForSetEX.GroupID, &resp.GroupID) + datautil.NotNilReplace(&req.GroupInfoForSetEX.GroupName, &resp.GroupName) + datautil.NotNilReplace(&req.GroupInfoForSetEX.FaceURL, &resp.FaceURL) + datautil.NotNilReplace(&req.GroupInfoForSetEX.Introduction, &resp.Introduction) + datautil.NotNilReplace(&req.GroupInfoForSetEX.Ex, &resp.Ex) + datautil.NotNilReplace(&req.GroupInfoForSetEX.NeedVerification, &resp.NeedVerification) + datautil.NotNilReplace(&req.GroupInfoForSetEX.LookMemberInfo, &resp.LookMemberInfo) + datautil.NotNilReplace(&req.GroupInfoForSetEX.ApplyMemberFriend, &resp.ApplyMemberFriend) + + return nil + }) +} + +func (s *groupServer) webhookAfterSetGroupInfoEX(ctx context.Context, after *config.AfterConfig, req *group.SetGroupInfoEXReq) { + cbReq := &callbackstruct.CallbackAfterSetGroupInfoEXReq{ + CallbackCommand: callbackstruct.CallbackAfterSetGroupInfoCommand, + GroupID: req.GroupInfoForSetEX.GroupID, + GroupName: req.GroupInfoForSetEX.GroupName, + Notification: req.GroupInfoForSetEX.Notification, + Introduction: req.GroupInfoForSetEX.Introduction, + FaceURL: req.GroupInfoForSetEX.FaceURL, + } + + if req.GroupInfoForSetEX.Ex != nil { + cbReq.Ex = req.GroupInfoForSetEX.Ex + } + if req.GroupInfoForSetEX.NeedVerification != nil { + cbReq.NeedVerification = req.GroupInfoForSetEX.NeedVerification + } + if req.GroupInfoForSetEX.LookMemberInfo != nil { + cbReq.LookMemberInfo = req.GroupInfoForSetEX.LookMemberInfo + } + if req.GroupInfoForSetEX.ApplyMemberFriend != nil { + cbReq.ApplyMemberFriend = req.GroupInfoForSetEX.ApplyMemberFriend + } + + s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoEXResp{}, after) +} diff --git a/internal/rpc/group/db_map.go b/internal/rpc/group/db_map.go index b4b503b950..08895f9c5f 100644 --- a/internal/rpc/group/db_map.go +++ b/internal/rpc/group/db_map.go @@ -54,6 +54,39 @@ func UpdateGroupInfoMap(ctx context.Context, group *sdkws.GroupInfoForSet) map[s return m } +func UpdateGroupInfoEXMap(ctx context.Context, group *sdkws.GroupInfoForSetEX) map[string]any { + m := make(map[string]any) + + if group.GroupName != "" { + m["group_name"] = group.GroupName + } + if group.Notification != nil { + m["notification"] = group.Notification.Value + m["notification_update_time"] = time.Now() + m["notification_user_id"] = mcontext.GetOpUserID(ctx) + } + if group.Introduction != nil { + m["introduction"] = group.Introduction.Value + } + if group.FaceURL != nil { + m["face_url"] = group.FaceURL.Value + } + if group.NeedVerification != nil { + 
m["need_verification"] = group.NeedVerification.Value + } + if group.LookMemberInfo != nil { + m["look_member_info"] = group.LookMemberInfo.Value + } + if group.ApplyMemberFriend != nil { + m["apply_member_friend"] = group.ApplyMemberFriend.Value + } + if group.Ex != nil { + m["ex"] = group.Ex.Value + } + + return m +} + func UpdateGroupStatusMap(status int) map[string]any { return map[string]any{ "status": status, diff --git a/internal/rpc/group/group.go b/internal/rpc/group/group.go index aa12c9d0f1..f5f0e3c85e 100644 --- a/internal/rpc/group/group.go +++ b/internal/rpc/group/group.go @@ -105,13 +105,20 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg database := controller.NewGroupDatabase(rdb, &config.LocalCacheConfig, groupDB, groupMemberDB, groupRequestDB, mgocli.GetTx(), grouphash.NewGroupHashFromGroupServer(&gs)) gs.db = database gs.user = userRpcClient - gs.notification = NewGroupNotificationSender(database, &msgRpcClient, &userRpcClient, config, func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error) { - users, err := userRpcClient.GetUsersInfo(ctx, userIDs) - if err != nil { - return nil, err - } - return datautil.Slice(users, func(e *sdkws.UserInfo) notification.CommonUser { return e }), nil - }) + gs.notification = NewGroupNotificationSender( + database, + &msgRpcClient, + &userRpcClient, + &conversationRpcClient, + config, + func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error) { + users, err := userRpcClient.GetUsersInfo(ctx, userIDs) + if err != nil { + return nil, err + } + return datautil.Slice(users, func(e *sdkws.UserInfo) notification.CommonUser { return e }), nil + }, + ) localcache.InitLocalCache(&config.LocalCacheConfig) gs.conversationRpcClient = conversationRpcClient gs.msgRpcClient = msgRpcClient @@ -121,8 +128,8 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg return nil } -func (s *groupServer) NotificationUserInfoUpdate(ctx context.Context, req *pbgroup.NotificationUserInfoUpdateReq) (*pbgroup.NotificationUserInfoUpdateResp, error) { - members, err := s.db.FindGroupMemberUser(ctx, nil, req.UserID) +func (g *groupServer) NotificationUserInfoUpdate(ctx context.Context, req *pbgroup.NotificationUserInfoUpdateReq) (*pbgroup.NotificationUserInfoUpdateResp, error) { + members, err := g.db.FindGroupMemberUser(ctx, nil, req.UserID) if err != nil { return nil, err } @@ -134,22 +141,22 @@ func (s *groupServer) NotificationUserInfoUpdate(ctx context.Context, req *pbgro groupIDs = append(groupIDs, member.GroupID) } for _, groupID := range groupIDs { - if err := s.db.MemberGroupIncrVersion(ctx, groupID, []string{req.UserID}, model.VersionStateUpdate); err != nil { + if err := g.db.MemberGroupIncrVersion(ctx, groupID, []string{req.UserID}, model.VersionStateUpdate); err != nil { return nil, err } } for _, groupID := range groupIDs { - s.notification.GroupMemberInfoSetNotification(ctx, groupID, req.UserID) + g.notification.GroupMemberInfoSetNotification(ctx, groupID, req.UserID) } - if err = s.db.DeleteGroupMemberHash(ctx, groupIDs); err != nil { + if err = g.db.DeleteGroupMemberHash(ctx, groupIDs); err != nil { return nil, err } return &pbgroup.NotificationUserInfoUpdateResp{}, nil } -func (s *groupServer) CheckGroupAdmin(ctx context.Context, groupID string) error { - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { - groupMember, err := s.db.TakeGroupMember(ctx, groupID, mcontext.GetOpUserID(ctx)) +func (g *groupServer) 
CheckGroupAdmin(ctx context.Context, groupID string) error { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + groupMember, err := g.db.TakeGroupMember(ctx, groupID, mcontext.GetOpUserID(ctx)) if err != nil { return err } @@ -160,11 +167,11 @@ func (s *groupServer) CheckGroupAdmin(ctx context.Context, groupID string) error return nil } -func (s *groupServer) GetPublicUserInfoMap(ctx context.Context, userIDs []string, complete bool) (map[string]*sdkws.PublicUserInfo, error) { +func (g *groupServer) GetPublicUserInfoMap(ctx context.Context, userIDs []string, complete bool) (map[string]*sdkws.PublicUserInfo, error) { if len(userIDs) == 0 { return map[string]*sdkws.PublicUserInfo{}, nil } - users, err := s.user.GetPublicUserInfos(ctx, userIDs, complete) + users, err := g.user.GetPublicUserInfos(ctx, userIDs, complete) if err != nil { return nil, err } @@ -173,16 +180,16 @@ func (s *groupServer) GetPublicUserInfoMap(ctx context.Context, userIDs []string }), nil } -func (s *groupServer) IsNotFound(err error) bool { +func (g *groupServer) IsNotFound(err error) bool { return errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) } -func (s *groupServer) GenGroupID(ctx context.Context, groupID *string) error { +func (g *groupServer) GenGroupID(ctx context.Context, groupID *string) error { if *groupID != "" { - _, err := s.db.TakeGroup(ctx, *groupID) + _, err := g.db.TakeGroup(ctx, *groupID) if err == nil { return servererrs.ErrGroupIDExisted.WrapMsg("group id existed " + *groupID) - } else if s.IsNotFound(err) { + } else if g.IsNotFound(err) { return nil } else { return err @@ -193,10 +200,10 @@ func (s *groupServer) GenGroupID(ctx context.Context, groupID *string) error { bi := big.NewInt(0) bi.SetString(id[0:8], 16) id = bi.String() - _, err := s.db.TakeGroup(ctx, id) + _, err := g.db.TakeGroup(ctx, id) if err == nil { continue - } else if s.IsNotFound(err) { + } else if g.IsNotFound(err) { *groupID = id return nil } else { @@ -206,14 +213,14 @@ func (s *groupServer) GenGroupID(ctx context.Context, groupID *string) error { return servererrs.ErrData.WrapMsg("group id gen error") } -func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupReq) (*pbgroup.CreateGroupResp, error) { +func (g *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupReq) (*pbgroup.CreateGroupResp, error) { if req.GroupInfo.GroupType != constant.WorkingGroup { return nil, errs.ErrArgs.WrapMsg(fmt.Sprintf("group type only supports %d", constant.WorkingGroup)) } if req.OwnerUserID == "" { return nil, errs.ErrArgs.WrapMsg("no group owner") } - if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, s.config.Share.IMAdminUserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, g.config.Share.IMAdminUserID); err != nil { return nil, err } @@ -227,7 +234,7 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR return nil, errs.ErrArgs.WrapMsg("group member repeated") } - userMap, err := s.user.GetUsersInfoMap(ctx, userIDs) + userMap, err := g.user.GetUsersInfoMap(ctx, userIDs) if err != nil { return nil, err } @@ -236,13 +243,13 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR return nil, servererrs.ErrUserIDNotFound.WrapMsg("user not found") } - if err := s.webhookBeforeCreateGroup(ctx, &s.config.WebhooksConfig.BeforeCreateGroup, req); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeCreateGroup(ctx, 
&g.config.WebhooksConfig.BeforeCreateGroup, req); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } var groupMembers []*model.GroupMember group := convert.Pb2DBGroupInfo(req.GroupInfo) - if err := s.GenGroupID(ctx, &group.GroupID); err != nil { + if err := g.GenGroupID(ctx, &group.GroupID); err != nil { return nil, err } @@ -271,11 +278,11 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR joinGroupFunc(userID, constant.GroupOrdinaryUsers) } - if err := s.webhookBeforeMembersJoinGroup(ctx, &s.config.WebhooksConfig.BeforeMemberJoinGroup, groupMembers, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeMembersJoinGroup(ctx, &g.config.WebhooksConfig.BeforeMemberJoinGroup, groupMembers, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - if err := s.db.CreateGroup(ctx, []*model.Group{group}, groupMembers); err != nil { + if err := g.db.CreateGroup(ctx, []*model.Group{group}, groupMembers); err != nil { return nil, err } resp := &pbgroup.CreateGroupResp{GroupInfo: &sdkws.GroupInfo{}} @@ -285,17 +292,24 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR tips := &sdkws.GroupCreatedTips{ Group: resp.GroupInfo, OperationTime: group.CreateTime.UnixMilli(), - GroupOwnerUser: s.groupMemberDB2PB(groupMembers[0], userMap[groupMembers[0].UserID].AppMangerLevel), + GroupOwnerUser: g.groupMemberDB2PB(groupMembers[0], userMap[groupMembers[0].UserID].AppMangerLevel), } for _, member := range groupMembers { member.Nickname = userMap[member.UserID].Nickname - tips.MemberList = append(tips.MemberList, s.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel)) + tips.MemberList = append(tips.MemberList, g.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel)) if member.UserID == opUserID { - tips.OpUser = s.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel) + tips.OpUser = g.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel) break } } - s.notification.GroupCreatedNotification(ctx, tips) + g.notification.GroupCreatedNotification(ctx, tips) + + if req.GroupInfo.Notification != "" { + g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{ + Group: tips.Group, + OpUser: tips.OpUser, + }) + } reqCallBackAfter := &pbgroup.CreateGroupReq{ MemberUserIDs: userIDs, @@ -304,16 +318,16 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR AdminUserIDs: req.AdminUserIDs, } - s.webhookAfterCreateGroup(ctx, &s.config.WebhooksConfig.AfterCreateGroup, reqCallBackAfter) + g.webhookAfterCreateGroup(ctx, &g.config.WebhooksConfig.AfterCreateGroup, reqCallBackAfter) return resp, nil } -func (s *groupServer) GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJoinedGroupListReq) (*pbgroup.GetJoinedGroupListResp, error) { - if err := authverify.CheckAccessV3(ctx, req.FromUserID, s.config.Share.IMAdminUserID); err != nil { +func (g *groupServer) GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJoinedGroupListReq) (*pbgroup.GetJoinedGroupListResp, error) { + if err := authverify.CheckAccessV3(ctx, req.FromUserID, g.config.Share.IMAdminUserID); err != nil { return nil, err } - total, members, err := s.db.PageGetJoinGroup(ctx, req.FromUserID, req.Pagination) + total, members, err := g.db.PageGetJoinGroup(ctx, req.FromUserID, req.Pagination) if err != nil { return nil, err } @@ -325,19 +339,19 @@ func (s *groupServer) 
GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJo groupIDs := datautil.Slice(members, func(e *model.GroupMember) string { return e.GroupID }) - groups, err := s.db.FindGroup(ctx, groupIDs) + groups, err := g.db.FindGroup(ctx, groupIDs) if err != nil { return nil, err } - groupMemberNum, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNum, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { @@ -355,14 +369,14 @@ func (s *groupServer) GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJo return &resp, nil } -func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.InviteUserToGroupReq) (*pbgroup.InviteUserToGroupResp, error) { +func (g *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.InviteUserToGroupReq) (*pbgroup.InviteUserToGroupResp, error) { if len(req.InvitedUserIDs) == 0 { return nil, errs.ErrArgs.WrapMsg("user empty") } if datautil.Duplicate(req.InvitedUserIDs) { return nil, errs.ErrArgs.WrapMsg("userID duplicate") } - group, err := s.db.TakeGroup(ctx, req.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } @@ -371,7 +385,7 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite return nil, servererrs.ErrDismissedAlready.WrapMsg("group dismissed checking group status found it dismissed") } - userMap, err := s.user.GetUsersInfoMap(ctx, req.InvitedUserIDs) + userMap, err := g.user.GetUsersInfoMap(ctx, req.InvitedUserIDs) if err != nil { return nil, err } @@ -382,24 +396,24 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite var groupMember *model.GroupMember var opUserID string - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { opUserID = mcontext.GetOpUserID(ctx) var err error - groupMember, err = s.db.TakeGroupMember(ctx, req.GroupID, opUserID) + groupMember, err = g.db.TakeGroupMember(ctx, req.GroupID, opUserID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, groupMember); err != nil { + if err := g.PopulateGroupMember(ctx, groupMember); err != nil { return nil, err } } - if err := s.webhookBeforeInviteUserToGroup(ctx, &s.config.WebhooksConfig.BeforeInviteUserToGroup, req); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeInviteUserToGroup(ctx, &g.config.WebhooksConfig.BeforeInviteUserToGroup, req); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } if group.NeedVerification == constant.AllNeedVerification { - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { if !(groupMember.RoleLevel == constant.GroupOwner || groupMember.RoleLevel == constant.GroupAdmin) { var requests []*model.GroupRequest for _, userID := range req.InvitedUserIDs { @@ -412,11 +426,11 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite HandledTime: time.Unix(0, 0), }) } - if err := s.db.CreateGroupRequest(ctx, requests); err != nil { + if err := g.db.CreateGroupRequest(ctx, requests); err != 
nil { return nil, err } for _, request := range requests { - s.notification.JoinGroupApplicationNotification(ctx, &pbgroup.JoinGroupReq{ + g.notification.JoinGroupApplicationNotification(ctx, &pbgroup.JoinGroupReq{ GroupID: request.GroupID, ReqMessage: request.ReqMsg, JoinSource: request.JoinSource, @@ -443,26 +457,26 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite groupMembers = append(groupMembers, member) } - if err := s.webhookBeforeMembersJoinGroup(ctx, &s.config.WebhooksConfig.BeforeMemberJoinGroup, groupMembers, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeMembersJoinGroup(ctx, &g.config.WebhooksConfig.BeforeMemberJoinGroup, groupMembers, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - if err := s.db.CreateGroup(ctx, nil, groupMembers); err != nil { + if err := g.db.CreateGroup(ctx, nil, groupMembers); err != nil { return nil, err } - if err := s.conversationRpcClient.GroupChatFirstCreateConversation(ctx, req.GroupID, req.InvitedUserIDs); err != nil { + + if err = g.notification.MemberEnterNotification(ctx, req.GroupID, req.InvitedUserIDs...); err != nil { return nil, err } - s.notification.MemberInvitedNotification(ctx, req.GroupID, req.Reason, req.InvitedUserIDs) return &pbgroup.InviteUserToGroupResp{}, nil } -func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGroupAllMemberReq) (*pbgroup.GetGroupAllMemberResp, error) { - members, err := s.db.FindGroupMemberAll(ctx, req.GroupID) +func (g *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGroupAllMemberReq) (*pbgroup.GetGroupAllMemberResp, error) { + members, err := g.db.FindGroupMemberAll(ctx, req.GroupID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } var resp pbgroup.GetGroupAllMemberResp @@ -472,21 +486,21 @@ func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGro return &resp, nil } -func (s *groupServer) GetGroupMemberList(ctx context.Context, req *pbgroup.GetGroupMemberListReq) (*pbgroup.GetGroupMemberListResp, error) { +func (g *groupServer) GetGroupMemberList(ctx context.Context, req *pbgroup.GetGroupMemberListReq) (*pbgroup.GetGroupMemberListResp, error) { var ( total int64 members []*model.GroupMember err error ) if req.Keyword == "" { - total, members, err = s.db.PageGetGroupMember(ctx, req.GroupID, req.Pagination) + total, members, err = g.db.PageGetGroupMember(ctx, req.GroupID, req.Pagination) } else { - members, err = s.db.FindGroupMemberAll(ctx, req.GroupID) + members, err = g.db.FindGroupMemberAll(ctx, req.GroupID) } if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } if req.Keyword != "" { @@ -516,8 +530,8 @@ func (s *groupServer) GetGroupMemberList(ctx context.Context, req *pbgroup.GetGr }, nil } -func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGroupMemberReq) (*pbgroup.KickGroupMemberResp, error) { - group, err := s.db.TakeGroup(ctx, req.GroupID) +func (g *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGroupMemberReq) (*pbgroup.KickGroupMemberResp, error) { + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } @@ -531,7 +545,7 @@ func (s *groupServer) 
KickGroupMember(ctx context.Context, req *pbgroup.KickGrou if datautil.Contain(opUserID, req.KickedUserIDs...) { return nil, errs.ErrArgs.WrapMsg("opUserID in KickedUserIDs") } - owner, err := s.db.TakeGroupOwner(ctx, req.GroupID) + owner, err := g.db.TakeGroupOwner(ctx, req.GroupID) if err != nil { return nil, err } @@ -539,18 +553,18 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou return nil, errs.ErrArgs.WrapMsg("ownerUID can not Kick") } - members, err := s.db.FindGroupMembers(ctx, req.GroupID, append(req.KickedUserIDs, opUserID)) + members, err := g.db.FindGroupMembers(ctx, req.GroupID, append(req.KickedUserIDs, opUserID)) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } memberMap := make(map[string]*model.GroupMember) for i, member := range members { memberMap[member.UserID] = members[i] } - isAppManagerUid := authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) + isAppManagerUid := authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) opMember := memberMap[opUserID] for _, userID := range req.KickedUserIDs { member, ok := memberMap[userID] @@ -574,11 +588,11 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou } } } - num, err := s.db.FindGroupMemberNum(ctx, req.GroupID) + num, err := g.db.FindGroupMemberNum(ctx, req.GroupID) if err != nil { return nil, err } - ownerUserIDs, err := s.db.GetGroupRoleLevelMemberIDs(ctx, req.GroupID, constant.GroupOwner) + ownerUserIDs, err := g.db.GetGroupRoleLevelMemberIDs(ctx, req.GroupID, constant.GroupOwner) if err != nil { return nil, err } @@ -586,7 +600,7 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou if len(ownerUserIDs) > 0 { ownerUserID = ownerUserIDs[0] } - if err := s.db.DeleteGroupMember(ctx, group.GroupID, req.KickedUserIDs); err != nil { + if err := g.db.DeleteGroupMember(ctx, group.GroupID, req.KickedUserIDs); err != nil { return nil, err } tips := &sdkws.MemberKickedTips{ @@ -617,23 +631,23 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou for _, userID := range req.KickedUserIDs { tips.KickedUserList = append(tips.KickedUserList, convert.Db2PbGroupMember(memberMap[userID])) } - s.notification.MemberKickedNotification(ctx, tips) - if err := s.deleteMemberAndSetConversationSeq(ctx, req.GroupID, req.KickedUserIDs); err != nil { + g.notification.MemberKickedNotification(ctx, tips) + if err := g.deleteMemberAndSetConversationSeq(ctx, req.GroupID, req.KickedUserIDs); err != nil { return nil, err } - s.webhookAfterKickGroupMember(ctx, &s.config.WebhooksConfig.AfterKickGroupMember, req) + g.webhookAfterKickGroupMember(ctx, &g.config.WebhooksConfig.AfterKickGroupMember, req) return &pbgroup.KickGroupMemberResp{}, nil } -func (s *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetGroupMembersInfoReq) (*pbgroup.GetGroupMembersInfoResp, error) { +func (g *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetGroupMembersInfoReq) (*pbgroup.GetGroupMembersInfoResp, error) { if len(req.UserIDs) == 0 { return nil, errs.ErrArgs.WrapMsg("userIDs empty") } if req.GroupID == "" { return nil, errs.ErrArgs.WrapMsg("groupID empty") } - members, err := s.getGroupMembersInfo(ctx, req.GroupID, req.UserIDs) + members, err := g.getGroupMembersInfo(ctx, req.GroupID, req.UserIDs) if err != nil { return nil, err } @@ -642,15 +656,15 @@ func (s 
*groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetG }, nil } -func (s *groupServer) getGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*sdkws.GroupMemberFullInfo, error) { +func (g *groupServer) getGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*sdkws.GroupMemberFullInfo, error) { if len(userIDs) == 0 { return nil, nil } - members, err := s.db.FindGroupMembers(ctx, groupID, userIDs) + members, err := g.db.FindGroupMembers(ctx, groupID, userIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } return datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo { @@ -659,8 +673,8 @@ func (s *groupServer) getGroupMembersInfo(ctx context.Context, groupID string, u } // GetGroupApplicationList handles functions that get a list of group requests. -func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup.GetGroupApplicationListReq) (*pbgroup.GetGroupApplicationListResp, error) { - groupIDs, err := s.db.FindUserManagedGroupID(ctx, req.FromUserID) +func (g *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup.GetGroupApplicationListReq) (*pbgroup.GetGroupApplicationListResp, error) { + groupIDs, err := g.db.FindUserManagedGroupID(ctx, req.FromUserID) if err != nil { return nil, err } @@ -668,7 +682,7 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup. if len(groupIDs) == 0 { return resp, nil } - total, groupRequests, err := s.db.PageGroupRequest(ctx, groupIDs, req.Pagination) + total, groupRequests, err := g.db.PageGroupRequest(ctx, groupIDs, req.Pagination) if err != nil { return nil, err } @@ -682,11 +696,11 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup. userIDs = append(userIDs, gr.UserID) } userIDs = datautil.Distinct(userIDs) - userMap, err := s.user.GetPublicUserInfoMap(ctx, userIDs, true) + userMap, err := g.user.GetPublicUserInfoMap(ctx, userIDs, true) if err != nil { return nil, err } - groups, err := s.db.FindGroup(ctx, datautil.Distinct(groupIDs)) + groups, err := g.db.FindGroup(ctx, datautil.Distinct(groupIDs)) if err != nil { return nil, err } @@ -696,15 +710,15 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup. if ids := datautil.Single(datautil.Keys(groupMap), groupIDs); len(ids) > 0 { return nil, servererrs.ErrGroupIDNotFound.WrapMsg(strings.Join(ids, ",")) } - groupMemberNumMap, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNumMap, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owners...); err != nil { + if err := g.PopulateGroupMember(ctx, owners...); err != nil { return nil, err } ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { @@ -720,11 +734,11 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup. 
return resp, nil } -func (s *groupServer) GetGroupsInfo(ctx context.Context, req *pbgroup.GetGroupsInfoReq) (*pbgroup.GetGroupsInfoResp, error) { +func (g *groupServer) GetGroupsInfo(ctx context.Context, req *pbgroup.GetGroupsInfoReq) (*pbgroup.GetGroupsInfoResp, error) { if len(req.GroupIDs) == 0 { return nil, errs.ErrArgs.WrapMsg("groupID is empty") } - groups, err := s.getGroupsInfo(ctx, req.GroupIDs) + groups, err := g.getGroupsInfo(ctx, req.GroupIDs) if err != nil { return nil, err } @@ -733,23 +747,23 @@ func (s *groupServer) GetGroupsInfo(ctx context.Context, req *pbgroup.GetGroupsI }, nil } -func (s *groupServer) getGroupsInfo(ctx context.Context, groupIDs []string) ([]*sdkws.GroupInfo, error) { +func (g *groupServer) getGroupsInfo(ctx context.Context, groupIDs []string) ([]*sdkws.GroupInfo, error) { if len(groupIDs) == 0 { return nil, nil } - groups, err := s.db.FindGroup(ctx, groupIDs) + groups, err := g.db.FindGroup(ctx, groupIDs) if err != nil { return nil, err } - groupMemberNumMap, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNumMap, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owners...); err != nil { + if err := g.PopulateGroupMember(ctx, owners...); err != nil { return nil, err } ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { @@ -764,12 +778,12 @@ func (s *groupServer) getGroupsInfo(ctx context.Context, groupIDs []string) ([]* }), nil } -func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup.GroupApplicationResponseReq) (*pbgroup.GroupApplicationResponseResp, error) { +func (g *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup.GroupApplicationResponseReq) (*pbgroup.GroupApplicationResponseResp, error) { if !datautil.Contain(req.HandleResult, constant.GroupResponseAgree, constant.GroupResponseRefuse) { return nil, errs.ErrArgs.WrapMsg("HandleResult unknown") } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { - groupMember, err := s.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + groupMember, err := g.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) if err != nil { return nil, err } @@ -777,11 +791,11 @@ func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup return nil, errs.ErrNoPermission.WrapMsg("no group owner or admin") } } - group, err := s.db.TakeGroup(ctx, req.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } - groupRequest, err := s.db.TakeGroupRequest(ctx, req.GroupID, req.FromUserID) + groupRequest, err := g.db.TakeGroupRequest(ctx, req.GroupID, req.FromUserID) if err != nil { return nil, err } @@ -789,12 +803,12 @@ func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup return nil, servererrs.ErrGroupRequestHandled.WrapMsg("group request already processed") } var inGroup bool - if _, err := s.db.TakeGroupMember(ctx, req.GroupID, req.FromUserID); err == nil { + if _, err := g.db.TakeGroupMember(ctx, req.GroupID, req.FromUserID); err == nil { inGroup = true // Already in group - } else if !s.IsNotFound(err) { + } else if !g.IsNotFound(err) { return nil, err } - if _, err := s.user.GetPublicUserInfo(ctx, req.FromUserID); err != nil { + if _, err := 
g.user.GetPublicUserInfo(ctx, req.FromUserID); err != nil { return nil, err } var member *model.GroupMember @@ -812,38 +826,37 @@ func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup OperatorUserID: mcontext.GetOpUserID(ctx), } - if err := s.webhookBeforeMembersJoinGroup(ctx, &s.config.WebhooksConfig.BeforeMemberJoinGroup, []*model.GroupMember{member}, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeMembersJoinGroup(ctx, &g.config.WebhooksConfig.BeforeMemberJoinGroup, []*model.GroupMember{member}, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } } log.ZDebug(ctx, "GroupApplicationResponse", "inGroup", inGroup, "HandleResult", req.HandleResult, "member", member) - if err := s.db.HandlerGroupRequest(ctx, req.GroupID, req.FromUserID, req.HandledMsg, req.HandleResult, member); err != nil { + if err := g.db.HandlerGroupRequest(ctx, req.GroupID, req.FromUserID, req.HandledMsg, req.HandleResult, member); err != nil { return nil, err } switch req.HandleResult { case constant.GroupResponseAgree: - if err := s.conversationRpcClient.GroupChatFirstCreateConversation(ctx, req.GroupID, []string{req.FromUserID}); err != nil { - return nil, err - } - s.notification.GroupApplicationAcceptedNotification(ctx, req) + g.notification.GroupApplicationAcceptedNotification(ctx, req) if member == nil { log.ZDebug(ctx, "GroupApplicationResponse", "member is nil") } else { - s.notification.MemberEnterNotification(ctx, req.GroupID, req.FromUserID) + if err = g.notification.GroupApplicationAgreeMemberEnterNotification(ctx, req.GroupID, groupRequest.InviterUserID, req.FromUserID); err != nil { + return nil, err + } } case constant.GroupResponseRefuse: - s.notification.GroupApplicationRejectedNotification(ctx, req) + g.notification.GroupApplicationRejectedNotification(ctx, req) } return &pbgroup.GroupApplicationResponseResp{}, nil } -func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) (*pbgroup.JoinGroupResp, error) { - user, err := s.user.GetUserInfo(ctx, req.InviterUserID) +func (g *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) (*pbgroup.JoinGroupResp, error) { + user, err := g.user.GetUserInfo(ctx, req.InviterUserID) if err != nil { return nil, err } - group, err := s.db.TakeGroup(ctx, req.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } @@ -859,14 +872,14 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) Ex: req.Ex, } - if err := s.webhookBeforeApplyJoinGroup(ctx, &s.config.WebhooksConfig.BeforeApplyJoinGroup, reqCall); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeApplyJoinGroup(ctx, &g.config.WebhooksConfig.BeforeApplyJoinGroup, reqCall); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - _, err = s.db.TakeGroupMember(ctx, req.GroupID, req.InviterUserID) + _, err = g.db.TakeGroupMember(ctx, req.GroupID, req.InviterUserID) if err == nil { return nil, errs.ErrArgs.Wrap() - } else if !s.IsNotFound(err) && errs.Unwrap(err) != errs.ErrRecordNotFound { + } else if !g.IsNotFound(err) && errs.Unwrap(err) != errs.ErrRecordNotFound { return nil, err } log.ZDebug(ctx, "JoinGroup.groupInfo", "group", group, "eq", group.NeedVerification == constant.Directly) @@ -881,19 +894,18 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) MuteEndTime: time.UnixMilli(0), } - if err := 
s.webhookBeforeMembersJoinGroup(ctx, &s.config.WebhooksConfig.BeforeMemberJoinGroup, []*model.GroupMember{groupMember}, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeMembersJoinGroup(ctx, &g.config.WebhooksConfig.BeforeMemberJoinGroup, []*model.GroupMember{groupMember}, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - if err := s.db.CreateGroup(ctx, nil, []*model.GroupMember{groupMember}); err != nil { + if err := g.db.CreateGroup(ctx, nil, []*model.GroupMember{groupMember}); err != nil { return nil, err } - if err := s.conversationRpcClient.GroupChatFirstCreateConversation(ctx, req.GroupID, []string{req.InviterUserID}); err != nil { + if err = g.notification.MemberEnterNotification(ctx, req.GroupID, req.InviterUserID); err != nil { return nil, err } - s.notification.MemberEnterNotification(ctx, req.GroupID, req.InviterUserID) - s.webhookAfterJoinGroup(ctx, &s.config.WebhooksConfig.AfterJoinGroup, req) + g.webhookAfterJoinGroup(ctx, &g.config.WebhooksConfig.AfterJoinGroup, req) return &pbgroup.JoinGroupResp{}, nil } @@ -907,74 +919,74 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) HandledTime: time.Unix(0, 0), Ex: req.Ex, } - if err = s.db.CreateGroupRequest(ctx, []*model.GroupRequest{&groupRequest}); err != nil { + if err = g.db.CreateGroupRequest(ctx, []*model.GroupRequest{&groupRequest}); err != nil { return nil, err } - s.notification.JoinGroupApplicationNotification(ctx, req) + g.notification.JoinGroupApplicationNotification(ctx, req) return &pbgroup.JoinGroupResp{}, nil } -func (s *groupServer) QuitGroup(ctx context.Context, req *pbgroup.QuitGroupReq) (*pbgroup.QuitGroupResp, error) { +func (g *groupServer) QuitGroup(ctx context.Context, req *pbgroup.QuitGroupReq) (*pbgroup.QuitGroupResp, error) { if req.UserID == "" { req.UserID = mcontext.GetOpUserID(ctx) } else { - if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.UserID, g.config.Share.IMAdminUserID); err != nil { return nil, err } } - member, err := s.db.TakeGroupMember(ctx, req.GroupID, req.UserID) + member, err := g.db.TakeGroupMember(ctx, req.GroupID, req.UserID) if err != nil { return nil, err } if member.RoleLevel == constant.GroupOwner { return nil, errs.ErrNoPermission.WrapMsg("group owner can't quit") } - if err := s.PopulateGroupMember(ctx, member); err != nil { + if err := g.PopulateGroupMember(ctx, member); err != nil { return nil, err } - err = s.db.DeleteGroupMember(ctx, req.GroupID, []string{req.UserID}) + err = g.db.DeleteGroupMember(ctx, req.GroupID, []string{req.UserID}) if err != nil { return nil, err } - s.notification.MemberQuitNotification(ctx, s.groupMemberDB2PB(member, 0)) - if err := s.deleteMemberAndSetConversationSeq(ctx, req.GroupID, []string{req.UserID}); err != nil { + g.notification.MemberQuitNotification(ctx, g.groupMemberDB2PB(member, 0)) + if err := g.deleteMemberAndSetConversationSeq(ctx, req.GroupID, []string{req.UserID}); err != nil { return nil, err } - s.webhookAfterQuitGroup(ctx, &s.config.WebhooksConfig.AfterQuitGroup, req) + g.webhookAfterQuitGroup(ctx, &g.config.WebhooksConfig.AfterQuitGroup, req) return &pbgroup.QuitGroupResp{}, nil } -func (s *groupServer) deleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { +func (g *groupServer) deleteMemberAndSetConversationSeq(ctx context.Context, groupID string, 
userIDs []string) error { conevrsationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) - maxSeq, err := s.msgRpcClient.GetConversationMaxSeq(ctx, conevrsationID) + maxSeq, err := g.msgRpcClient.GetConversationMaxSeq(ctx, conevrsationID) if err != nil { return err } - return s.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conevrsationID, maxSeq) + return g.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conevrsationID, maxSeq) } -func (s *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInfoReq) (*pbgroup.SetGroupInfoResp, error) { +func (g *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInfoReq) (*pbgroup.SetGroupInfoResp, error) { var opMember *model.GroupMember - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { var err error - opMember, err = s.db.TakeGroupMember(ctx, req.GroupInfoForSet.GroupID, mcontext.GetOpUserID(ctx)) + opMember, err = g.db.TakeGroupMember(ctx, req.GroupInfoForSet.GroupID, mcontext.GetOpUserID(ctx)) if err != nil { return nil, err } if !(opMember.RoleLevel == constant.GroupOwner || opMember.RoleLevel == constant.GroupAdmin) { return nil, errs.ErrNoPermission.WrapMsg("no group owner or admin") } - if err := s.PopulateGroupMember(ctx, opMember); err != nil { + if err := g.PopulateGroupMember(ctx, opMember); err != nil { return nil, err } } - if err := s.webhookBeforeSetGroupInfo(ctx, &s.config.WebhooksConfig.BeforeSetGroupInfo, req); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeSetGroupInfo(ctx, &g.config.WebhooksConfig.BeforeSetGroupInfo, req); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - group, err := s.db.TakeGroup(ctx, req.GroupInfoForSet.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupInfoForSet.GroupID) if err != nil { return nil, err } @@ -982,35 +994,35 @@ func (s *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInf return nil, servererrs.ErrDismissedAlready.Wrap() } - count, err := s.db.FindGroupMemberNum(ctx, group.GroupID) + count, err := g.db.FindGroupMemberNum(ctx, group.GroupID) if err != nil { return nil, err } - owner, err := s.db.TakeGroupOwner(ctx, group.GroupID) + owner, err := g.db.TakeGroupOwner(ctx, group.GroupID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owner); err != nil { + if err := g.PopulateGroupMember(ctx, owner); err != nil { return nil, err } update := UpdateGroupInfoMap(ctx, req.GroupInfoForSet) if len(update) == 0 { return &pbgroup.SetGroupInfoResp{}, nil } - if err := s.db.UpdateGroup(ctx, group.GroupID, update); err != nil { + if err := g.db.UpdateGroup(ctx, group.GroupID, update); err != nil { return nil, err } - group, err = s.db.TakeGroup(ctx, req.GroupInfoForSet.GroupID) + group, err = g.db.TakeGroup(ctx, req.GroupInfoForSet.GroupID) if err != nil { return nil, err } tips := &sdkws.GroupInfoSetTips{ - Group: s.groupDB2PB(group, owner.UserID, count), + Group: g.groupDB2PB(group, owner.UserID, count), MuteTime: 0, OpUser: &sdkws.GroupMemberFullInfo{}, } if opMember != nil { - tips.OpUser = s.groupMemberDB2PB(opMember, 0) + tips.OpUser = g.groupMemberDB2PB(opMember, 0) } num := len(update) if req.GroupInfoForSet.Notification != "" { @@ -1021,33 +1033,146 @@ func (s *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInf ConversationType: constant.ReadGroupChatType, GroupID: req.GroupInfoForSet.GroupID, } - 
resp, err := s.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupInfoForSet.GroupID}) + resp, err := g.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupInfoForSet.GroupID}) if err != nil { - log.ZWarn(ctx, "GetGroupMemberIDs", err) + log.ZWarn(ctx, "GetGroupMemberIDs is failed.", err) return } conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification} - if err := s.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { - log.ZWarn(ctx, "SetConversations", err, resp.UserIDs, conversation) + if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { + log.ZWarn(ctx, "SetConversations", err, "UserIDs", resp.UserIDs, "conversation", conversation) } }() - s.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser}) + g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser}) } if req.GroupInfoForSet.GroupName != "" { num-- - s.notification.GroupInfoSetNameNotification(ctx, &sdkws.GroupInfoSetNameTips{Group: tips.Group, OpUser: tips.OpUser}) + g.notification.GroupInfoSetNameNotification(ctx, &sdkws.GroupInfoSetNameTips{Group: tips.Group, OpUser: tips.OpUser}) } if num > 0 { - s.notification.GroupInfoSetNotification(ctx, tips) + g.notification.GroupInfoSetNotification(ctx, tips) } - s.webhookAfterSetGroupInfo(ctx, &s.config.WebhooksConfig.AfterSetGroupInfo, req) + g.webhookAfterSetGroupInfo(ctx, &g.config.WebhooksConfig.AfterSetGroupInfo, req) return &pbgroup.SetGroupInfoResp{}, nil } -func (s *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.TransferGroupOwnerReq) (*pbgroup.TransferGroupOwnerResp, error) { - group, err := s.db.TakeGroup(ctx, req.GroupID) +func (g *groupServer) SetGroupInfoEX(ctx context.Context, req *pbgroup.SetGroupInfoEXReq) (*pbgroup.SetGroupInfoEXResp, error) { + var opMember *model.GroupMember + + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + var err error + + opMember, err = g.db.TakeGroupMember(ctx, req.GroupInfoForSetEX.GroupID, mcontext.GetOpUserID(ctx)) + if err != nil { + return nil, err + } + + if !(opMember.RoleLevel == constant.GroupOwner || opMember.RoleLevel == constant.GroupAdmin) { + return nil, errs.ErrNoPermission.WrapMsg("no group owner or admin") + } + + if err := g.PopulateGroupMember(ctx, opMember); err != nil { + return nil, err + } + } + + if err := g.webhookBeforeSetGroupInfoEX(ctx, &g.config.WebhooksConfig.BeforeSetGroupInfoEX, req); err != nil && err != servererrs.ErrCallbackContinue { + return nil, err + } + + group, err := g.db.TakeGroup(ctx, req.GroupInfoForSetEX.GroupID) + if err != nil { + return nil, err + } + if group.Status == constant.GroupStatusDismissed { + return nil, servererrs.ErrDismissedAlready.Wrap() + } + + count, err := g.db.FindGroupMemberNum(ctx, group.GroupID) + if err != nil { + return nil, err + } + + owner, err := g.db.TakeGroupOwner(ctx, group.GroupID) + if err != nil { + return nil, err + } + + if err := g.PopulateGroupMember(ctx, owner); err != nil { + return nil, err + } + + updatedData := UpdateGroupInfoEXMap(ctx, req.GroupInfoForSetEX) + if len(updatedData) == 0 { + return &pbgroup.SetGroupInfoEXResp{}, nil + } + + if err := g.db.UpdateGroup(ctx, group.GroupID, updatedData); err != nil { + return nil, err + } + + group, err = g.db.TakeGroup(ctx, req.GroupInfoForSetEX.GroupID) + if 
err != nil { + return nil, err + } + + tips := &sdkws.GroupInfoSetTips{ + Group: g.groupDB2PB(group, owner.UserID, count), + MuteTime: 0, + OpUser: &sdkws.GroupMemberFullInfo{}, + } + + if opMember != nil { + tips.OpUser = g.groupMemberDB2PB(opMember, 0) + } + + num := len(updatedData) + if req.GroupInfoForSetEX.Notification != nil { + num-- + + if req.GroupInfoForSetEX.Notification.Value != "" { + func() { + conversation := &pbconversation.ConversationReq{ + ConversationID: msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, req.GroupInfoForSetEX.GroupID), + ConversationType: constant.ReadGroupChatType, + GroupID: req.GroupInfoForSetEX.GroupID, + } + + resp, err := g.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupInfoForSetEX.GroupID}) + if err != nil { + log.ZWarn(ctx, "GetGroupMemberIDs is failed.", err) + return + } + + conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification} + + if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { + log.ZWarn(ctx, "SetConversations", err, "UserIDs", resp.UserIDs, "conversation", conversation) + } + }() + + g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser}) + } + } + + if req.GroupInfoForSetEX.GroupName != "" { + num-- + g.notification.GroupInfoSetNameNotification(ctx, &sdkws.GroupInfoSetNameTips{Group: tips.Group, OpUser: tips.OpUser}) + } + + if num > 0 { + g.notification.GroupInfoSetNotification(ctx, tips) + } + + g.webhookAfterSetGroupInfoEX(ctx, &g.config.WebhooksConfig.AfterSetGroupInfoEX, req) + + return &pbgroup.SetGroupInfoEXResp{}, nil +} + +func (g *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.TransferGroupOwnerReq) (*pbgroup.TransferGroupOwnerResp, error) { + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } @@ -1057,11 +1182,11 @@ func (s *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.Trans if req.OldOwnerUserID == req.NewOwnerUserID { return nil, errs.ErrArgs.WrapMsg("OldOwnerUserID == NewOwnerUserID") } - members, err := s.db.FindGroupMembers(ctx, req.GroupID, []string{req.OldOwnerUserID, req.NewOwnerUserID}) + members, err := g.db.FindGroupMembers(ctx, req.GroupID, []string{req.OldOwnerUserID, req.NewOwnerUserID}) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } memberMap := datautil.SliceToMap(members, func(e *model.GroupMember) string { return e.UserID }) @@ -1076,33 +1201,33 @@ func (s *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.Trans if newOwner == nil { return nil, errs.ErrArgs.WrapMsg("NewOwnerUser not in group " + req.NewOwnerUserID) } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { if !(mcontext.GetOpUserID(ctx) == oldOwner.UserID && oldOwner.RoleLevel == constant.GroupOwner) { return nil, errs.ErrNoPermission.WrapMsg("no permission transfer group owner") } } - if err := s.db.TransferGroupOwner(ctx, req.GroupID, req.OldOwnerUserID, req.NewOwnerUserID, newOwner.RoleLevel); err != nil { + if err := g.db.TransferGroupOwner(ctx, req.GroupID, req.OldOwnerUserID, req.NewOwnerUserID, newOwner.RoleLevel); err != nil { return nil, err } - s.webhookAfterTransferGroupOwner(ctx, 
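
Editorial note on the SetGroupInfoEX handler added above: it persists only the fields the caller actually set, by building a partial-update map from the wrapperspb-wrapped request fields. The UpdateGroupInfoEXMap helper itself is not part of this diff; the sketch below is an assumption about its general shape (the map keys are placeholders, and only GroupName and Notification are confirmed by the hunk above).

package example

import (
	pbgroup "github.com/openimsdk/protocol/group"
)

// buildGroupInfoEXUpdate is an editor's sketch, not project code: only explicitly
// provided fields end up in the update map, so omitted wrapper values never
// overwrite stored data. Map keys are illustrative placeholders.
func buildGroupInfoEXUpdate(req *pbgroup.SetGroupInfoEXReq) map[string]any {
	update := make(map[string]any)
	info := req.GroupInfoForSetEX
	if info.GroupName != "" {
		update["group_name"] = info.GroupName
	}
	if info.Notification != nil { // a nil wrapper value means "not provided"
		update["notification"] = info.Notification.Value
	}
	return update
}
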
&s.config.WebhooksConfig.AfterTransferGroupOwner, req) + g.webhookAfterTransferGroupOwner(ctx, &g.config.WebhooksConfig.AfterTransferGroupOwner, req) - s.notification.GroupOwnerTransferredNotification(ctx, req) + g.notification.GroupOwnerTransferredNotification(ctx, req) return &pbgroup.TransferGroupOwnerResp{}, nil } -func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) (*pbgroup.GetGroupsResp, error) { +func (g *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) (*pbgroup.GetGroupsResp, error) { var ( group []*model.Group err error ) var resp pbgroup.GetGroupsResp if req.GroupID != "" { - group, err = s.db.FindGroup(ctx, []string{req.GroupID}) + group, err = g.db.FindGroup(ctx, []string{req.GroupID}) resp.Total = uint32(len(group)) } else { var total int64 - total, group, err = s.db.SearchGroup(ctx, req.GroupName, req.Pagination) + total, group, err = g.db.SearchGroup(ctx, req.GroupName, req.Pagination) resp.Total = uint32(total) } @@ -1114,7 +1239,7 @@ func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) return e.GroupID }) - ownerMembers, err := s.db.FindGroupsOwner(ctx, groupIDs) + ownerMembers, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } @@ -1122,7 +1247,7 @@ func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) ownerMemberMap := datautil.SliceToMap(ownerMembers, func(e *model.GroupMember) string { return e.GroupID }) - groupMemberNumMap, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNumMap, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } @@ -1140,14 +1265,14 @@ func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) return &resp, nil } -func (s *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGroupMembersCMSReq) (*pbgroup.GetGroupMembersCMSResp, error) { - total, members, err := s.db.SearchGroupMember(ctx, req.UserName, req.GroupID, req.Pagination) +func (g *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGroupMembersCMSReq) (*pbgroup.GetGroupMembersCMSResp, error) { + total, members, err := g.db.SearchGroupMember(ctx, req.UserName, req.GroupID, req.Pagination) if err != nil { return nil, err } var resp pbgroup.GetGroupMembersCMSResp resp.Total = uint32(total) - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } resp.Members = datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo { @@ -1156,12 +1281,12 @@ func (s *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGr return &resp, nil } -func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgroup.GetUserReqApplicationListReq) (*pbgroup.GetUserReqApplicationListResp, error) { - user, err := s.user.GetPublicUserInfo(ctx, req.UserID) +func (g *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgroup.GetUserReqApplicationListReq) (*pbgroup.GetUserReqApplicationListResp, error) { + user, err := g.user.GetPublicUserInfo(ctx, req.UserID) if err != nil { return nil, err } - total, requests, err := s.db.PageGroupRequestUser(ctx, req.UserID, req.Pagination) + total, requests, err := g.db.PageGroupRequestUser(ctx, req.UserID, req.Pagination) if err != nil { return nil, err } @@ -1171,24 +1296,24 @@ func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgrou groupIDs := datautil.Distinct(datautil.Slice(requests, 
func(e *model.GroupRequest) string { return e.GroupID })) - groups, err := s.db.FindGroup(ctx, groupIDs) + groups, err := g.db.FindGroup(ctx, groupIDs) if err != nil { return nil, err } groupMap := datautil.SliceToMap(groups, func(e *model.Group) string { return e.GroupID }) - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owners...); err != nil { + if err := g.PopulateGroupMember(ctx, owners...); err != nil { return nil, err } ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { return e.GroupID }) - groupMemberNum, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNum, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } @@ -1204,44 +1329,44 @@ func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgrou }, nil } -func (s *groupServer) DismissGroup(ctx context.Context, req *pbgroup.DismissGroupReq) (*pbgroup.DismissGroupResp, error) { - owner, err := s.db.TakeGroupOwner(ctx, req.GroupID) +func (g *groupServer) DismissGroup(ctx context.Context, req *pbgroup.DismissGroupReq) (*pbgroup.DismissGroupResp, error) { + owner, err := g.db.TakeGroupOwner(ctx, req.GroupID) if err != nil { return nil, err } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { if owner.UserID != mcontext.GetOpUserID(ctx) { return nil, errs.ErrNoPermission.WrapMsg("not group owner") } } - if err := s.PopulateGroupMember(ctx, owner); err != nil { + if err := g.PopulateGroupMember(ctx, owner); err != nil { return nil, err } - group, err := s.db.TakeGroup(ctx, req.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } if !req.DeleteMember && group.Status == constant.GroupStatusDismissed { return nil, servererrs.ErrDismissedAlready.WrapMsg("group status is dismissed") } - if err := s.db.DismissGroup(ctx, req.GroupID, req.DeleteMember); err != nil { + if err := g.db.DismissGroup(ctx, req.GroupID, req.DeleteMember); err != nil { return nil, err } if !req.DeleteMember { - num, err := s.db.FindGroupMemberNum(ctx, req.GroupID) + num, err := g.db.FindGroupMemberNum(ctx, req.GroupID) if err != nil { return nil, err } tips := &sdkws.GroupDismissedTips{ - Group: s.groupDB2PB(group, owner.UserID, num), + Group: g.groupDB2PB(group, owner.UserID, num), OpUser: &sdkws.GroupMemberFullInfo{}, } if mcontext.GetOpUserID(ctx) == owner.UserID { - tips.OpUser = s.groupMemberDB2PB(owner, 0) + tips.OpUser = g.groupMemberDB2PB(owner, 0) } - s.notification.GroupDismissedNotification(ctx, tips) + g.notification.GroupDismissedNotification(ctx, tips) } - membersID, err := s.db.FindGroupMemberUserID(ctx, group.GroupID) + membersID, err := g.db.FindGroupMemberUserID(ctx, group.GroupID) if err != nil { return nil, err } @@ -1252,21 +1377,21 @@ func (s *groupServer) DismissGroup(ctx context.Context, req *pbgroup.DismissGrou GroupType: string(group.GroupType), } - s.webhookAfterDismissGroup(ctx, &s.config.WebhooksConfig.AfterDismissGroup, cbReq) + g.webhookAfterDismissGroup(ctx, &g.config.WebhooksConfig.AfterDismissGroup, cbReq) return &pbgroup.DismissGroupResp{}, nil } -func (s *groupServer) MuteGroupMember(ctx context.Context, req *pbgroup.MuteGroupMemberReq) (*pbgroup.MuteGroupMemberResp, error) { - member, err := s.db.TakeGroupMember(ctx, req.GroupID, req.UserID) +func (g *groupServer) MuteGroupMember(ctx context.Context, 
req *pbgroup.MuteGroupMemberReq) (*pbgroup.MuteGroupMemberResp, error) { + member, err := g.db.TakeGroupMember(ctx, req.GroupID, req.UserID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, member); err != nil { + if err := g.PopulateGroupMember(ctx, member); err != nil { return nil, err } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { - opMember, err := s.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + opMember, err := g.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) if err != nil { return nil, err } @@ -1284,23 +1409,23 @@ func (s *groupServer) MuteGroupMember(ctx context.Context, req *pbgroup.MuteGrou } } data := UpdateGroupMemberMutedTimeMap(time.Now().Add(time.Second * time.Duration(req.MutedSeconds))) - if err := s.db.UpdateGroupMember(ctx, member.GroupID, member.UserID, data); err != nil { + if err := g.db.UpdateGroupMember(ctx, member.GroupID, member.UserID, data); err != nil { return nil, err } - s.notification.GroupMemberMutedNotification(ctx, req.GroupID, req.UserID, req.MutedSeconds) + g.notification.GroupMemberMutedNotification(ctx, req.GroupID, req.UserID, req.MutedSeconds) return &pbgroup.MuteGroupMemberResp{}, nil } -func (s *groupServer) CancelMuteGroupMember(ctx context.Context, req *pbgroup.CancelMuteGroupMemberReq) (*pbgroup.CancelMuteGroupMemberResp, error) { - member, err := s.db.TakeGroupMember(ctx, req.GroupID, req.UserID) +func (g *groupServer) CancelMuteGroupMember(ctx context.Context, req *pbgroup.CancelMuteGroupMemberReq) (*pbgroup.CancelMuteGroupMemberResp, error) { + member, err := g.db.TakeGroupMember(ctx, req.GroupID, req.UserID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, member); err != nil { + if err := g.PopulateGroupMember(ctx, member); err != nil { return nil, err } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { - opMember, err := s.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + opMember, err := g.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) if err != nil { return nil, err } @@ -1318,36 +1443,36 @@ func (s *groupServer) CancelMuteGroupMember(ctx context.Context, req *pbgroup.Ca } } data := UpdateGroupMemberMutedTimeMap(time.Unix(0, 0)) - if err := s.db.UpdateGroupMember(ctx, member.GroupID, member.UserID, data); err != nil { + if err := g.db.UpdateGroupMember(ctx, member.GroupID, member.UserID, data); err != nil { return nil, err } - s.notification.GroupMemberCancelMutedNotification(ctx, req.GroupID, req.UserID) + g.notification.GroupMemberCancelMutedNotification(ctx, req.GroupID, req.UserID) return &pbgroup.CancelMuteGroupMemberResp{}, nil } -func (s *groupServer) MuteGroup(ctx context.Context, req *pbgroup.MuteGroupReq) (*pbgroup.MuteGroupResp, error) { - if err := s.CheckGroupAdmin(ctx, req.GroupID); err != nil { +func (g *groupServer) MuteGroup(ctx context.Context, req *pbgroup.MuteGroupReq) (*pbgroup.MuteGroupResp, error) { + if err := g.CheckGroupAdmin(ctx, req.GroupID); err != nil { return nil, err } - if err := s.db.UpdateGroup(ctx, req.GroupID, UpdateGroupStatusMap(constant.GroupStatusMuted)); err != nil { + if err := g.db.UpdateGroup(ctx, req.GroupID, UpdateGroupStatusMap(constant.GroupStatusMuted)); err != nil { return nil, err } - s.notification.GroupMutedNotification(ctx, req.GroupID) + g.notification.GroupMutedNotification(ctx, 
req.GroupID) return &pbgroup.MuteGroupResp{}, nil } -func (s *groupServer) CancelMuteGroup(ctx context.Context, req *pbgroup.CancelMuteGroupReq) (*pbgroup.CancelMuteGroupResp, error) { - if err := s.CheckGroupAdmin(ctx, req.GroupID); err != nil { +func (g *groupServer) CancelMuteGroup(ctx context.Context, req *pbgroup.CancelMuteGroupReq) (*pbgroup.CancelMuteGroupResp, error) { + if err := g.CheckGroupAdmin(ctx, req.GroupID); err != nil { return nil, err } - if err := s.db.UpdateGroup(ctx, req.GroupID, UpdateGroupStatusMap(constant.GroupOk)); err != nil { + if err := g.db.UpdateGroup(ctx, req.GroupID, UpdateGroupStatusMap(constant.GroupOk)); err != nil { return nil, err } - s.notification.GroupCancelMutedNotification(ctx, req.GroupID) + g.notification.GroupCancelMutedNotification(ctx, req.GroupID) return &pbgroup.CancelMuteGroupResp{}, nil } -func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGroupMemberInfoReq) (*pbgroup.SetGroupMemberInfoResp, error) { +func (g *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGroupMemberInfoReq) (*pbgroup.SetGroupMemberInfoResp, error) { if len(req.Members) == 0 { return nil, errs.ErrArgs.WrapMsg("members empty") } @@ -1355,7 +1480,7 @@ func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGr if opUserID == "" { return nil, errs.ErrNoPermission.WrapMsg("no op user id") } - isAppManagerUid := authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) + isAppManagerUid := authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) for i := range req.Members { req.Members[i].FaceURL = nil } @@ -1385,7 +1510,7 @@ func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGr if _, ok := temp[opUserID]; !ok { userIDs = append(userIDs, opUserID) } - dbMembers, err := s.db.FindGroupMembers(ctx, groupID, userIDs) + dbMembers, err := g.db.FindGroupMembers(ctx, groupID, userIDs) if err != nil { return nil, err } @@ -1440,12 +1565,12 @@ func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGr for i := 0; i < len(req.Members); i++ { - if err := s.webhookBeforeSetGroupMemberInfo(ctx, &s.config.WebhooksConfig.BeforeSetGroupMemberInfo, req.Members[i]); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeSetGroupMemberInfo(ctx, &g.config.WebhooksConfig.BeforeSetGroupMemberInfo, req.Members[i]); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } } - if err := s.db.UpdateGroupMembers(ctx, datautil.Slice(req.Members, func(e *pbgroup.SetGroupMemberInfo) *common.BatchUpdateGroupMember { + if err := g.db.UpdateGroupMembers(ctx, datautil.Slice(req.Members, func(e *pbgroup.SetGroupMemberInfo) *common.BatchUpdateGroupMember { return &common.BatchUpdateGroupMember{ GroupID: e.GroupID, UserID: e.UserID, @@ -1458,30 +1583,30 @@ func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGr if member.RoleLevel != nil { switch member.RoleLevel.Value { case constant.GroupAdmin: - s.notification.GroupMemberSetToAdminNotification(ctx, member.GroupID, member.UserID) + g.notification.GroupMemberSetToAdminNotification(ctx, member.GroupID, member.UserID) case constant.GroupOrdinaryUsers: - s.notification.GroupMemberSetToOrdinaryUserNotification(ctx, member.GroupID, member.UserID) + g.notification.GroupMemberSetToOrdinaryUserNotification(ctx, member.GroupID, member.UserID) } } if member.Nickname != nil || member.FaceURL != nil || member.Ex != nil { - s.notification.GroupMemberInfoSetNotification(ctx, 
member.GroupID, member.UserID) + g.notification.GroupMemberInfoSetNotification(ctx, member.GroupID, member.UserID) } } for i := 0; i < len(req.Members); i++ { - s.webhookAfterSetGroupMemberInfo(ctx, &s.config.WebhooksConfig.AfterSetGroupMemberInfo, req.Members[i]) + g.webhookAfterSetGroupMemberInfo(ctx, &g.config.WebhooksConfig.AfterSetGroupMemberInfo, req.Members[i]) } return &pbgroup.SetGroupMemberInfoResp{}, nil } -func (s *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.GetGroupAbstractInfoReq) (*pbgroup.GetGroupAbstractInfoResp, error) { +func (g *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.GetGroupAbstractInfoReq) (*pbgroup.GetGroupAbstractInfoResp, error) { if len(req.GroupIDs) == 0 { return nil, errs.ErrArgs.WrapMsg("groupIDs empty") } if datautil.Duplicate(req.GroupIDs) { return nil, errs.ErrArgs.WrapMsg("groupIDs duplicate") } - groups, err := s.db.FindGroup(ctx, req.GroupIDs) + groups, err := g.db.FindGroup(ctx, req.GroupIDs) if err != nil { return nil, err } @@ -1490,7 +1615,7 @@ func (s *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.Get })); len(ids) > 0 { return nil, servererrs.ErrGroupIDNotFound.WrapMsg("not found group " + strings.Join(ids, ",")) } - groupUserMap, err := s.db.MapGroupMemberUserID(ctx, req.GroupIDs) + groupUserMap, err := g.db.MapGroupMemberUserID(ctx, req.GroupIDs) if err != nil { return nil, err } @@ -1505,15 +1630,15 @@ func (s *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.Get }, nil } -func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.GetUserInGroupMembersReq) (*pbgroup.GetUserInGroupMembersResp, error) { +func (g *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.GetUserInGroupMembersReq) (*pbgroup.GetUserInGroupMembersResp, error) { if len(req.GroupIDs) == 0 { return nil, errs.ErrArgs.WrapMsg("groupIDs empty") } - members, err := s.db.FindGroupMemberUser(ctx, req.GroupIDs, req.UserID) + members, err := g.db.FindGroupMemberUser(ctx, req.GroupIDs, req.UserID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } return &pbgroup.GetUserInGroupMembersResp{ @@ -1523,8 +1648,8 @@ func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.Ge }, nil } -func (s *groupServer) GetGroupMemberUserIDs(ctx context.Context, req *pbgroup.GetGroupMemberUserIDsReq) (*pbgroup.GetGroupMemberUserIDsResp, error) { - userIDs, err := s.db.FindGroupMemberUserID(ctx, req.GroupID) +func (g *groupServer) GetGroupMemberUserIDs(ctx context.Context, req *pbgroup.GetGroupMemberUserIDsReq) (*pbgroup.GetGroupMemberUserIDsResp, error) { + userIDs, err := g.db.FindGroupMemberUserID(ctx, req.GroupID) if err != nil { return nil, err } @@ -1533,15 +1658,15 @@ func (s *groupServer) GetGroupMemberUserIDs(ctx context.Context, req *pbgroup.Ge }, nil } -func (s *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup.GetGroupMemberRoleLevelReq) (*pbgroup.GetGroupMemberRoleLevelResp, error) { +func (g *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup.GetGroupMemberRoleLevelReq) (*pbgroup.GetGroupMemberRoleLevelResp, error) { if len(req.RoleLevels) == 0 { return nil, errs.ErrArgs.WrapMsg("RoleLevels empty") } - members, err := s.db.FindGroupMemberRoleLevels(ctx, req.GroupID, req.RoleLevels) + members, err := g.db.FindGroupMemberRoleLevels(ctx, req.GroupID, req.RoleLevels) if err != 
nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } return &pbgroup.GetGroupMemberRoleLevelResp{ @@ -1551,8 +1676,8 @@ func (s *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup. }, nil } -func (s *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req *pbgroup.GetGroupUsersReqApplicationListReq) (*pbgroup.GetGroupUsersReqApplicationListResp, error) { - requests, err := s.db.FindGroupRequests(ctx, req.GroupID, req.UserIDs) +func (g *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req *pbgroup.GetGroupUsersReqApplicationListReq) (*pbgroup.GetGroupUsersReqApplicationListResp, error) { + requests, err := g.db.FindGroupRequests(ctx, req.GroupID, req.UserIDs) if err != nil { return nil, err } @@ -1562,7 +1687,7 @@ func (s *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req * groupIDs := datautil.Distinct(datautil.Slice(requests, func(e *model.GroupRequest) string { return e.GroupID })) - groups, err := s.db.FindGroup(ctx, groupIDs) + groups, err := g.db.FindGroup(ctx, groupIDs) if err != nil { return nil, err } @@ -1572,17 +1697,17 @@ func (s *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req * if ids := datautil.Single(groupIDs, datautil.Keys(groupMap)); len(ids) > 0 { return nil, servererrs.ErrGroupIDNotFound.WrapMsg(strings.Join(ids, ",")) } - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owners...); err != nil { + if err := g.PopulateGroupMember(ctx, owners...); err != nil { return nil, err } ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { return e.GroupID }) - groupMemberNum, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNum, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } diff --git a/internal/rpc/group/notification.go b/internal/rpc/group/notification.go index 9815167e9b..64e922fe2b 100644 --- a/internal/rpc/group/notification.go +++ b/internal/rpc/group/notification.go @@ -16,25 +16,28 @@ package group import ( "context" + "errors" "fmt" + "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" + "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification" - - "github.com/openimsdk/open-im-server/v3/pkg/authverify" - "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" + "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification" "github.com/openimsdk/protocol/constant" pbgroup "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/utils/datautil" 
"github.com/openimsdk/tools/utils/stringutil" + "go.mongodb.org/mongo-driver/mongo" ) // GroupApplicationReceiver @@ -43,12 +46,22 @@ const ( adminReceiver ) -func NewGroupNotificationSender(db controller.GroupDatabase, msgRpcClient *rpcclient.MessageRpcClient, userRpcClient *rpcclient.UserRpcClient, config *Config, fn func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error)) *GroupNotificationSender { +func NewGroupNotificationSender( + db controller.GroupDatabase, + msgRpcClient *rpcclient.MessageRpcClient, + userRpcClient *rpcclient.UserRpcClient, + conversationRpcClient *rpcclient.ConversationRpcClient, + config *Config, + fn func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error), +) *GroupNotificationSender { return &GroupNotificationSender{ NotificationSender: rpcclient.NewNotificationSender(&config.NotificationConfig, rpcclient.WithRpcClient(msgRpcClient), rpcclient.WithUserRpcClient(userRpcClient)), getUsersInfo: fn, db: db, config: config, + + conversationRpcClient: conversationRpcClient, + msgRpcClient: msgRpcClient, } } @@ -57,6 +70,9 @@ type GroupNotificationSender struct { getUsersInfo func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error) db controller.GroupDatabase config *Config + + conversationRpcClient *rpcclient.ConversationRpcClient + msgRpcClient *rpcclient.MessageRpcClient } func (g *GroupNotificationSender) PopulateGroupMember(ctx context.Context, members ...*model.GroupMember) error { @@ -212,10 +228,13 @@ func (g *GroupNotificationSender) groupMemberDB2PB(member *model.GroupMember, ap } */ func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws.GroupMemberFullInfo, groupID string) (err error) { + return g.fillOpUserByUserID(ctx, mcontext.GetOpUserID(ctx), opUser, groupID) +} + +func (g *GroupNotificationSender) fillOpUserByUserID(ctx context.Context, userID string, opUser **sdkws.GroupMemberFullInfo, groupID string) error { if opUser == nil { return errs.ErrInternalServer.WrapMsg("**sdkws.GroupMemberFullInfo is nil") } - userID := mcontext.GetOpUserID(ctx) if groupID != "" { if authverify.IsManagerUserID(userID, g.config.Share.IMAdminUserID) { *opUser = &sdkws.GroupMemberFullInfo{ @@ -228,7 +247,7 @@ func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws member, err := g.db.TakeGroupMember(ctx, groupID, userID) if err == nil { *opUser = g.groupMemberDB2PB(member, 0) - } else if !errs.ErrRecordNotFound.Is(err) { + } else if !(errors.Is(err, mongo.ErrNoDocuments) || errs.ErrRecordNotFound.Is(err)) { return err } } @@ -494,50 +513,67 @@ func (g *GroupNotificationSender) MemberKickedNotification(ctx context.Context, g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.MemberKickedNotification, tips) } -func (g *GroupNotificationSender) MemberInvitedNotification(ctx context.Context, groupID, reason string, invitedUserIDList []string) { +func (g *GroupNotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, invitedOpUserID string, entrantUserID ...string) error { var err error defer func() { if err != nil { log.ZError(ctx, stringutil.GetFuncName(1)+" failed", err) } }() - var group *sdkws.GroupInfo - group, err = g.getGroupInfo(ctx, groupID) - if err != nil { - return + + if !g.config.RpcConfig.EnableHistoryForNewMembers { + conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) + maxSeq, err := g.msgRpcClient.GetConversationMaxSeq(ctx, 
conversationID) + if err != nil { + return err + } + if _, err = g.msgRpcClient.SetUserConversationsMinSeq(ctx, &msg.SetUserConversationsMinSeqReq{ + UserIDs: entrantUserID, + ConversationID: conversationID, + Seq: maxSeq, + }); err != nil { + return err + } } - var users []*sdkws.GroupMemberFullInfo - users, err = g.getGroupMembers(ctx, groupID, invitedUserIDList) - if err != nil { - return + if err := g.conversationRpcClient.GroupChatFirstCreateConversation(ctx, groupID, entrantUserID); err != nil { + return err } - tips := &sdkws.MemberInvitedTips{Group: group, InvitedUserList: users} - err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID) - g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID) - g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberInvitedNotification, tips) -} -func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, groupID string, entrantUserID string) { - var err error - defer func() { - if err != nil { - log.ZError(ctx, stringutil.GetFuncName(1)+" failed", err) - } - }() var group *sdkws.GroupInfo group, err = g.getGroupInfo(ctx, groupID) if err != nil { - return + return err } - var user *sdkws.GroupMemberFullInfo - user, err = g.getGroupMember(ctx, groupID, entrantUserID) + users, err := g.getGroupMembers(ctx, groupID, entrantUserID) if err != nil { - return + return err + } + + tips := &sdkws.MemberInvitedTips{ + Group: group, + InvitedUserList: users, + } + opUserID := mcontext.GetOpUserID(ctx) + if err = g.fillOpUserByUserID(ctx, opUserID, &tips.OpUser, tips.Group.GroupID); err != nil { + return nil + } + switch { + case invitedOpUserID == "": + case invitedOpUserID == opUserID: + tips.InviterUser = tips.OpUser + default: + if err = g.fillOpUserByUserID(ctx, invitedOpUserID, &tips.InviterUser, tips.Group.GroupID); err != nil { + return err + } } - tips := &sdkws.MemberEnterTips{Group: group, EntrantUser: user} g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID) - g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberEnterNotification, tips) + g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberInvitedNotification, tips) + return nil +} + +func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, groupID string, entrantUserID ...string) error { + return g.GroupApplicationAgreeMemberEnterNotification(ctx, groupID, "", entrantUserID...) } func (g *GroupNotificationSender) GroupDismissedNotification(ctx context.Context, tips *sdkws.GroupDismissedTips) { diff --git a/internal/rpc/msg/clear.go b/internal/rpc/msg/clear.go index 6be551eada..c5bd36b445 100644 --- a/internal/rpc/msg/clear.go +++ b/internal/rpc/msg/clear.go @@ -30,8 +30,14 @@ func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg. msgNum int start = time.Now() ) + clearMsg := func(ctx context.Context) (bool, error) { - msgs, err := m.MsgDatabase.GetBeforeMsg(ctx, req.Timestamp, 100) + docIDs, err := m.MsgDatabase.GetDocIDs(ctx) + if err != nil { + return false, err + } + + msgs, err := m.MsgDatabase.GetBeforeMsg(ctx, req.Timestamp, docIDs, 5000) if err != nil { return false, err } @@ -55,19 +61,14 @@ func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg. 
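
Editorial note on GroupApplicationAgreeMemberEnterNotification above: this is where the new enableHistoryForNewMembers switch takes effect. When it is false, each entrant's per-conversation min seq is pushed up to the conversation's current max seq before the group conversation is created, so messages sent before the join never sync to them. A distilled sketch of that idea follows; the seq semantics (a member only reads seqs above their min seq) are inferred from how the min and max seqs are used above, and the real code goes through msgRpcClient.GetConversationMaxSeq and SetUserConversationsMinSeq.

package example

// hideHistoryForNewMembers is an editor's illustration, not project code: pinning each
// entrant's min seq to the conversation's current max seq leaves only messages sent
// after the join visible to them.
func hideHistoryForNewMembers(minSeqByUser map[string]int64, entrantUserIDs []string, currentMaxSeq int64) {
	for _, userID := range entrantUserIDs {
		minSeqByUser[userID] = currentMaxSeq
	}
}
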
return true, nil } - for { - keep, err := clearMsg(ctx) - if err != nil { - log.ZError(ctx, "clear msg failed", err, "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) - return nil, err - } - if !keep { - log.ZInfo(ctx, "clear msg success", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) - break - } - - log.ZInfo(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) + _, err = clearMsg(ctx) + if err != nil { + log.ZError(ctx, "clear msg failed", err, "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) + return nil, err } + + log.ZDebug(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) + return &msg.ClearMsgResp{}, nil } diff --git a/internal/rpc/msg/seq.go b/internal/rpc/msg/seq.go index 1ebec4a719..4d9eb6db9c 100644 --- a/internal/rpc/msg/seq.go +++ b/internal/rpc/msg/seq.go @@ -53,3 +53,12 @@ func (m *msgServer) GetMsgByConversationIDs(ctx context.Context, req *pbmsg.GetM } return &pbmsg.GetMsgByConversationIDsResp{MsgDatas: Msgs}, nil } + +func (m *msgServer) SetUserConversationsMinSeq(ctx context.Context, req *pbmsg.SetUserConversationsMinSeqReq) (*pbmsg.SetUserConversationsMinSeqResp, error) { + for _, userID := range req.UserIDs { + if err := m.MsgDatabase.SetUserConversationsMinSeqs(ctx, userID, map[string]int64{req.ConversationID: req.Seq}); err != nil { + return nil, err + } + } + return &pbmsg.SetUserConversationsMinSeqResp{}, nil +} diff --git a/internal/rpc/msg/sync_msg.go b/internal/rpc/msg/sync_msg.go index f5b5ebda53..ea4c487b9e 100644 --- a/internal/rpc/msg/sync_msg.go +++ b/internal/rpc/msg/sync_msg.go @@ -16,16 +16,15 @@ package msg import ( "context" - "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" - "github.com/openimsdk/tools/utils/datautil" - "github.com/openimsdk/tools/utils/timeutil" - "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" + "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/utils/datautil" + "github.com/openimsdk/tools/utils/timeutil" ) func (m *msgServer) PullMessageBySeqs(ctx context.Context, req *sdkws.PullMessageBySeqsReq) (*sdkws.PullMessageBySeqsResp, error) { @@ -86,6 +85,35 @@ func (m *msgServer) PullMessageBySeqs(ctx context.Context, req *sdkws.PullMessag return resp, nil } +func (m *msgServer) GetSeqMessage(ctx context.Context, req *msg.GetSeqMessageReq) (*msg.GetSeqMessageResp, error) { + resp := &msg.GetSeqMessageResp{ + Msgs: make(map[string]*sdkws.PullMsgs), + NotificationMsgs: make(map[string]*sdkws.PullMsgs), + } + for _, conv := range req.Conversations { + _, _, msgs, err := m.MsgDatabase.GetMsgBySeqs(ctx, req.UserID, conv.ConversationID, conv.Seqs) + if err != nil { + return nil, err + } + var pullMsgs *sdkws.PullMsgs + if ok := false; conversationutil.IsNotificationConversationID(conv.ConversationID) { + pullMsgs, ok = resp.NotificationMsgs[conv.ConversationID] + if !ok { + pullMsgs = &sdkws.PullMsgs{} + resp.NotificationMsgs[conv.ConversationID] = pullMsgs + } + } else { + pullMsgs, ok = resp.Msgs[conv.ConversationID] + if !ok { + pullMsgs = &sdkws.PullMsgs{} + resp.Msgs[conv.ConversationID] = pullMsgs + } + } + pullMsgs.Msgs = append(pullMsgs.Msgs, msgs...) 
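
Editorial note on the new msgServer.SetUserConversationsMinSeq handler above: this is the RPC the group notification sender uses to push members' min seqs forward. A hedged example of calling it over gRPC from another service follows; the address and dial plumbing are placeholders (inside OpenIM the connection normally comes from service discovery), and only the request and response shapes are taken from the handler above.

package example

import (
	"context"

	"github.com/openimsdk/protocol/msg"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// setMinSeq bumps the given users' min seq for one conversation via the msg service.
func setMinSeq(ctx context.Context, addr, conversationID string, userIDs []string, seq int64) error {
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = msg.NewMsgClient(conn).SetUserConversationsMinSeq(ctx, &msg.SetUserConversationsMinSeqReq{
		UserIDs:        userIDs,
		ConversationID: conversationID,
		Seq:            seq,
	})
	return err
}
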
+ } + return resp, nil +} + func (m *msgServer) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sdkws.GetMaxSeqResp, error) { if err := authverify.CheckAccessV3(ctx, req.UserID, m.config.Share.IMAdminUserID); err != nil { return nil, err @@ -104,6 +132,12 @@ func (m *msgServer) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sd log.ZWarn(ctx, "GetMaxSeqs error", err, "conversationIDs", conversationIDs, "maxSeqs", maxSeqs) return nil, err } + // avoid pulling messages from sessions with a large number of max seq values of 0 + for conversationID, seq := range maxSeqs { + if seq == 0 { + delete(maxSeqs, conversationID) + } + } resp := new(sdkws.GetMaxSeqResp) resp.MaxSeqs = maxSeqs return resp, nil diff --git a/internal/rpc/relation/friend.go b/internal/rpc/relation/friend.go index 3d29ad3379..9130589321 100644 --- a/internal/rpc/relation/friend.go +++ b/internal/rpc/relation/friend.go @@ -121,7 +121,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg conversationRpcClient: rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation), config: config, webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL), - queue: memamq.NewMemoryQueue(128, 1024*8), + queue: memamq.NewMemoryQueue(16, 1024*1024), }) return nil } @@ -312,16 +312,20 @@ func (s *friendServer) GetPaginationFriendsApplyTo(ctx context.Context, req *rel if err := s.userRpcClient.Access(ctx, req.UserID); err != nil { return nil, err } + total, friendRequests, err := s.db.PageFriendRequestToMe(ctx, req.UserID, req.Pagination) if err != nil { return nil, err } + resp = &relation.GetPaginationFriendsApplyToResp{} resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap) if err != nil { return nil, err } + resp.Total = int32(total) + return resp, nil } diff --git a/internal/rpc/third/s3.go b/internal/rpc/third/s3.go index f96eb73905..fb6a1157e1 100644 --- a/internal/rpc/third/s3.go +++ b/internal/rpc/third/s3.go @@ -290,6 +290,7 @@ func (t *thirdServer) apiAddress(prefix, name string) string { func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) { var conf config.Third expireTime := time.UnixMilli(req.ExpireTime) + var deltotal int findPagination := &sdkws.RequestPagination{ PageNumber: 1, ShowNumber: 1000, @@ -311,10 +312,8 @@ func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteO return nil, errs.Wrap(err) } if int(count) < 1 && t.minio != nil { - thumbnailKey, err := t.getMinioImageThumbnailKey(ctx, key) - if err != nil { - return nil, errs.Wrap(err) - } + thumbnailKey, _ := t.getMinioImageThumbnailKey(ctx, key) + t.s3dataBase.DeleteObject(ctx, thumbnailKey) t.s3dataBase.DelS3Key(ctx, conf.Object.Enable, needDelObjectKeys...) 
t.s3dataBase.DeleteObject(ctx, key) @@ -329,7 +328,9 @@ func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteO if total < int64(findPagination.ShowNumber) { break } + deltotal += int(total) } + log.ZDebug(ctx, "DeleteOutdatedData", "delete Total", deltotal) return &third.DeleteOutdatedDataResp{}, nil } diff --git a/internal/rpc/user/online.go b/internal/rpc/user/online.go index 99b272006f..4e7823306f 100644 --- a/internal/rpc/user/online.go +++ b/internal/rpc/user/online.go @@ -2,6 +2,8 @@ package user import ( "context" + "github.com/openimsdk/tools/utils/datautil" + "github.com/openimsdk/protocol/constant" pbuser "github.com/openimsdk/protocol/user" ) @@ -80,3 +82,22 @@ func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUse } return &pbuser.SetUserOnlineStatusResp{}, nil } + +func (s *userServer) GetAllOnlineUsers(ctx context.Context, req *pbuser.GetAllOnlineUsersReq) (*pbuser.GetAllOnlineUsersResp, error) { + resMap, nextCursor, err := s.online.GetAllOnlineUsers(ctx, req.Cursor) + if err != nil { + return nil, err + } + resp := &pbuser.GetAllOnlineUsersResp{ + StatusList: make([]*pbuser.OnlineStatus, 0, len(resMap)), + NextCursor: nextCursor, + } + for userID, plats := range resMap { + resp.StatusList = append(resp.StatusList, &pbuser.OnlineStatus{ + UserID: userID, + Status: int32(datautil.If(len(plats) > 0, constant.Online, constant.Offline)), + PlatformIDs: plats, + }) + } + return resp, nil +} diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index a6952bd6db..8b22c8f9b7 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -17,6 +17,11 @@ package user import ( "context" "errors" + "math/rand" + "strings" + "sync" + "time" + "github.com/openimsdk/open-im-server/v3/internal/rpc/relation" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" @@ -29,10 +34,6 @@ import ( "github.com/openimsdk/protocol/group" friendpb "github.com/openimsdk/protocol/relation" "github.com/openimsdk/tools/db/redisutil" - "math/rand" - "strings" - "sync" - "time" "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" @@ -147,41 +148,35 @@ func (s *userServer) UpdateUserInfo(ctx context.Context, req *pbuser.UpdateUserI return nil, err } s.friendNotificationSender.UserInfoUpdatedNotification(ctx, req.UserInfo.UserID) - //friends, err := s.friendRpcClient.GetFriendIDs(ctx, req.UserInfo.UserID) - //if err != nil { - // return nil, err - //} - //if req.UserInfo.Nickname != "" || req.UserInfo.FaceURL != "" { - // if err = s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID,oldUser); err != nil { - // return nil, err - // } - //} - //for _, friendID := range friends { - // s.friendNotificationSender.FriendInfoUpdatedNotification(ctx, req.UserInfo.UserID, friendID) - //} + s.webhookAfterUpdateUserInfo(ctx, &s.config.WebhooksConfig.AfterUpdateUserInfo, req) if err = s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID, oldUser); err != nil { return nil, err } return resp, nil } + func (s *userServer) UpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUserInfoExReq) (resp *pbuser.UpdateUserInfoExResp, err error) { resp = &pbuser.UpdateUserInfoExResp{} err = authverify.CheckAccessV3(ctx, req.UserInfo.UserID, s.config.Share.IMAdminUserID) if err != nil { return nil, err } + if err = s.webhookBeforeUpdateUserInfoEx(ctx, &s.config.WebhooksConfig.BeforeUpdateUserInfoEx, req); err != nil { return nil, 
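
Editorial note on the GetAllOnlineUsers handler added above: it exposes the online cache with cursor-based pagination. A hedged client-side sketch of walking the full set follows; it assumes the generated pbuser.UserClient and, as with Redis SCAN, that a returned NextCursor of 0 marks the end. Both are assumptions; only the request and response fields come from the handler above.

package example

import (
	"context"
	"fmt"

	"github.com/openimsdk/protocol/constant"
	pbuser "github.com/openimsdk/protocol/user"
)

// listOnlineUsers pages through every online user and prints the ones currently online.
func listOnlineUsers(ctx context.Context, client pbuser.UserClient) error {
	req := &pbuser.GetAllOnlineUsersReq{}
	for {
		resp, err := client.GetAllOnlineUsers(ctx, req)
		if err != nil {
			return err
		}
		for _, status := range resp.StatusList {
			if status.Status == int32(constant.Online) {
				fmt.Println(status.UserID, status.PlatformIDs)
			}
		}
		if resp.NextCursor == 0 { // assumed end-of-scan sentinel
			return nil
		}
		req = &pbuser.GetAllOnlineUsersReq{Cursor: resp.NextCursor}
	}
}
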
err } + oldUser, err := s.db.GetUserByID(ctx, req.UserInfo.UserID) if err != nil { return nil, err } + data := convert.UserPb2DBMapEx(req.UserInfo) if err = s.db.UpdateByMap(ctx, req.UserInfo.UserID, data); err != nil { return nil, err } + s.friendNotificationSender.UserInfoUpdatedNotification(ctx, req.UserInfo.UserID) //friends, err := s.friendRpcClient.GetFriendIDs(ctx, req.UserInfo.UserID) //if err != nil { @@ -199,6 +194,7 @@ func (s *userServer) UpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUse if err := s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID, oldUser); err != nil { return nil, err } + return resp, nil } func (s *userServer) SetGlobalRecvMessageOpt(ctx context.Context, req *pbuser.SetGlobalRecvMessageOptReq) (resp *pbuser.SetGlobalRecvMessageOptResp, err error) { diff --git a/internal/tools/cron_task.go b/internal/tools/cron_task.go index b1d59800ce..dbb4e34f61 100644 --- a/internal/tools/cron_task.go +++ b/internal/tools/cron_task.go @@ -25,7 +25,6 @@ import ( pbconversation "github.com/openimsdk/protocol/conversation" "github.com/openimsdk/protocol/msg" - "github.com/openimsdk/protocol/third" "github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mw" "google.golang.org/grpc" @@ -59,10 +58,10 @@ func Start(ctx context.Context, config *CronTaskConfig) error { return err } - thirdConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third) - if err != nil { - return err - } + // thirdConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third) + // if err != nil { + // return err + // } conversationConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Conversation) if err != nil { @@ -71,7 +70,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error { msgClient := msg.NewMsgClient(msgConn) conversationClient := pbconversation.NewConversationClient(conversationConn) - thirdClient := third.NewThirdClient(thirdConn) + // thirdClient := third.NewThirdClient(thirdConn) crontab := cron.New() @@ -80,12 +79,13 @@ func Start(ctx context.Context, config *CronTaskConfig) error { now := time.Now() deltime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.RetainChatRecords)) ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deltime.UnixMilli())) - log.ZInfo(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli()) + log.ZDebug(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli()) + if _, err := msgClient.ClearMsg(ctx, &msg.ClearMsgReq{Timestamp: deltime.UnixMilli()}); err != nil { log.ZError(ctx, "cron clear chat records failed", err, "deltime", deltime, "cont", time.Since(now)) return } - log.ZInfo(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now)) + log.ZDebug(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now)) } if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearMsgFunc); err != nil { return errs.Wrap(err) @@ -95,7 +95,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error { msgDestructFunc := func() { now := time.Now() ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), now.UnixMilli())) - log.ZInfo(ctx, "msg destruct cron start", "now", now) + log.ZDebug(ctx, "msg destruct cron start", "now", now) conversations, err := conversationClient.GetConversationsNeedDestructMsgs(ctx, &pbconversation.GetConversationsNeedDestructMsgsReq{}) if err != nil { @@ -108,29 +108,29 @@ func Start(ctx context.Context, config *CronTaskConfig) error 
{ return } } - log.ZInfo(ctx, "msg destruct cron task completed", "cont", time.Since(now)) + log.ZDebug(ctx, "msg destruct cron task completed", "cont", time.Since(now)) } if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, msgDestructFunc); err != nil { return errs.Wrap(err) } - // scheduled delete outdated file Objects and their datas in specific time. - deleteObjectFunc := func() { - now := time.Now() - deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime)) - ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli())) - log.ZInfo(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli()) - if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil { - log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now)) - return - } - log.ZInfo(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now)) - } - if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteObjectFunc); err != nil { - return errs.Wrap(err) - } - - log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime) + // // scheduled delete outdated file Objects and their datas in specific time. + // deleteObjectFunc := func() { + // now := time.Now() + // deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime)) + // ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli())) + // log.ZDebug(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli()) + // if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil { + // log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now)) + // return + // } + // log.ZDebug(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now)) + // } + // if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteObjectFunc); err != nil { + // return errs.Wrap(err) + // } + + log.ZDebug(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime) crontab.Start() <-ctx.Done() return nil diff --git a/pkg/apistruct/msg_test.go b/pkg/apistruct/msg_test.go new file mode 100644 index 0000000000..28f878a9fd --- /dev/null +++ b/pkg/apistruct/msg_test.go @@ -0,0 +1 @@ +package apistruct diff --git a/pkg/callbackstruct/constant.go b/pkg/callbackstruct/constant.go index ab393dd36e..89062ee0a0 100644 --- a/pkg/callbackstruct/constant.go +++ b/pkg/callbackstruct/constant.go @@ -18,7 +18,9 @@ const ( CallbackBeforeInviteJoinGroupCommand = "callbackBeforeInviteJoinGroupCommand" CallbackAfterJoinGroupCommand = "callbackAfterJoinGroupCommand" CallbackAfterSetGroupInfoCommand = "callbackAfterSetGroupInfoCommand" + CallbackAfterSetGroupInfoEXCommand = "callbackAfterSetGroupInfoCommandEX" CallbackBeforeSetGroupInfoCommand = "callbackBeforeSetGroupInfoCommand" + CallbackBeforeSetGroupInfoEXCommand = "callbackBeforeSetGroupInfoEXCommand" CallbackAfterRevokeMsgCommand = "callbackBeforeAfterMsgCommand" CallbackBeforeAddBlackCommand = "callbackBeforeAddBlackCommand" CallbackAfterAddFriendCommand = "callbackAfterAddFriendCommand" diff --git a/pkg/callbackstruct/group.go b/pkg/callbackstruct/group.go index 23a73ebd23..7fefa5b926 100644 --- a/pkg/callbackstruct/group.go +++ 
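
Editorial note on the cron_task.go changes above (log levels lowered to debug, the outdated-object cleanup job commented out): the scheduling skeleton is unchanged. A minimal, self-contained sketch of that pattern follows, assuming the robfig/cron v3 import path (an assumption; cron.New, AddFunc, and Start are as used above), with a placeholder job body instead of the real msg and conversation RPC calls.

package example

import (
	"context"
	"log"

	"github.com/robfig/cron/v3"
)

// startCron registers one job on the given cron spec and blocks until the context is
// cancelled, mirroring the structure of internal/tools/cron_task.go above.
func startCron(ctx context.Context, spec string) error {
	crontab := cron.New()
	if _, err := crontab.AddFunc(spec, func() {
		log.Println("cron tick: clear chat records / destruct msgs would run here")
	}); err != nil {
		return err
	}
	crontab.Start()
	<-ctx.Done()
	crontab.Stop()
	return nil
}
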
b/pkg/callbackstruct/group.go @@ -17,6 +17,7 @@ package callbackstruct import ( "github.com/openimsdk/open-im-server/v3/pkg/apistruct" common "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/protocol/wrapperspb" ) type CallbackCommand string @@ -242,3 +243,48 @@ type CallbackAfterSetGroupInfoReq struct { type CallbackAfterSetGroupInfoResp struct { CommonCallbackResp } + +type CallbackBeforeSetGroupInfoEXReq struct { + CallbackCommand `json:"callbackCommand"` + OperationID string `json:"operationID"` + GroupID string `json:"groupID"` + GroupName string `json:"groupName"` + Notification *wrapperspb.StringValue `json:"notification"` + Introduction *wrapperspb.StringValue `json:"introduction"` + FaceURL *wrapperspb.StringValue `json:"faceURL"` + Ex *wrapperspb.StringValue `json:"ex"` + NeedVerification *wrapperspb.Int32Value `json:"needVerification"` + LookMemberInfo *wrapperspb.Int32Value `json:"lookMemberInfo"` + ApplyMemberFriend *wrapperspb.Int32Value `json:"applyMemberFriend"` +} + +type CallbackBeforeSetGroupInfoEXResp struct { + CommonCallbackResp + GroupID string `json:"groupID"` + GroupName string `json:"groupName"` + Notification *wrapperspb.StringValue `json:"notification"` + Introduction *wrapperspb.StringValue `json:"introduction"` + FaceURL *wrapperspb.StringValue `json:"faceURL"` + Ex *wrapperspb.StringValue `json:"ex"` + NeedVerification *wrapperspb.Int32Value `json:"needVerification"` + LookMemberInfo *wrapperspb.Int32Value `json:"lookMemberInfo"` + ApplyMemberFriend *wrapperspb.Int32Value `json:"applyMemberFriend"` +} + +type CallbackAfterSetGroupInfoEXReq struct { + CallbackCommand `json:"callbackCommand"` + OperationID string `json:"operationID"` + GroupID string `json:"groupID"` + GroupName string `json:"groupName"` + Notification *wrapperspb.StringValue `json:"notification"` + Introduction *wrapperspb.StringValue `json:"introduction"` + FaceURL *wrapperspb.StringValue `json:"faceURL"` + Ex *wrapperspb.StringValue `json:"ex"` + NeedVerification *wrapperspb.Int32Value `json:"needVerification"` + LookMemberInfo *wrapperspb.Int32Value `json:"lookMemberInfo"` + ApplyMemberFriend *wrapperspb.Int32Value `json:"applyMemberFriend"` +} + +type CallbackAfterSetGroupInfoEXResp struct { + CommonCallbackResp +} diff --git a/pkg/common/cmd/push.go b/pkg/common/cmd/push.go index c9b8b1c245..ca22a697d2 100644 --- a/pkg/common/cmd/push.go +++ b/pkg/common/cmd/push.go @@ -37,7 +37,6 @@ func NewPushRpcCmd() *PushRpcCmd { ret.configMap = map[string]any{ OpenIMPushCfgFileName: &pushConfig.RpcConfig, RedisConfigFileName: &pushConfig.RedisConfig, - MongodbConfigFileName: &pushConfig.MongodbConfig, KafkaConfigFileName: &pushConfig.KafkaConfig, ShareFileName: &pushConfig.Share, NotificationFileName: &pushConfig.NotificationConfig, diff --git a/pkg/common/cmd/root.go b/pkg/common/cmd/root.go index b43f86557f..5edea43773 100644 --- a/pkg/common/cmd/root.go +++ b/pkg/common/cmd/root.go @@ -129,10 +129,11 @@ func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts { } func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error { - err := log.InitFromConfig( + err := log.InitLoggerFromConfig( cmdOpts.loggerPrefixName, r.processName, + "", "", r.log.RemainLogLevel, r.log.IsStdout, r.log.IsJson, diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index c6c672eb8d..59919208be 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -73,18 +73,21 @@ type Mongo struct { MaxRetry int `mapstructure:"maxRetry"` } type Kafka struct { - Username string 
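
Editorial note on the SetGroupInfoEX callback structs added above: they define the payloads for the new before/after webhooks. A hedged sketch of a receiving endpoint follows; the route path and server wiring are invented for illustration, only the struct shapes come from the definitions above, and whether fields echoed in the response are applied by the server depends on webhook-handling code not shown in this hunk.

package main

import (
	"encoding/json"
	"net/http"

	"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
)

// beforeSetGroupInfoEX decodes the callback request and echoes selected fields back;
// the exact merge behaviour on the OpenIM side is server-defined and not shown here.
func beforeSetGroupInfoEX(w http.ResponseWriter, r *http.Request) {
	var req callbackstruct.CallbackBeforeSetGroupInfoEXReq
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	resp := callbackstruct.CallbackBeforeSetGroupInfoEXResp{
		GroupID:      req.GroupID,
		GroupName:    req.GroupName,
		Notification: req.Notification,
	}
	_ = json.NewEncoder(w).Encode(resp)
}

func main() {
	// Hypothetical route name for illustration only.
	http.HandleFunc("/callbackBeforeSetGroupInfoEXCommand", beforeSetGroupInfoEX)
	_ = http.ListenAndServe(":8080", nil)
}
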
`mapstructure:"username"` - Password string `mapstructure:"password"` - ProducerAck string `mapstructure:"producerAck"` - CompressType string `mapstructure:"compressType"` - Address []string `mapstructure:"address"` - ToRedisTopic string `mapstructure:"toRedisTopic"` - ToMongoTopic string `mapstructure:"toMongoTopic"` - ToPushTopic string `mapstructure:"toPushTopic"` - ToRedisGroupID string `mapstructure:"toRedisGroupID"` - ToMongoGroupID string `mapstructure:"toMongoGroupID"` - ToPushGroupID string `mapstructure:"toPushGroupID"` - Tls TLSConfig `mapstructure:"tls"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + ProducerAck string `mapstructure:"producerAck"` + CompressType string `mapstructure:"compressType"` + Address []string `mapstructure:"address"` + ToRedisTopic string `mapstructure:"toRedisTopic"` + ToMongoTopic string `mapstructure:"toMongoTopic"` + ToPushTopic string `mapstructure:"toPushTopic"` + ToOfflinePushTopic string `mapstructure:"toOfflinePushTopic"` + ToRedisGroupID string `mapstructure:"toRedisGroupID"` + ToMongoGroupID string `mapstructure:"toMongoGroupID"` + ToPushGroupID string `mapstructure:"toPushGroupID"` + ToOfflineGroupID string `mapstructure:"toOfflinePushGroupID"` + + Tls TLSConfig `mapstructure:"tls"` } type TLSConfig struct { EnableTLS bool `mapstructure:"enableTLS"` @@ -97,8 +100,9 @@ type TLSConfig struct { type API struct { Api struct { - ListenIP string `mapstructure:"listenIP"` - Ports []int `mapstructure:"ports"` + ListenIP string `mapstructure:"listenIP"` + Ports []int `mapstructure:"ports"` + CompressionLevel int `mapstructure:"compressionLevel"` } `mapstructure:"api"` Prometheus struct { Enable bool `mapstructure:"enable"` @@ -220,6 +224,7 @@ type Push struct { BadgeCount bool `mapstructure:"badgeCount"` Production bool `mapstructure:"production"` } `mapstructure:"iosPush"` + FullUserCache bool `mapstructure:"fullUserCache"` } type Auth struct { @@ -258,7 +263,8 @@ type Group struct { ListenIP string `mapstructure:"listenIP"` Ports []int `mapstructure:"ports"` } `mapstructure:"rpc"` - Prometheus Prometheus `mapstructure:"prometheus"` + Prometheus Prometheus `mapstructure:"prometheus"` + EnableHistoryForNewMembers bool `mapstructure:"enableHistoryForNewMembers"` } type Msg struct { @@ -335,7 +341,8 @@ type Redis struct { Password string `mapstructure:"password"` ClusterMode bool `mapstructure:"clusterMode"` DB int `mapstructure:"storage"` - MaxRetry int `mapstructure:"MaxRetry"` + MaxRetry int `mapstructure:"maxRetry"` + PoolSize int `mapstructure:"poolSize"` } type BeforeConfig struct { @@ -421,6 +428,8 @@ type Webhooks struct { BeforeInviteUserToGroup BeforeConfig `mapstructure:"beforeInviteUserToGroup"` AfterSetGroupInfo AfterConfig `mapstructure:"afterSetGroupInfo"` BeforeSetGroupInfo BeforeConfig `mapstructure:"beforeSetGroupInfo"` + AfterSetGroupInfoEX AfterConfig `mapstructure:"afterSetGroupInfoEX"` + BeforeSetGroupInfoEX BeforeConfig `mapstructure:"beforeSetGroupInfoEX"` AfterRevokeMsg AfterConfig `mapstructure:"afterRevokeMsg"` BeforeAddBlack BeforeConfig `mapstructure:"beforeAddBlack"` AfterAddFriend AfterConfig `mapstructure:"afterAddFriend"` @@ -471,6 +480,7 @@ func (r *Redis) Build() *redisutil.Config { Password: r.Password, DB: r.DB, MaxRetry: r.MaxRetry, + PoolSize: r.PoolSize, } } diff --git a/pkg/common/config/load_config_test.go b/pkg/common/config/load_config_test.go index 256214565b..a0345fc7a5 100644 --- a/pkg/common/config/load_config_test.go +++ b/pkg/common/config/load_config_test.go 
@@ -36,3 +36,26 @@ func TestLoadOpenIMRpcUserConfig(t *testing.T) { //export IMENV_OPENIM_RPC_USER_RPC_PORTS="10110,10111,10112" assert.Equal(t, []int{10110, 10111, 10112}, user.RPC.Ports) } + +func TestLoadNotificationConfig(t *testing.T) { + var noti Notification + err := LoadConfig("../../../config/notification.yml", "IMENV_NOTIFICATION", ¬i) + assert.Nil(t, err) + assert.Equal(t, "Your friend's profile has been changed", noti.FriendRemarkSet.OfflinePush.Title) +} + +func TestLoadOpenIMThirdConfig(t *testing.T) { + var third Third + err := LoadConfig("../../../config/openim-rpc-third.yml", "IMENV_OPENIM_RPC_THIRD", &third) + assert.Nil(t, err) + assert.Equal(t, "enabled", third.Object.Enable) + assert.Equal(t, "https://oss-cn-chengdu.aliyuncs.com", third.Object.Oss.Endpoint) + assert.Equal(t, "my_bucket_name", third.Object.Oss.Bucket) + assert.Equal(t, "https://my_bucket_name.oss-cn-chengdu.aliyuncs.com", third.Object.Oss.BucketURL) + assert.Equal(t, "AKID1234567890", third.Object.Oss.AccessKeyID) + assert.Equal(t, "abc123xyz789", third.Object.Oss.AccessKeySecret) + assert.Equal(t, "session_token_value", third.Object.Oss.SessionToken) // Uncomment if session token is needed + assert.Equal(t, true, third.Object.Oss.PublicRead) + + // Environment: IMENV_OPENIM_RPC_THIRD_OBJECT_ENABLE=enabled;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ENDPOINT=https://oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKET=my_bucket_name;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKETURL=https://my_bucket_name.oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYID=AKID1234567890;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYSECRET=abc123xyz789;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_SESSIONTOKEN=session_token_value;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_PUBLICREAD=true +} diff --git a/pkg/common/prommetrics/grpc_push.go b/pkg/common/prommetrics/grpc_push.go index 0b6c3e76f3..5c966310f7 100644 --- a/pkg/common/prommetrics/grpc_push.go +++ b/pkg/common/prommetrics/grpc_push.go @@ -23,4 +23,8 @@ var ( Name: "msg_offline_push_failed_total", Help: "The number of msg failed offline pushed", }) + MsgLoneTimePushCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "msg_long_time_push_total", + Help: "The number of messages with a push time exceeding 10 seconds", + }) ) diff --git a/pkg/common/prommetrics/rpc.go b/pkg/common/prommetrics/rpc.go index dc16322dab..7162fa7e80 100644 --- a/pkg/common/prommetrics/rpc.go +++ b/pkg/common/prommetrics/rpc.go @@ -47,9 +47,17 @@ func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Co case share.RpcRegisterName.MessageGateway: return []prometheus.Collector{OnlineUserGauge} case share.RpcRegisterName.Msg: - return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter} + return []prometheus.Collector{ + SingleChatMsgProcessSuccessCounter, + SingleChatMsgProcessFailedCounter, + GroupChatMsgProcessSuccessCounter, + GroupChatMsgProcessFailedCounter, + } case share.RpcRegisterName.Push: - return []prometheus.Collector{MsgOfflinePushFailedCounter} + return []prometheus.Collector{ + MsgOfflinePushFailedCounter, + MsgLoneTimePushCounter, + } case share.RpcRegisterName.Auth: return []prometheus.Collector{UserLoginCounter} case share.RpcRegisterName.User: diff --git a/pkg/common/startrpc/start.go b/pkg/common/startrpc/start.go index 4091a5f6e6..fb8782d304 100644 --- a/pkg/common/startrpc/start.go +++ b/pkg/common/startrpc/start.go @@ -25,7 
+25,6 @@ import ( "os" "os/signal" "strconv" - "sync" "syscall" "time" @@ -35,7 +34,6 @@ import ( "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mw" - "github.com/openimsdk/tools/system/program" "github.com/openimsdk/tools/utils/network" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -54,6 +52,7 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", rpcPort, "prometheusPorts", prometheusConfig.Ports) rpcTcpAddr := net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort)) + listener, err := net.Listen( "tcp", rpcTcpAddr, @@ -61,7 +60,6 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo if err != nil { return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr) } - defer listener.Close() client, err := kdisc.NewDiscoveryRegister(discovery, share) if err != nil { @@ -92,10 +90,6 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo } srv := grpc.NewServer(options...) - once := sync.Once{} - defer func() { - once.Do(srv.GracefulStop) - }() err = rpcFn(ctx, config, client, srv) if err != nil { @@ -113,9 +107,8 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo } var ( - netDone = make(chan struct{}, 2) - netErr error - httpServer *http.Server + netDone = make(chan struct{}, 2) + netErr error ) if prometheusConfig.Enable { go func() { @@ -152,18 +145,11 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo signal.Notify(sigs, syscall.SIGTERM) select { case <-sigs: - program.SIGTERMExit() - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if err := gracefulStopWithCtx(ctx, srv.GracefulStop); err != nil { return err } - ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - err := httpServer.Shutdown(ctx) - if err != nil { - return errs.WrapMsg(err, "shutdown err") - } return nil case <-netDone: return netErr diff --git a/pkg/common/storage/cache/cachekey/online.go b/pkg/common/storage/cache/cachekey/online.go index 164e5f2f46..40f09cb5ae 100644 --- a/pkg/common/storage/cache/cachekey/online.go +++ b/pkg/common/storage/cache/cachekey/online.go @@ -1,6 +1,9 @@ package cachekey -import "time" +import ( + "strings" + "time" +) const ( OnlineKey = "ONLINE:" @@ -11,3 +14,7 @@ const ( func GetOnlineKey(userID string) string { return OnlineKey + userID } + +func GetOnlineKeyUserID(key string) string { + return strings.TrimPrefix(key, OnlineKey) +} diff --git a/pkg/common/storage/cache/online.go b/pkg/common/storage/cache/online.go index 7669c8a118..d21ae616a6 100644 --- a/pkg/common/storage/cache/online.go +++ b/pkg/common/storage/cache/online.go @@ -5,4 +5,5 @@ import "context" type OnlineCache interface { GetOnline(ctx context.Context, userID string) ([]int32, error) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error + GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) } diff --git a/pkg/common/storage/cache/redis/batch.go b/pkg/common/storage/cache/redis/batch.go index 4d65c59298..1810ac9939 100644 --- a/pkg/common/storage/cache/redis/batch.go +++ b/pkg/common/storage/cache/redis/batch.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" 
"github.com/dtm-labs/rockscache" + "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" "golang.org/x/sync/singleflight" @@ -65,6 +66,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac } bs, err := json.Marshal(value) if err != nil { + log.ZError(ctx, "marshal failed", err) return nil, err } cacheIndex[index] = string(bs) @@ -72,7 +74,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac return cacheIndex, nil }) if err != nil { - return nil, err + return nil, errs.WrapMsg(err, "FetchBatch2 failed") } for index, data := range indexCache { if data == "" { @@ -80,7 +82,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac } var value V if err := json.Unmarshal([]byte(data), &value); err != nil { - return nil, err + return nil, errs.WrapMsg(err, "Unmarshal failed") } if cb, ok := any(&value).(BatchCacheCallback[K]); ok { cb.BatchCache(keyId[keys[index]]) diff --git a/pkg/common/storage/cache/redis/batch_handler.go b/pkg/common/storage/cache/redis/batch_handler.go index f9923e198e..420ebdf777 100644 --- a/pkg/common/storage/cache/redis/batch_handler.go +++ b/pkg/common/storage/cache/redis/batch_handler.go @@ -28,6 +28,10 @@ import ( "time" ) +const ( + rocksCacheTimeout = 11 * time.Second +) + // BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache. type BatchDeleterRedis struct { redisClient redis.UniversalClient @@ -106,6 +110,8 @@ func (c *BatchDeleterRedis) AddKeys(keys ...string) { // GetRocksCacheOptions returns the default configuration options for RocksCache. func GetRocksCacheOptions() *rockscache.Options { opts := rockscache.NewDefaultOptions() + opts.LockExpire = rocksCacheTimeout + opts.WaitReplicasTimeout = rocksCacheTimeout opts.StrongConsistency = true opts.RandomExpireAdjustment = 0.2 @@ -118,7 +124,7 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) { t, err = fn(ctx) if err != nil { - log.ZError(ctx, "getCache query database failed", err, "key", key) + //log.ZError(ctx, "getCache query database failed", err, "key", key) return "", err } bs, err := json.Marshal(t) diff --git a/pkg/common/storage/cache/redis/online.go b/pkg/common/storage/cache/redis/online.go index a012e1cd2d..b6c90264e1 100644 --- a/pkg/common/storage/cache/redis/online.go +++ b/pkg/common/storage/cache/redis/online.go @@ -2,12 +2,15 @@ package redis import ( "context" + "fmt" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/protocol/constant" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" "strconv" + "strings" "time" ) @@ -48,6 +51,36 @@ func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, err return platformIDs, nil } +func (s *userOnline) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) { + result := make(map[string][]int32) + + keys, nextCursor, err := s.rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result() + if err != nil { + return nil, 0, err + } + + for _, key := range keys { + userID := cachekey.GetOnlineKeyUserID(key) + strValues, err := s.rdb.ZRange(ctx, key, 0, -1).Result() + if err != nil { + return nil, 0, err + } + + values := 
make([]int32, 0, len(strValues)) + for _, value := range strValues { + intValue, err := strconv.Atoi(value) + if err != nil { + return nil, 0, errs.Wrap(err) + } + values = append(values, int32(intValue)) + } + + result[userID] = values + } + + return result, nextCursor, nil +} + func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error { script := ` local key = KEYS[1] @@ -66,11 +99,10 @@ func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, o local change = (num1 ~= num2) or (num2 ~= num3) if change then local members = redis.call("ZRANGE", key, 0, -1) - table.insert(members, KEYS[2]) - redis.call("PUBLISH", KEYS[3], table.concat(members, ":")) - return 1 + table.insert(members, "1") + return members else - return 0 + return {"0"} end ` now := time.Now() @@ -82,12 +114,24 @@ func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, o for _, platformID := range online { argv = append(argv, platformID) } - keys := []string{s.getUserOnlineKey(userID), userID, s.channelName} - status, err := s.rdb.Eval(ctx, script, keys, argv).Result() + keys := []string{s.getUserOnlineKey(userID)} + platformIDs, err := s.rdb.Eval(ctx, script, keys, argv).StringSlice() if err != nil { log.ZError(ctx, "redis SetUserOnline", err, "userID", userID, "online", online, "offline", offline) return err } - log.ZDebug(ctx, "redis SetUserOnline", "userID", userID, "online", online, "offline", offline, "status", status) + if len(platformIDs) == 0 { + return errs.ErrInternalServer.WrapMsg("SetUserOnline redis lua invalid return value") + } + if platformIDs[len(platformIDs)-1] != "0" { + log.ZDebug(ctx, "redis SetUserOnline push", "userID", userID, "online", online, "offline", offline, "platformIDs", platformIDs[:len(platformIDs)-1]) + platformIDs[len(platformIDs)-1] = userID + msg := strings.Join(platformIDs, ":") + if err := s.rdb.Publish(ctx, s.channelName, msg).Err(); err != nil { + return errs.Wrap(err) + } + } else { + log.ZDebug(ctx, "redis SetUserOnline not push", "userID", userID, "online", online, "offline", offline) + } return nil } diff --git a/pkg/common/storage/cache/redis/online_test.go b/pkg/common/storage/cache/redis/online_test.go new file mode 100644 index 0000000000..0306f6f5d7 --- /dev/null +++ b/pkg/common/storage/cache/redis/online_test.go @@ -0,0 +1,51 @@ +package redis + +import ( + "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/tools/db/redisutil" + "testing" + "time" +) + +/* +address: [ 172.16.8.48:7001, 172.16.8.48:7002, 172.16.8.48:7003, 172.16.8.48:7004, 172.16.8.48:7005, 172.16.8.48:7006 ] +username: +password: passwd123 +clusterMode: true +db: 0 +maxRetry: 10 +*/ +func TestName111111(t *testing.T) { + conf := config.Redis{ + Address: []string{ + "172.16.8.124:7001", + "172.16.8.124:7002", + "172.16.8.124:7003", + "172.16.8.124:7004", + "172.16.8.124:7005", + "172.16.8.124:7006", + }, + ClusterMode: true, + Password: "passwd123", + //Address: []string{"localhost:16379"}, + //Password: "openIM123", + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second*1000) + defer cancel() + rdb, err := redisutil.NewRedisClient(ctx, conf.Build()) + if err != nil { + panic(err) + } + online := NewUserOnline(rdb) + + userID := "a123456" + t.Log(online.GetOnline(ctx, userID)) + t.Log(online.SetUserOnline(ctx, userID, []int32{1, 2, 3, 4}, nil)) + t.Log(online.GetOnline(ctx, userID)) + +} + +func TestName111(t *testing.T) { + +} diff --git 
a/pkg/common/storage/cache/redis/seq_user.go b/pkg/common/storage/cache/redis/seq_user.go index edbc66b21b..0cedfeee12 100644 --- a/pkg/common/storage/cache/redis/seq_user.go +++ b/pkg/common/storage/cache/redis/seq_user.go @@ -74,17 +74,22 @@ func (s *seqUserCacheRedis) GetUserReadSeq(ctx context.Context, conversationID s } func (s *seqUserCacheRedis) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error { - if seq%s.readSeqWriteRatio == 0 { - if err := s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil { - return err - } + dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID) + if err != nil { + return err } - if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil { - return errs.Wrap(err) + if dbSeq < seq { + if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil { + return errs.Wrap(err) + } } return nil } +func (s *seqUserCacheRedis) SetUserReadSeqToDB(ctx context.Context, conversationID string, userID string, seq int64) error { + return s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq) +} + func (s *seqUserCacheRedis) SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error { keys := make([]string, 0, len(seqs)) for conversationID, seq := range seqs { @@ -128,13 +133,6 @@ func (s *seqUserCacheRedis) SetUserReadSeqs(ctx context.Context, userID string, if err := s.setUserRedisReadSeqs(ctx, userID, seqs); err != nil { return err } - for conversationID, seq := range seqs { - if seq%s.readSeqWriteRatio == 0 { - if err := s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil { - return err - } - } - } return nil } diff --git a/pkg/common/storage/cache/seq_user.go b/pkg/common/storage/cache/seq_user.go index 61dbc0ab45..cef414e16e 100644 --- a/pkg/common/storage/cache/seq_user.go +++ b/pkg/common/storage/cache/seq_user.go @@ -9,6 +9,7 @@ type SeqUser interface { SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error + SetUserReadSeqToDB(ctx context.Context, conversationID string, userID string, seq int64) error SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error SetUserReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error GetUserReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) diff --git a/pkg/common/storage/controller/friend.go b/pkg/common/storage/controller/friend.go index 94cb7d661d..88a5fc863d 100644 --- a/pkg/common/storage/controller/friend.go +++ b/pkg/common/storage/controller/friend.go @@ -160,7 +160,7 @@ func (f *friendDatabase) BecomeFriends(ctx context.Context, ownerUserID string, if err != nil { return err } - opUserID := mcontext.GetOperationID(ctx) + opUserID := mcontext.GetOpUserID(ctx) friends := make([]*model.Friend, 0, len(friendUserIDs)*2) myFriendsSet := datautil.SliceSetAny(myFriends, func(friend *model.Friend) string { return friend.FriendUserID diff --git a/pkg/common/storage/controller/msg.go b/pkg/common/storage/controller/msg.go index 49268e0493..fdd06d3ff3 100644 --- a/pkg/common/storage/controller/msg.go +++ b/pkg/common/storage/controller/msg.go @@ -18,14 +18,14 @@ import ( "context" "encoding/json" 
"errors" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "strings" "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" - "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/protocol/constant" pbmsg "github.com/openimsdk/protocol/msg" @@ -46,16 +46,10 @@ const ( // CommonMsgDatabase defines the interface for message database operations. type CommonMsgDatabase interface { - // BatchInsertChat2DB inserts a batch of messages into the database for a specific conversation. - BatchInsertChat2DB(ctx context.Context, conversationID string, msgs []*sdkws.MsgData, currentMaxSeq int64) error // RevokeMsg revokes a message in a conversation. RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error // MarkSingleChatMsgsAsRead marks messages as read for a single chat by sequence numbers. MarkSingleChatMsgsAsRead(ctx context.Context, userID string, conversationID string, seqs []int64) error - // DeleteMessagesFromCache deletes message caches from Redis by sequence numbers. - DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error - // BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache. - BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error) // GetMsgBySeqsRange retrieves messages from MongoDB by a range of sequence numbers. GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (minSeq int64, maxSeq int64, seqMsg []*sdkws.MsgData, err error) // GetMsgBySeqs retrieves messages for large groups from MongoDB by sequence numbers. 
@@ -89,16 +83,16 @@ type CommonMsgDatabase interface { // to mq MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error - MsgToPushMQ(ctx context.Context, key, conversarionID string, msg2mq *sdkws.MsgData) (int32, int64, error) - MsgToMongoMQ(ctx context.Context, key, conversarionID string, msgs []*sdkws.MsgData, lastSeq int64) error RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error) ConvertMsgsDocLen(ctx context.Context, conversationIDs []string) // clear msg - GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) + GetBeforeMsg(ctx context.Context, ts int64, docIds []string, limit int) ([]*model.MsgDocModel, error) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) + + GetDocIDs(ctx context.Context) ([]string, error) } func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) { @@ -110,22 +104,12 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser if err != nil { return nil, err } - producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToMongoTopic) - if err != nil { - return nil, err - } - producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic) - if err != nil { - return nil, err - } return &commonMsgDatabase{ msgDocDatabase: msgDocModel, msg: msg, seqUser: seqUser, seqConversation: seqConversation, producer: producerToRedis, - producerToMongo: producerToMongo, - producerToPush: producerToPush, }, nil } @@ -136,8 +120,6 @@ type commonMsgDatabase struct { seqConversation cache.SeqConversationCache seqUser cache.SeqUser producer *kafka.Producer - producerToMongo *kafka.Producer - producerToPush *kafka.Producer } func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error { @@ -145,23 +127,6 @@ func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sd return err } -func (db *commonMsgDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) { - partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID}) - if err != nil { - log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq) - return 0, 0, err - } - return partition, offset, nil -} - -func (db *commonMsgDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error { - if len(messages) > 0 { - _, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages}) - return err - } - return nil -} - func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error { if len(fields) == 0 { return nil @@ -263,52 +228,6 @@ func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationI return nil } -func (db 
*commonMsgDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error { - if len(msgList) == 0 { - return errs.ErrArgs.WrapMsg("msgList is empty") - } - msgs := make([]any, len(msgList)) - for i, msg := range msgList { - if msg == nil { - continue - } - var offlinePushModel *model.OfflinePushModel - if msg.OfflinePushInfo != nil { - offlinePushModel = &model.OfflinePushModel{ - Title: msg.OfflinePushInfo.Title, - Desc: msg.OfflinePushInfo.Desc, - Ex: msg.OfflinePushInfo.Ex, - IOSPushSound: msg.OfflinePushInfo.IOSPushSound, - IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount, - } - } - msgs[i] = &model.MsgDataModel{ - SendID: msg.SendID, - RecvID: msg.RecvID, - GroupID: msg.GroupID, - ClientMsgID: msg.ClientMsgID, - ServerMsgID: msg.ServerMsgID, - SenderPlatformID: msg.SenderPlatformID, - SenderNickname: msg.SenderNickname, - SenderFaceURL: msg.SenderFaceURL, - SessionType: msg.SessionType, - MsgFrom: msg.MsgFrom, - ContentType: msg.ContentType, - Content: string(msg.Content), - Seq: msg.Seq, - SendTime: msg.SendTime, - CreateTime: msg.CreateTime, - Status: msg.Status, - Options: msg.Options, - OfflinePush: offlinePushModel, - AtUserIDList: msg.AtUserIDList, - AttachedInfo: msg.AttachedInfo, - Ex: msg.Ex, - } - } - return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq) -} - func (db *commonMsgDatabase) RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error { return db.BatchInsertBlock(ctx, conversationID, []any{revoke}, updateKeyRevoke, seq) } @@ -328,56 +247,6 @@ func (db *commonMsgDatabase) MarkSingleChatMsgsAsRead(ctx context.Context, userI return nil } -func (db *commonMsgDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error { - return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs) -} - -func (db *commonMsgDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error { - for userID, seq := range userSeqMap { - if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil { - return err - } - } - return nil -} - -func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) { - lenList := len(msgs) - if int64(lenList) > db.msgTable.GetSingleGocMsgNum() { - return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap() - } - if lenList < 1 { - return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap() - } - currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs))) - if err != nil { - log.ZError(ctx, "storage.seq.Malloc", err) - return 0, false, err - } - isNew = currentMaxSeq == 0 - lastMaxSeq := currentMaxSeq - userSeqMap := make(map[string]int64) - for _, m := range msgs { - currentMaxSeq++ - m.Seq = currentMaxSeq - userSeqMap[m.SendID] = m.Seq - } - - failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs) - if err != nil { - prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum)) - log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID) - } else { - prommetrics.MsgInsertRedisSuccessCounter.Inc() - } - err = db.setHasReadSeqs(ctx, conversationID, userSeqMap) - if err != nil { - log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID) - 
prommetrics.SeqSetFailedCounter.Inc() - } - return lastMaxSeq, isNew, errs.Wrap(err) -} - func (db *commonMsgDatabase) getMsgBySeqs(ctx context.Context, userID, conversationID string, seqs []int64) (totalMsgs []*sdkws.MsgData, err error) { for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, seqs) { // log.ZDebug(ctx, "getMsgBySeqs", "docID", docID, "seqs", seqs) @@ -912,8 +781,25 @@ func (db *commonMsgDatabase) ConvertMsgsDocLen(ctx context.Context, conversation db.msgDocDatabase.ConvertMsgsDocLen(ctx, conversationIDs) } -func (db *commonMsgDatabase) GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) { - return db.msgDocDatabase.GetBeforeMsg(ctx, ts, limit) +func (db *commonMsgDatabase) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) { + var msgs []*model.MsgDocModel + for i := 0; i < len(docIDs); i += 1000 { + end := i + 1000 + if end > len(docIDs) { + end = len(docIDs) + } + + res, err := db.msgDocDatabase.GetBeforeMsg(ctx, ts, docIDs[i:end], limit) + if err != nil { + return nil, err + } + msgs = append(msgs, res...) + + if len(msgs) >= limit { + return msgs[:limit], nil + } + } + return msgs, nil } func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) { @@ -936,8 +822,10 @@ func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, d return index, err } if len(index) == notNull { + log.ZDebug(ctx, "Delete db in Doc", "DocID", doc.DocID, "index", index, "maxSeq", maxSeq) return index, db.msgDocDatabase.DeleteDoc(ctx, doc.DocID) } else { + log.ZDebug(ctx, "delete db in index", "DocID", doc.DocID, "index", index, "maxSeq", maxSeq) return index, db.msgDocDatabase.DeleteMsgByIndex(ctx, doc.DocID, index) } } @@ -955,3 +843,7 @@ func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID strin } return db.seqConversation.SetMinSeq(ctx, conversationID, seq) } + +func (db *commonMsgDatabase) GetDocIDs(ctx context.Context) ([]string, error) { + return db.msgDocDatabase.GetDocIDs(ctx) +} diff --git a/pkg/common/storage/controller/msg_transfer.go b/pkg/common/storage/controller/msg_transfer.go new file mode 100644 index 0000000000..5e540a2c33 --- /dev/null +++ b/pkg/common/storage/controller/msg_transfer.go @@ -0,0 +1,286 @@ +package controller + +import ( + "context" + + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + pbmsg "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/errs" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mq/kafka" + "go.mongodb.org/mongo-driver/mongo" +) + +type MsgTransferDatabase interface { + // BatchInsertChat2DB inserts a batch of messages into the database for a specific conversation. + BatchInsertChat2DB(ctx context.Context, conversationID string, msgs []*sdkws.MsgData, currentMaxSeq int64) error + // DeleteMessagesFromCache deletes message caches from Redis by sequence numbers. + DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error + + // BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache. 
+ BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error) + SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error + + // to mq + MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) + MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData, lastSeq int64) error +} + +func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (MsgTransferDatabase, error) { + conf, err := kafka.BuildProducerConfig(*kafkaConf.Build()) + if err != nil { + return nil, err + } + producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToMongoTopic) + if err != nil { + return nil, err + } + producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic) + if err != nil { + return nil, err + } + return &msgTransferDatabase{ + msgDocDatabase: msgDocModel, + msg: msg, + seqUser: seqUser, + seqConversation: seqConversation, + producerToMongo: producerToMongo, + producerToPush: producerToPush, + }, nil +} + +type msgTransferDatabase struct { + msgDocDatabase database.Msg + msgTable model.MsgDocModel + msg cache.MsgCache + seqConversation cache.SeqConversationCache + seqUser cache.SeqUser + producerToMongo *kafka.Producer + producerToPush *kafka.Producer +} + +func (db *msgTransferDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error { + if len(msgList) == 0 { + return errs.ErrArgs.WrapMsg("msgList is empty") + } + msgs := make([]any, len(msgList)) + for i, msg := range msgList { + if msg == nil { + continue + } + var offlinePushModel *model.OfflinePushModel + if msg.OfflinePushInfo != nil { + offlinePushModel = &model.OfflinePushModel{ + Title: msg.OfflinePushInfo.Title, + Desc: msg.OfflinePushInfo.Desc, + Ex: msg.OfflinePushInfo.Ex, + IOSPushSound: msg.OfflinePushInfo.IOSPushSound, + IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount, + } + } + msgs[i] = &model.MsgDataModel{ + SendID: msg.SendID, + RecvID: msg.RecvID, + GroupID: msg.GroupID, + ClientMsgID: msg.ClientMsgID, + ServerMsgID: msg.ServerMsgID, + SenderPlatformID: msg.SenderPlatformID, + SenderNickname: msg.SenderNickname, + SenderFaceURL: msg.SenderFaceURL, + SessionType: msg.SessionType, + MsgFrom: msg.MsgFrom, + ContentType: msg.ContentType, + Content: string(msg.Content), + Seq: msg.Seq, + SendTime: msg.SendTime, + CreateTime: msg.CreateTime, + Status: msg.Status, + Options: msg.Options, + OfflinePush: offlinePushModel, + AtUserIDList: msg.AtUserIDList, + AttachedInfo: msg.AttachedInfo, + Ex: msg.Ex, + } + } + return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq) +} + +func (db *msgTransferDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error { + if len(fields) == 0 { + return nil + } + num := db.msgTable.GetSingleGocMsgNum() + // num = 100 + for i, field := range fields { // Check the type of the field + var ok bool + switch key { + case updateKeyMsg: + var msg *model.MsgDataModel + msg, ok = field.(*model.MsgDataModel) + if msg != nil && msg.Seq != firstSeq+int64(i) { + return errs.ErrInternalServer.WrapMsg("seq is invalid") + } + case updateKeyRevoke: + _, ok = field.(*model.RevokeModel) + default: + return 
errs.ErrInternalServer.WrapMsg("key is invalid") + } + if !ok { + return errs.ErrInternalServer.WrapMsg("field type is invalid") + } + } + // Returns true if the document exists in the database, false if the document does not exist in the database + updateMsgModel := func(seq int64, i int) (bool, error) { + var ( + res *mongo.UpdateResult + err error + ) + docID := db.msgTable.GetDocID(conversationID, seq) + index := db.msgTable.GetMsgIndex(seq) + field := fields[i] + switch key { + case updateKeyMsg: + res, err = db.msgDocDatabase.UpdateMsg(ctx, docID, index, "msg", field) + case updateKeyRevoke: + res, err = db.msgDocDatabase.UpdateMsg(ctx, docID, index, "revoke", field) + } + if err != nil { + return false, err + } + return res.MatchedCount > 0, nil + } + tryUpdate := true + for i := 0; i < len(fields); i++ { + seq := firstSeq + int64(i) // Current sequence number + if tryUpdate { + matched, err := updateMsgModel(seq, i) + if err != nil { + return err + } + if matched { + continue // The current data has been updated, skip the current data + } + } + doc := model.MsgDocModel{ + DocID: db.msgTable.GetDocID(conversationID, seq), + Msg: make([]*model.MsgInfoModel, num), + } + var insert int // Inserted data number + for j := i; j < len(fields); j++ { + seq = firstSeq + int64(j) + if db.msgTable.GetDocID(conversationID, seq) != doc.DocID { + break + } + insert++ + switch key { + case updateKeyMsg: + doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{ + Msg: fields[j].(*model.MsgDataModel), + } + case updateKeyRevoke: + doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{ + Revoke: fields[j].(*model.RevokeModel), + } + } + } + for i, msgInfo := range doc.Msg { + if msgInfo == nil { + msgInfo = &model.MsgInfoModel{} + doc.Msg[i] = msgInfo + } + if msgInfo.DelList == nil { + doc.Msg[i].DelList = []string{} + } + } + if err := db.msgDocDatabase.Create(ctx, &doc); err != nil { + if mongo.IsDuplicateKeyError(err) { + i-- // already inserted + tryUpdate = true // next block use update mode + continue + } + return err + } + tryUpdate = false // The current block is inserted successfully, and the next block is inserted preferentially + i += insert - 1 // Skip the inserted data + } + return nil +} + +func (db *msgTransferDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error { + return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs) +} + +func (db *msgTransferDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) { + lenList := len(msgs) + if int64(lenList) > db.msgTable.GetSingleGocMsgNum() { + return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap() + } + if lenList < 1 { + return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap() + } + currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs))) + if err != nil { + log.ZError(ctx, "storage.seq.Malloc", err) + return 0, false, err + } + isNew = currentMaxSeq == 0 + lastMaxSeq := currentMaxSeq + userSeqMap := make(map[string]int64) + for _, m := range msgs { + currentMaxSeq++ + m.Seq = currentMaxSeq + userSeqMap[m.SendID] = m.Seq + } + + failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs) + if err != nil { + prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum)) + log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID) + } else { + 
prommetrics.MsgInsertRedisSuccessCounter.Inc() + } + err = db.setHasReadSeqs(ctx, conversationID, userSeqMap) + if err != nil { + log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID) + prommetrics.SeqSetFailedCounter.Inc() + } + return lastMaxSeq, isNew, errs.Wrap(err) +} + +func (db *msgTransferDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error { + for userID, seq := range userSeqMap { + if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil { + return err + } + } + return nil +} + +func (db *msgTransferDatabase) SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error { + return db.seqUser.SetUserReadSeqToDB(ctx, conversationID, userID, hasReadSeq) +} + +func (db *msgTransferDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) { + partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID}) + if err != nil { + log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq) + return 0, 0, err + } + return partition, offset, nil +} + +func (db *msgTransferDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error { + if len(messages) > 0 { + _, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages}) + if err != nil { + log.ZError(ctx, "MsgToMongoMQ", err, "key", key, "conversationID", conversationID, "lastSeq", lastSeq) + return err + } + } + return nil +} diff --git a/pkg/common/storage/controller/push.go b/pkg/common/storage/controller/push.go index 199a0ba678..91ef126fe5 100644 --- a/pkg/common/storage/controller/push.go +++ b/pkg/common/storage/controller/push.go @@ -17,21 +17,45 @@ package controller import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/protocol/push" + "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mq/kafka" ) type PushDatabase interface { DelFcmToken(ctx context.Context, userID string, platformID int) error + MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error } type pushDataBase struct { - cache cache.ThirdCache + cache cache.ThirdCache + producerToOfflinePush *kafka.Producer } -func NewPushDatabase(cache cache.ThirdCache) PushDatabase { - return &pushDataBase{cache: cache} +func NewPushDatabase(cache cache.ThirdCache, kafkaConf *config.Kafka) PushDatabase { + conf, err := kafka.BuildProducerConfig(*kafkaConf.Build()) + if err != nil { + return nil + } + producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic) + if err != nil { + return nil + } + return &pushDataBase{ + cache: cache, + producerToOfflinePush: producerToOfflinePush, + } } func (p *pushDataBase) DelFcmToken(ctx context.Context, userID string, platformID int) error { return p.cache.DelFcmToken(ctx, userID, platformID) } + +func (p *pushDataBase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error { + _, _, err := p.producerToOfflinePush.SendMessage(ctx, key, &push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs}) + log.ZInfo(ctx, 
"message is push to offlinePush topic", "key", key, "userIDs", userIDs, "msg", msg2mq.String()) + return err +} diff --git a/pkg/common/storage/database/mgo/msg.go b/pkg/common/storage/database/mgo/msg.go index 7dc308a7c4..fc1fe47eab 100644 --- a/pkg/common/storage/database/mgo/msg.go +++ b/pkg/common/storage/database/mgo/msg.go @@ -8,6 +8,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/tools/utils/datautil" + "golang.org/x/exp/rand" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msg" @@ -117,9 +118,9 @@ func (m *MsgMgo) GetMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID strin } func (m *MsgMgo) getMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID string, seqs []int64) ([]*model.MsgInfoModel, error) { - indexs := make([]int64, 0, len(seqs)) + indexes := make([]int64, 0, len(seqs)) for _, seq := range seqs { - indexs = append(indexs, m.model.GetMsgIndex(seq)) + indexes = append(indexes, m.model.GetMsgIndex(seq)) } pipeline := mongo.Pipeline{ bson.D{{Key: "$match", Value: bson.D{ @@ -130,7 +131,7 @@ func (m *MsgMgo) getMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID strin {Key: "doc_id", Value: 1}, {Key: "msgs", Value: bson.D{ {Key: "$map", Value: bson.D{ - {Key: "input", Value: indexs}, + {Key: "input", Value: indexes}, {Key: "as", Value: "index"}, {Key: "in", Value: bson.D{ {Key: "$arrayElemAt", Value: bson.A{"$msgs", "$$index"}}, @@ -1226,10 +1227,53 @@ func (m *MsgMgo) ConvertMsgsDocLen(ctx context.Context, conversationIDs []string } } -func (m *MsgMgo) GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) { +func (m *MsgMgo) GetDocIDs(ctx context.Context) ([]string, error) { + limit := 5000 + var skip int + var docIDs []string + var offset int + + count, err := m.coll.CountDocuments(ctx, bson.M{}) + if err != nil { + return nil, err + } + + if count < int64(limit) { + skip = 0 + } else { + rand.Seed(uint64(time.Now().UnixMilli())) + skip = rand.Intn(int(count / int64(limit))) + offset = skip * limit + } + log.ZDebug(ctx, "offset", "skip", skip, "offset", offset) + res, err := mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, []bson.M{ + { + "$project": bson.M{ + "doc_id": 1, + }, + }, + { + "$skip": offset, + }, + { + "$limit": limit, + }, + }) + + for _, doc := range res { + docIDs = append(docIDs, doc.DocID) + } + + return docIDs, errs.Wrap(err) +} + +func (m *MsgMgo) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) { return mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, []bson.M{ { "$match": bson.M{ + "doc_id": bson.M{ + "$in": docIDs, + }, "msgs.msg.send_time": bson.M{ "$lt": ts, }, diff --git a/pkg/common/storage/database/mgo/seq_user.go b/pkg/common/storage/database/mgo/seq_user.go index 9faad416ae..244de30000 100644 --- a/pkg/common/storage/database/mgo/seq_user.go +++ b/pkg/common/storage/database/mgo/seq_user.go @@ -115,5 +115,12 @@ func (s *seqUserMongo) GetUserReadSeqs(ctx context.Context, userID string, conve } func (s *seqUserMongo) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error { + dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID) + if err != nil { + return err + } + if dbSeq > seq { + return nil + } return s.setSeq(ctx, conversationID, userID, seq, "read_seq") } diff --git a/pkg/common/storage/database/mgo/user.go b/pkg/common/storage/database/mgo/user.go index 
8978e64ebf..ee92b75544 100644 --- a/pkg/common/storage/database/mgo/user.go +++ b/pkg/common/storage/database/mgo/user.go @@ -167,6 +167,10 @@ func (u *UserMgo) DeleteUserCommand(ctx context.Context, userID string, Type int filter := bson.M{"userID": userID, "type": Type, "uuid": UUID} result, err := collection.DeleteOne(ctx, filter) + // when err is not nil, result might be nil + if err != nil { + return errs.Wrap(err) + } if result.DeletedCount == 0 { // No records found to update return errs.Wrap(errs.ErrRecordNotFound) diff --git a/pkg/common/storage/database/msg.go b/pkg/common/storage/database/msg.go index 84f3a9e3e2..23a99f5b96 100644 --- a/pkg/common/storage/database/msg.go +++ b/pkg/common/storage/database/msg.go @@ -16,10 +16,11 @@ package database import ( "context" + "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/protocol/msg" "go.mongodb.org/mongo-driver/mongo" - "time" ) type Msg interface { @@ -44,5 +45,7 @@ type Msg interface { DeleteDoc(ctx context.Context, docID string) error DeleteMsgByIndex(ctx context.Context, docID string, index []int) error - GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) + GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) + + GetDocIDs(ctx context.Context) ([]string, error) } diff --git a/pkg/common/storage/model/msg.go b/pkg/common/storage/model/msg.go index 8095665d2f..e16233973b 100644 --- a/pkg/common/storage/model/msg.go +++ b/pkg/common/storage/model/msg.go @@ -92,15 +92,15 @@ type GroupCount struct { Count int64 `bson:"count"` } -func (MsgDocModel) TableName() string { +func (*MsgDocModel) TableName() string { return MsgTableName } -func (MsgDocModel) GetSingleGocMsgNum() int64 { +func (*MsgDocModel) GetSingleGocMsgNum() int64 { return singleGocMsgNum } -func (MsgDocModel) GetSingleGocMsgNum5000() int64 { +func (*MsgDocModel) GetSingleGocMsgNum5000() int64 { return singleGocMsgNum5000 } @@ -108,12 +108,12 @@ func (m *MsgDocModel) IsFull() bool { return m.Msg[len(m.Msg)-1].Msg != nil } -func (m MsgDocModel) GetDocID(conversationID string, seq int64) string { +func (m *MsgDocModel) GetDocID(conversationID string, seq int64) string { seqSuffix := (seq - 1) / singleGocMsgNum return m.indexGen(conversationID, seqSuffix) } -func (m MsgDocModel) GetDocIDSeqsMap(conversationID string, seqs []int64) map[string][]int64 { +func (m *MsgDocModel) GetDocIDSeqsMap(conversationID string, seqs []int64) map[string][]int64 { t := make(map[string][]int64) for i := 0; i < len(seqs); i++ { docID := m.GetDocID(conversationID, seqs[i]) @@ -127,15 +127,15 @@ func (m MsgDocModel) GetDocIDSeqsMap(conversationID string, seqs []int64) map[st return t } -func (MsgDocModel) GetMsgIndex(seq int64) int64 { +func (*MsgDocModel) GetMsgIndex(seq int64) int64 { return (seq - 1) % singleGocMsgNum } -func (MsgDocModel) indexGen(conversationID string, seqSuffix int64) string { +func (*MsgDocModel) indexGen(conversationID string, seqSuffix int64) string { return conversationID + ":" + strconv.FormatInt(seqSuffix, 10) } -func (MsgDocModel) GenExceptionMessageBySeqs(seqs []int64) (exceptionMsg []*sdkws.MsgData) { +func (*MsgDocModel) GenExceptionMessageBySeqs(seqs []int64) (exceptionMsg []*sdkws.MsgData) { for _, v := range seqs { msgModel := new(sdkws.MsgData) msgModel.Seq = v diff --git a/pkg/localcache/lru/lru.go b/pkg/localcache/lru/lru.go index 2fedffc48b..726535c48c 100644 --- a/pkg/localcache/lru/lru.go +++ b/pkg/localcache/lru/lru.go @@ 
-20,7 +20,9 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V] type LRU[K comparable, V any] interface { Get(key K, fetch func() (V, error)) (V, error) + Set(key K, value V) SetHas(key K, value V) bool + GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) Del(key K) bool Stop() } diff --git a/pkg/localcache/lru/lru_expiration.go b/pkg/localcache/lru/lru_expiration.go index d27e670574..df6bacbf43 100644 --- a/pkg/localcache/lru/lru_expiration.go +++ b/pkg/localcache/lru/lru_expiration.go @@ -51,6 +51,11 @@ type ExpirationLRU[K comparable, V any] struct { target Target } +func (x *ExpirationLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) { + //TODO implement me + panic("implement me") +} + func (x *ExpirationLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { x.lock.Lock() v, ok := x.core.Get(key) @@ -99,5 +104,11 @@ func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool { return false } +func (x *ExpirationLRU[K, V]) Set(key K, value V) { + x.lock.Lock() + defer x.lock.Unlock() + x.core.Add(key, &expirationLruItem[V]{value: value}) +} + func (x *ExpirationLRU[K, V]) Stop() { } diff --git a/pkg/localcache/lru/lru_lazy.go b/pkg/localcache/lru/lru_lazy.go index e935c687c4..84aa980e8a 100644 --- a/pkg/localcache/lru/lru_lazy.go +++ b/pkg/localcache/lru/lru_lazy.go @@ -88,18 +88,75 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { return v.value, v.err } -//func (x *LayLRU[K, V]) Set(key K, value V) { -// x.lock.Lock() -// x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()}) -// x.lock.Unlock() -//} -// +func (x *LayLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) { + var ( + err error + once sync.Once + ) + + x.lock.Lock() + res := make(map[K]V) + queries := make([]K, 0) + setVs := make(map[K]*layLruItem[V]) + for _, key := range keys { + v, ok := x.core.Get(key) + x.lock.Unlock() + if ok { + v.lock.Lock() + expires, value, err1 := v.expires, v.value, v.err + v.lock.Unlock() + if expires != 0 && expires > time.Now().UnixMilli() { + x.target.IncrGetHit() + res[key] = value + if err1 != nil { + once.Do(func() { + err = err1 + }) + } + continue + } + } + queries = append(queries, key) + } + values, err1 := fetch(queries) + if err1 != nil { + once.Do(func() { + err = err1 + }) + } + for key, val := range values { + v := &layLruItem[V]{} + v.value = val + + if err == nil { + v.expires = time.Now().Add(x.successTTL).UnixMilli() + x.target.IncrGetSuccess() + } else { + v.expires = time.Now().Add(x.failedTTL).UnixMilli() + x.target.IncrGetFailed() + } + setVs[key] = v + x.lock.Lock() + x.core.Add(key, v) + x.lock.Unlock() + res[key] = val + } + + return res, err +} + //func (x *LayLRU[K, V]) Has(key K) bool { // x.lock.Lock() // defer x.lock.Unlock() // return x.core.Contains(key) //} +func (x *LayLRU[K, V]) Set(key K, value V) { + x.lock.Lock() + defer x.lock.Unlock() + x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()}) +} + func (x *LayLRU[K, V]) SetHas(key K, value V) bool { x.lock.Lock() defer x.lock.Unlock() diff --git a/pkg/localcache/lru/lru_slot.go b/pkg/localcache/lru/lru_slot.go index 4538ca20e4..077219b75f 100644 --- a/pkg/localcache/lru/lru_slot.go +++ b/pkg/localcache/lru/lru_slot.go @@ -32,6 +32,29 @@ type slotLRU[K comparable, V any] struct { hash func(k K) uint64 } +func (x *slotLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, 
error)) (map[K]V, error) { + var ( + slotKeys = make(map[uint64][]K) + vs = make(map[K]V) + ) + + for _, k := range keys { + index := x.getIndex(k) + slotKeys[index] = append(slotKeys[index], k) + } + + for k, v := range slotKeys { + batches, err := x.slots[k].GetBatch(v, fetch) + if err != nil { + return nil, err + } + for key, value := range batches { + vs[key] = value + } + } + return vs, nil +} + func (x *slotLRU[K, V]) getIndex(k K) uint64 { return x.hash(k) % x.n } @@ -40,6 +63,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { return x.slots[x.getIndex(key)].Get(key, fetch) } +func (x *slotLRU[K, V]) Set(key K, value V) { + x.slots[x.getIndex(key)].Set(key, value) +} + func (x *slotLRU[K, V]) SetHas(key K, value V) bool { return x.slots[x.getIndex(key)].SetHas(key, value) } diff --git a/pkg/rpccache/conversation.go b/pkg/rpccache/conversation.go index 2a62c7bbd5..925d2a37ca 100644 --- a/pkg/rpccache/conversation.go +++ b/pkg/rpccache/conversation.go @@ -86,7 +86,7 @@ func (c *ConversationLocalCache) GetConversation(ctx context.Context, userID, co if err == nil { log.ZDebug(ctx, "ConversationLocalCache GetConversation return", "userID", userID, "conversationID", conversationID, "value", val) } else { - log.ZError(ctx, "ConversationLocalCache GetConversation return", err, "userID", userID, "conversationID", conversationID) + log.ZWarn(ctx, "ConversationLocalCache GetConversation return", err, "userID", userID, "conversationID", conversationID) } }() var cache cacheProto[pbconversation.Conversation] diff --git a/pkg/rpccache/online.go b/pkg/rpccache/online.go index 2ffa1f1577..a02a0662d4 100644 --- a/pkg/rpccache/online.go +++ b/pkg/rpccache/online.go @@ -2,60 +2,197 @@ package rpccache import ( "context" + "fmt" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/user" + "math/rand" + "strconv" + "sync" + "sync/atomic" + "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/open-im-server/v3/pkg/localcache/lru" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/util/useronline" + "github.com/openimsdk/tools/db/cacheutil" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mcontext" "github.com/redis/go-redis/v9" - "math/rand" - "strconv" - "time" ) -func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache { +func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fullUserCache bool, fn func(ctx context.Context, userID string, platformIDs []int32)) (*OnlineCache, error) { + l := &sync.Mutex{} x := &OnlineCache{ - user: user, - group: group, - local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] { + user: user, + group: group, + fullUserCache: fullUserCache, + Lock: l, + Cond: sync.NewCond(l), + } + + ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10)) + + switch x.fullUserCache { + case true: + log.ZDebug(ctx, "fullUserCache is true") + x.mapCache = cacheutil.NewCache[string, []int32]() + go func() { + if err := x.initUsersOnlineStatus(ctx); err != nil { + log.ZError(ctx, "initUsersOnlineStatus failed", err) + } + }() + case false: + log.ZDebug(ctx, "fullUserCache is false") + x.lruCache = lru.NewSlotLRU(1024, 
localcache.LRUStringHash, func() lru.LRU[string, []int32] { return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {}) - }), + }) + x.CurrentPhase.Store(DoSubscribeOver) + x.Cond.Broadcast() } + go func() { - ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10)) - for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() { - userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload) + x.doSubscribe(ctx, rdb, fn) + }() + return x, nil +} + +const ( + Begin uint32 = iota + DoOnlineStatusOver + DoSubscribeOver +) + +type OnlineCache struct { + user rpcclient.UserRpcClient + group *GroupLocalCache + + // fullUserCache if enabled, caches the online status of all users using mapCache; + // otherwise, only a portion of users' online statuses (regardless of whether they are online) will be cached using lruCache. + fullUserCache bool + + lruCache lru.LRU[string, []int32] + mapCache *cacheutil.Cache[string, []int32] + + Lock *sync.Mutex + Cond *sync.Cond + CurrentPhase atomic.Uint32 +} + +func (o *OnlineCache) initUsersOnlineStatus(ctx context.Context) (err error) { + log.ZDebug(ctx, "init users online status begin") + + var ( + totalSet atomic.Int64 + maxTries = 5 + retryInterval = time.Second * 5 + + resp *user.GetAllOnlineUsersResp + ) + + defer func(t time.Time) { + log.ZInfo(ctx, "init users online status end", "cost", time.Since(t), "totalSet", totalSet.Load()) + o.CurrentPhase.Store(DoOnlineStatusOver) + o.Cond.Broadcast() + }(time.Now()) + + retryOperation := func(operation func() error, operationName string) error { + for i := 0; i < maxTries; i++ { + if err = operation(); err != nil { + log.ZWarn(ctx, fmt.Sprintf("initUsersOnlineStatus: %s failed", operationName), err) + time.Sleep(retryInterval) + } else { + return nil + } + } + return err + } + + cursor := uint64(0) + for resp == nil || resp.NextCursor != 0 { + if err = retryOperation(func() error { + resp, err = o.user.GetAllOnlineUsers(ctx, cursor) if err != nil { - log.ZError(ctx, "OnlineCache setUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel) - continue + return err + } + + for _, u := range resp.StatusList { + if u.Status == constant.Online { + o.setUserOnline(u.UserID, u.PlatformIDs) + } + totalSet.Add(1) } - storageCache := x.setUserOnline(userID, platformIDs) - log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache) + cursor = resp.NextCursor + return nil + }, "getAllOnlineUsers"); err != nil { + return err + } + } + + return nil +} + +func (o *OnlineCache) doSubscribe(ctx context.Context, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) { + o.Lock.Lock() + ch := rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() + for o.CurrentPhase.Load() < DoOnlineStatusOver { + o.Cond.Wait() + } + o.Lock.Unlock() + log.ZInfo(ctx, "begin doSubscribe") + + doMessage := func(message *redis.Message) { + userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload) + if err != nil { + log.ZError(ctx, "OnlineCache setHasUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel) + return + } + log.ZDebug(ctx, fmt.Sprintf("get subscribe %s message", cachekey.OnlineChannel), "useID", userID, 
"platformIDs", platformIDs) + switch o.fullUserCache { + case true: + if len(platformIDs) == 0 { + // offline + o.mapCache.Delete(userID) + } else { + o.mapCache.Store(userID, platformIDs) + } + case false: + storageCache := o.setHasUserOnline(userID, platformIDs) + log.ZDebug(ctx, "OnlineCache setHasUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache) if fn != nil { fn(ctx, userID, platformIDs) } } - }() - return x -} + } -type OnlineCache struct { - user rpcclient.UserRpcClient - group *GroupLocalCache - local lru.LRU[string, []int32] + if o.CurrentPhase.Load() == DoOnlineStatusOver { + for done := false; !done; { + select { + case message := <-ch: + doMessage(message) + default: + o.CurrentPhase.Store(DoSubscribeOver) + o.Cond.Broadcast() + done = true + } + } + } + + for message := range ch { + doMessage(message) + } } func (o *OnlineCache) getUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) { - platformIDs, err := o.local.Get(userID, func() ([]int32, error) { + platformIDs, err := o.lruCache.Get(userID, func() ([]int32, error) { return o.user.GetUserOnlinePlatform(ctx, userID) }) if err != nil { log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userID) return nil, err } - log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs) + //log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs) return platformIDs, nil } @@ -69,6 +206,16 @@ func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string) return platformIDs, nil } +// func (o *OnlineCache) GetUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string]int32, error) { +// platformIDs, err := o.getUserOnlinePlatform(ctx, userIDs) +// if err != nil { +// return nil, err +// } +// tmp := make([]int32, len(platformIDs)) +// copy(tmp, platformIDs) +// return platformIDs, nil +// } + func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) { platformIDs, err := o.getUserOnlinePlatform(ctx, userID) if err != nil { @@ -77,10 +224,68 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e return len(platformIDs) > 0, nil } +func (o *OnlineCache) getUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string][]int32, error) { + platformIDsMap, err := o.lruCache.GetBatch(userIDs, func(missingUsers []string) (map[string][]int32, error) { + platformIDsMap := make(map[string][]int32) + + usersStatus, err := o.user.GetUsersOnlinePlatform(ctx, missingUsers) + if err != nil { + return nil, err + } + + for _, u := range usersStatus { + platformIDsMap[u.UserID] = u.PlatformIDs + } + + return platformIDsMap, nil + }) + if err != nil { + log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userIDs) + return nil, err + } + return platformIDsMap, nil +} + +func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, []string, error) { + t := time.Now() + + var ( + onlineUserIDs = make([]string, 0, len(userIDs)) + offlineUserIDs = make([]string, 0, len(userIDs)) + ) + + switch o.fullUserCache { + case true: + for _, userID := range userIDs { + if _, ok := o.mapCache.Load(userID); ok { + onlineUserIDs = append(onlineUserIDs, userID) + } else { + offlineUserIDs = append(offlineUserIDs, userID) + } + } + case false: + userOnlineMap, err := o.getUserOnlinePlatformBatch(ctx, userIDs) + if err != nil { + return nil, nil, err + } + 
+		for key, value := range userOnlineMap {
+			if len(value) > 0 {
+				onlineUserIDs = append(onlineUserIDs, key)
+			} else {
+				offlineUserIDs = append(offlineUserIDs, key)
+			}
+		}
+	}
+
+	log.ZInfo(ctx, "get users online", "online users length", len(onlineUserIDs), "offline users length", len(offlineUserIDs), "cost", time.Since(t))
+	return onlineUserIDs, offlineUserIDs, nil
+}
+
 //func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
 //	onlineUserIDs := make([]string, 0, len(userIDs))
 //	for _, userID := range userIDs {
-//		online, err := o.GetUserOnline(ctx, userID)
+//		online, err := o.GetUserOnline(ctx, userID)
 //		if err != nil {
 //			return nil, err
 //		}
@@ -111,6 +316,15 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e
 //	return onlineUserIDs, nil
 //}
 
-func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool {
-	return o.local.SetHas(userID, platformIDs)
+func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) {
+	switch o.fullUserCache {
+	case true:
+		o.mapCache.Store(userID, platformIDs)
+	case false:
+		o.lruCache.Set(userID, platformIDs)
+	}
+}
+
+func (o *OnlineCache) setHasUserOnline(userID string, platformIDs []int32) bool {
+	return o.lruCache.SetHas(userID, platformIDs)
 }
diff --git a/pkg/rpcclient/conversation.go b/pkg/rpcclient/conversation.go
index 8f95f86a6c..ccca856194 100644
--- a/pkg/rpcclient/conversation.go
+++ b/pkg/rpcclient/conversation.go
@@ -77,6 +77,11 @@ func (c *ConversationRpcClient) SetConversationMaxSeq(ctx context.Context, owner
 	return err
 }
 
+func (c *ConversationRpcClient) SetConversationMinSeq(ctx context.Context, ownerUserIDs []string, conversationID string, minSeq int64) error {
+	_, err := c.Client.SetConversationMinSeq(ctx, &pbconversation.SetConversationMinSeqReq{OwnerUserID: ownerUserIDs, ConversationID: conversationID, MinSeq: minSeq})
+	return err
+}
+
 func (c *ConversationRpcClient) SetConversations(ctx context.Context, userIDs []string, conversation *pbconversation.ConversationReq) error {
 	_, err := c.Client.SetConversations(ctx, &pbconversation.SetConversationsReq{UserIDs: userIDs, Conversation: conversation})
 	return err
diff --git a/pkg/rpcclient/msg.go b/pkg/rpcclient/msg.go
index 124cc49af3..9b26a7abda 100644
--- a/pkg/rpcclient/msg.go
+++ b/pkg/rpcclient/msg.go
@@ -17,21 +17,22 @@ package rpcclient
 import (
 	"context"
 	"encoding/json"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/protobuf/proto"
+
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/protocol/constant"
 	"github.com/openimsdk/protocol/msg"
 	"github.com/openimsdk/protocol/sdkws"
 	"github.com/openimsdk/tools/discovery"
 	"github.com/openimsdk/tools/log"
-	"github.com/openimsdk/tools/mcontext"
 	"github.com/openimsdk/tools/mq/memamq"
 	"github.com/openimsdk/tools/system/program"
 	"github.com/openimsdk/tools/utils/idutil"
 	"github.com/openimsdk/tools/utils/jsonutil"
 	"github.com/openimsdk/tools/utils/timeutil"
-	"google.golang.org/grpc"
-	"google.golang.org/protobuf/proto"
-	"time"
 )
 
 func newContentTypeConf(conf *config.Notification) map[int32]config.NotificationConfig {
@@ -159,6 +160,15 @@ func (m *MessageRpcClient) SendMsg(ctx context.Context, req *msg.SendMsgReq) (*m
 	return resp, nil
 }
 
+// SetUserConversationsMinSeq sets the minimum sequence for the given user conversations.
+func (m *MessageRpcClient) SetUserConversationsMinSeq(ctx context.Context, req *msg.SetUserConversationsMinSeqReq) (*msg.SetUserConversationsMinSeqResp, error) {
+	resp, err := m.Client.SetUserConversationsMinSeq(ctx, req)
+	if
err != nil { + return nil, err + } + return resp, nil +} + // GetMaxSeq retrieves the maximum sequence number from the gRPC client. // Errors during the gRPC call are wrapped to provide additional context. func (m *MessageRpcClient) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sdkws.GetMaxSeqResp, error) { @@ -174,6 +184,9 @@ func (m *MessageRpcClient) GetMaxSeqs(ctx context.Context, conversationIDs []str resp, err := m.Client.GetMaxSeqs(ctx, &msg.GetMaxSeqsReq{ ConversationIDs: conversationIDs, }) + if err != nil { + return nil, err + } return resp.MaxSeqs, err } @@ -182,6 +195,9 @@ func (m *MessageRpcClient) GetHasReadSeqs(ctx context.Context, userID string, co UserID: userID, ConversationIDs: conversationIDs, }) + if err != nil { + return nil, err + } return resp.MaxSeqs, err } @@ -190,6 +206,9 @@ func (m *MessageRpcClient) GetMsgByConversationIDs(ctx context.Context, docIDs [ ConversationIDs: docIDs, MaxSeqs: seqs, }) + if err != nil { + return nil, err + } return resp.MsgDatas, err } @@ -204,6 +223,19 @@ func (m *MessageRpcClient) PullMessageBySeqList(ctx context.Context, req *sdkws. return resp, nil } +func (m *MessageRpcClient) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *msg.GetConversationsHasReadAndMaxSeqReq) (*msg.GetConversationsHasReadAndMaxSeqResp, error) { + resp, err := m.Client.GetConversationsHasReadAndMaxSeq(ctx, req) + if err != nil { + // Wrap the error to provide more context if the gRPC call fails. + return nil, err + } + return resp, nil +} + +func (m *MessageRpcClient) GetSeqMessage(ctx context.Context, req *msg.GetSeqMessageReq) (*msg.GetSeqMessageResp, error) { + return m.Client.GetSeqMessage(ctx, req) +} + func (m *MessageRpcClient) GetConversationMaxSeq(ctx context.Context, conversationID string) (int64, error) { resp, err := m.Client.GetConversationMaxSeq(ctx, &msg.GetConversationMaxSeqReq{ConversationID: conversationID}) if err != nil { @@ -252,8 +284,8 @@ func WithUserRpcClient(userRpcClient *UserRpcClient) NotificationSenderOptions { } const ( - notificationWorkerCount = 2 - notificationBufferSize = 200 + notificationWorkerCount = 16 + notificationBufferSize = 1024 * 1024 * 2 ) func NewNotificationSender(conf *config.Notification, opts ...NotificationSenderOptions) *NotificationSender { @@ -280,7 +312,8 @@ func WithRpcGetUserName() NotificationOptions { } func (s *NotificationSender) send(ctx context.Context, sendID, recvID string, contentType, sessionType int32, m proto.Message, opts ...NotificationOptions) { - ctx = mcontext.WithMustInfoCtx([]string{mcontext.GetOperationID(ctx), mcontext.GetOpUserID(ctx), mcontext.GetOpUserPlatform(ctx), mcontext.GetConnID(ctx)}) + //ctx = mcontext.WithMustInfoCtx([]string{mcontext.GetOperationID(ctx), mcontext.GetOpUserID(ctx), mcontext.GetOpUserPlatform(ctx), mcontext.GetConnID(ctx)}) + ctx = context.WithoutCancel(ctx) ctx, cancel := context.WithTimeout(ctx, time.Second*time.Duration(5)) defer cancel() n := sdkws.NotificationElem{Detail: jsonutil.StructToJsonString(m)} @@ -337,7 +370,9 @@ func (s *NotificationSender) send(ctx context.Context, sendID, recvID string, co } func (s *NotificationSender) NotificationWithSessionType(ctx context.Context, sendID, recvID string, contentType, sessionType int32, m proto.Message, opts ...NotificationOptions) { - s.queue.Push(func() { s.send(ctx, sendID, recvID, contentType, sessionType, m, opts...) }) + if err := s.queue.Push(func() { s.send(ctx, sendID, recvID, contentType, sessionType, m, opts...) 
}); err != nil { + log.ZWarn(ctx, "Push to queue failed", err, "sendID", sendID, "recvID", recvID, "msg", jsonutil.StructToJsonString(m)) + } } func (s *NotificationSender) Notification(ctx context.Context, sendID, recvID string, contentType int32, m proto.Message, opts ...NotificationOptions) { diff --git a/pkg/rpcclient/user.go b/pkg/rpcclient/user.go index eabe77b942..375cc993cb 100644 --- a/pkg/rpcclient/user.go +++ b/pkg/rpcclient/user.go @@ -169,6 +169,15 @@ func (u *UserRpcClient) Access(ctx context.Context, ownerUserID string) error { return authverify.CheckAccessV3(ctx, ownerUserID, u.imAdminUserID) } +// GetAllUserID retrieves all user IDs with pagination options. +func (u *UserRpcClient) GetAllUserID(ctx context.Context, pageNumber, showNumber int32) (*user.GetAllUserIDResp, error) { + resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}}) + if err != nil { + return nil, err + } + return resp, nil +} + // GetAllUserIDs retrieves all user IDs with pagination options. func (u *UserRpcClient) GetAllUserIDs(ctx context.Context, pageNumber, showNumber int32) ([]string, error) { resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}}) @@ -215,3 +224,7 @@ func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string } return resp[0].PlatformIDs, nil } + +func (u *UserRpcClient) GetAllOnlineUsers(ctx context.Context, cursor uint64) (*user.GetAllOnlineUsersResp, error) { + return u.Client.GetAllOnlineUsers(ctx, &user.GetAllOnlineUsersReq{Cursor: cursor}) +} diff --git a/pkg/util/conversationutil/conversationutil.go b/pkg/util/conversationutil/conversationutil.go index 5683d8df89..f0a44ab1e1 100644 --- a/pkg/util/conversationutil/conversationutil.go +++ b/pkg/util/conversationutil/conversationutil.go @@ -19,6 +19,14 @@ func GenGroupConversationID(groupID string) string { return "sg_" + groupID } +func IsGroupConversationID(conversationID string) bool { + return strings.HasPrefix(conversationID, "sg_") +} + +func IsNotificationConversationID(conversationID string) bool { + return strings.HasPrefix(conversationID, "n_") +} + func GenConversationUniqueKeyForSingle(sendID, recvID string) string { l := []string{sendID, recvID} sort.Strings(l) diff --git a/scripts/create-topic.sh b/scripts/create-topic.sh index 206075fb83..bbc739287f 100755 --- a/scripts/create-topic.sh +++ b/scripts/create-topic.sh @@ -35,7 +35,7 @@ done echo "Kafka is ready. Creating topics..." 
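As a quick illustration of the new prefix helpers added to conversationutil above (the IDs are made-up examples):

	groupConvID := conversationutil.GenGroupConversationID("group123") // "sg_group123"
	_ = conversationutil.IsGroupConversationID(groupConvID)            // true
	_ = conversationutil.IsNotificationConversationID("n_user456")     // true
	_ = conversationutil.IsGroupConversationID("n_user456")            // false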
-topics=("toRedis" "toMongo" "toPush") +topics=("toRedis" "toMongo" "toPush" "toOfflinePush") partitions=8 replicationFactor=1 diff --git a/start-config.yml b/start-config.yml index 21436d7a9a..1231b5d0d4 100644 --- a/start-config.yml +++ b/start-config.yml @@ -3,8 +3,8 @@ serviceBinaries: openim-crontask: 1 openim-rpc-user: 1 openim-msggateway: 1 - openim-push: 1 - openim-msgtransfer: 4 + openim-push: 8 + openim-msgtransfer: 8 openim-rpc-conversation: 1 openim-rpc-auth: 1 openim-rpc-group: 1 diff --git a/tools/check-component/main.go b/tools/check-component/main.go index 5fa84ac36f..4f4c08c16a 100644 --- a/tools/check-component/main.go +++ b/tools/check-component/main.go @@ -18,6 +18,12 @@ import ( "context" "flag" "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/cmd" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/tools/db/mongoutil" @@ -27,11 +33,6 @@ import ( "github.com/openimsdk/tools/mq/kafka" "github.com/openimsdk/tools/s3/minio" "github.com/openimsdk/tools/system/program" - "io/ioutil" - "log" - "os" - "path/filepath" - "time" ) const maxRetry = 180 @@ -65,7 +66,7 @@ func CheckMinIO(ctx context.Context, config *config.Minio) error { } func CheckKafka(ctx context.Context, conf *config.Kafka) error { - return kafka.Check(ctx, conf.Build(), []string{conf.ToMongoTopic, conf.ToRedisTopic, conf.ToPushTopic}) + return kafka.Check(ctx, conf.Build(), []string{conf.ToMongoTopic, conf.ToRedisTopic, conf.ToPushTopic, conf.ToOfflinePushTopic}) } func initConfig(configDir string) (*config.Mongo, *config.Redis, *config.Kafka, *config.Minio, *config.Discovery, error) { diff --git a/tools/url2im/pkg/manage.go b/tools/url2im/pkg/manage.go index 5e1626da9f..3664baa25b 100644 --- a/tools/url2im/pkg/manage.go +++ b/tools/url2im/pkg/manage.go @@ -234,7 +234,7 @@ func (m *Manage) RunTask(ctx context.Context, task Task) (string, error) { } for i, currentPartSize := range part.PartSizes { md5Reader := NewMd5Reader(io.LimitReader(reader, currentPartSize)) - if m.doPut(ctx, m.api.Client, initiateMultipartUploadResp.Upload.Sign, uploadParts[i], md5Reader, currentPartSize); err != nil { + if err := m.doPut(ctx, m.api.Client, initiateMultipartUploadResp.Upload.Sign, uploadParts[i], md5Reader, currentPartSize); err != nil { return "", err } if md5val := md5Reader.Md5(); md5val != part.PartMd5s[i] {