diff --git a/.env b/.env index a71332d5ee..0c02bf9f6b 100644 --- a/.env +++ b/.env @@ -8,12 +8,12 @@ PROMETHEUS_IMAGE=prom/prometheus:v2.45.6 ALERTMANAGER_IMAGE=prom/alertmanager:v0.27.0 GRAFANA_IMAGE=grafana/grafana:11.0.1 -OPENIM_WEB_FRONT_IMAGE=openim/openim-web-front:release-v3.5.1 -OPENIM_ADMIN_FRONT_IMAGE=openim/openim-admin-front:release-v1.7 +OPENIM_WEB_FRONT_IMAGE=openim/openim-web-front:release-v3.8.1 +OPENIM_ADMIN_FRONT_IMAGE=openim/openim-admin-front:release-v1.8.2 #FRONT_IMAGE: use aliyun images -#OPENIM_WEB_FRONT_IMAGE=registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-web-front:release-v3.5.1 -#OPENIM_ADMIN_FRONT_IMAGE=registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-admin-front:release-v1.7 +#OPENIM_WEB_FRONT_IMAGE=registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-web-front:release-v3.8.1 +#OPENIM_ADMIN_FRONT_IMAGE=registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-admin-front:release-v1.8.2 DATA_DIR=./ diff --git a/.github/.codecov.yml b/.github/.codecov.yml index fab584a316..c14ba471f5 100644 --- a/.github/.codecov.yml +++ b/.github/.codecov.yml @@ -1,17 +1,3 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - coverage: status: project: @@ -28,6 +14,7 @@ coverage: paths: - test/* # only include coverage in "test/" folder informational: true # Always pass check + # internal: # declare a new status context "internal" # paths: # - internal/* # only include coverage in "internal/" folder diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 0000000000..2158804b76 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,65 @@ +name: Bug Report +title: "[BUG] " +labels: ["bug"] +description: "Create a detailed report to help us identify and resolve issues." +# assignees: [] + +body: + - type: markdown + attributes: + value: "Thank you for taking the time to fill out the bug report. Please provide as much information as possible to help us understand and replicate the bug." + + - type: input + id: openim-server-version + attributes: + label: OpenIM Server Version + description: "Please provide the version number of OpenIM Server you are using." + placeholder: "e.g., 3.8.0" + validations: + required: true + + - type: dropdown + id: operating-system + attributes: + label: Operating System and CPU Architecture + description: "Please select the operating system and describe the CPU architecture." + options: + - Linux (AMD) + - Linux (ARM) + - Windows (AMD) + - Windows (ARM) + - macOS (AMD) + - macOS (ARM) + validations: + required: true + + - type: dropdown + id: deployment-method + attributes: + label: Deployment Method + description: "Please specify how OpenIM Server was deployed." 
+      options:
+        - Source Code Deployment
+        - Docker Deployment
+    validations:
+      required: true
+
+  - type: textarea
+    id: bug-description-reproduction
+    attributes:
+      label: Bug Description and Steps to Reproduce
+      description: "Provide a detailed description of the bug and a step-by-step guide on how to reproduce it."
+      placeholder: "Describe the bug in detail here...\n\nSteps to reproduce the bug on the server:\n1. Start the server with specific configurations (mention any relevant config details).\n2. Make an API call to '...' endpoint with the following payload '...'.\n3. Observe the behavior and note any error messages or logs.\n4. Mention any additional setup relevant to the bug (e.g., database version, external service dependencies)."
+    validations:
+      required: true
+
+  - type: markdown
+    attributes:
+      value: "If possible, please add screenshots to help explain your problem."
+
+  - type: textarea
+    id: screenshots-link
+    attributes:
+      label: Screenshots Link
+      description: "If applicable, please provide any links to screenshots here."
+      placeholder: "Paste your screenshot URL here, e.g., http://imgur.com/example"
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000..deb8990831
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,11 @@
+blank_issues_enabled: false
+contact_links:
+  # - name: "Bug Report"
+  #   description: "Report a bug in the project"
+  #   file: "bug-report.yml"
+  - name: 📢 Connect on slack
+    url: https://join.slack.com/t/openimsdk/shared_invite/zt-1tmoj26uf-_FDy3dowVHBiGvLk9e5Xkg
+    about: Support OpenIM-related requests or issues, get in touch with developers and help on slack
+  - name: 🌐 OpenIM Blog
+    url: https://www.openim.io/
+    about: Open the OpenIM community blog
diff --git a/.github/ISSUE_TEMPLATE/deployment.yml b/.github/ISSUE_TEMPLATE/deployment.yml
new file mode 100644
index 0000000000..8df6243228
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/deployment.yml
@@ -0,0 +1,65 @@
+name: Deployment issue
+title: "[Deployment] "
+labels: ["deployment"]
+description: "Create a detailed report to help us identify and resolve deployment issues."
+# assignees: []
+
+body:
+  - type: markdown
+    attributes:
+      value: "Thank you for taking the time to fill out the deployment issue report. Please provide as much information as possible to help us understand and resolve the issue."
+
+  - type: input
+    id: openim-server-version
+    attributes:
+      label: OpenIM Server Version
+      description: "Please provide the version number of OpenIM Server you are using."
+      placeholder: "e.g., 3.8.0"
+    validations:
+      required: true
+
+  - type: dropdown
+    id: operating-system
+    attributes:
+      label: Operating System and CPU Architecture
+      description: "Please select the operating system and describe the CPU architecture."
+      options:
+        - Linux (AMD)
+        - Linux (ARM)
+        - Windows (AMD)
+        - Windows (ARM)
+        - macOS (AMD)
+        - macOS (ARM)
+    validations:
+      required: true
+
+  - type: dropdown
+    id: deployment-method
+    attributes:
+      label: Deployment Method
+      description: "Please specify how OpenIM Server was deployed."
+      options:
+        - Source Code Deployment
+        - Docker Deployment
+    validations:
+      required: true
+
+  - type: textarea
+    id: issue-description-reproduction
+    attributes:
+      label: Issue Description and Steps to Reproduce
+      description: "Provide a detailed description of the issue and a step-by-step guide on how to reproduce it."
+ placeholder: "Describe the issue in detail here...\n\nSteps to reproduce the issue on the server:\n1. Start the server with specific configurations (mention any relevant config details).\n2. Make an API call to '...' endpoint with the following payload '...'.\n3. Observe the behavior and note any error messages or logs.\n4. Mention any additional setup relevant to the bug (e.g., database version, external service dependencies)." + validations: + required: true + + - type: markdown + attributes: + value: "If possible, please add screenshots to help explain your problem." + + - type: textarea + id: screenshots-link + attributes: + label: Screenshots Link + description: "If applicable, please provide any links to screenshots here." + placeholder: "Paste your screenshot URL here, e.g., http://imgur.com/example" \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md new file mode 100644 index 0000000000..e6f751e4eb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation.md @@ -0,0 +1,20 @@ +--- +name: Documentation Update +about: Propose updates to documentation, including README files and other docs. +title: "[DOC]: " # Prefix for the title to help identify documentation issues +labels: documentation # Labels to be automatically added +assignees: '' # Optionally, specify maintainers or teams to be auto-assigned + +--- + +## Documentation Updates +Describe the documentation that needs to be updated or corrected. Please specify the files and sections if possible. + +## Motivation +Explain why these updates are necessary. What is missing, misleading, or outdated? + +## Suggested Changes +Detail the changes that you propose. If you are suggesting large changes, include examples or mockups of what the updated documentation should look like. + +## Additional Information +Include any other information that might be relevant, such as links to discussions or related issues in the repository. diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 0000000000..18a96a965c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,43 @@ +name: Feature Request +title: "[FEATURE REQUEST] " +labels: ["feature request","enhancement"] +description: "Propose a new feature or improvement that you believe will help enhance the project." +# assignees: [] + +body: + - type: markdown + attributes: + value: "Thank you for taking the time to propose a feature request. Please fill in as much detail as possible to help us understand why this feature is necessary and how it should work." + + - type: textarea + id: feature-reason + attributes: + label: Why this feature? + description: "Explain why this feature is needed. What problem does it solve? How does it benefit the project and its users?" + placeholder: "Describe the need for this feature..." + validations: + required: true + + - type: textarea + id: solution-proposal + attributes: + label: Suggested Solution + description: "Describe your proposed solution for this feature. How do you envision it working?" + placeholder: "Detail your solution here..." + validations: + required: true + + - type: markdown + attributes: + value: "Please provide any other relevant information or screenshots that could help illustrate your idea." 
+ + - type: textarea + id: additional-info + attributes: + label: Additional Information + description: "Include any additional information, links, or screenshots that might be relevant to your feature request." + placeholder: "Add more context or links to relevant resources..." + + - type: markdown + attributes: + value: "Thank you for contributing to the project! We appreciate your input and will review your suggestion as soon as possible." diff --git a/.github/ISSUE_TEMPLATE/other.yml b/.github/ISSUE_TEMPLATE/other.yml new file mode 100644 index 0000000000..025440229a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/other.yml @@ -0,0 +1,30 @@ +name: 🐧 Other +description: Use this for any other issues. Please do NOT create blank issues +title: "[Other]: " +labels: ["other"] +# assignees: [] + + +body: + - type: markdown + attributes: + value: "# Other issue" + - type: textarea + id: issuedescription + attributes: + label: What would you like to share? + description: Provide a clear and concise explanation of your issue. + validations: + required: true + - type: textarea + id: extrainfo + attributes: + label: Additional information + description: Is there anything else we should know about this issue? + validations: + required: false + - type: markdown + attributes: + value: | + You can also join our Discord community [here](https://join.slack.com/t/openimsdk/shared_invite/zt-1tmoj26uf-_FDy3dowVHBiGvLk9e5Xkg) + Feel free to check out other cool repositories of the openim Community [here](https://github.com/openimsdk) diff --git a/.github/ISSUE_TEMPLATE/rfc.md b/.github/ISSUE_TEMPLATE/rfc.md new file mode 100644 index 0000000000..760d89e534 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/rfc.md @@ -0,0 +1,26 @@ +--- +name: RFC - Feature Proposal +about: Submit a proposal for a significant feature to invite community discussion. +title: "[RFC]: " # Prefix for the title to help identify RFC proposals +labels: rfc, proposal # Labels to be automatically added +assignees: '' # Optionally, specify maintainers or teams to be auto-assigned + +--- + +## Proposal Overview +Briefly describe the content and objectives of your proposal. + +## Motivation +Why is this new feature necessary? What is the background of this problem? + +## Detailed Design +Describe the technical details of the proposal, including implementation steps, code snippets, or architecture diagrams. + +## Alternatives Considered +Have other alternatives been considered? Why is this approach preferred over others? + +## Impact +How will this proposal affect existing practices and community users? + +## Additional Information +Include any other relevant information such as related discussions, prior related work, etc. diff --git a/.github/code-language-detector.yml b/.github/code-language-detector.yml deleted file mode 100644 index 194c2474ad..0000000000 --- a/.github/code-language-detector.yml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright Β© 2024 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# https://github.com/marketplace/actions/code-language-detector -directory: ./ -file_types: - - .go - - .yaml - - .yml -languages: - - Chinese \ No newline at end of file diff --git a/.github/labels.yml b/.github/labels.yml deleted file mode 100644 index b85a824b42..0000000000 --- a/.github/labels.yml +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Refer to Kubernetes for size/* Settings -# https://github.com/Kubernetes/Kubernetes -XS: - name: size/XS - lines: 0 - color: 3CBF00 -S: - name: size/S - lines: 10 - color: 5D9801 -M: - name: size/M - lines: 30 - color: 7F7203 -L: - name: size/L - lines: 100 - color: A14C05 -XL: - name: size/XL - lines: 500 - color: C32607 -XXL: - name: size/XXL - lines: 1000 - color: E50009 - comment: | - # Whoa! Easy there, Partner! - This PR is too big. Please break it up into smaller PRs. \ No newline at end of file diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml deleted file mode 100644 index 55ee241d7b..0000000000 --- a/.github/release-drafter.yml +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name-template: 'v$RESOLVED_VERSION 🌈' -tag-template: 'v$RESOLVED_VERSION' -categories: - - title: 'πŸš€ Features' - labels: - - 'feature' - - 'enhancement' - - title: 'πŸ› Bug Fixes' - labels: - - 'kind/fix' - - 'kind/feature' - - 'enhancement' - - 'kind/documentation' - - 'good first issue' - - title: '🧰 Maintenance' - label: 'chore' -change-template: '- $TITLE @$AUTHOR (#$NUMBER)' -change-title-escapes: '\<*_&' # You can add # and @ to disable mentions, and add ` to disable code blocks. 
-version-resolver: - major: - labels: - - 'major' - minor: - labels: - - 'minor' - patch: - labels: - - 'patch' - default: patch -template: | - ## Changes $PREVIOUS_TAG - - $CHANGES - - ## Contributors to this $REPOSITORY release - - $CONTRIBUTORS diff --git a/.github/standardizer.yml b/.github/standardizer.yml deleted file mode 100644 index fceb69df10..0000000000 --- a/.github/standardizer.yml +++ /dev/null @@ -1,50 +0,0 @@ -# https://github.com/marketplace/actions/conformity-checker-for-project -baseConfig: - searchDirectory: "./" - ignoreCase: false - -directoryNaming: - allowHyphens: true - allowUnderscores: false - mustBeLowercase: true - -fileNaming: - allowHyphens: true - allowUnderscores: true - mustBeLowercase: true - -ignoreFormats: - - "\\.log$" - - "\\.env$" - - "README\\.md$" - - "_test\\.go$" - - "\\.md$" - - _test\\.txt$ - - LICENSE - - Dockerfile - - CODEOWNERS - - Makefile - -ignoreDirectories: - - "vendor" - - ".git" - - "deployments" - - "node_modules" - - "logs" - - "CHANGELOG" - - "components" - - "_output" - - "tools/openim-web" - - "CHANGELOG" - - "examples/Test_directory" - - test/testdata - -fileTypeSpecificNaming: - ".yaml": - allowHyphens: true - allowUnderscores: false - mustBeLowercase: true - ".go": - allowHyphens: false - allowUnderscores: true - mustBeLowercase: true \ No newline at end of file diff --git a/.github/sync-release.yml b/.github/sync-release.yml index 3800b4c24b..18a80fda2c 100644 --- a/.github/sync-release.yml +++ b/.github/sync-release.yml @@ -1,18 +1,4 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OpenIMSDK/openim-docker: +openimsdk/openim-docker: - source: ./config dest: ./openim-server/release/config replace: true diff --git a/.github/sync.yml b/.github/sync.yml deleted file mode 100644 index ee667d415f..0000000000 --- a/.github/sync.yml +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# https://github.com/BetaHuhn/repo-file-sync-action -# Synchronization for the.github repository -OpenIMSDK/.github: - - source: LICENSE - dest: LICENSE - - source: scripts/LICENSE/ - dest: scripts/LICENSE/ - replace: false - -OpenIMSDK/community: - - source: LICENSE - dest: LICENSE - - source: scripts/LICENSE/ - dest: scripts/LICENSE/ - replace: false - - source: .github/workflows/ - dest: .github/workflows/ - -OpenIMSDK/openim-sdk-core: - - source: LICENSE - dest: LICENSE - - source: scripts/LICENSE/ - dest: scripts/LICENSE/ - replace: false - - source: .github/workflows/issue-robot.yml - dest: .github/workflows/issue-robot.yml - replace: false - - source: .github/workflows/stale.yml - dest: .github/workflows/stale.yml - replace: false - - source: .github/.codecov.yml - dest: .github/.codecov.yml - replace: false - -OpenIMSDK/OpenIM-Docs: - - source: .github/workflows/ - dest: .github/workflows/ - exclude: | - e2e-test.yml - sync.yml - - source: scripts/githooks/ - dest: scripts/githooks/ - replace: true - - source: .github/.codecov.yml - dest: .github/.codecov.yml - replace: false - -OpenIMSDK/OpenKF: - - source: LICENSE - dest: LICENSE - - source: scripts/LICENSE/ - dest: scripts/LICENSE/ - replace: false - - source: .github/workflows/issue-robot.yml - dest: .github/workflows/issue-robot.yml - replace: false - - source: .github/workflows/stale.yml - dest: .github/workflows/stale.yml - replace: false - - source: .github/.codecov.yml - dest: .github/.codecov.yml - replace: false - -OpenIMSDK/openim-docker: - - source: ./config - dest: ./openim-server/main/config - replace: true - - source: ./docs - dest: ./openim-server/main/docs - replace: true - - source: ./scripts - dest: ./openim-server/main/scripts - replace: true - - source: ./scripts - dest: ./scripts - replace: true - - source: ./Makefile - dest: ./Makefile - replace: true - -group: - # first group:common to all warehouses - # TODO: add the required warehouse here - - repos: | - OpenIMSDK/OpenKF@main - OpenIMSDK/openim-miniprogram-demo@main - OpenIMSDK/docs - OpenIMSDK/chat - OpenIMSDK/community - OpenIMSDK/openim-charts - OpenIMSDK/openim-sdk-cpp@main - files: - - source: LICENSE - dest: LICENSE - replace: false - - source: .github/workflows/issue-robot.yml - dest: .github/workflows/issue-robot.yml - replace: false - - source: .github/workflows/stale.yml - dest: .github/workflows/stale.yml - replace: false - - source: .github/workflows/project-progress.yml - dest: .github/workflows/project-progress.yml - replace: false - - source: .github/workflows/help-comment-issue.yml - dest: .github/workflows/help-comment-issue.yml - replace: false - - source: .github/.codecov.yml - dest: .github/.codecov.yml - replace: false - - source: .github/workflows/cla.yml - dest: .github/workflows/cla.yml - replace: false - - source: .github/workflows/auto-assign-issue.yml - dest: .github/workflows/auto-assign-issue.yml - replace: false - - source: .github/workflows/release.yml - dest: .github/workflows/release.yml - replace: false - - source: ./scripts/githooks/ - dest: ./scripts/githooks/ - replace: true diff --git a/.github/weekly-digest.yml b/.github/weekly-digest.yml deleted file mode 100644 index fb3614ad81..0000000000 --- a/.github/weekly-digest.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# https://github.com/apps/weekly-digest/installations/new -publishDay: sun -canPublishIssues: true -canPublishPullRequests: true -canPublishContributors: true -canPublishStargazers: true -canPublishCommits: true \ No newline at end of file diff --git a/.github/workflows/auto-assign-issue.yml b/.github/workflows/auto-assign-issue.yml index d92fc968cb..320174d8c9 100644 --- a/.github/workflows/auto-assign-issue.yml +++ b/.github/workflows/auto-assign-issue.yml @@ -1,17 +1,3 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - name: Assign issue to comment author on: issue_comment: @@ -20,8 +6,7 @@ jobs: assign-issue: if: | contains(github.event.comment.body, '/assign') || contains(github.event.comment.body, '/accept') && - !contains(github.event.comment.user.login, 'openimbot') && - !contains(github.event.comment.user.login, 'kubbot') + !contains(github.event.comment.user.login, 'openim-robot') runs-on: ubuntu-latest permissions: issues: write @@ -33,11 +18,12 @@ jobs: run: | export LETASE_MILESTONES=$(curl 'https://api.github.com/repos/$OWNER/$PEPO/milestones' | jq -r 'last(.[]).title') gh issue edit ${{ github.event.issue.number }} --add-assignee "${{ github.event.comment.user.login }}" - gh issue edit ${{ github.event.issue.number }} --add-label "triage/accepted" - gh issue edit ${{ github.event.issue.number }} --milestone "$LETASE_MILESTONES" + gh issue edit ${{ github.event.issue.number }} --add-label "accepted" gh issue comment $ISSUE --body "@${{ github.event.comment.user.login }} Glad to see you accepted this issue🀲, this issue has been assigned to you. I set the milestones for this issue to [$LETASE_MILESTONES](https://github.com/$OWNER/$PEPO/milestones), We are looking forward to your PR!" + + # gh issue edit ${{ github.event.issue.number }} --milestone "$LETASE_MILESTONES" env: - GH_TOKEN: ${{ secrets.REDBOT_GITHUB_TOKEN }} + GH_TOKEN: ${{ secrets.BOT_TOKEN }} ISSUE: ${{ github.event.issue.html_url }} OWNER: ${{ github.repository_owner }} - REPO: ${{ github.event.repository.name }} \ No newline at end of file + REPO: ${{ github.event.repository.name }} diff --git a/.github/workflows/auto-gh-pr.yml b/.github/workflows/auto-gh-pr.yml deleted file mode 100644 index 45454275ed..0000000000 --- a/.github/workflows/auto-gh-pr.yml +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: Auto PR to release - -on: - pull_request: - # types: - # - closed - issue_comment: - types: [created] - pull_request_review_comment: - types: [created] - -jobs: - sync-issue-to-pr: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Sync Issue to PR - if: github.event_name == 'pull_request' && github.event.pull_request.base.ref == 'main' - run: | - PR_BODY="${{ github.event.pull_request.body }}" - - ISSUE_NUMBER=$(echo "$PR_BODY" | grep -oP 'Fixes #\K\d+') - if [[ -z "$ISSUE_NUMBER" ]]; then - echo "No Issue number found." - exit 1 - fi - - echo "Issue number found: $ISSUE_NUMBER" - - # Using GitHub CLI to get issue details - gh issue view "$ISSUE_NUMBER" --repo "${{ github.repository }}" --json labels,assignees,milestone,title > issue_data.json - - # Check if jq is installed - if ! command -v jq &> /dev/null; then - echo "Installing jq..." - sudo apt-get install -y jq - fi - - # Parse data with jq - LABELS=$(jq -r '.labels | map(.name) | join(",")' issue_data.json) - ASSIGNEES=$(jq -r '.assignees | map(.login) | join(",")' issue_data.json) - MILESTONE=$(jq -r '.milestone.title' issue_data.json) - - # Check if any of the fields are empty and set them to None - LABELS=${LABELS:-None} - ASSIGNEES=${ASSIGNEES:-None} - MILESTONE=${MILESTONE:-None} - - # Edit the PR with issue details, handling empty fields - gh pr edit "${{ github.event.pull_request.number }}" --repo "${{ github.repository }}" \ - ${LABELS:+--add-label "$LABELS"} \ - ${ASSIGNEES:+--add-assignee "$ASSIGNEES"} \ - ${MILESTONE:+--milestone "$MILESTONE"} - continue-on-error: true - env: - GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} diff --git a/.github/workflows/auto-invite.yml b/.github/workflows/auto-invite-comment.yml similarity index 74% rename from .github/workflows/auto-invite.yml rename to .github/workflows/auto-invite-comment.yml index 350de30abc..76fbcdfd33 100644 --- a/.github/workflows/auto-invite.yml +++ b/.github/workflows/auto-invite-comment.yml @@ -1,32 +1,18 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: Invite users to join our group +name: Invite users to join OpenIM Community. 
on: issue_comment: types: - created jobs: issue_comment: - name: Invite users to join our group + name: Invite users to join OpenIM Community if: ${{ github.event.comment.body == '/invite' || github.event.comment.body == '/close' || github.event.comment.body == '/comment' }} runs-on: ubuntu-latest permissions: issues: write steps: - - name: Invite user to join our group + - name: Invite user to join OpenIM Community uses: peter-evans/create-or-update-comment@v4 with: token: ${{ secrets.BOT_GITHUB_TOKEN }} @@ -43,11 +29,11 @@ jobs: + Read our [blog](https://doc.rentsoft.cn/). Our blog is a great place to stay up-to-date with Open-IM-Server projects and trends. On the blog, we share our latest developments, tech trends, and other interesting information. + Add [Wechat](https://github.com/OpenIMSDK/OpenIM-Docs/blob/main/docs/images/WechatIMG20.jpeg) and indicate that you are a user or developer of Open-IM-Server. We will process your request as soon as possible. - - name: Close Issue - uses: peter-evans/close-issue@v3 - with: - token: ${{ secrets.BOT_GITHUB_TOKEN }} - issue-number: ${{ github.event.issue.number }} - comment: πŸ€– Auto-closing issue, if you still need help please reopen the issue or ask for help in the community above - labels: | - triage/accepted \ No newline at end of file + # - name: Close Issue + # uses: peter-evans/close-issue@v3 + # with: + # token: ${{ secrets.BOT_GITHUB_TOKEN }} + # issue-number: ${{ github.event.issue.number }} + # comment: πŸ€– Auto-closing issue, if you still need help please reopen the issue or ask for help in the community above + # labels: | + # accepted \ No newline at end of file diff --git a/.github/workflows/auto-tag.yml b/.github/workflows/auto-tag.yml deleted file mode 100644 index 3decba7abc..0000000000 --- a/.github/workflows/auto-tag.yml +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: OpenIM Create Tag - -on: - issue_comment: - types: [created] - pull_request_review_comment: - types: [created] - -jobs: - create_tag: - runs-on: ubuntu-latest - if: startsWith(github.event.comment.body, '/create tag') - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Validate version number and get comment - id: validate - run: | - COMMENT="${{ github.event.comment.body }}" - VERSION=$(echo $COMMENT | cut -d ' ' -f 3) - TAG_COMMENT=$(echo $COMMENT | cut -d '"' -f 2) - if [[ $VERSION =~ ^v([0-9]+\.){2}[0-9]+$ ]]; then - echo "version=$VERSION" >> $GITHUB_STATE - echo "tag_comment=$TAG_COMMENT" >> $GITHUB_STATE - else - echo "Invalid version number." 
- exit 1 - fi - - - name: Create a new tag - env: - GH_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} - run: | - source $GITHUB_STATE - git tag -a $VERSION -m "$tag_comment" - git push origin $VERSION - echo "tag_created=$VERSION" >> $GITHUB_OUTPUT diff --git a/.github/workflows/bot-auto-cherry-pick.yml b/.github/workflows/bot-auto-cherry-pick.yml deleted file mode 100644 index cdd7241e2c..0000000000 --- a/.github/workflows/bot-auto-cherry-pick.yml +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: Github Rebot for Cherry Pick when PR is merged -on: - pull_request_target: - types: - - closed - -jobs: - comment: - runs-on: ubuntu-latest - steps: - - name: Comment cherry-pick command - uses: actions/github-script@v7 - with: - github-token: ${{ secrets.BOT_GITHUB_TOKEN }} - script: | - const pr = context.payload.pull_request; - if (!pr.merged) { - console.log("PR is not merged. Skipping..."); - return; - } - if (!pr.milestone || !pr.milestone.title) { - console.log("Milestone is not set. Skipping..."); - return; - } - const milestone = pr.milestone.title; - const ref = `heads/release-${milestone}`; - let branchExists; - try { - await github.rest.git.getRef({ - owner: context.repo.owner, - repo: context.repo.repo, - ref: ref - }); - branchExists = true; - } catch (error) { - if (error.status === 404) { - console.log(`Branch ${ref} does not exist. Skipping...`); - branchExists = false; - } else { - throw error; // Rethrow if it's another error - } - } - if (!branchExists) { - return; - } - const cherryPickCmd = `/cherry-pick release-${milestone}`; - console.log(`Adding comment: ${cherryPickCmd}`); - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: pr.number, - body: cherryPickCmd - }); \ No newline at end of file diff --git a/.github/workflows/bot-cherry-pick.yml b/.github/workflows/bot-cherry-pick.yml deleted file mode 100644 index 71597189c6..0000000000 --- a/.github/workflows/bot-cherry-pick.yml +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: Github Robot for Cherry Pick On Comment - -on: - issue_comment: - types: [created] - -jobs: - cherry-pick: - name: Cherry Pick - if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/cherry-pick') - runs-on: ubuntu-latest - - steps: - - name: Checkout the latest code - uses: actions/checkout@v4 - with: - token: ${{ secrets.BOT_GITHUB_TOKEN }} - fetch-depth: 0 # To ensure all history is available for cherry-picking - - - name: Automatic Cherry Pick - uses: vendoo/gha-cherry-pick@v1 - with: - # Assuming the cherry-pick commit SHA is passed in the comment like '/cherry-pick sha' - commit-sha: ${{ github.event.comment.body }} - env: - GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} - - - name: Create a new branch for PR - run: | - PR_BRANCH="cherry-pick-${GITHUB_SHA}-to-${{ github.base_ref }}" - git checkout -b $PR_BRANCH - git push origin $PR_BRANCH - env: - GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} - - - name: Create Pull Request - uses: actions/github-script@v5 - with: - script: | - const prTitle = "Cherry-pick to ${{ github.base_ref }}" - const prBody = "Automated cherry-pick of ${{ github.event.comment.body }}\n\n/cc @kubbot" - const base = "${{ github.base_ref }}" - const head = "cherry-pick-${{ github.sha }}-to-${{ github.base_ref }}" - const createPr = await github.rest.pulls.create({ - owner: context.repo.owner, - repo: context.repo.repo, - title: prTitle, - body: prBody, - head: head, - base: base, - maintainer_can_modify: true, // Allows maintainers to edit the PR - }) - env: - GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml deleted file mode 100644 index d0b9dddbc4..0000000000 --- a/.github/workflows/build-docker-image.yml +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: Publish Docker image - -on: - push: - branches: - - main - - release-* - paths-ignore: - - "docs/**" - - "README.md" - - "README_zh-CN.md" - - "**.md" - - "docs/**" - - "CONTRIBUTING.md" - tags: - - v* - pull_request: - types: [closed] - branches: - - main - - release-* - paths-ignore: - - "docs/**" - - "README.md" - - "README_zh-CN.md" - - "**.md" - - "docs/**" - - "CONTRIBUTING.md" - workflow_dispatch: - -env: - # Common versions - GO_VERSION: "1.20" - -jobs: - build-dockerhub: - runs-on: ubuntu-latest - if: ${{ !(github.event_name == 'pull_request' && github.event.pull_request.merged == false) }} - steps: - - name: Checkout main repository - uses: actions/checkout@v4 - with: - path: main-repo - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Build and push Docker image - uses: docker/build-push-action@v5 - with: - context: ./main-repo - load: true - tags: "openim/openim-server:local" - - - name: Checkout compose repository - uses: actions/checkout@v4 - with: - repository: "openimsdk/openim-docker" - path: "compose-repo" - - - name: Get Internal IP Address - id: get-ip - run: | - IP=$(hostname -I | awk '{print $1}') - echo "The IP Address is: $IP" - echo "::set-output name=ip::$IP" - - - name: Update .env to use the local image - run: | - sed -i 's|OPENIM_SERVER_IMAGE=.*|OPENIM_SERVER_IMAGE=openim/openim-server:local|' ${{ github.workspace }}/compose-repo/.env - sed -i 's|MINIO_EXTERNAL_ADDRESS=.*|MINIO_EXTERNAL_ADDRESS=http://${{ steps.get-ip.outputs.ip }}:10005|' ${{ github.workspace }}/compose-repo/.env - - - name: Start services using Docker Compose - run: | - cd ${{ github.workspace }}/compose-repo - docker compose up -d - sleep 30 - - - name: Check openim-server health - run: | - timeout=300 - interval=30 - elapsed=0 - while [[ $elapsed -le $timeout ]]; do - if ! docker exec openim-server mage check; then - echo "openim-server is not ready, waiting..." - sleep $interval - elapsed=$(($elapsed + $interval)) - else - echo "Health check successful" - exit 0 - fi - done - echo "Health check failed after 5 minutes" - exit 1 - - - name: Check openim-chat health - if: success() - run: | - if ! 
docker exec openim-chat mage check; then - echo "openim-chat check failed" - exit 1 - else - echo "Health check successful" - exit 0 - fi - - # - name: Checkout e2e - # if: success() - # uses: actions/checkout@v4 - # with: - # repository: "openimsdk/test-e2e" - # path: e2e-repo - - # - name: Set up Python 3.9 - # uses: actions/setup-python@v4 - # with: - # python-version: '3.9' - - # - name: Install dependencies - # run: | - # sudo apt-get update - # sudo apt-get install -y xvfb libxi6 libgconf-2-4 - # cd ${{ github.workspace }}/e2e-repo - # pip install -r requirements.txt - - # - name: Run tests - # run: | - # cd ${{ github.workspace }}/e2e-repo - # xvfb-run --auto-servernum --server-args='-screen 0 1920x1080x24' pytest -v -s ./script - - - name: Extract metadata (tags, labels) for Docker - if: success() - id: meta - uses: docker/metadata-action@v5.5.1 - with: - images: | - openim/openim-server - ghcr.io/openimsdk/openim-server - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-server - # generate Docker tags based on the following events/attributes - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Log in to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Log in to GitHub Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Log in to Aliyun Container Registry - uses: docker/login-action@v2 - with: - registry: registry.cn-hangzhou.aliyuncs.com - username: ${{ secrets.ALIREGISTRY_USERNAME }} - password: ${{ secrets.ALIREGISTRY_TOKEN }} - - - name: Build and push Docker image - uses: docker/build-push-action@v5 - with: - context: ./main-repo - push: true - # linux/ppc64le,linux/s390x - platforms: linux/amd64,linux/arm64 - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - diff --git a/.github/workflows/check-coverage.bak b/.github/workflows/check-coverage.bak deleted file mode 100644 index 09d43d7cdf..0000000000 --- a/.github/workflows/check-coverage.bak +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-name: OpenIM Check Coverage
-
-on:
-  workflow_dispatch:
-  push:
-    branches: [ "main" ]
-    paths-ignore:
-      - "docs/**"
-      - "**/*.md"
-      - "**/*.yaml"
-      - "CONTRIBUTORS"
-      - "CHANGELOG/**"
-  pull_request:
-    branches: [ "*" ]
-    paths-ignore:
-      - "docs/**"
-      - "**/*.md"
-      - "**/*.yaml"
-      - "CONTRIBUTORS"
-      - "CHANGELOG/**"
-env:
-  # Common versions
-  GO_VERSION: "1.20"
-
-jobs:
-  coverage:
-    runs-on: ubuntu-20.04
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-
-      - name: Setup Golang with cache
-        uses: magnetikonline/action-golang-cache@v4
-        with:
-          go-version: ${{ env.GO_VERSION }}
-
-      - name: Install Dependencies
-        run: sudo apt update && sudo apt install -y libgpgme-dev libbtrfs-dev libdevmapper-dev
-
-      - name: Run Cover
-        run: make cover
-        continue-on-error: true
-
-      - name: Upload Coverage to Codecov
-        uses: codecov/codecov-action@v4
diff --git a/.github/workflows/cla-assistant.yml b/.github/workflows/cla-assistant.yml
new file mode 100644
index 0000000000..7d44b05eb4
--- /dev/null
+++ b/.github/workflows/cla-assistant.yml
@@ -0,0 +1,40 @@
+name: CLA Assistant
+on:
+  issue_comment:
+    types: [created]
+  pull_request_target:
+    types: [opened,closed,synchronize]
+
+# explicitly configure permissions, in case your GITHUB_TOKEN workflow permissions are set to read-only in repository settings
+permissions:
+  actions: write
+  contents: write # this can be 'read' if the signatures are in remote repository
+  pull-requests: write
+  statuses: write
+
+jobs:
+  CLA-Assistant:
+    runs-on: ubuntu-latest
+    steps:
+      - name: "CLA Assistant"
+        if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
+        uses: contributor-assistant/github-action@v2.4.0
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          PERSONAL_ACCESS_TOKEN: ${{ secrets.BOT_TOKEN }}
+        with:
+          path-to-signatures: 'signatures/cla.json'
+          path-to-document: 'https://github.com/OpenIM-Robot/cla/blob/main/README.md' # e.g. a CLA or a DCO document
+          branch: 'main'
+          allowlist: 'bot*,*bot,OpenIM-Robot'
+
+          # the followings are the optional inputs - If the optional inputs are not given, then default values will be taken
+          remote-organization-name: OpenIM-Robot
+          remote-repository-name: cla
+          create-file-commit-message: 'Creating file for storing CLA Signatures'
+          # signed-commit-message: '$contributorName has signed the CLA in $owner/$repo#$pullRequestNo'
+          custom-notsigned-prcomment: '💕 Thank you for your contribution and please kindly read and sign our CLA. [CLA Docs](https://github.com/OpenIM-Robot/cla/blob/main/README.md)'
+          custom-pr-sign-comment: 'I have read the CLA Document and I hereby sign the CLA'
+          custom-allsigned-prcomment: '🤖 All Contributors have signed the [CLA](https://github.com/OpenIM-Robot/cla/blob/main/README.md). The signed information is recorded [**here**](https://github.com/OpenIM-Robot/cla/blob/main/signatures/cla.json)'
+          #lock-pullrequest-aftermerge: false - if you don't want this bot to automatically lock the pull request after merging (default - true)
+          #use-dco-flag: true - If you are using DCO instead of CLA
diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
deleted file mode 100644
index ae27c8a0a5..0000000000
--- a/.github/workflows/cla.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright © 2023 OpenIM open source community. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-name: OpenIM CLA Assistant
-on:
-  issue_comment:
-    types: [created]
-  pull_request_target:
-    types: [opened,closed,synchronize]
-
-# explicitly configure permissions, in case your GITHUB_TOKEN workflow permissions are set to read-only in repository settings
-permissions:
-  actions: write
-  contents: write
-  pull-requests: write
-  statuses: write
-
-env:
-  # Define Open-IM-Server variables here
-  OPEN_IM_SERVER_REMOTE_ORGANIZATION: openim-sigs
-  REMOTE_REPOSITORY: cla
-  OPEN_IM_SERVER_CLA_DOCUMENT: https://github.com/openim-sigs/cla/blob/main/README.md
-  OPEN_IM_SERVER_SIGNATURES_PATH: signatures/${{ github.event.repository.name }}/cla.json
-
-  OPEN_IM_SERVER_ALLOWLIST: kubbot,openimbot,bot*,dependabot,sweep-ai,*bot,bot-*,bot/*,bot-/*,bot,*[bot]
-
-jobs:
-  CLAAssistant:
-    runs-on: ubuntu-latest
-    steps:
-      - name: "CLA Assistant"
-        if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
-        uses: contributor-assistant/github-action@v2.3.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
-          PERSONAL_ACCESS_TOKEN: ${{ secrets.REDBOT_GITHUB_TOKEN }}
-        with:
-          path-to-signatures: ${{ env.OPEN_IM_SERVER_SIGNATURES_PATH }}
-          path-to-document: ${{ env.OPEN_IM_SERVER_CLA_DOCUMENT }}
-          branch: 'main'
-          allowlist: ${{ env.OPEN_IM_SERVER_ALLOWLIST }}
-
-          remote-organization-name: ${{ env.OPEN_IM_SERVER_REMOTE_ORGANIZATION }}
-          remote-repository-name: ${{ env.REMOTE_REPOSITORY }}
-
-          create-file-commit-message: '📚 Docs: Creating file for storing ${{ github.event.repository.name }} CLA Signatures'
-          custom-notsigned-prcomment: '💕 Thank you for your contribution and please kindly read and sign our [🎯https://github.com/openim-sigs/cla/blob/main/README.md](https://github.com/openim-sigs/cla/blob/main/README.md). If you wish to sign the CRA, **Please copy and comment on the following sentence:**'
-          custom-pr-sign-comment: 'I have read the CLA Document and I hereby sign the CLA'
-          custom-allsigned-prcomment: '🤖 All Contributors have signed the [${{ github.event.repository.name }} CLA](https://github.com/openim-sigs/cla/blob/main/README.md).
The signed information is recorded [πŸ€–here](https://github.com/openim-sigs/cla/tree/main/signatures/${{ github.event.repository.name }}/cla.json)' - # lock-pullrequest-aftermerge: false - if you don't want this bot to automatically lock the pull request after merging (default - true) - # use-dco-flag: true - If you are using DCO instead of CLA diff --git a/.github/workflows/code-language-detector.yml b/.github/workflows/code-language-detector.yml deleted file mode 100644 index 80ec947338..0000000000 --- a/.github/workflows/code-language-detector.yml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright Β© 2024 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: Language Check Workflow Test - -on: [pull_request] - -jobs: - comment-language-detector: - runs-on: ubuntu-latest - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - - - name: Code Language Detector - uses: kubecub/comment-lang-detector@v1.0.0 \ No newline at end of file diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 29f9382ccc..fd871e2b58 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -1,76 +1,67 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. # -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. 
-name: "OpenIM Code Scanning - Action" +name: "CodeQL" on: push: - branches: [main] + branches: [ main ] pull_request: - branches: [main] + # The branches below must be a subset of the branches above + branches: [ main ] schedule: - # β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ minute (0 - 59) - # β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ hour (0 - 23) - # β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ day of the month (1 - 31) - # β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ month (1 - 12 or JAN-DEC) - # β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ day of the week (0 - 6 or SUN-SAT) - # β”‚ β”‚ β”‚ β”‚ β”‚ - # β”‚ β”‚ β”‚ β”‚ β”‚ - # β”‚ β”‚ β”‚ β”‚ β”‚ - # * * * * * - - cron: '30 1 * * 0' + - cron: '18 19 * * 6' jobs: - CodeQL-Build: - # CodeQL runs on ubuntu-latest, windows-latest, and macos-latest + analyze: + name: Analyze runs-on: ubuntu-latest - permissions: - # required for all workflows - security-events: write - - # only required for workflows in private repositories - actions: write - contents: write + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] + # Learn more: + # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed steps: - - name: Checkout repository - uses: actions/checkout@v4 + - name: Checkout repository + uses: actions/checkout@v4 - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v3 - # Override language selection by uncommenting this and choosing your languages - with: - languages: go + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main - # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). - # If this step fails, then you should remove it and run the build manually (see below). - - name: Autobuild - uses: github/codeql-action/autobuild@v3 + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v3 - # ℹ️ Command-line programs to run using the OS shell. - # πŸ“š See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + # ℹ️ Command-line programs to run using the OS shell. 
+ # πŸ“š https://git.io/JvXDl - # ✏️ If the Autobuild fails above, remove it and uncomment the following - # three lines and modify them (or add more) to build your code if your - # project uses a compiled language + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language - # - run: | - # make bootstrap - # make release + #- run: | + # make bootstrap + # make release - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 \ No newline at end of file + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 \ No newline at end of file diff --git a/.github/workflows/comment-check.yml b/.github/workflows/comment-check.yml new file mode 100644 index 0000000000..e994b52596 --- /dev/null +++ b/.github/workflows/comment-check.yml @@ -0,0 +1,51 @@ +name: Non-English Comments Check + +on: + pull_request: + branches: + - main + workflow_dispatch: + +jobs: + non-english-comments-check: + runs-on: ubuntu-latest + + env: + # need ignore Dirs + EXCLUDE_DIRS: ".git docs tests scripts assets node_modules build" + # need ignore Files + EXCLUDE_FILES: "*.md *.txt *.html *.css *.min.js *.mdx" + + steps: + - uses: actions/checkout@v4 + + - name: Search for Non-English comments + run: | + set -e + # Define the regex pattern to match Chinese characters + pattern='[\p{Han}]' + + # Process the directories to be excluded + exclude_dirs="" + for dir in $EXCLUDE_DIRS; do + exclude_dirs="$exclude_dirs --exclude-dir=$dir" + done + + # Process the file types to be excluded + exclude_files="" + for file in $EXCLUDE_FILES; do + exclude_files="$exclude_files --exclude=$file" + done + + # Use grep to find all comments containing Non-English characters and save to file + grep -Pnr "$pattern" . $exclude_dirs $exclude_files > non_english_comments.txt || true + + - name: Output non-English comments are found + run: | + if [ -s non_english_comments.txt ]; then + echo "Non-English comments found in the following locations:" + cat non_english_comments.txt + exit 1 # terminate the workflow + else + echo "No Non_English comments found." + fi diff --git a/.github/workflows/create-branch-on-tag.bak b/.github/workflows/create-branch-on-tag.bak deleted file mode 100644 index fbacd261b1..0000000000 --- a/.github/workflows/create-branch-on-tag.bak +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: Create Branch on Tag - -on: - push: - tags: - - 'v*.*.0' - -permissions: - contents: write - actions: write - -jobs: - create-branch: - runs-on: ubuntu-latest - steps: - - name: Check out code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set up Git - run: | - git config --global user.name 'kubbot' - git config --global user.email '3293172751yxy@gmail.com' - - - name: Install git-chglog - run: make install.git-chglog - - - name: Create Branch and Push - env: - TAG_NAME: ${{ github.ref_name }} - run: | - IFS='.' read -ra VERSION_PARTS <<< "$TAG_NAME" - if [[ "${VERSION_PARTS[2]}" = "0" ]]; then - BRANCH_NAME="release-v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}" - echo "Creating branch $BRANCH_NAME" - git checkout -b "$BRANCH_NAME" - git push origin "$BRANCH_NAME" - else - echo "Not a release tag. Skipping branch creation." - fi - continue-on-error: true - - - name: Create and Commit CHANGELOG - if: endsWith(github.ref_name, '.0') - run: | - git fetch --all - TAG_NAME=${GITHUB_REF#refs/tags/} - IFS='.' read -ra VERSION_PARTS <<< "$TAG_NAME" - git checkout main - cd CHANGELOG - git-chglog --tag-filter-pattern "v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.*" -o "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md" - git add "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md" - git commit -m "Update CHANGELOG for $TAG_NAME" || echo "No changes to commit." - continue-on-error: true - - - name: Push CHANGELOG to Main - if: steps.create-and-commit-changelog.outputs.changes == 'true' - uses: ad-m/github-push-action@v0.8.0 - with: - github_token: ${{ secrets.BOT_GITHUB_TOKEN }} - branch: main - continue-on-error: true diff --git a/.github/workflows/depsreview.yaml b/.github/workflows/depsreview.yaml deleted file mode 100644 index aff7e3d9e0..0000000000 --- a/.github/workflows/depsreview.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright Β© 2023 KubeCub open source community. All rights reserved. -# Licensed under the MIT License (the "License"); -# you may not use this file except in compliance with the License. - -name: OpenIM Dependency Review -on: [pull_request] - -permissions: - contents: read - -jobs: - dependency-review: - runs-on: ubuntu-latest - steps: - - name: 'Checkout Repository' - uses: actions/checkout@v4 - - name: 'Dependency Review' - uses: actions/dependency-review-action@v4 \ No newline at end of file diff --git a/.github/workflows/docker-buildx.bak b/.github/workflows/docker-buildx.bak deleted file mode 100644 index 7e7b8229ca..0000000000 --- a/.github/workflows/docker-buildx.bak +++ /dev/null @@ -1,502 +0,0 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: Docker Buildx Images CI - -on: - schedule: - - cron: '30 1 * * *' - push: - branches: - - release-* - tags: - - v* - workflow_dispatch: - -jobs: - build-ghcr: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - install: true - - - name: Cache Docker layers - uses: actions/cache@v4 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - - name: Log in to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Log in to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Log in to AliYun Docker Hub - uses: docker/login-action@v3 - with: - registry: registry.cn-hangzhou.aliyuncs.com - username: ${{ secrets.ALIREGISTRY_USERNAME }} - password: ${{ secrets.ALIREGISTRY_TOKEN }} - -################################################ -# build/ -# └── docker -# β”œβ”€β”€ openim-api -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-cmdutils -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-crontask -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-msggateway -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-msgtransfer -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-push -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-rpc-auth -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-rpc-conversation -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-rpc-friend -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-rpc-group -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-rpc-msg -# β”‚ └── Dockerfile -# β”œβ”€β”€ openim-rpc-third -# β”‚ └── Dockerfile -# └── openim-rpc-user -# └── Dockerfile -############################################# - - - name: Extract metadata (tags, labels) for Docker openim-api - id: meta1 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-api - openim/openim-api - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-api - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-api - uses: docker/build-push-action@v5 - with: - context: . - file: ./build/images/openim-api/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta1.outputs.tags }} - labels: ${{ steps.meta1.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-cmdutils - id: meta2 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-cmdutils - openim/openim-cmdutils - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-cmdutils - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-cmdutils - uses: docker/build-push-action@v5 - with: - context: . 
- file: ./build/images/openim-cmdutils/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta2.outputs.tags }} - labels: ${{ steps.meta2.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-crontask - id: meta3 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-crontask - openim/openim-crontask - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-crontask - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-crontask - uses: docker/build-push-action@v5 - with: - context: . - file: ./build/images/openim-crontask/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta3.outputs.tags }} - labels: ${{ steps.meta3.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-msggateway - id: meta4 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-msggateway - openim/openim-msggateway - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-msggateway - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-msggateway - uses: docker/build-push-action@v5 - with: - context: . - file: ./build/images/openim-msggateway/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta4.outputs.tags }} - labels: ${{ steps.meta4.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-msgtransfer - id: meta5 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-msgtransfer - openim/openim-msgtransfer - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-msgtransfer - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-msgtransfer - uses: docker/build-push-action@v5 - with: - context: . 
- file: ./build/images/openim-msgtransfer/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta5.outputs.tags }} - labels: ${{ steps.meta5.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-push - id: meta6 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-push - openim/openim-push - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-push - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-push - uses: docker/build-push-action@v5 - with: - context: . - file: ./build/images/openim-push/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta6.outputs.tags }} - labels: ${{ steps.meta6.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-rpc-auth - id: meta7 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-rpc-auth - openim/openim-rpc-auth - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-rpc-auth - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-rpc-auth - uses: docker/build-push-action@v5 - with: - context: . - file: ./build/images/openim-rpc-auth/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta7.outputs.tags }} - labels: ${{ steps.meta7.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-rpc-conversation - id: meta8 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-rpc-conversation - openim/openim-rpc-conversation - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-rpc-conversation - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-rpc-conversation - uses: docker/build-push-action@v5 - with: - context: . 
- file: ./build/images/openim-rpc-conversation/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta8.outputs.tags }} - labels: ${{ steps.meta8.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-rpc-friend - id: meta9 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-rpc-friend - openim/openim-rpc-friend - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-rpc-friend - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-rpc-friend - uses: docker/build-push-action@v5 - with: - context: . - file: ./build/images/openim-rpc-friend/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta9.outputs.tags }} - labels: ${{ steps.meta9.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-rpc-group - id: meta10 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-rpc-group - openim/openim-rpc-group - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-rpc-group - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-rpc-group - uses: docker/build-push-action@v5 - with: - context: . - file: ./build/images/openim-rpc-group/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta10.outputs.tags }} - labels: ${{ steps.meta10.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-rpc-msg - id: meta11 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-rpc-msg - openim/openim-rpc-msg - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-rpc-msg - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-rpc-msg - uses: docker/build-push-action@v5 - with: - context: . 
- file: ./build/images/openim-rpc-msg/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta11.outputs.tags }} - labels: ${{ steps.meta11.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-rpc-third - id: meta12 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-rpc-third - openim/openim-rpc-third - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-rpc-third - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-rpc-third - uses: docker/build-push-action@v5 - with: - context: . - file: ./build/images/openim-rpc-third/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta12.outputs.tags }} - labels: ${{ steps.meta12.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Extract metadata (tags, labels) for Docker openim-rpc-user - id: meta13 - uses: docker/metadata-action@v5.5.1 - with: - images: | - ghcr.io/openimsdk/openim-rpc-user - openim/openim-rpc-user - registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-rpc-user - tags: | - type=ref,event=tag - type=schedule - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=sha - - - name: Build and push Docker image for openim-rpc-user - uses: docker/build-push-action@v5 - with: - context: . - file: ./build/images/openim-rpc-user/Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta13.outputs.tags }} - labels: ${{ steps.meta13.outputs.labels }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache diff --git a/.github/workflows/e2e-test.bak b/.github/workflows/e2e-test.bak deleted file mode 100644 index 6231697c25..0000000000 --- a/.github/workflows/e2e-test.bak +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: OpenIM E2E And API Test - -on: - workflow_dispatch: - pull_request: - push: - schedule: - # run e2e test every 4 hours - - cron: 0 */4 * * * - -env: - CALLBACK_ENABLE: true - -jobs: - build: - name: Test - runs-on: ubuntu-latest - env: - GO111MODULE: on - steps: - - - name: Set up Go 1.21 - uses: actions/setup-go@v5 - with: - go-version: 1.21 - id: go - - - name: Check out code into the Go module directory - uses: actions/checkout@v4 - - - name: Create e2e test - run: | - echo "...test e2e" - - execute-linux-systemd-scripts: - name: Execute OpenIM script on ${{ matrix.os }} - runs-on: ${{ matrix.os }} - environment: - name: openim - strategy: - matrix: - go_version: ["1.20"] - os: ["ubuntu-latest"] - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Set up Go ${{ matrix.go_version }} - uses: actions/setup-go@v5 - with: - go-version: ${{ matrix.go_version }} - id: go - - - name: Install Task - uses: arduino/setup-task@v1 - with: - version: '3.x' # If available, use the latest major version that's compatible - repo-token: ${{ secrets.GITHUB_TOKEN }} - - - name: Docker Operations - run: | - sudo docker compose up -d - sudo bash bootstrap.sh - sudo mage - sudo sleep 20 - - - name: Module Operations - run: | - echo "===========> Verifying go-gitlint is installed" - if [ ! -f ./_output/tools/go-gitlint ]; then - export GOBIN=$(pwd)/_output/tools - echo "===========> Installing The default installation path is /home/ubuntu/DF/open-im-server/_output/tools/go-gitlint" - sudo go install github.com/marmotedu/go-gitlint/cmd/go-gitlint@latest - echo "===========> go-gitlint is installed in /home/ubuntu/DF/open-im-server/_output/tools/go-gitlint" - fi - - - name: Build, Start(make build && make start) - run: | - sudo ./scripts/install/install.sh -i - - - name: Exec OpenIM System Status Chack - run: | - sudo ./scripts/install/install.sh -s - -# - name: Exec OpenIM API test (make test-api) - - name: Exec OpenIM test (make test) - run: | - mkdir -p ./tmp - touch ./tmp/test.md - echo "# OpenIM Test" >> ./tmp/test.md - echo "## OpenIM API Test" >> ./tmp/test.md - echo "
<details><summary>Command Output for OpenIM API Test</summary>" >> ./tmp/test.md
-        echo "<pre><code>" >> ./tmp/test.md
-        echo "===========> Run api test"
-        ./scripts/install/test.sh
-        echo "===========> Run api test" >> ./tmp/test.md
-        ./scripts/install/test.sh >> ./tmp/test.md
-        echo "</code></pre>" >> ./tmp/test.md
-        echo "</details>" >> ./tmp/test.md
-
-        echo "===========> Run api test"
-        ./scripts/install/test.sh
-
-      # - name: Exec OpenIM E2E Test (make test-e2e)
-      #   run: |
-      #     echo "" >> ./tmp/test.md
-      #     echo "## OpenIM E2E Test" >> ./tmp/test.md
-      #     echo "<details><summary>Command Output for OpenIM E2E Test</summary>" >> ./tmp/test.md
-      #     echo "<pre><code>" >> ./tmp/test.md
-      #     sudo make test-e2e | tee -a ./tmp/test.md
-      #     echo "</code></pre>" >> ./tmp/test.md
-      #     echo "</details>
" >> ./tmp/test.md - - # sudo make test-e2e - - - name: Comment PR with file - uses: thollander/actions-comment-pull-request@v2 - with: - filePath: ./tmp/test.md - comment_tag: nrt_file - reactions: eyes, rocket - mode: recreate - GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} - continue-on-error: true - - - name: Check outputs - run: | - echo "id : ${{ steps.nrt_message.outputs.id }}" - echo "body : ${{ steps.nrt_message.outputs.body }}" - echo "html_url : ${{ steps.nrt_message.outputs.html_url }}" - - - name: Exec OpenIM System uninstall - run: | - sudo ./scripts/install/install.sh -u - - - name: gobenchdata publish - uses: bobheadxi/gobenchdata@v1 - with: - PRUNE_COUNT: 30 - GO_TEST_FLAGS: -cpu 1,2 - PUBLISH: true - PUBLISH_BRANCH: gh-pages - env: - GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} - continue-on-error: true diff --git a/.github/workflows/go-build-test.yml b/.github/workflows/go-build-test.yml new file mode 100644 index 0000000000..1ed8f03977 --- /dev/null +++ b/.github/workflows/go-build-test.yml @@ -0,0 +1,197 @@ +name: Go Build Test + +on: + push: + branches: + - main + pull_request: + branches: + - main + paths-ignore: + - '**/*.md' + + workflow_dispatch: + +jobs: + go-build: + name: Test with go ${{ matrix.go_version }} on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + permissions: + contents: write + pull-requests: write + strategy: + matrix: + os: [ubuntu-latest] + go_version: ["1.21.x", "1.22.x"] + + steps: + - name: Checkout Server repository + uses: actions/checkout@v4 + + - name: Set up Go ${{ matrix.go_version }} + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go_version }} + + - name: Get Server dependencies + run: | + go install github.com/magefile/mage@latest + go mod tidy + go mod download + + - name: Set up infra services + uses: hoverkraft-tech/compose-action@v2.0.1 + # Uncomment and set the correct path to your docker-compose file + with: + compose-file: "./docker-compose.yml" + + # run: | + # sudo docker compose up -d + # sudo sleep 30 # Increased sleep time for better stability + # timeout-minutes: 60 # Increased timeout for Docker setup + + + # - name: Get Internal IP Address + # id: get-ip + # run: | + # IP=$(hostname -I | awk '{print $1}') + # echo "The IP Address is: $IP" + # echo "::set-output name=ip::$IP" + + # - name: Update .env + # run: | + # sed -i 's|externalAddress:.*|externalAddress: "http://${{ steps.get-ip.outputs.ip }}:10005"|' config/minio.yml + # cat config/minio.yml + + - name: Build and test Server Services + run: | + mage build + mage start + mage check + + - name: Checkout Chat repository + uses: actions/checkout@v4 + with: + repository: "openimsdk/chat" + path: "chat-repo" + + - name: Get Chat dependencies + run: | + cd ${{ github.workspace }}/chat-repo + go mod tidy + go mod download + go install github.com/magefile/mage@latest + + - name: Build and test Chat Services + run: | + cd ${{ github.workspace }}/chat-repo + mage build + mage start + mage check + + go-test: + name: Benchmark Test with go ${{ matrix.go_version }} on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + permissions: + contents: write + env: + SDK_DIR: openim-sdk-core + CONFIG_PATH: config/notification.yml + # pull-requests: write + strategy: + matrix: + os: [ ubuntu-latest ] + go_version: [ "1.22.x" ] + + steps: + - name: Checkout Server repository + uses: actions/checkout@v4 + + - name: Checkout SDK repository + uses: actions/checkout@v4 + with: + repository: 'openimsdk/openim-sdk-core' + path: ${{ env.SDK_DIR }} + + - name: Set up Go ${{ 
matrix.go_version }} + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go_version }} + + - name: Get Server dependencies + run: | + go install github.com/magefile/mage@latest + go mod download + + - name: Install yq + run: | + sudo wget https://github.com/mikefarah/yq/releases/download/v4.34.1/yq_linux_amd64 -O /usr/bin/yq + sudo chmod +x /usr/bin/yq + + - name: Modify Server Configuration + run: | + yq e '.groupCreated.unreadCount = true' -i ${{ env.CONFIG_PATH }} + yq e '.friendApplicationApproved.unreadCount = true' -i ${{ env.CONFIG_PATH }} + + - name: Start Server Services + run: | + docker compose up -d + mage build + mage start + mage check + + - name: Build test SDK core + run: | + cd ${{ env.SDK_DIR }} + go mod tidy + cd integration_test + mkdir data + go run main.go -lgr 0.8 -imf -crg -ckgn -ckcon -sem -ckmsn -u 20 -su 5 -lg 2 -cg 2 -cgm 3 -sm 10 -gm 10 -reg + + dockerfile-test: + name: Build and Test Dockerfile + runs-on: ubuntu-latest + strategy: + matrix: + go_version: ["1.21"] + + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Set up Go ${{ matrix.go_version }} + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go_version }} + + - name: Get dependencies + run: | + go mod tidy + go mod download + go install github.com/magefile/mage@latest + + - name: Build Docker Image + run: | + IMAGE_NAME="${{ github.event.repository.name }}-test" + CONTAINER_NAME="${{ github.event.repository.name }}-container" + docker build -t $IMAGE_NAME . + + - name: Run Docker Container + run: | + IMAGE_NAME="${{ github.event.repository.name }}-test" + CONTAINER_NAME="${{ github.event.repository.name }}-container" + docker run --name $CONTAINER_NAME -d $IMAGE_NAME + docker ps -a + + - name: Test Docker Container Logs + run: | + CONTAINER_NAME="${{ github.event.repository.name }}-container" + docker logs $CONTAINER_NAME + + # - name: Cleanup Docker Container + # run: | + # CONTAINER_NAME="${{ github.event.repository.name }}-container" + # IMAGE_NAME="${{ github.event.repository.name }}-test" + # docker stop $CONTAINER_NAME + # docker rm $CONTAINER_NAME + # docker rmi $IMAGE_NAME diff --git a/.github/workflows/golangci-lint.bak b/.github/workflows/golangci-lint.bak deleted file mode 100644 index 64bd498c54..0000000000 --- a/.github/workflows/golangci-lint.bak +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -name: OpenIM golangci-lint -on: - push: - branches: [main] - pull_request: -jobs: - golangci: - name: lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: '1.21' - cache: false - - name: OpenIM Scripts Verification(make verify) - run: | - cd scripts - for script in verify-*; do - if [ -x "$script" ]; then - ./"$script" - fi - done - - name: golangci-lint - uses: golangci/golangci-lint-action@v4.0.0 - with: - # Require: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v1.54 - - # Optional: working directory, useful for monorepos - # working-directory: server - - # Optional: golangci-lint command line arguments. - # - # Note: by default the `.golangci.yml` file should be at the root of the repository. - # The location of the configuration file can be changed by using `--config=` - # args: --timeout=30m --config=/scripts/.golangci.yml --issues-exit-code=0 - - # Optional: show only new issues if it's a pull request. The default value is `false`. - only-new-issues: true - - # Optional:The mode to install golangci-lint. It can be 'binary' or 'goinstall'. - # install-mode: "goinstall" diff --git a/.github/workflows/gosec.yml b/.github/workflows/gosec.yml deleted file mode 100644 index b99330c05c..0000000000 --- a/.github/workflows/gosec.yml +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: OpenIM Run Gosec - -# gosec is a source code security audit tool for the Go language. It performs a static -# analysis of the Go code, looking for potential security problems. The main functions of gosec are: -# 1. Find common security vulnerabilities, such as SQL injection, command injection, and cross-site scripting (XSS). -# 2. Audit codes according to common security standards and find non-standard codes. -# 3. Assist the Go language engineer to write safe and reliable code. -# https://github.com/securego/gosec/ -on: - push: - branches: "*" - pull_request: - branches: "*" - paths-ignore: - - '*.md' - - '*.yml' - - '.github' - -jobs: - golang-security-action: - runs-on: ubuntu-latest - env: - GO111MODULE: on - steps: - - name: Check out code - uses: actions/checkout@v4 - - name: Run Gosec Security Scanner - uses: securego/gosec@master - with: - args: ./... - continue-on-error: true \ No newline at end of file diff --git a/.github/workflows/help-comment-issue.yml b/.github/workflows/help-comment-issue.yml index c4e72ffc67..b1cc621828 100644 --- a/.github/workflows/help-comment-issue.yml +++ b/.github/workflows/help-comment-issue.yml @@ -29,7 +29,7 @@ jobs: uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ github.event.issue.number }} - token: ${{ secrets.BOT_GITHUB_TOKEN }} + token: ${{ secrets.BOT_TOKEN }} body: | This issue is available for anyone to work on. **Make sure to reference this issue in your pull request.** :sparkles: Thank you for your contribution! 
:sparkles: [Join slack πŸ€–](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q) to connect and communicate with our developers. diff --git a/.github/workflows/issue-robot.yml b/.github/workflows/issue-robot.yml deleted file mode 100644 index 2a956ed19c..0000000000 --- a/.github/workflows/issue-robot.yml +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: OpenIM Issue Aotu Translator -on: - issue_comment: - types: [created] - issues: - types: [opened] - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: usthe/issues-translate-action@v2.7 - with: - # it is not necessary to decide whether you need to modify the issue header content - IS_MODIFY_TITLE: true - BOT_GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} - # Required, input your bot github token \ No newline at end of file diff --git a/.github/workflows/issue-translator.yml b/.github/workflows/issue-translator.yml new file mode 100644 index 0000000000..6a8528ae62 --- /dev/null +++ b/.github/workflows/issue-translator.yml @@ -0,0 +1,19 @@ +name: 'issue-translator' +on: + issue_comment: + types: [created] + issues: + types: [opened] + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: usthe/issues-translate-action@v2.7 + with: + BOT_GITHUB_TOKEN: ${{ secrets.BOT_TOKEN }} + IS_MODIFY_TITLE: true + # not require, default false, . Decide whether to modify the issue title + # if true, the robot account @Issues-translate-bot must have modification permissions, invite @Issues-translate-bot to your project or use your custom bot. + CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically. πŸ‘―πŸ‘­πŸ»πŸ§‘β€πŸ€β€πŸ§‘πŸ‘«πŸ§‘πŸΏβ€πŸ€β€πŸ§‘πŸ»πŸ‘©πŸΎβ€πŸ€β€πŸ‘¨πŸΏπŸ‘¬πŸΏ + # not require. Customize the translation robot prefix message. \ No newline at end of file diff --git a/.github/workflows/lock-issue.bak b/.github/workflows/lock-issue.bak deleted file mode 100644 index edf2809653..0000000000 --- a/.github/workflows/lock-issue.bak +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: 'Lock Threads' - -on: - schedule: - - cron: '0 * * * *' - workflow_dispatch: - -permissions: - issues: write - pull-requests: write - -concurrency: - group: lock - -jobs: - action: - runs-on: ubuntu-latest - steps: - - uses: dessant/lock-threads@v5 - with: - github-token: ${{ secrets.BOT_GITHUB_TOKEN }} - issue-inactive-days: '365' - exclude-issue-created-before: '' - exclude-issue-created-after: '' - exclude-issue-created-between: '' - exclude-issue-closed-before: '' - exclude-issue-closed-after: '' - exclude-issue-closed-between: '' - include-any-issue-labels: '' - include-all-issue-labels: '' - exclude-any-issue-labels: '' - add-issue-labels: '' - remove-issue-labels: '' - issue-comment: '' - issue-lock-reason: 'resolved' - pr-inactive-days: '365' - exclude-pr-created-before: '' - exclude-pr-created-after: '' - exclude-pr-created-between: '' - exclude-pr-closed-before: '' - exclude-pr-closed-after: '' - exclude-pr-closed-between: '' - include-any-pr-labels: '' - include-all-pr-labels: '' - exclude-any-pr-labels: '' - add-pr-labels: '' - remove-pr-labels: '' - pr-comment: '' - pr-lock-reason: 'resolved' - process-only: '' - log-output: false \ No newline at end of file diff --git a/.github/workflows/milestone.yml b/.github/workflows/milestone.yml deleted file mode 100644 index c74e7074a7..0000000000 --- a/.github/workflows/milestone.yml +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# shamelessly copied from https://github.com/sigstore/cosign/blob/main/.github/workflows/milestone.yaml - -name: milestone - -on: - pull_request_target: - types: [closed] - branches: - - main - -jobs: - milestone: - runs-on: ubuntu-latest - - permissions: - actions: none - checks: none - contents: read - deployments: none - issues: write - packages: none - pull-requests: write - repository-projects: none - security-events: none - statuses: none - - steps: - - uses: actions/github-script@v7 # v6 - with: - github-token: ${{ secrets.BOT_GITHUB_TOKEN }} - script: | - if (!context.payload.pull_request.merged) { - console.log('PR was not merged, skipping.'); - return; - } - - if (!!context.payload.pull_request.milestone) { - console.log('PR has existing milestone, skipping.'); - return; - } - - milestones = await github.rest.issues.listMilestones({ - owner: context.repo.owner, - repo: context.repo.repo, - state: 'open', - sort: 'title', - direction: 'desc' - }) - - if (milestones.data.length === 0) { - console.log('There are no milestones, skipping.'); - return; - } - - await github.rest.issues.update({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.payload.pull_request.number, - milestone: milestones.data[0].number - }); diff --git a/.github/workflows/opencommit.yml b/.github/workflows/opencommit.yml deleted file mode 100644 index d483ef1f65..0000000000 --- a/.github/workflows/opencommit.yml +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: OpenIM OpenCommit Action - -on: - push: - # this list of branches is often enough, - # but you may still ignore other public branches - branches-ignore: [main master dev development release] - -jobs: - opencommit: - timeout-minutes: 10 - name: OpenCommit - runs-on: ubuntu-latest - permissions: write-all - steps: - - name: Setup Node.js Environment - uses: actions/setup-node@v4 - with: - node-version: '16' - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - uses: di-sukharev/opencommit@github-action-v1.0.4 - with: - GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} - - env: - # set openAI api key in repo actions secrets, - # for openAI keys go to: https://platform.openai.com/account/api-keys - # for repo secret go to: /settings/secrets/actions - OCO_OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - - # customization - OCO_OPENAI_MAX_TOKENS: 500 - OCO_OPENAI_BASE_PATH: '' - OCO_DESCRIPTION: false - OCO_EMOJI: false - OCO_MODEL: gpt-3.5-turbo-16k - OCO_LANGUAGE: en - OCO_PROMPT_MODULE: conventional-commit - continue-on-error: true \ No newline at end of file diff --git a/.github/workflows/openimci.yml b/.github/workflows/openimci.yml deleted file mode 100644 index 8f3630dd08..0000000000 --- a/.github/workflows/openimci.yml +++ /dev/null @@ -1,135 +0,0 @@ - -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-name: OpenIM CI Auto Build - -on: - push: - branches: - - main - - release-* - paths-ignore: - - "docs/**" - - "README.md" - - "README_zh-CN.md" - - "**.md" - - "docs/**" - - "CONTRIBUTING.md" - pull_request: - branches: - - main - - release-* - paths-ignore: - - "README.md" - - "README_zh-CN.md" - - "CONTRIBUTING/**" - - "**.md" - - "docs/**" - workflow_dispatch: - -jobs: - - build-linux: - name: Execute OpenIM Script On Linux - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - environment: - name: openim - strategy: - matrix: - arch: [arm64, armv7, amd64] - - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v4 - with: - go-version: '1.21' - - - name: Set up Docker for Linux - run: | - sudo docker compose up -d - sudo sleep 30 # Increased sleep time for better stability - timeout-minutes: 20 # Increased timeout for Docker setup - - - - name: init - run: sudo bash bootstrap.sh - timeout-minutes: 20 - - # - name: Get Internal IP Address - # id: get-ip - # run: | - # IP=$(hostname -I | awk '{print $1}') - # echo "The IP Address is: $IP" - # echo "::set-output name=ip::$IP" - - # - name: Update .env - # run: | - # sed -i 's|externalAddress:.*|externalAddress: "http://${{ steps.get-ip.outputs.ip }}:10005"|' config/minio.yml - # cat config/minio.yml - - - name: Build, Start, Check Services and Print Logs for Linux - run: | - sudo mage - sudo mage start - sudo mage check - - - - name: Restart Services and Print Logs - run: | - sudo mage stop - sudo mage start - sudo mage check - - - name: Checkout chat repository - uses: actions/checkout@v4 - with: - repository: 'openimsdk/chat' - path: 'chat-repo' - - - name: Build and Start Chat Services - run: | - cd ${{ github.workspace }}/chat-repo - sudo mage - sudo mage start - sudo mage check - - # - name: Checkout e2e repository - # uses: actions/checkout@v4 - # with: - # repository: "openimsdk/test-e2e" - # path: e2e-repo - - # - name: Set up Python 3.9 - # uses: actions/setup-python@v4 - # with: - # python-version: '3.9' - - # - name: Install dependencies - # run: | - # sudo apt-get update - # sudo apt-get install -y xvfb libxi6 libgconf-2-4 - # cd ${{ github.workspace }}/e2e-repo - # pip install -r requirements.txt - - # - name: Run tests - # run: | - # cd ${{ github.workspace }}/e2e-repo - # xvfb-run --auto-servernum --server-args='-screen 0 1920x1080x24' pytest -v -s ./script - - \ No newline at end of file diff --git a/.github/workflows/project-progress.yml b/.github/workflows/project-progress.yml deleted file mode 100644 index 87a4c13810..0000000000 --- a/.github/workflows/project-progress.yml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright Β© 2023 OpenIM open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# GitHub recommends pinning actions to a commit SHA. -# To get a newer version, you will need to update the SHA. -# You can also reference a tag or branch, but the action may change without warning. 
- -name: Move assigned card -on: - issues: - types: - - assigned - pull_request: - types: - - assigned - branches-ignore: - - 'asf-auto-updates' - - 'ignore' - -jobs: - move-assigned-card: - runs-on: ubuntu-latest - steps: - - uses: alex-page/github-project-automation-plus@v0.9.0 - with: - project: openim-powerful - column: In Progress - repo-token: ${{ secrets.BOT_GITHUB_TOKEN }} diff --git a/.github/workflows/publish-docker-image.yml b/.github/workflows/publish-docker-image.yml new file mode 100644 index 0000000000..40b79e61ad --- /dev/null +++ b/.github/workflows/publish-docker-image.yml @@ -0,0 +1,150 @@ +name: Publish Docker image to registries + +on: + push: + branches: + - release-* + # tags: + # - 'v*' + + release: + types: [published] + + workflow_dispatch: + inputs: + tag: + description: "Tag version to be used for Docker image" + required: true + default: "v3.8.0" + +jobs: + build-and-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + path: main-repo + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Docker image + id: build + uses: docker/build-push-action@v5 + with: + context: ./main-repo + load: true + tags: "openim/openim-server:local" + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Save Docker image to file + run: docker save -o image.tar openim/openim-server:local + + - name: Checkout compose repository + uses: actions/checkout@v4 + with: + repository: "openimsdk/openim-docker" + path: "compose-repo" + + - name: Get Internal IP Address + id: get-ip + run: | + IP=$(hostname -I | awk '{print $1}') + echo "The IP Address is: $IP" + echo "::set-output name=ip::$IP" + + - name: Update .env to use the local image + run: | + sed -i 's|OPENIM_SERVER_IMAGE=.*|OPENIM_SERVER_IMAGE=openim/openim-server:local|' ${{ github.workspace }}/compose-repo/.env + sed -i 's|MINIO_EXTERNAL_ADDRESS=.*|MINIO_EXTERNAL_ADDRESS=http://${{ steps.get-ip.outputs.ip }}:10005|' ${{ github.workspace }}/compose-repo/.env + + - name: Start services using Docker Compose + run: | + cd ${{ github.workspace }}/compose-repo + docker compose up -d + sleep 60 + + # - name: Check openim-server health + # run: | + # timeout=300 + # interval=30 + # elapsed=0 + # while [[ $elapsed -le $timeout ]]; do + # if ! docker exec openim-server mage check; then + # echo "openim-server is not ready, waiting..." + # sleep $interval + # elapsed=$(($elapsed + $interval)) + # else + # echo "Health check successful" + # exit 0 + # fi + # done + # echo "Health check failed after 5 minutes" + # exit 1 + + # - name: Check openim-chat health + # if: success() + # run: | + # if ! 
docker exec openim-chat mage check; then + # echo "openim-chat check failed" + # exit 1 + # else + # echo "Health check successful" + # exit 0 + # fi + + - name: Load Docker image from file + run: docker load -i image.tar + + - name: Extract metadata for Docker (tags, labels) + id: meta + uses: docker/metadata-action@v5.5.1 + with: + images: | + openim/openim-server + ghcr.io/openimsdk/openim-server + registry.cn-hangzhou.aliyuncs.com/openimsdk/openim-server + tags: | + type=ref,event=tag + type=schedule + type=ref,event=branch + type=semver,pattern={{version}} + type=semver,pattern=v{{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + type=semver,pattern=release-{{raw}} + type=sha + type=raw,value=${{ github.event.inputs.tag }} + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Log in to Aliyun Container Registry + uses: docker/login-action@v2 + with: + registry: registry.cn-hangzhou.aliyuncs.com + username: ${{ secrets.ALIREGISTRY_USERNAME }} + password: ${{ secrets.ALIREGISTRY_TOKEN }} + + - name: Push Docker images + uses: docker/build-push-action@v5 + with: + context: ./main-repo + push: true + platforms: linux/amd64,linux/arm64 + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/pull-request.bak b/.github/workflows/pull-request.bak deleted file mode 100644 index f7c5900ce7..0000000000 --- a/.github/workflows/pull-request.bak +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: Github Pull Request -on: - workflow_dispatch: - schedule: - - cron: '0 2 * * *' - -permissions: - contents: write - pull-requests: write - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - uses: actions/setup-node@v4 - - name: Setup Go - uses: actions/setup-go@v5 - - name: Run go modules tidy - run: | - sudo apt-get install jq - sudo make tidy - sudo make tools.verify.go-gitlint - echo "Run go modules tidy successfully" - continue-on-error: true - - - name: Run go format and lint - run: | - sudo make format - echo "Run go format successfully" - continue-on-error: true - - - name: Run go lint - run: | - sudo make lint - echo "Run go lint successfully" - continue-on-error: true - - - name: Generate all necessary files, such as error code files - run: | - make gen.docgo.doc - make gen - echo "Generate all necessary files successfully" - continue-on-error: true - - - name: make init - run: | - export OPENIM_IP=127.0.0.1 - export LOG_STORAGE_LOCATION="../logs/" - ./scripts/init-config.sh --examples --force - echo "Generate all necessary files successfully" - continue-on-error: true - - - name: Generate Versions Including Pre-release Identifiers - run: | - latest_tag=$(git describe --tags `git rev-list --tags --max-count=1`) - echo $latest_tag > pkg/common/config/version - continue-on-error: true - - - name: Gen CHANGELOG file - run: | - current_tag=$(git describe --tags --abbrev=0) - version=$(echo "$current_tag" | sed -E 's/^v?([0-9]+)\.([0-9]+)\..*$/\1.\2/') - echo "OpenIM Version: $version" - make tools.install.git-chglog - cd CHANGELOG - git-chglog --tag-filter-pattern "v${version}.*" -o CHANGELOG-${version}.md - cd .. - continue-on-error: true - - - name: Run unit test and get test coverage - run: | - make cover - echo "Run unit test and get test coverage successfully" - continue-on-error: true - - - name: OpenIM verify copyright - run: | - sudo make add-copyright - echo "OpenIM verify successfully" - continue-on-error: true - - - name: Create Pull Request - uses: peter-evans/create-pull-request@v6 - with: - token: ${{ secrets.BOT_GITHUB_TOKEN }} - commit-message: "cicd: bump League Patch" - author: kubbot <3293172751ysy@gmail.com> - committer: kubbot <3293172751ysy@gmail.com> - # signoff: false - # draft: false - branch: "asf-auto-updates" - assignees: cubxxw - reviewers: cubxxw - title: "[Auto PR πŸ€–] Bump League Patch auto PR" - body: | - I am a PR generated by robot automation. - - Review criteria: - - - [ ] Disenchanter can connect and issue actions - - Github Actions Status: - - [![Github Pull Request](https://github.com/openimsdk/open-im-server/actions/workflows/pull-request.yml/badge.svg)](https://github.com/openimsdk/open-im-server/actions/workflows/pull-request.yml) - - This is an automated PR. - [workflow](https://github.com/openimsdk/open-im-server/blob/main/.github/workflows/pull-request.yml). - labels: | - kind/documentation - enhancement - report diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml deleted file mode 100644 index 251f558764..0000000000 --- a/.github/workflows/release-drafter.yml +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: Release Drafter - -on: - push: - # branches to consider in the event; optional, defaults to all - branches: - - main - # pull_request event is required only for autolabeler - pull_request: - # Only following types are handled by the action, but one can default to all as well - # types: [opened, reopened, synchronize] - # pull_request_target event is required for autolabeler to support PRs from forks - # pull_request_target: - # types: [opened, reopened, synchronize] - -permissions: - contents: read - -jobs: - update_release_draft: - permissions: - # write permission is required to create a github release - contents: write - # write permission is required for autolabeler - # otherwise, read permission is required at least - pull-requests: write - runs-on: ubuntu-latest - steps: - # (Optional) GitHub Enterprise requires GHE_HOST variable set - #- name: Set GHE_HOST - # run: | - # echo "GHE_HOST=${GITHUB_SERVER_URL##https:\/\/}" >> $GITHUB_ENV - - # Drafts your next Release notes as Pull Requests are merged into "master" - - uses: release-drafter/release-drafter@v6 - # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml - # with: - # config-name: my-config.yml - # disable-autolabeler: true - env: - GITHUB_TOKEN: ${{ secrets.REDBOT_GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/release.bak b/.github/workflows/release.bak deleted file mode 100644 index c15cff6a3c..0000000000 --- a/.github/workflows/release.bak +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: OpenIM Server Release Workflow - -on: - push: - # run only against tags - tags: - - '*' - -permissions: - contents: write - packages: write - issues: write - -jobs: - goreleaser: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - run: git fetch --force --tags - - uses: actions/setup-go@v5 - with: - go-version: stable - # More assembly might be required: Docker logins, GPG, etc. It all depends - # on your needs. - - uses: goreleaser/goreleaser-action@v5 - with: - # either 'goreleaser' (default) or 'goreleaser-pro': - distribution: goreleaser - version: latest - workdir: . 
- args: release -f ./build/goreleaser.yaml --clean --release-footer-tmpl=scripts/template/footer.md.tmpl --release-header-tmpl=scripts/template/head.md.tmpl - env: - USERNAME: ${{ github.repository_owner }} - GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} - FURY_TOKEN: ${{ secrets.FURY_TOKEN }} - # Your GoReleaser Pro key, if you are using the 'goreleaser-pro' - # distribution: - # GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} - - goreleaser-check-pkgs: - runs-on: ubuntu-latest - env: - DOCKER_CLI_EXPERIMENTAL: "enabled" - needs: [ goreleaser ] - if: github.ref == 'refs/heads/main' - strategy: - matrix: - format: [ deb, rpm, apk ] - steps: - - uses: actions/checkout@v4 # v3 - with: - fetch-depth: 0 - - uses: arduino/setup-task@e26d8975574116b0097a1161e0fe16ba75d84c1c # v1 - with: - version: 3.x - repo-token: ${{ secrets.GITHUB_TOKEN }} - - uses: docker/setup-qemu-action@326560df218a7ea9cf6ab49bbc88b8b306bb437e # v2 - - uses: actions/cache@a2ed59d39b352305bdd2f628719a53b2cc4f9613 # v3 - with: - path: | - ./_output/dist/*.deb - ./_output/dist/*.rpm - ./_output/dist/*.apk - key: ${{ github.ref }} - - run: task goreleaser:test:${{ matrix.format }} diff --git a/.github/workflows/remove-unused-labels.yml b/.github/workflows/remove-unused-labels.yml new file mode 100644 index 0000000000..ab80b1f96e --- /dev/null +++ b/.github/workflows/remove-unused-labels.yml @@ -0,0 +1,74 @@ +name: Remove Unused Labels +on: + workflow_dispatch: + +jobs: + cleanup: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + contents: read + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Fetch All Issues and PRs + id: fetch_issues_prs + uses: actions/github-script@v7.0.1 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const issues = await github.paginate(github.rest.issues.listForRepo, { + owner: context.repo.owner, + repo: context.repo.repo, + state: 'all', + per_page: 100 + }); + + const labelsInUse = new Set(); + issues.forEach(issue => { + issue.labels.forEach(label => { + labelsInUse.add(label.name); + }); + }); + + return JSON.stringify(Array.from(labelsInUse)); + result-encoding: string + + - name: Fetch All Labels + id: fetch_labels + uses: actions/github-script@v7.0.1 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const labels = await github.paginate(github.rest.issues.listLabelsForRepo, { + owner: context.repo.owner, + repo: context.repo.repo, + per_page: 100 + }); + + return JSON.stringify(labels.map(label => label.name)); + result-encoding: string + + - name: Remove Unused Labels + uses: actions/github-script@v7.0.1 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const labelsInUse = new Set(JSON.parse(process.env.LABELS_IN_USE)); + const allLabels = JSON.parse(process.env.ALL_LABELS); + + const unusedLabels = allLabels.filter(label => !labelsInUse.has(label)); + + for (const label of unusedLabels) { + await github.rest.issues.deleteLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + name: label + }); + console.log(`Deleted label: ${label}`); + } + env: + LABELS_IN_USE: ${{ steps.fetch_issues_prs.outputs.result }} + ALL_LABELS: ${{ steps.fetch_labels.outputs.result }} diff --git a/.github/workflows/reopen-issue.yml b/.github/workflows/reopen-issue.yml new file mode 100644 index 0000000000..32f838ba48 --- /dev/null +++ b/.github/workflows/reopen-issue.yml @@ -0,0 +1,78 @@ +name: Reopen and Update Stale Issues + +on: + workflow_dispatch: + +jobs: + reopen_stale_issues: + runs-on: 
ubuntu-latest + permissions: + issues: write + contents: read + + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Fetch Closed Issues with lifecycle/stale Label + id: fetch_issues + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const issues = await github.paginate(github.rest.issues.listForRepo, { + owner: context.repo.owner, + repo: context.repo.repo, + state: 'closed', + labels: 'lifecycle/stale', + per_page: 100 + }); + const issueNumbers = issues + .filter(issue => !issue.pull_request) // exclude PR + .map(issue => issue.number); + console.log(`Fetched issues: ${issueNumbers}`); + return issueNumbers; + + - name: Set issue numbers + id: set_issue_numbers + run: | + echo "ISSUE_NUMBERS=${{ steps.fetch_issues.outputs.result }}" >> $GITHUB_ENV + echo "Issue numbers: ${{ steps.fetch_issues.outputs.result }}" + + - name: Reopen Issues + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const issueNumbers = JSON.parse(process.env.ISSUE_NUMBERS); + console.log(`Reopening issues: ${issueNumbers}`); + + for (const issue_number of issueNumbers) { + // Reopen the issue + await github.rest.issues.update({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue_number, + state: 'open' + }); + console.log(`Reopened issue #${issue_number}`); + } + + - name: Remove lifecycle/stale Label + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const issueNumbers = JSON.parse(process.env.ISSUE_NUMBERS); + console.log(`Removing 'lifecycle/stale' label from issues: ${issueNumbers}`); + + for (const issue_number of issueNumbers) { + // Remove the lifecycle/stale label + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue_number, + name: 'lifecycle/stale' + }); + console.log(`Removed label 'lifecycle/stale' from issue #${issue_number}`); + } diff --git a/.github/workflows/sync-release.bak b/.github/workflows/sync-release.bak deleted file mode 100644 index a85c74fde0..0000000000 --- a/.github/workflows/sync-release.bak +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright Β© 2023 KubeCub open source community. All rights reserved. -# Licensed under the MIT License (the "License"); -# you may not use this file except in compliance with the License. - -# https://github.com/BetaHuhn/repo-file-sync-action -name: Synchronize OpenIM Release Branch Public Code To Other Repositories -on: - push: - paths: - - scripts/* - - docs/* - - config/* - branches: - - release-v*.* - workflow_dispatch: - -jobs: - sync: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Run GitHub File Sync - uses: BetaHuhn/repo-file-sync-action@latest - with: - GH_INSTALLATION_TOKEN: "${{ secrets.BOT_GITHUB_TOKEN }}" - CONFIG_PATH: .github/sync-release.yml - ORIGINAL_MESSAGE: true - SKIP_PR: true - COMMIT_EACH_FILE: false - COMMIT_BODY: "πŸ€– kubbot to synchronize the warehouse" - GIT_EMAIL: "3293172751ysy@gmail.com" - GIT_USERNAME: "kubbot" - PR_BODY: πŸ‘Œ kubecub provides automated community services - REVIEWERS: | - kubbot - cubxxw - PR_LABELS: | - file-sync - automerge - ASSIGNEES: | - kubbot - continue-on-error: true diff --git a/.github/workflows/sync.bak b/.github/workflows/sync.bak deleted file mode 100644 index 595cbbe2c8..0000000000 --- a/.github/workflows/sync.bak +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright Β© 2023 KubeCub open source community. 
All rights reserved. -# Licensed under the MIT License (the "License"); -# you may not use this file except in compliance with the License. - -# https://github.com/BetaHuhn/repo-file-sync-action -name: Synchronize OpenIM Main Branch Public Code To Other Repositories -on: - push: - branches: - - main - workflow_dispatch: - -jobs: - sync: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Run GitHub File Sync - uses: BetaHuhn/repo-file-sync-action@latest - with: - GH_INSTALLATION_TOKEN: "${{ secrets.BOT_GITHUB_TOKEN }}" - CONFIG_PATH: .github/sync.yml - ORIGINAL_MESSAGE: true - SKIP_PR: true - COMMIT_EACH_FILE: false - COMMIT_BODY: "πŸ€– kubbot to synchronize the warehouse" - GIT_EMAIL: "3293172751ysy@gmail.com" - GIT_USERNAME: "kubbot" - PR_BODY: πŸ‘Œ kubecub provides automated community services - REVIEWERS: | - kubbot - cubxxw - PR_LABELS: | - file-sync - automerge - ASSIGNEES: | - kubbot - continue-on-error: true diff --git a/.github/workflows/greetings.yml b/.github/workflows/user-first-interaction.yml similarity index 65% rename from .github/workflows/greetings.yml rename to .github/workflows/user-first-interaction.yml index b1c85ee375..6999889eb0 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/user-first-interaction.yml @@ -1,18 +1,4 @@ -# Copyright Β© 2023 OpenIM. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: OpenIM First Interaction +name: User First Interaction on: issues: @@ -28,7 +14,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/first-interaction@v1.3.0 with: - repo-token: ${{ secrets.BOT_GITHUB_TOKEN }} + repo-token: ${{ secrets.BOT_TOKEN }} pr-message: | Hello! Thank you for your contribution. diff --git a/.golangci.yml b/.golangci.yml index ae8cea6732..a95e980f85 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,20 +1,3 @@ -# Copyright Β© 2023 OpenIMSDK open source community. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file contains all available configuration options -# with their default values. - # options for analysis running run: # default concurrency is a available CPU number @@ -302,7 +285,7 @@ linters-settings: gofumpt: # Select the Go version to target. The default is `1.18`. 
- lang-version: "1.20" + go-version: "1.21" # Choose whether or not to use the extra rules that are disabled # by default diff --git a/Dockerfile b/Dockerfile index e082dd64c8..f8cfbda9ef 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Use Go 1.21 Alpine as the base image for building the application -FROM golang:1.21-alpine as builder +FROM golang:1.21-alpine AS builder # Define the base directory for the application as an environment variable ENV SERVER_DIR=/openim-server diff --git a/config/discovery.yml b/config/discovery.yml index 3d96ff9b66..78a36f3d1f 100644 --- a/config/discovery.yml +++ b/config/discovery.yml @@ -1,4 +1,4 @@ -enable: "etcd" +enable: etcd etcd: rootDirectory: openim address: [ localhost:12379 ] diff --git a/config/grafana-template/Demo.json b/config/grafana-template/Demo.json index dbb11fbf31..ea17d2c0ab 100644 --- a/config/grafana-template/Demo.json +++ b/config/grafana-template/Demo.json @@ -1,4 +1,35 @@ { + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "11.0.1" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], "annotations": { "list": [ { @@ -18,12 +49,12 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 3, + "id": null, "links": [], "liveNow": false, "panels": [ { - "collapsed": true, + "collapsed": false, "gridPos": { "h": 1, "w": 24, @@ -31,1129 +62,1251 @@ "y": 0 }, "id": 35, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "description": "Is the service up.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "stepBefore", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 2, - "pointSize": 9, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bool_on_off" + "panels": [], + "title": "Server", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Is the service up.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 6, - "y": 1 - }, - "id": 1, - "options": { - "legend": { - "calcs": 
[], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "insertNulls": false, + "lineInterpolation": "stepBefore", + "lineStyle": { + "fill": "solid" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" + "lineWidth": 2, + "pointSize": 9, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "code", - "exemplar": false, - "expr": "up", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "$legendName", - "range": true, - "refId": "A" - } - ], - "title": "UP", - "type": "timeseries" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bool_on_off" }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 6, + "y": 1 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "up", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "$legendName", + "range": true, + "refId": "A" + } + ], + "title": "UP", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of online users and login users within the time frame.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, - "description": "This metric represents the number of online users and login users within the time frame.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": 
"absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "unit": "none" - }, - "overrides": [ { - "matcher": { - "id": "byName", - "options": "online users" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#37bbff", - "mode": "fixed", - "seriesBy": "last" - } - } - ] + "color": "red", + "value": 80 } ] }, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 12 - }, - "id": 37, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "exemplar": false, - "expr": "online_user_num", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "online users", - "range": true, - "refId": "A" + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "online users" }, - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "expr": "increase(user_login_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "login num", - "range": true, - "refId": "B" - } - ], - "title": "Login Information", - "type": "timeseries" + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#37bbff", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 12 + }, + "id": 37, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "description": "This metric represents the number of register users within the time frame.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "register users" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#7437ff", - "mode": "fixed", - "seriesBy": "last" - } - } - ] - } - ] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 12 - }, - "id": 59, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true 
- }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "exemplar": false, - "expr": "user_register_total", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "register users", - "range": true, - "refId": "A" - } - ], - "title": "Register num", - "type": "timeseries" + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "online_user_num", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "online users", + "range": true, + "refId": "A" }, { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "description": "This metric represents the number of chat msg success.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "increase(user_login_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "login num", + "range": true, + "refId": "B" + } + ], + "title": "Login Information", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of register users within the time frame.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 23 - }, - "id": 38, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(single_chat_msg_process_success_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "single msgs", - "range": true, - "refId": "A" + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - { - "datasource": { - "type": 
"prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "expr": "increase(group_chat_msg_process_success_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "group msgs", - "range": true, - "refId": "B" + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } - ], - "title": "Chat Msg Success Num", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" }, - "description": "This metric represents the number of chat msg failed .", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [ + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ { - "matcher": { - "id": "byName", - "options": "single msgs" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#ff00dc", - "mode": "fixed", - "seriesBy": "last" - } - } - ] + "color": "green", + "value": null }, { - "matcher": { - "id": "byName", - "options": "group msgs" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#0cffef", - "mode": "fixed" - } - } - ] + "color": "red", + "value": 80 } ] }, - "gridPos": { - "h": 10, - "w": 12, - "x": 12, - "y": 23 - }, - "id": 39, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "register users" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(single_chat_msg_process_failed_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "single msgs", - "range": true, - "refId": "A" + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#7437ff", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 12 + }, + "id": 59, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "user_register_total", + "format": "time_series", + "hide": false, + "instant": false, + 
"interval": "", + "legendFormat": "register users", + "range": true, + "refId": "A" + } + ], + "title": "Register num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of chat msg success.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" }, - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "expr": "increase(group_chat_msg_process_failed_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "group msgs", - "range": true, - "refId": "B" + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } - ], - "title": "Chat Msg Failed Num", - "type": "timeseries" + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 23 + }, + "id": 38, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "increase(single_chat_msg_process_success_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "single msgs", + "range": true, + "refId": "A" }, { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "increase(group_chat_msg_process_success_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "group msgs", + "range": true, + "refId": "B" + } + ], + "title": "Chat Msg Success Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of chat msg failed .", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, - "description": "This metric represents the number of msg 
failed offline pushed.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "unit": "none" - }, - "overrides": [ { - "matcher": { - "id": "byName", - "options": "failed msgs" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "dark-red", - "mode": "fixed", - "seriesBy": "last" - } - } - ] + "color": "red", + "value": 80 } ] }, - "gridPos": { - "h": 11, - "w": 6, - "x": 4, - "y": 33 + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "single msgs" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff00dc", + "mode": "fixed", + "seriesBy": "last" + } + } + ] }, - "id": 42, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + { + "matcher": { + "id": "byName", + "options": "group msgs" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#0cffef", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 23 + }, + "id": 39, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "increase(single_chat_msg_process_failed_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "single msgs", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "increase(group_chat_msg_process_failed_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "group msgs", + "range": true, + "refId": "B" + } + ], + "title": "Chat Msg Failed Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of msg failed offline pushed.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, 
+ "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(msg_offline_push_failed_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "failed msgs", - "range": true, - "refId": "A" - } - ], - "title": "Msg Offline Push Failed Num", - "type": "timeseries" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "failed msgs" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 8, + "x": 0, + "y": 33 + }, + "id": 42, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "increase(msg_offline_push_failed_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "addr:{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Msg Offline Push Failed Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of failed set seq.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, - "description": "This metric represents the number of failed set seq.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" 
- }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "unit": "none" - }, - "overrides": [ { - "matcher": { - "id": "byName", - "options": "failed msgs" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "semi-dark-green", - "mode": "fixed", - "seriesBy": "last" - } - } - ] + "color": "red", + "value": 80 } ] }, - "gridPos": { - "h": 11, - "w": 6, - "x": 14, - "y": 33 - }, - "id": 43, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "failed msgs" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(seq_set_failed_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "failed addr: {{instance}}", - "range": true, - "refId": "A" - } - ], - "title": "Seq Set Failed Num", - "type": "timeseries" + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 8, + "x": 8, + "y": 33 + }, + "id": 43, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "description": "This metric represents the number of successfully inserted messages.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "increase(seq_set_failed_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "addr: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Seq Set Failed Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of messages that take a long time to send.", + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 44 - }, - "id": 44, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(msg_insert_redis_success_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "redis: {{instance}}", - "range": true, - "refId": "A" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "failed msgs" }, - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "expr": "increase(msg_insert_mongo_success_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "mongo: {{instance}}", - "range": true, - "refId": "B" - } - ], - "title": "Msg Success Insert Num", - "type": "timeseries" + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 8, + "x": 16, + "y": 33 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "msg_long_time_push_total", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "addr:{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Long Time Send Msg Total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of successfully inserted messages.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 
1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, - "description": "This metric represents the number of failed insertion messages.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "fieldMinMax": false, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "unit": "none" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 44 + }, + "id": 44, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "increase(msg_insert_redis_success_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "redis: {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "increase(msg_insert_mongo_success_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "mongo: {{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Msg Success Insert Num", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of failed insertion messages.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 12, - "y": 44 - }, - "id": 45, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + 
"thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" - }, - "editorMode": "code", - "exemplar": false, - "expr": "increase(msg_insert_redis_failed_total[$time])", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "redis: {{instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "editorMode": "code", - "expr": "increase(msg_insert_mongo_failed_total[$time])", - "hide": false, - "instant": false, - "legendFormat": "mongo: {{instance}}", - "range": true, - "refId": "B" - } - ], - "title": "Msg Failed Insert Num", - "type": "timeseries" + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 44 + }, + "id": 45, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "increase(msg_insert_redis_failed_total[$time])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "redis: {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "increase(msg_insert_mongo_failed_total[$time])", + "hide": false, + "instant": false, + "legendFormat": "mongo: {{instance}}", + "range": true, + "refId": "B" } ], - "title": "Server", - "type": "row" + "title": "Msg Failed Insert Num", + "type": "timeseries" }, { "collapsed": true, @@ -1161,14 +1314,14 @@ "h": 1, "w": 24, "x": 0, - "y": 1 + "y": 54 }, "id": 22, "panels": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of call of all API.", "fieldConfig": { @@ -1217,8 +1370,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1254,7 +1406,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -1274,7 +1426,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of call of all API within the time frame.", "fieldConfig": { @@ -1323,8 +1475,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1385,7 +1536,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -1405,7 +1556,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of err return of API.", "fieldConfig": { @@ -1454,8 +1605,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": 
"green" }, { "color": "red", @@ -1491,7 +1641,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -1511,7 +1661,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of err return of API with err code.", "fieldConfig": { @@ -1560,8 +1710,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1597,7 +1746,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -1617,7 +1766,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the qps of API.", "fieldConfig": { @@ -1666,8 +1815,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1719,7 +1867,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -1739,7 +1887,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of err return of API within the time frame.", "fieldConfig": { @@ -1788,8 +1936,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1825,7 +1972,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -1845,7 +1992,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of err return of API with err code within the time frame..", "fieldConfig": { @@ -1894,8 +2041,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1931,7 +2077,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -1958,14 +2104,14 @@ "h": 1, "w": 24, "x": 0, - "y": 2 + "y": 55 }, "id": 28, "panels": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of call of all RPC.", "fieldConfig": { @@ -2014,8 +2160,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2051,7 +2196,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -2071,7 +2216,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the error return of RPC.", "fieldConfig": { @@ -2120,8 +2265,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2157,7 +2301,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" 
}, "editorMode": "code", "exemplar": false, @@ -2177,7 +2321,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the error return of RPC with code.", "fieldConfig": { @@ -2226,8 +2370,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2263,7 +2406,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -2283,7 +2426,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of call of all RPC within the time frame.", "fieldConfig": { @@ -2368,7 +2511,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -2388,7 +2531,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of RPC calls within the time frame, aggregated by name.", "fieldConfig": { @@ -2473,7 +2616,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -2493,7 +2636,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of call of RPC within the time frame, aggregated by address.", "fieldConfig": { @@ -2578,7 +2721,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -2598,7 +2741,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the error return of RPC within the time frame within the time frame.", "fieldConfig": { @@ -2683,7 +2826,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -2703,7 +2846,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the error return of RPC with code within the time frame within the time frame.", "fieldConfig": { @@ -2788,7 +2931,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -2815,14 +2958,14 @@ "h": 1, "w": 24, "x": 0, - "y": 3 + "y": 56 }, "id": 25, "panels": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of HTTP requests.", "fieldConfig": { @@ -2871,8 +3014,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2908,7 +3050,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -2928,7 +3070,7 @@ { "datasource": { "type": "prometheus", - "uid": 
"f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of HTTP requests with status.", "fieldConfig": { @@ -2977,8 +3119,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3014,7 +3155,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -3034,7 +3175,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of HTTP requests within the time frame.", "fieldConfig": { @@ -3083,8 +3224,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3120,7 +3260,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -3140,7 +3280,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of HTTP requests with status within the time frame.", "fieldConfig": { @@ -3189,8 +3329,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3226,7 +3365,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -3246,7 +3385,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the qps of HTTP.", "fieldConfig": { @@ -3347,7 +3486,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -3374,14 +3513,14 @@ "h": 1, "w": 24, "x": 0, - "y": 4 + "y": 57 }, "id": 6, "panels": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the proportion of CPU runtime within 1 second. It is calculated as the average CPU runtime over 1 minute.", "fieldConfig": { @@ -3444,7 +3583,7 @@ "h": 11, "w": 12, "x": 0, - "y": 16 + "y": 5 }, "id": 5, "options": { @@ -3465,7 +3604,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -3485,7 +3624,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the proportion of CPU runtime within 1 second. 
It is calculated as the average CPU runtime over 1 minute.", "fieldConfig": { @@ -3548,7 +3687,7 @@ "h": 11, "w": 12, "x": 12, - "y": 16 + "y": 5 }, "id": 4, "options": { @@ -3569,7 +3708,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -3589,7 +3728,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of open file descriptors.", "fieldConfig": { @@ -3652,7 +3791,7 @@ "h": 11, "w": 12, "x": 0, - "y": 27 + "y": 16 }, "id": 7, "options": { @@ -3673,7 +3812,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -3693,7 +3832,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of open file descriptors.", "fieldConfig": { @@ -3756,7 +3895,7 @@ "h": 11, "w": 12, "x": 12, - "y": 27 + "y": 16 }, "id": 8, "options": { @@ -3777,7 +3916,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -3795,23 +3934,113 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "This metric represents the number of process virtual memory bytes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, "gridPos": { "h": 11, "w": 12, "x": 0, - "y": 38 + "y": 27 }, "id": 9, - "libraryPanel": { - "name": "Virtual Memory bytes", - "uid": "fdriqgnk5lnnke" + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } }, - "title": "Virtual Memory bytes" + "pluginVersion": "10.3.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n process_virtual_memory_bytes{job=~\"$rpcNameFilter\"},\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{job}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Virtual Memory bytes", + "type": "timeseries" }, { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, 
"description": "This metric represents the number of process virtual memory bytes.", "fieldConfig": { @@ -3857,7 +4086,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3873,7 +4103,7 @@ "h": 11, "w": 12, "x": 12, - "y": 38 + "y": 27 }, "id": 10, "options": { @@ -3894,7 +4124,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -3914,7 +4144,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of process resident memory bytes.", "fieldConfig": { @@ -3976,7 +4206,7 @@ "h": 11, "w": 12, "x": 0, - "y": 49 + "y": 38 }, "id": 11, "options": { @@ -3997,7 +4227,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -4017,7 +4247,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of process resident memory bytes.", "fieldConfig": { @@ -4079,7 +4309,7 @@ "h": 11, "w": 12, "x": 12, - "y": 49 + "y": 38 }, "id": 12, "options": { @@ -4100,7 +4330,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -4127,14 +4357,14 @@ "h": 1, "w": 24, "x": 0, - "y": 5 + "y": 58 }, "id": 3, "panels": [ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "Measures the frequency of garbage collection operations in the Go environment, averaged over the last five minutes.", "fieldConfig": { @@ -4183,8 +4413,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4220,7 +4449,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -4240,7 +4469,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "Measures the frequency of garbage collection operations in the Go environment, averaged over the last five minutes.", "fieldConfig": { @@ -4285,8 +4514,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4322,7 +4550,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "label_replace(\r\n rate(go_gc_duration_seconds_count{job!~\"$rpcNameFilter\"}[5m]),\r\n \"job\",\r\n \"$1\",\r\n \"job\",\r\n \".*openim-(.*)\"\r\n)", @@ -4339,7 +4567,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of goroutines.", "fieldConfig": { @@ -4388,8 +4616,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4425,7 +4652,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -4445,7 +4672,7 @@ { "datasource": { 
"type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of goroutines.", "fieldConfig": { @@ -4494,8 +4721,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4531,7 +4757,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -4551,7 +4777,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of bytes allocated and still in use.", "fieldConfig": { @@ -4600,8 +4826,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4637,7 +4862,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -4657,7 +4882,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of bytes allocated and still in use.", "fieldConfig": { @@ -4706,8 +4931,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4743,7 +4967,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -4763,7 +4987,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of bytes used by the profiling bucket hash table.", "fieldConfig": { @@ -4812,8 +5036,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4849,7 +5072,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -4869,7 +5092,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of bytes used by the profiling bucket hash table.", "fieldConfig": { @@ -4918,8 +5141,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4955,7 +5177,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -4975,7 +5197,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of bytes in use by mcache structures.", "fieldConfig": { @@ -5024,8 +5246,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5061,7 +5282,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -5081,7 +5302,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "description": "This metric represents the number of bytes in use by mcache 
structures.", "fieldConfig": { @@ -5130,8 +5351,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5167,7 +5387,7 @@ { "datasource": { "type": "prometheus", - "uid": "f6b25a33-8915-4220-ad0b-2c4c60eb07ab" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "exemplar": false, @@ -5243,9 +5463,9 @@ }, { "current": { - "selected": true, - "text": "1h", - "value": "1h" + "selected": false, + "text": "5m", + "value": "5m" }, "description": "Global promQL time range.", "hide": 0, @@ -5260,7 +5480,7 @@ "value": "1m" }, { - "selected": false, + "selected": true, "text": "5m", "value": "5m" }, @@ -5270,7 +5490,7 @@ "value": "30m" }, { - "selected": true, + "selected": false, "text": "1h", "value": "1h" }, @@ -5351,6 +5571,6 @@ "timezone": "", "title": "Demo", "uid": "a506d250-b606-4702-86a7-ac6aa1d069a1", - "version": 22, + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/config/kafka.yml b/config/kafka.yml index d412e1be06..fd06ae2bb4 100644 --- a/config/kafka.yml +++ b/config/kafka.yml @@ -3,34 +3,38 @@ username: '' # Password for authentication password: '' # Producer acknowledgment settings -producerAck: "" +producerAck: # Compression type to use (e.g., none, gzip, snappy) -compressType: "none" +compressType: none # List of Kafka broker addresses address: [ localhost:19094 ] # Kafka topic for Redis integration -toRedisTopic: "toRedis" +toRedisTopic: toRedis # Kafka topic for MongoDB integration -toMongoTopic: "toMongo" +toMongoTopic: toMongo # Kafka topic for push notifications -toPushTopic: "toPush" +toPushTopic: toPush +# Kafka topic for offline push notifications +toOfflinePushTopic: toOfflinePush # Consumer group ID for Redis topic toRedisGroupID: redis # Consumer group ID for MongoDB topic toMongoGroupID: mongo # Consumer group ID for push notifications topic toPushGroupID: push +# Consumer group ID for offline push notifications topic +toOfflinePushGroupID: offlinePush # TLS (Transport Layer Security) configuration tls: # Enable or disable TLS enableTLS: false # CA certificate file path - caCrt: "" + caCrt: # Client certificate file path - clientCrt: "" + clientCrt: # Client key file path - clientKey: "" + clientKey: # Client key password - clientKeyPwd: "" + clientKeyPwd: # Whether to skip TLS verification (not recommended for production) insecureSkipVerify: false diff --git a/config/minio.yml b/config/minio.yml index 11a9ace354..ad1a32a8c2 100644 --- a/config/minio.yml +++ b/config/minio.yml @@ -1,15 +1,15 @@ # Name of the bucket in MinIO -bucket: "openim" +bucket: openim # Access key ID for MinIO authentication -accessKeyID: "root" +accessKeyID: root # Secret access key for MinIO authentication -secretAccessKey: "openIM123" +secretAccessKey: openIM123 # Session token for MinIO authentication (optional) -sessionToken: '' +sessionToken: # Internal address of the MinIO server -internalAddress: "localhost:10005" +internalAddress: localhost:10005 # External address of the MinIO server, accessible from outside. 
Supports both HTTP and HTTPS using a domain name -externalAddress: "http://external_ip:10005" +externalAddress: http://external_ip:10005 # Flag to enable or disable public read access to the bucket publicRead: false diff --git a/config/mongodb.yml b/config/mongodb.yml index 98f5694e45..78f85992c9 100644 --- a/config/mongodb.yml +++ b/config/mongodb.yml @@ -1,5 +1,5 @@ # URI for database connection, leave empty if using address and credential settings directly -uri: '' +uri: # List of MongoDB server addresses address: [ localhost:37017 ] # Name of the database diff --git a/config/notification.yml b/config/notification.yml index 278376c244..85ca91af18 100644 --- a/config/notification.yml +++ b/config/notification.yml @@ -28,11 +28,11 @@ groupCreated: # Enables or disables offline push notifications. enable: false # Title for the notification when a group is created. - title: "create group title" + title: create group title # Description for the notification. - desc: "create group desc" + desc: create group desc # Additional information for the notification. - ext: "create group ext" + ext: create group ext groupInfoSet: isSendMsg: false @@ -40,9 +40,9 @@ groupInfoSet: unreadCount: false offlinePush: enable: false - title: "groupInfoSet title" - desc: "groupInfoSet desc" - ext: "groupInfoSet ext" + title: groupInfoSet title + desc: groupInfoSet desc + ext: groupInfoSet ext joinGroupApplication: @@ -51,9 +51,9 @@ joinGroupApplication: unreadCount: false offlinePush: enable: false - title: "joinGroupApplication title" - desc: "joinGroupApplication desc" - ext: "joinGroupApplication ext" + title: joinGroupApplication title + desc: joinGroupApplication desc + ext: joinGroupApplication ext memberQuit: isSendMsg: true @@ -61,9 +61,9 @@ memberQuit: unreadCount: false offlinePush: enable: false - title: "memberQuit title" - desc: "memberQuit desc" - ext: "memberQuit ext" + title: memberQuit title + desc: memberQuit desc + ext: memberQuit ext groupApplicationAccepted: isSendMsg: false @@ -71,9 +71,9 @@ groupApplicationAccepted: unreadCount: false offlinePush: enable: false - title: "groupApplicationAccepted title" - desc: "groupApplicationAccepted desc" - ext: "groupApplicationAccepted ext" + title: groupApplicationAccepted title + desc: groupApplicationAccepted desc + ext: groupApplicationAccepted ext groupApplicationRejected: isSendMsg: false @@ -81,9 +81,9 @@ groupApplicationRejected: unreadCount: false offlinePush: enable: false - title: "groupApplicationRejected title" - desc: "groupApplicationRejected desc" - ext: "groupApplicationRejected ext" + title: groupApplicationRejected title + desc: groupApplicationRejected desc + ext: groupApplicationRejected ext groupOwnerTransferred: @@ -92,9 +92,9 @@ groupOwnerTransferred: unreadCount: false offlinePush: enable: false - title: "groupOwnerTransferred title" - desc: "groupOwnerTransferred desc" - ext: "groupOwnerTransferred ext" + title: groupOwnerTransferred title + desc: groupOwnerTransferred desc + ext: groupOwnerTransferred ext memberKicked: isSendMsg: true @@ -102,9 +102,9 @@ memberKicked: unreadCount: false offlinePush: enable: false - title: "memberKicked title" - desc: "memberKicked desc" - ext: "memberKicked ext" + title: memberKicked title + desc: memberKicked desc + ext: memberKicked ext memberInvited: isSendMsg: true @@ -112,9 +112,9 @@ memberInvited: unreadCount: false offlinePush: enable: false - title: "memberInvited title" - desc: "memberInvited desc" - ext: "memberInvited ext" + title: memberInvited title + desc: memberInvited desc + 
ext: memberInvited ext memberEnter: isSendMsg: true @@ -122,9 +122,9 @@ memberEnter: unreadCount: false offlinePush: enable: false - title: "memberEnter title" - desc: "memberEnter desc" - ext: "memberEnter ext" + title: memberEnter title + desc: memberEnter desc + ext: memberEnter ext groupDismissed: isSendMsg: true @@ -132,9 +132,9 @@ groupDismissed: unreadCount: false offlinePush: enable: false - title: "groupDismissed title" - desc: "groupDismissed desc" - ext: "groupDismissed ext" + title: groupDismissed title + desc: groupDismissed desc + ext: groupDismissed ext groupMuted: isSendMsg: true @@ -142,9 +142,9 @@ groupMuted: unreadCount: false offlinePush: enable: false - title: "groupMuted title" - desc: "groupMuted desc" - ext: "groupMuted ext" + title: groupMuted title + desc: groupMuted desc + ext: groupMuted ext groupCancelMuted: isSendMsg: true @@ -152,11 +152,11 @@ groupCancelMuted: unreadCount: false offlinePush: enable: false - title: "groupCancelMuted title" - desc: "groupCancelMuted desc" - ext: "groupCancelMuted ext" + title: groupCancelMuted title + desc: groupCancelMuted desc + ext: groupCancelMuted ext defaultTips: - tips: "group Cancel Muted" + tips: group Cancel Muted groupMemberMuted: @@ -165,9 +165,9 @@ groupMemberMuted: unreadCount: false offlinePush: enable: false - title: "groupMemberMuted title" - desc: "groupMemberMuted desc" - ext: "groupMemberMuted ext" + title: groupMemberMuted title + desc: groupMemberMuted desc + ext: groupMemberMuted ext groupMemberCancelMuted: isSendMsg: true @@ -175,9 +175,9 @@ groupMemberCancelMuted: unreadCount: false offlinePush: enable: false - title: "groupMemberCancelMuted title" - desc: "groupMemberCancelMuted desc" - ext: "groupMemberCancelMuted ext" + title: groupMemberCancelMuted title + desc: groupMemberCancelMuted desc + ext: groupMemberCancelMuted ext groupMemberInfoSet: isSendMsg: false @@ -185,9 +185,9 @@ groupMemberInfoSet: unreadCount: false offlinePush: enable: false - title: "groupMemberInfoSet title" - desc: "groupMemberInfoSet desc" - ext: "groupMemberInfoSet ext" + title: groupMemberInfoSet title + desc: groupMemberInfoSet desc + ext: groupMemberInfoSet ext groupInfoSetAnnouncement: isSendMsg: true @@ -195,9 +195,9 @@ groupInfoSetAnnouncement: unreadCount: false offlinePush: enable: false - title: "groupInfoSetAnnouncement title" - desc: "groupInfoSetAnnouncement desc" - ext: "groupInfoSetAnnouncement ext" + title: groupInfoSetAnnouncement title + desc: groupInfoSetAnnouncement desc + ext: groupInfoSetAnnouncement ext groupInfoSetName: @@ -206,9 +206,9 @@ groupInfoSetName: unreadCount: false offlinePush: enable: false - title: "groupInfoSetName title" - desc: "groupInfoSetName desc" - ext: "groupInfoSetName ext" + title: groupInfoSetName title + desc: groupInfoSetName desc + ext: groupInfoSetName ext #############################friend################################# @@ -218,9 +218,9 @@ friendApplicationAdded: unreadCount: false offlinePush: enable: false - title: "Somebody applies to add you as a friend" - desc: "Somebody applies to add you as a friend" - ext: "Somebody applies to add you as a friend" + title: Somebody applies to add you as a friend + desc: Somebody applies to add you as a friend + ext: Somebody applies to add you as a friend friendApplicationApproved: isSendMsg: true @@ -228,9 +228,9 @@ friendApplicationApproved: unreadCount: false offlinePush: enable: true - title: "Someone applies to add your friend application" - desc: "Someone applies to add your friend application" - ext: "Someone applies to 
add your friend application" + title: Someone applies to add your friend application + desc: Someone applies to add your friend application + ext: Someone applies to add your friend application friendApplicationRejected: isSendMsg: false @@ -238,9 +238,9 @@ friendApplicationRejected: unreadCount: false offlinePush: enable: true - title: "Someone rejected your friend application" - desc: "Someone rejected your friend application" - ext: "Someone rejected your friend application" + title: Someone rejected your friend application + desc: Someone rejected your friend application + ext: Someone rejected your friend application friendAdded: isSendMsg: false @@ -248,9 +248,9 @@ friendAdded: unreadCount: false offlinePush: enable: true - title: "We have become friends" - desc: "We have become friends" - ext: "We have become friends" + title: We have become friends + desc: We have become friends + ext: We have become friends friendDeleted: isSendMsg: false @@ -258,9 +258,9 @@ friendDeleted: unreadCount: false offlinePush: enable: true - title: "deleted a friend" - desc: "deleted a friend" - ext: "deleted a friend" + title: deleted a friend + desc: deleted a friend + ext: deleted a friend friendRemarkSet: isSendMsg: false @@ -268,9 +268,9 @@ friendRemarkSet: unreadCount: false offlinePush: enable: true - title: "Your friend's profile has been changed" - desc: "Your friend's profile has been changed" - ext: "Your friend's profile has been changed" + title: Your friend's profile has been changed + desc: Your friend's profile has been changed + ext: Your friend's profile has been changed blackAdded: isSendMsg: false @@ -278,9 +278,9 @@ blackAdded: unreadCount: false offlinePush: enable: true - title: "blocked a user" - desc: "blocked a user" - ext: "blocked a user" + title: blocked a user + desc: blocked a user + ext: blocked a user blackDeleted: isSendMsg: false @@ -288,9 +288,9 @@ blackDeleted: unreadCount: false offlinePush: enable: true - title: "Remove a blocked user" - desc: "Remove a blocked user" - ext: "Remove a blocked user" + title: Remove a blocked user + desc: Remove a blocked user + ext: Remove a blocked user friendInfoUpdated: isSendMsg: false @@ -298,9 +298,9 @@ friendInfoUpdated: unreadCount: false offlinePush: enable: true - title: "friend info updated" - desc: "friend info updated" - ext: "friend info updated" + title: friend info updated + desc: friend info updated + ext: friend info updated #####################user######################### userInfoUpdated: @@ -309,9 +309,9 @@ userInfoUpdated: unreadCount: false offlinePush: enable: true - title: "Remove a blocked user" - desc: "Remove a blocked user" - ext: "Remove a blocked user" + title: Remove a blocked user + desc: Remove a blocked user + ext: Remove a blocked user userStatusChanged: isSendMsg: false @@ -319,9 +319,9 @@ userStatusChanged: unreadCount: false offlinePush: enable: false - title: "user status changed" - desc: "user status changed" - ext: "user status changed" + title: user status changed + desc: user status changed + ext: user status changed #####################conversation######################### conversationChanged: @@ -330,9 +330,9 @@ conversationChanged: unreadCount: false offlinePush: enable: true - title: "conversation changed" - desc: "conversation changed" - ext: "conversation changed" + title: conversation changed + desc: conversation changed + ext: conversation changed conversationSetPrivate: isSendMsg: true @@ -340,6 +340,6 @@ conversationSetPrivate: unreadCount: false offlinePush: enable: true - 
title: "burn after reading" - desc: "burn after reading" - ext: "burn after reading" + title: burn after reading + desc: burn after reading + ext: burn after reading diff --git a/config/openim-api.yml b/config/openim-api.yml index 78a688fcd6..4c38e1005b 100644 --- a/config/openim-api.yml +++ b/config/openim-api.yml @@ -3,11 +3,14 @@ api: listenIP: 0.0.0.0 # Listening ports; if multiple are configured, multiple instances will be launched, must be consistent with the number of prometheus.ports ports: [ 10002 ] + # API compression level; 0: default compression, 1: best compression, 2: best speed, -1: no compression + compressionLevel: 0 + prometheus: # Whether to enable prometheus enable: true # Prometheus listening ports, must match the number of api.ports - ports: [ 20113 ] + ports: [ 12002 ] # This address can be accessed via a browser grafanaURL: http://127.0.0.1:13000/ diff --git a/config/openim-crontask.yml b/config/openim-crontask.yml index 3839104a44..c05bd2485f 100644 --- a/config/openim-crontask.yml +++ b/config/openim-crontask.yml @@ -1,3 +1,3 @@ -cronExecuteTime: "0 2 * * *" +cronExecuteTime: 0 2 * * * retainChatRecords: 365 fileExpireTime: 90 diff --git a/config/openim-msggateway.yml b/config/openim-msggateway.yml index 0c92d83278..5659c6f9b3 100644 --- a/config/openim-msggateway.yml +++ b/config/openim-msggateway.yml @@ -1,14 +1,14 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10140 ] + ports: [ 10140, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 10148, 10149, 10150, 10151, 10152, 10153, 10154, 10155 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20112 ] + ports: [ 12140, 12141, 12142, 12143, 12144, 12145, 12146, 12147, 12148, 12149, 12150, 12151, 12152, 12153, 12154, 12155 ] # IP address that the RPC/WebSocket service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 @@ -23,8 +23,4 @@ longConnSvr: # WebSocket connection handshake timeout in seconds websocketTimeout: 10 -# 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time -multiLoginPolicy: 1 - - diff --git a/config/openim-msgtransfer.yml b/config/openim-msgtransfer.yml index 07a7dc1ab1..94ed073d86 100644 --- a/config/openim-msgtransfer.yml +++ b/config/openim-msgtransfer.yml @@ -3,4 +3,4 @@ prometheus: enable: true # List of ports that Prometheus listens on; each port corresponds to an instance of monitoring. 
Ensure these are managed accordingly # Because four instances have been launched, four ports need to be specified - ports: [ 20108, 20109, 20110, 20111 ] + ports: [ 12020, 12021, 12022, 12023, 12024, 12025, 12026, 12027, 12028, 12029, 12030, 12031, 12032, 12033, 12034, 12035 ] diff --git a/config/openim-push.yml b/config/openim-push.yml index 9384008a04..4d2aaca6b0 100644 --- a/config/openim-push.yml +++ b/config/openim-push.yml @@ -1,46 +1,41 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10170 ] + ports: [ 10170, 10171, 10172, 10173, 10174, 10175, 10176, 10177, 10178, 10179, 10180, 10181, 10182, 10183, 10184, 10185 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20107 ] + ports: [ 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12182, 12183, 12184, 12185, 12186 ] maxConcurrentWorkers: 3 -#"Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified." -enable: "geTui" +#Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified. +enable: geTui geTui: - pushUrl: "https://restapi.getui.com/v2/$appId" - masterSecret: '' - appKey: '' - intent: '' - channelID: '' - channelName: '' + pushUrl: https://restapi.getui.com/v2/$appId + masterSecret: + appKey: + intent: + channelID: + channelName: fcm: # Prioritize using file paths. If the file path is empty, use URL - filePath: "" # File path is concatenated with the parameters passed in through - c(`mage` default pass in `config/`) and filePath. - authURL: "" # Must start with https or http. + filePath: # File path is concatenated with the parameters passed in through - c(`mage` default pass in `config/`) and filePath. + authURL: # Must start with https or http. jpns: - appKey: '' - masterSecret: '' - pushURL: '' - pushIntent: '' + appKey: + masterSecret: + pushURL: + pushIntent: # iOS system push sound and badge count iosPush: - pushSound: "xxx" + pushSound: xxx badgeCount: true production: false - - - - - - +fullUserCache: true diff --git a/config/openim-rpc-auth.yml b/config/openim-rpc-auth.yml index 2d861cd5ab..496803e43b 100644 --- a/config/openim-rpc-auth.yml +++ b/config/openim-rpc-auth.yml @@ -1,18 +1,17 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. 
These must match the number of configured prometheus ports - ports: [ 10160 ] + ports: [ 10200 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20106 ] + ports: [ 12200 ] tokenPolicy: # Token validity period, in days expire: 90 - diff --git a/config/openim-rpc-conversation.yml b/config/openim-rpc-conversation.yml index a094bfac10..3581d7e19e 100644 --- a/config/openim-rpc-conversation.yml +++ b/config/openim-rpc-conversation.yml @@ -1,13 +1,13 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10180 ] + ports: [ 10220 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20105 ] + ports: [ 12220 ] diff --git a/config/openim-rpc-friend.yml b/config/openim-rpc-friend.yml index 7b829f971c..3022c09f32 100644 --- a/config/openim-rpc-friend.yml +++ b/config/openim-rpc-friend.yml @@ -1,13 +1,13 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10120 ] + ports: [ 10240 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20104 ] + ports: [ 12240 ] diff --git a/config/openim-rpc-group.yml b/config/openim-rpc-group.yml index 78b44030e0..9a634d12ff 100644 --- a/config/openim-rpc-group.yml +++ b/config/openim-rpc-group.yml @@ -1,13 +1,16 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. 
These must match the number of configured prometheus ports - ports: [ 10150 ] + ports: [ 10260 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20103 ] + ports: [ 12260 ] + + +enableHistoryForNewMembers: true diff --git a/config/openim-rpc-msg.yml b/config/openim-rpc-msg.yml index 17ce26e9b2..82d6e2f539 100644 --- a/config/openim-rpc-msg.yml +++ b/config/openim-rpc-msg.yml @@ -1,20 +1,17 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports - ports: [ 10130 ] + ports: [ 10280 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20102 ] + ports: [ 12280 ] # Does sending messages require friend verification friendVerify: false - - - diff --git a/config/openim-rpc-third.yml b/config/openim-rpc-third.yml index 6fb60f47f8..d8f2d427f2 100644 --- a/config/openim-rpc-third.yml +++ b/config/openim-rpc-third.yml @@ -1,40 +1,40 @@ rpc: # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP - registerIP: '' + registerIP: # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP listenIP: 0.0.0.0 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. 
These must match the number of configured prometheus ports - ports: [ 10190 ] + ports: [ 10300 ] prometheus: # Enable or disable Prometheus monitoring enable: true # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup - ports: [ 20101 ] + ports: [ 12300 ] object: # Use MinIO as object storage, or set to "cos", "oss", "kodo", "aws", while also configuring the corresponding settings - enable: "minio" + enable: minio cos: bucketURL: https://temp-1252357374.cos.ap-chengdu.myqcloud.com - secretID: '' - secretKey: '' - sessionToken: '' + secretID: + secretKey: + sessionToken: publicRead: false oss: - endpoint: "https://oss-cn-chengdu.aliyuncs.com" - bucket: "demo-9999999" - bucketURL: "https://demo-9999999.oss-cn-chengdu.aliyuncs.com" - accessKeyID: '' - accessKeySecret: '' - sessionToken: '' + endpoint: https://oss-cn-chengdu.aliyuncs.com + bucket: demo-9999999 + bucketURL: https://demo-9999999.oss-cn-chengdu.aliyuncs.com + accessKeyID: + accessKeySecret: + sessionToken: publicRead: false kodo: - endpoint: "http://s3.cn-south-1.qiniucs.com" - bucket: "kodo-bucket-test" - bucketURL: "http://kodo-bucket-test-oetobfb.qiniudns.com" - accessKeyID: '' - accessKeySecret: '' - sessionToken: '' - publicRead: false \ No newline at end of file + endpoint: http://s3.cn-south-1.qiniucs.com + bucket: kodo-bucket-test + bucketURL: http://kodo-bucket-test-oetobfb.qiniudns.com + accessKeyID: + accessKeySecret: + sessionToken: + publicRead: false diff --git a/config/openim-rpc-user.yml b/config/openim-rpc-user.yml index cbfb55b6c7..798105472c 100644 --- a/config/openim-rpc-user.yml +++ b/config/openim-rpc-user.yml @@ -1,17 +1,13 @@ rpc: # API or other RPCs can access this RPC through this IP; if left blank, the internal network IP is obtained by default - registerIP: '' + registerIP: # Listening IP; 0.0.0.0 means both internal and external IPs are listened to, if blank, the internal network IP is automatically obtained by default listenIP: 0.0.0.0 # Listening ports; if multiple are configured, multiple instances will be launched, and must be consistent with the number of prometheus.ports - ports: [ 10110 ] + ports: [ 10320 ] prometheus: # Whether to enable prometheus enable: true # Prometheus listening ports, must be consistent with the number of rpc.ports - ports: [ 20100 ] - - - - + ports: [ 12320 ] diff --git a/config/prometheus.yml b/config/prometheus.yml index 5db41679f4..ab427ee828 100644 --- a/config/prometheus.yml +++ b/config/prometheus.yml @@ -8,76 +8,79 @@ global: alerting: alertmanagers: - static_configs: - - targets: ['internal_ip:19093'] + - targets: [internal_ip:19093] -# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +# Load rules once and periodically evaluate them according to the global evaluation_interval. rule_files: - - "instance-down-rules.yml" -# - "first_rules.yml" -# - "second_rules.yml" + - instance-down-rules.yml +# - first_rules.yml +# - second_rules.yml # A scrape configuration containing exactly one endpoint to scrape: # Here it's Prometheus itself. scrape_configs: - # The job name is added as a label "job='job_name'"" to any timeseries scraped from this config. + # The job name is added as a label "job=job_name" to any timeseries scraped from this config. 
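+  # Each scrape target port below must match the prometheus.ports value configured for the corresponding service; +  # for example, openimserver-openim-api scrapes internal_ip:12002 because config/openim-api.yml sets prometheus.ports: [ 12002 ]. +  # A hypothetical extra service exposing metrics on port 12345 would follow the same pattern: +  #   - job_name: my-extra-service +  #     static_configs: +  #       - targets: [ internal_ip:12345 ] +  #         labels: +  #           namespace: default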
# Monitored information captured by prometheus # prometheus fetches application services - - job_name: 'node_exporter' + - job_name: node_exporter static_configs: - - targets: [ 'internal_ip:20114' ] - - job_name: 'openimserver-openim-api' + - targets: [ internal_ip:20500 ] + - job_name: openimserver-openim-api static_configs: - - targets: [ 'internal_ip:20113' ] + - targets: [ internal_ip:12002 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-msggateway' + namespace: default + - job_name: openimserver-openim-msggateway static_configs: - - targets: [ 'internal_ip:20112' ] + - targets: [ internal_ip:12140 ] +# - targets: [ internal_ip:12140, internal_ip:12141, internal_ip:12142, internal_ip:12143, internal_ip:12144, internal_ip:12145, internal_ip:12146, internal_ip:12147, internal_ip:12148, internal_ip:12149, internal_ip:12150, internal_ip:12151, internal_ip:12152, internal_ip:12153, internal_ip:12154, internal_ip:12155 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-msgtransfer' + namespace: default + - job_name: openimserver-openim-msgtransfer static_configs: - - targets: [ 'internal_ip:20111', 'internal_ip:20110', 'internal_ip:20109', 'internal_ip:20108' ] + - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027 ] +# - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027, internal_ip:12028, internal_ip:12029, internal_ip:12030, internal_ip:12031, internal_ip:12032, internal_ip:12033, internal_ip:12034, internal_ip:12035 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-push' + namespace: default + - job_name: openimserver-openim-push static_configs: - - targets: [ 'internal_ip:20107' ] + - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177 ] +# - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177, internal_ip:12178, internal_ip:12179, internal_ip:12180, internal_ip:12182, internal_ip:12183, internal_ip:12184, internal_ip:12185, internal_ip:12186 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-auth' + namespace: default + - job_name: openimserver-openim-rpc-auth static_configs: - - targets: [ 'internal_ip:20106' ] + - targets: [ internal_ip:12200 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-conversation' + namespace: default + - job_name: openimserver-openim-rpc-conversation static_configs: - - targets: [ 'internal_ip:20105' ] + - targets: [ internal_ip:12220 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-friend' + namespace: default + - job_name: openimserver-openim-rpc-friend static_configs: - - targets: [ 'internal_ip:20104' ] + - targets: [ internal_ip:12240 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-group' + namespace: default + - job_name: openimserver-openim-rpc-group static_configs: - - targets: [ 'internal_ip:20103' ] + - targets: [ internal_ip:12260 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-msg' + namespace: default + - job_name: openimserver-openim-rpc-msg static_configs: - - targets: [ 'internal_ip:20102' ] + - targets: [ internal_ip:12280 ] labels: - 
namespace: 'default' - - job_name: 'openimserver-openim-rpc-third' + namespace: default + - job_name: openimserver-openim-rpc-third static_configs: - - targets: [ 'internal_ip:20101' ] + - targets: [ internal_ip:12300 ] labels: - namespace: 'default' - - job_name: 'openimserver-openim-rpc-user' + namespace: default + - job_name: openimserver-openim-rpc-user static_configs: - - targets: [ 'internal_ip:20100' ] + - targets: [ internal_ip:12320 ] labels: - namespace: 'default' \ No newline at end of file + namespace: default \ No newline at end of file diff --git a/config/redis.yml b/config/redis.yml index 87abed0e1c..2448bcb5c6 100644 --- a/config/redis.yml +++ b/config/redis.yml @@ -1,6 +1,7 @@ address: [ localhost:16379 ] -username: '' +username: password: openIM123 clusterMode: false db: 0 maxRetry: 10 +poolSize: 100 diff --git a/config/share.yml b/config/share.yml index fc97b6a1ff..5f8521eaa9 100644 --- a/config/share.yml +++ b/config/share.yml @@ -10,5 +10,7 @@ rpcRegisterName: conversation: conversation third: third -imAdminUserID: [ "imAdmin" ] +imAdminUserID: [ imAdmin ] +# 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time +multiLoginPolicy: 1 diff --git a/config/webhooks.yml b/config/webhooks.yml index 11a85ba0c4..854d2dc2cc 100644 --- a/config/webhooks.yml +++ b/config/webhooks.yml @@ -1,8 +1,18 @@ -url: "webhook://127.0.0.1:10008/callbackExample" +url: http://127.0.0.1:10006/callbackExample beforeSendSingleMsg: enable: false timeout: 5 failedContinue: true + # Only the contentType in allowedTypes will send the callback. + # Supports two formats: a single type or a range. The range is defined by the lower and upper bounds connected with a hyphen ("-"). + # e.g. allowedTypes: [1, 100, 200-500, 600-700] means that only contentType within the range + # {1, 100} βˆͺ [200, 500] βˆͺ [600, 700] will be allowed through the filter. + # If not set, all contentType messages will through this filter. + allowedTypes: [] + # Only the contentType not in deniedTypes will send the callback. + # Supports two formats, same as allowedTypes. + # If not set, all contentType messages will through this filter. + deniedTypes: [] beforeUpdateUserInfoEx: enable: false timeout: 5 @@ -16,17 +26,29 @@ afterSendSingleMsg: # Only the senID/recvID specified in attentionIds will send the callback # if not set, all user messages will be callback attentionIds: [] + # See beforeSendSingleMsg comment. + allowedTypes: [] + deniedTypes: [] beforeSendGroupMsg: enable: false timeout: 5 failedContinue: true + # See beforeSendSingleMsg comment. + allowedTypes: [] + deniedTypes: [] beforeMsgModify: enable: false timeout: 5 failedContinue: true + # See beforeSendSingleMsg comment. + allowedTypes: [] + deniedTypes: [] afterSendGroupMsg: enable: false timeout: 5 + # See beforeSendSingleMsg comment. 
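+  # Illustrative example (placeholder values, not defaults): allowedTypes: [101, 200-203] would limit this callback to contentTypes {101} βˆͺ [200, 203]. +  # If deniedTypes is set as well, a contentType presumably has to pass both filters (be allowed and not denied) for the callback to be sent.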
+ allowedTypes: [] + deniedTypes: [] afterUserOnline: enable: false timeout: 5 @@ -130,6 +152,13 @@ beforeSetGroupInfo: enable: false timeout: 5 failedContinue: true +afterSetGroupInfoEx: + enable: false + timeout: 5 +beforeSetGroupInfoEx: + enable: false + timeout: 5 + failedContinue: true afterRevokeMsg: enable: false timeout: 5 @@ -144,6 +173,9 @@ beforeAddFriendAgree: enable: false timeout: 5 failedContinue: true +afterAddFriendAgree: + enable: false + timeout: 5 afterDeleteFriend: enable: false timeout: 5 diff --git a/docker-compose.yml b/docker-compose.yml index 8cc1f24b2f..edac65b13e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,3 @@ -version: '3' - networks: openim: driver: bridge @@ -140,50 +138,49 @@ services: networks: - openim - prometheus: - image: ${PROMETHEUS_IMAGE} - container_name: prometheus - restart: always - volumes: - - ./config/prometheus.yml:/etc/prometheus/prometheus.yml - - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml - - ${DATA_DIR}/components/prometheus/data:/prometheus - command: - - '--config.file=/etc/prometheus/prometheus.yml' - - '--storage.tsdb.path=/prometheus' - ports: - - "19091:9090" - networks: - - openim - - alertmanager: - image: ${ALERTMANAGER_IMAGE} - container_name: alertmanager - restart: always - volumes: - - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml - - ./config/email.tmpl:/etc/alertmanager/email.tmpl - ports: - - "19093:9093" - networks: - - openim - - grafana: - image: ${GRAFANA_IMAGE} - container_name: grafana - user: root - restart: always - environment: - - GF_SECURITY_ALLOW_EMBEDDING=true - - GF_SESSION_COOKIE_SAMESITE=none - - GF_SESSION_COOKIE_SECURE=true - - GF_AUTH_ANONYMOUS_ENABLED=true - - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - ports: - - "13000:3000" - volumes: - - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana - networks: - - openim - +# prometheus: +# image: ${PROMETHEUS_IMAGE} +# container_name: prometheus +# restart: always +# volumes: +# - ./config/prometheus.yml:/etc/prometheus/prometheus.yml +# - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml +# - ${DATA_DIR}/components/prometheus/data:/prometheus +# command: +# - '--config.file=/etc/prometheus/prometheus.yml' +# - '--storage.tsdb.path=/prometheus' +# ports: +# - "19091:9090" +# networks: +# - openim +# +# alertmanager: +# image: ${ALERTMANAGER_IMAGE} +# container_name: alertmanager +# restart: always +# volumes: +# - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml +# - ./config/email.tmpl:/etc/alertmanager/email.tmpl +# ports: +# - "19093:9093" +# networks: +# - openim +# +# grafana: +# image: ${GRAFANA_IMAGE} +# container_name: grafana +# user: root +# restart: always +# environment: +# - GF_SECURITY_ALLOW_EMBEDDING=true +# - GF_SESSION_COOKIE_SAMESITE=none +# - GF_SESSION_COOKIE_SECURE=true +# - GF_AUTH_ANONYMOUS_ENABLED=true +# - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin +# ports: +# - "13000:3000" +# volumes: +# - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana +# networks: +# - openim diff --git a/go.mod b/go.mod index fba1499fe1..09c626bc7d 100644 --- a/go.mod +++ b/go.mod @@ -3,24 +3,24 @@ module github.com/openimsdk/open-im-server/v3 go 1.21.2 require ( - firebase.google.com/go v3.13.0+incompatible + firebase.google.com/go/v4 v4.14.1 github.com/dtm-labs/rockscache v0.1.1 github.com/gin-gonic/gin v1.9.1 - github.com/go-playground/validator/v10 v10.18.0 + github.com/go-playground/validator/v10 v10.20.0 github.com/gogo/protobuf v1.3.2 // indirect 
github.com/golang-jwt/jwt/v4 v4.5.0 github.com/gorilla/websocket v1.5.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/openimsdk/protocol v0.0.69 - github.com/openimsdk/tools v0.0.49-alpha.55 + github.com/openimsdk/protocol v0.0.72 + github.com/openimsdk/tools v0.0.50-alpha.16 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.18.0 github.com/stretchr/testify v1.9.0 go.mongodb.org/mongo-driver v1.14.0 - google.golang.org/api v0.165.0 - google.golang.org/grpc v1.62.1 - google.golang.org/protobuf v1.33.0 + google.golang.org/api v0.170.0 + google.golang.org/grpc v1.66.2 + google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v3 v3.0.1 ) @@ -29,6 +29,7 @@ require github.com/google/uuid v1.6.0 require ( github.com/IBM/sarama v1.43.0 github.com/fatih/color v1.14.1 + github.com/gin-contrib/gzip v1.0.1 github.com/go-redis/redis v6.15.9+incompatible github.com/go-redis/redismock/v9 v9.2.0 github.com/hashicorp/golang-lru/v2 v2.0.7 @@ -41,17 +42,18 @@ require ( github.com/spf13/viper v1.18.2 github.com/stathat/consistent v1.0.0 go.uber.org/automaxprocs v1.5.3 - golang.org/x/sync v0.6.0 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 + golang.org/x/sync v0.8.0 ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/firestore v1.14.0 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - cloud.google.com/go/longrunning v0.5.4 // indirect - cloud.google.com/go/storage v1.36.0 // indirect + cloud.google.com/go v0.112.1 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/firestore v1.15.0 // indirect + cloud.google.com/go/iam v1.1.7 // indirect + cloud.google.com/go/longrunning v0.5.5 // indirect + cloud.google.com/go/storage v1.40.0 // indirect + github.com/MicahParks/keyfunc v1.9.0 // indirect github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible // indirect github.com/aws/aws-sdk-go-v2 v1.23.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 // indirect @@ -72,11 +74,12 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 // indirect github.com/aws/smithy-go v1.17.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bytedance/sonic v1.9.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chai2010/webp v1.1.1 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/bytedance/sonic v1.11.6 // indirect + github.com/bytedance/sonic/loader v0.1.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/clbanning/mxj v1.8.4 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -100,7 +103,7 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -117,7 +120,7 @@ require ( github.com/json-iterator/go v1.1.12 
// indirect github.com/kelindar/simd v1.1.2 // indirect github.com/klauspost/compress v1.17.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lestrrat-go/strftime v1.0.6 // indirect github.com/lithammer/shortuuid v3.0.0+incompatible // indirect @@ -132,7 +135,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect github.com/mozillazg/go-httpheader v0.4.0 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.5.0 // indirect @@ -162,25 +165,24 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect go.etcd.io/etcd/client/v3 v3.5.13 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.23.0 // indirect - go.opentelemetry.io/otel/metric v1.23.0 // indirect - go.opentelemetry.io/otel/trace v1.23.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.3.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/arch v0.7.0 // indirect golang.org/x/image v0.15.0 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/appengine/v2 v2.0.2 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gorm.io/gorm v1.25.8 // indirect stathat.com/c/consistent v1.0.0 // indirect ) @@ -188,10 +190,10 @@ require ( require ( github.com/go-playground/locales v0.14.1 // indirect github.com/goccy/go-json v0.10.2 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/spf13/cobra v1.8.0 - github.com/ugorji/go/codec v1.2.11 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.27.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect ) diff --git 
a/go.sum b/go.sum index 1a8e1d76d8..00ecc7ed73 100644 --- a/go.sum +++ b/go.sum @@ -1,23 +1,23 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw= -cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= -cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= -firebase.google.com/go v3.13.0+incompatible h1:3TdYC3DDi6aHn20qoRkxwGqNgdjtblwVAyRLQwGn/+4= -firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/firestore v1.15.0 h1:/k8ppuWOtNuDHt2tsRV42yI21uaGnKDEQnRFeBpbFF8= +cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= +cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +firebase.google.com/go/v4 v4.14.1 h1:4qiUETaFRWoFGE1XP5VbcEdtPX93Qs+8B/7KvP2825g= +firebase.google.com/go/v4 v4.14.1/go.mod h1:fgk2XshgNDEKaioKco+AouiegSI9oTWVqRaBdTTGBoM= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/IBM/sarama v1.43.0 h1:YFFDn8mMI2QL0wOrG0J2sFoVIAFl7hS9JQi2YZsXtJc= github.com/IBM/sarama v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP9BM= +github.com/MicahParks/keyfunc v1.9.0 h1:lhKd5xrFHLNOWrDc4Tyb/Q1AJ4LCzQ48GVJyVIID3+o= +github.com/MicahParks/keyfunc v1.9.0/go.mod h1:IdnCilugA0O/99dW+/MkvlyrsX8+L8+x95xuVNtM5jw= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g= github.com/aliyun/aliyun-oss-go-sdk 
v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= @@ -65,23 +65,21 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= -github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= +github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= +github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/webp v1.1.1 h1:jTRmEccAJ4MGrhFOrPMpNGIJ/eybIgwKpcACsrTEapk= -github.com/chai2010/webp v1.1.1/go.mod h1:0XVwvZWdjjdxpUEIf7b9g9VkHFnInUSYujwqTLEuldU= -github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= @@ -109,8 +107,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -123,6 +119,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/gin-contrib/gzip v1.0.1 h1:HQ8ENHODeLY7a4g1Au/46Z92bdGFl74OhxcZble9WJE= +github.com/gin-contrib/gzip v1.0.1/go.mod h1:njt428fdUNRvjuJf16tZMYZ2Yl+WQB53X5wmhDwXvC4= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= @@ -146,8 +144,8 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk= -github.com/go-playground/validator/v10 v10.18.0 h1:BvolUXjp4zuvkZ5YN5t7ebzbhlUtPsPm2S9NAZ5nl9U= -github.com/go-playground/validator/v10 v10.18.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= +github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redismock/v9 v9.2.0 h1:ZrMYQeKPECZPjOj5u9eyOjg8Nnb0BS9lkVIZ6IpsKLw= @@ -159,6 +157,7 @@ github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MG github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -167,6 +166,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -175,8 +175,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= @@ -188,7 +186,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -205,8 +202,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= @@ -259,8 +256,9 @@ github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLA github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod 
h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -290,8 +288,8 @@ github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3v github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= @@ -321,12 +319,12 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y= github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= -github.com/openimsdk/protocol v0.0.69 h1:dVi8meSg8kmUzSH1XQab4MjihqKkkcCAmt1BYXPJuXo= -github.com/openimsdk/protocol v0.0.69/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8= -github.com/openimsdk/tools v0.0.49-alpha.55 h1:KPgC53oqiwZYssLKljhtXbWXifMlTj2SSQEusj4Uf4k= -github.com/openimsdk/tools v0.0.49-alpha.55/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/openimsdk/protocol v0.0.72 h1:K+vslwaR7lDXyBzb07UuEQITaqsgighz7NyXVIWsu6A= +github.com/openimsdk/protocol v0.0.72/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8= +github.com/openimsdk/tools v0.0.50-alpha.16 h1:bC1AQvJMuOHtZm8LZRvN8L5mH1Ws2VYdL+TLTs1iGSc= +github.com/openimsdk/tools v0.0.50-alpha.16/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -410,8 +408,8 @@ github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= 
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= @@ -435,18 +433,18 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= -go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= -go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= -go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= -go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace 
v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= @@ -458,8 +456,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -467,8 +465,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= @@ -491,23 +489,24 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -522,8 +521,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -536,8 +535,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ 
-553,30 +552,30 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.165.0 h1:zd5d4JIIIaYYsfVy1HzoXYZ9rWCSBxxAglbczzo7Bgc= -google.golang.org/api v0.165.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= +google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/appengine/v2 v2.0.2 h1:MSqyWy2shDLwG7chbwBJ5uMyw6SNqJzhJHNDwYB0Akk= +google.golang.org/appengine/v2 v2.0.2/go.mod h1:PkgRUWz4o1XOvbqtWTkBtCitEJ5Tp4HoVEdMMYQR/8E= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= +google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod 
h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -586,10 +585,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -610,6 +607,7 @@ gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo= gorm.io/gorm v1.25.8/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= stathat.com/c/consistent v1.0.0 h1:ezyc51EGcRPJUxfHGSgJjWzJdj3NiMU9pNfLNGiXV0c= stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0= diff --git a/internal/api/auth.go b/internal/api/auth.go index f0790ce984..f41b530bf1 100644 --- a/internal/api/auth.go +++ b/internal/api/auth.go @@ -27,8 +27,8 @@ func NewAuthApi(client rpcclient.Auth) AuthApi { return AuthApi(client) } -func (o *AuthApi) UserToken(c *gin.Context) { - a2r.Call(auth.AuthClient.UserToken, o.Client, c) +func (o *AuthApi) GetAdminToken(c *gin.Context) { + 
a2r.Call(auth.AuthClient.GetAdminToken, o.Client, c) } func (o *AuthApi) GetUserToken(c *gin.Context) { diff --git a/internal/api/conversation.go b/internal/api/conversation.go index 360313ea87..8e3a3ca82d 100644 --- a/internal/api/conversation.go +++ b/internal/api/conversation.go @@ -62,3 +62,11 @@ func (o *ConversationApi) GetIncrementalConversation(c *gin.Context) { func (o *ConversationApi) GetOwnerConversation(c *gin.Context) { a2r.Call(conversation.ConversationClient.GetOwnerConversation, o.Client, c) } + +func (o *ConversationApi) GetNotNotifyConversationIDs(c *gin.Context) { + a2r.Call(conversation.ConversationClient.GetNotNotifyConversationIDs, o.Client, c) +} + +func (o *ConversationApi) GetPinnedConversationIDs(c *gin.Context) { + a2r.Call(conversation.ConversationClient.GetPinnedConversationIDs, o.Client, c) +} diff --git a/internal/api/friend.go b/internal/api/friend.go index f9f15fb246..d000cccddb 100644 --- a/internal/api/friend.go +++ b/internal/api/friend.go @@ -72,6 +72,10 @@ func (o *FriendApi) GetPaginationBlacks(c *gin.Context) { a2r.Call(relation.FriendClient.GetPaginationBlacks, o.Client, c) } +func (o *FriendApi) GetSpecifiedBlacks(c *gin.Context) { + a2r.Call(relation.FriendClient.GetSpecifiedBlacks, o.Client, c) +} + func (o *FriendApi) RemoveBlack(c *gin.Context) { a2r.Call(relation.FriendClient.RemoveBlack, o.Client, c) } diff --git a/internal/api/group.go b/internal/api/group.go index bff0089748..9c35da7081 100644 --- a/internal/api/group.go +++ b/internal/api/group.go @@ -35,6 +35,10 @@ func (o *GroupApi) SetGroupInfo(c *gin.Context) { a2r.Call(group.GroupClient.SetGroupInfo, o.Client, c) } +func (o *GroupApi) SetGroupInfoEx(c *gin.Context) { + a2r.Call(group.GroupClient.SetGroupInfoEx, o.Client, c) +} + func (o *GroupApi) JoinGroup(c *gin.Context) { a2r.Call(group.GroupClient.JoinGroup, o.Client, c) } @@ -63,6 +67,10 @@ func (o *GroupApi) GetGroupUsersReqApplicationList(c *gin.Context) { a2r.Call(group.GroupClient.GetGroupUsersReqApplicationList, o.Client, c) } +func (o *GroupApi) GetSpecifiedUserGroupRequestInfo(c *gin.Context) { + a2r.Call(group.GroupClient.GetSpecifiedUserGroupRequestInfo, o.Client, c) +} + func (o *GroupApi) GetGroupsInfo(c *gin.Context) { a2r.Call(group.GroupClient.GetGroupsInfo, o.Client, c) //a2r.Call(group.GroupClient.GetGroupsInfo, o.Client, c, a2r.NewNilReplaceOption(group.GroupClient.GetGroupsInfo)) diff --git a/internal/api/jssdk/jssdk.go b/internal/api/jssdk/jssdk.go new file mode 100644 index 0000000000..7f136c74ce --- /dev/null +++ b/internal/api/jssdk/jssdk.go @@ -0,0 +1,204 @@ +package jssdk + +import ( + "github.com/gin-gonic/gin" + "github.com/openimsdk/protocol/conversation" + "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/a2r" + "github.com/openimsdk/tools/mcontext" + "github.com/openimsdk/tools/utils/datautil" + "sort" +) + +const ( + maxGetActiveConversation = 500 + defaultGetActiveConversation = 100 +) + +func NewJSSdkApi(msg msg.MsgClient, conv conversation.ConversationClient) *JSSdk { + return &JSSdk{ + msg: msg, + conv: conv, + } +} + +type JSSdk struct { + msg msg.MsgClient + conv conversation.ConversationClient +} + +func (x *JSSdk) GetActiveConversations(c *gin.Context) { + call(c, x.getActiveConversations) +} + +func (x *JSSdk) GetConversations(c *gin.Context) { + call(c, x.getConversations) +} + +func (x *JSSdk) getActiveConversations(ctx *gin.Context) (*ConversationsResp, error) { + req, err := a2r.ParseRequest[ActiveConversationsReq](ctx) + if err != 
nil { + return nil, err + } + if req.Count <= 0 || req.Count > maxGetActiveConversation { + req.Count = defaultGetActiveConversation + } + opUserID := mcontext.GetOpUserID(ctx) + conversationIDs, err := field(ctx, x.conv.GetConversationIDs, + &conversation.GetConversationIDsReq{UserID: opUserID}, (*conversation.GetConversationIDsResp).GetConversationIDs) + if err != nil { + return nil, err + } + if len(conversationIDs) == 0 { + return &ConversationsResp{}, nil + } + readSeq, err := field(ctx, x.msg.GetHasReadSeqs, + &msg.GetHasReadSeqsReq{UserID: opUserID, ConversationIDs: conversationIDs}, (*msg.SeqsInfoResp).GetMaxSeqs) + if err != nil { + return nil, err + } + activeConversation, err := field(ctx, x.msg.GetActiveConversation, + &msg.GetActiveConversationReq{ConversationIDs: conversationIDs}, (*msg.GetActiveConversationResp).GetConversations) + if err != nil { + return nil, err + } + if len(activeConversation) == 0 { + return &ConversationsResp{}, nil + } + sortConversations := sortActiveConversations{ + Conversation: activeConversation, + } + if len(activeConversation) > 1 { + pinnedConversationIDs, err := field(ctx, x.conv.GetPinnedConversationIDs, + &conversation.GetPinnedConversationIDsReq{UserID: opUserID}, (*conversation.GetPinnedConversationIDsResp).GetConversationIDs) + if err != nil { + return nil, err + } + sortConversations.PinnedConversationIDs = datautil.SliceSet(pinnedConversationIDs) + } + sort.Sort(&sortConversations) + sortList := sortConversations.Top(req.Count) + conversations, err := field(ctx, x.conv.GetConversations, + &conversation.GetConversationsReq{ + OwnerUserID: opUserID, + ConversationIDs: datautil.Slice(sortList, func(c *msg.ActiveConversation) string { + return c.ConversationID + })}, (*conversation.GetConversationsResp).GetConversations) + if err != nil { + return nil, err + } + msgs, err := field(ctx, x.msg.GetSeqMessage, + &msg.GetSeqMessageReq{ + UserID: opUserID, + Conversations: datautil.Slice(sortList, func(c *msg.ActiveConversation) *msg.ConversationSeqs { + return &msg.ConversationSeqs{ + ConversationID: c.ConversationID, + Seqs: []int64{c.MaxSeq}, + } + }), + }, (*msg.GetSeqMessageResp).GetMsgs) + if err != nil { + return nil, err + } + conversationMap := datautil.SliceToMap(conversations, func(c *conversation.Conversation) string { + return c.ConversationID + }) + resp := make([]ConversationMsg, 0, len(sortList)) + for _, c := range sortList { + conv, ok := conversationMap[c.ConversationID] + if !ok { + continue + } + var lastMsg *sdkws.MsgData + if msgList, ok := msgs[c.ConversationID]; ok && len(msgList.Msgs) > 0 { + lastMsg = msgList.Msgs[0] + } + resp = append(resp, ConversationMsg{ + Conversation: conv, + LastMsg: lastMsg, + MaxSeq: c.MaxSeq, + ReadSeq: readSeq[c.ConversationID], + }) + } + var unreadCount int64 + for _, c := range activeConversation { + count := c.MaxSeq - readSeq[c.ConversationID] + if count > 0 { + unreadCount += count + } + } + return &ConversationsResp{ + Conversations: resp, + UnreadCount: unreadCount, + }, nil +} + +func (x *JSSdk) getConversations(ctx *gin.Context) (*ConversationsResp, error) { + req, err := a2r.ParseRequest[conversation.GetConversationsReq](ctx) + if err != nil { + return nil, err + } + req.OwnerUserID = mcontext.GetOpUserID(ctx) + conversations, err := field(ctx, x.conv.GetConversations, req, (*conversation.GetConversationsResp).GetConversations) + if err != nil { + return nil, err + } + if len(conversations) == 0 { + return &ConversationsResp{}, nil + } + req.ConversationIDs = 
datautil.Slice(conversations, func(c *conversation.Conversation) string { + return c.ConversationID + }) + maxSeqs, err := field(ctx, x.msg.GetMaxSeqs, + &msg.GetMaxSeqsReq{ConversationIDs: req.ConversationIDs}, (*msg.SeqsInfoResp).GetMaxSeqs) + if err != nil { + return nil, err + } + readSeqs, err := field(ctx, x.msg.GetHasReadSeqs, + &msg.GetHasReadSeqsReq{UserID: req.OwnerUserID, ConversationIDs: req.ConversationIDs}, (*msg.SeqsInfoResp).GetMaxSeqs) + if err != nil { + return nil, err + } + conversationSeqs := make([]*msg.ConversationSeqs, 0, len(conversations)) + for _, c := range conversations { + if seq := maxSeqs[c.ConversationID]; seq > 0 { + conversationSeqs = append(conversationSeqs, &msg.ConversationSeqs{ + ConversationID: c.ConversationID, + Seqs: []int64{seq}, + }) + } + } + var msgs map[string]*sdkws.PullMsgs + if len(conversationSeqs) > 0 { + msgs, err = field(ctx, x.msg.GetSeqMessage, + &msg.GetSeqMessageReq{UserID: req.OwnerUserID, Conversations: conversationSeqs}, (*msg.GetSeqMessageResp).GetMsgs) + if err != nil { + return nil, err + } + } + resp := make([]ConversationMsg, 0, len(conversations)) + for _, c := range conversations { + var lastMsg *sdkws.MsgData + if msgList, ok := msgs[c.ConversationID]; ok && len(msgList.Msgs) > 0 { + lastMsg = msgList.Msgs[0] + } + resp = append(resp, ConversationMsg{ + Conversation: c, + LastMsg: lastMsg, + MaxSeq: maxSeqs[c.ConversationID], + ReadSeq: readSeqs[c.ConversationID], + }) + } + var unreadCount int64 + for conversationID, maxSeq := range maxSeqs { + count := maxSeq - readSeqs[conversationID] + if count > 0 { + unreadCount += count + } + } + return &ConversationsResp{ + Conversations: resp, + UnreadCount: unreadCount, + }, nil +} diff --git a/internal/api/jssdk/sort.go b/internal/api/jssdk/sort.go new file mode 100644 index 0000000000..f5fd041480 --- /dev/null +++ b/internal/api/jssdk/sort.go @@ -0,0 +1,33 @@ +package jssdk + +import "github.com/openimsdk/protocol/msg" + +type sortActiveConversations struct { + Conversation []*msg.ActiveConversation + PinnedConversationIDs map[string]struct{} +} + +func (s sortActiveConversations) Top(limit int) []*msg.ActiveConversation { + if limit > 0 && len(s.Conversation) > limit { + return s.Conversation[:limit] + } + return s.Conversation +} + +func (s sortActiveConversations) Len() int { + return len(s.Conversation) +} + +func (s sortActiveConversations) Less(i, j int) bool { + iv, jv := s.Conversation[i], s.Conversation[j] + _, ip := s.PinnedConversationIDs[iv.ConversationID] + _, jp := s.PinnedConversationIDs[jv.ConversationID] + if ip != jp { + return ip + } + return iv.LastTime > jv.LastTime +} + +func (s sortActiveConversations) Swap(i, j int) { + s.Conversation[i], s.Conversation[j] = s.Conversation[j], s.Conversation[i] +} diff --git a/internal/api/jssdk/stu.go b/internal/api/jssdk/stu.go new file mode 100644 index 0000000000..2f63975b3b --- /dev/null +++ b/internal/api/jssdk/stu.go @@ -0,0 +1,22 @@ +package jssdk + +import ( + "github.com/openimsdk/protocol/conversation" + "github.com/openimsdk/protocol/sdkws" +) + +type ActiveConversationsReq struct { + Count int `json:"count"` +} + +type ConversationMsg struct { + Conversation *conversation.Conversation `json:"conversation"` + LastMsg *sdkws.MsgData `json:"lastMsg"` + MaxSeq int64 `json:"maxSeq"` + ReadSeq int64 `json:"readSeq"` +} + +type ConversationsResp struct { + UnreadCount int64 `json:"unreadCount"` + Conversations []ConversationMsg `json:"conversations"` +} diff --git a/internal/api/jssdk/tools.go 
b/internal/api/jssdk/tools.go new file mode 100644 index 0000000000..c57457d9f4 --- /dev/null +++ b/internal/api/jssdk/tools.go @@ -0,0 +1,26 @@ +package jssdk + +import ( + "context" + "github.com/gin-gonic/gin" + "github.com/openimsdk/tools/apiresp" + "google.golang.org/grpc" +) + +func field[A, B, C any](ctx context.Context, fn func(ctx context.Context, req *A, opts ...grpc.CallOption) (*B, error), req *A, get func(*B) C) (C, error) { + resp, err := fn(ctx, req) + if err != nil { + var c C + return c, err + } + return get(resp), nil +} + +func call[R any](c *gin.Context, fn func(ctx *gin.Context) (R, error)) { + resp, err := fn(c) + if err != nil { + apiresp.GinError(c, err) + return + } + apiresp.GinSuccess(c, resp) +} diff --git a/internal/api/jssdk_test.go b/internal/api/jssdk_test.go new file mode 100644 index 0000000000..472ca56b57 --- /dev/null +++ b/internal/api/jssdk_test.go @@ -0,0 +1,37 @@ +package api + +import ( + "github.com/openimsdk/protocol/msg" + "sort" + "testing" +) + +func TestName(t *testing.T) { + val := sortActiveConversations{ + Conversation: []*msg.ActiveConversation{ + { + ConversationID: "100", + LastTime: 100, + }, + { + ConversationID: "200", + LastTime: 200, + }, + { + ConversationID: "300", + LastTime: 300, + }, + { + ConversationID: "400", + LastTime: 400, + }, + }, + //PinnedConversationIDs: map[string]struct{}{ + // "100": {}, + // "300": {}, + //}, + } + sort.Sort(&val) + t.Log(val) + +} diff --git a/internal/api/msg.go b/internal/api/msg.go index ba63fbb66f..bf7cb83a43 100644 --- a/internal/api/msg.go +++ b/internal/api/msg.go @@ -49,14 +49,14 @@ func NewMessageApi(msgRpcClient *rpcclient.Message, userRpcClient *rpcclient.Use userRpcClient: rpcclient.NewUserRpcClientByUser(userRpcClient), imAdminUserID: imAdminUserID} } -func (MessageApi) SetOptions(options map[string]bool, value bool) { +func (*MessageApi) SetOptions(options map[string]bool, value bool) { datautil.SetSwitchFromOptions(options, constant.IsHistory, value) datautil.SetSwitchFromOptions(options, constant.IsPersistent, value) datautil.SetSwitchFromOptions(options, constant.IsSenderSync, value) datautil.SetSwitchFromOptions(options, constant.IsConversationUpdate, value) } -func (m MessageApi) newUserSendMsgReq(_ *gin.Context, params *apistruct.SendMsg) *msg.SendMsgReq { +func (m *MessageApi) newUserSendMsgReq(_ *gin.Context, params *apistruct.SendMsg) *msg.SendMsgReq { var newContent string options := make(map[string]bool, 5) switch params.ContentType { @@ -231,7 +231,7 @@ func (m *MessageApi) SendMessage(c *gin.Context) { } // Set the status to successful if the message is sent. - var status int = constant.MsgSendSuccessed + var status = constant.MsgSendSuccessed // Attempt to update the message sending status in the system. 
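A note on the generic helpers above: field in internal/api/jssdk/tools.go wraps the recurring "call the RPC, propagate the error, keep one field of the response" step, which is what lets each stage of getActiveConversations stay a one-liner. A minimal, self-contained sketch of the same pattern, using a hypothetical ping RPC instead of the real conversation/msg clients:

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc"
    )

    // Same shape as the field helper added in internal/api/jssdk/tools.go:
    // run the call, bail out on error, otherwise return one projected field.
    func field[A, B, C any](ctx context.Context, fn func(ctx context.Context, req *A, opts ...grpc.CallOption) (*B, error), req *A, get func(*B) C) (C, error) {
        resp, err := fn(ctx, req)
        if err != nil {
            var c C // zero value of the projected type
            return c, err
        }
        return get(resp), nil
    }

    // pingReq/pingResp/ping are hypothetical stand-ins for a generated gRPC client method.
    type pingReq struct{ Name string }
    type pingResp struct{ Greeting string }

    func ping(_ context.Context, req *pingReq, _ ...grpc.CallOption) (*pingResp, error) {
        return &pingResp{Greeting: "hello " + req.Name}, nil
    }

    func main() {
        greeting, err := field(context.Background(), ping, &pingReq{Name: "openim"},
            func(r *pingResp) string { return r.Greeting })
        fmt.Println(greeting, err) // hello openim <nil>
    }

The companion call helper plays the same role at the HTTP layer, collapsing a (resp, err) pair into apiresp.GinError or apiresp.GinSuccess.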
_, err = m.Client.SetSendMsgStatus(c, &msg.SetSendMsgStatusReq{ diff --git a/internal/api/router.go b/internal/api/router.go index 0667c3e751..f87ec526c7 100644 --- a/internal/api/router.go +++ b/internal/api/router.go @@ -2,12 +2,20 @@ package api import ( "fmt" + + "github.com/openimsdk/open-im-server/v3/internal/api/jssdk" + + "github.com/gin-contrib/gzip" + "github.com/gin-gonic/gin" "github.com/gin-gonic/gin/binding" "github.com/go-playground/validator/v10" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "net/http" + "strings" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" @@ -16,8 +24,13 @@ import ( "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mw" - "net/http" - "strings" +) + +const ( + NoCompression = -1 + DefaultCompression = 0 + BestCompression = 1 + BestSpeed = 2 ) func prommetricsGin() gin.HandlerFunc { @@ -52,10 +65,19 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En conversationRpc := rpcclient.NewConversation(disCov, config.Share.RpcRegisterName.Conversation) authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth) thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL) - + switch config.API.Api.CompressionLevel { + case NoCompression: + case DefaultCompression: + r.Use(gzip.Gzip(gzip.DefaultCompression)) + case BestCompression: + r.Use(gzip.Gzip(gzip.BestCompression)) + case BestSpeed: + r.Use(gzip.Gzip(gzip.BestSpeed)) + } r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc)) u := NewUserApi(*userRpc) m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID) + j := jssdk.NewJSSdkApi(messageRpc.Client, conversationRpc.Client) userRouterGroup := r.Group("/user") { userRouterGroup.POST("/user_register", u.UserRegister) @@ -97,6 +119,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En friendRouterGroup.POST("/set_friend_remark", f.SetFriendRemark) friendRouterGroup.POST("/add_black", f.AddBlack) friendRouterGroup.POST("/get_black_list", f.GetPaginationBlacks) + friendRouterGroup.POST("/get_specified_blacks", f.GetSpecifiedBlacks) friendRouterGroup.POST("/remove_black", f.RemoveBlack) friendRouterGroup.POST("/get_incremental_blacks", f.GetIncrementalBlacks) friendRouterGroup.POST("/import_friend", f.ImportFriends) @@ -112,6 +135,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En { groupRouterGroup.POST("/create_group", g.CreateGroup) groupRouterGroup.POST("/set_group_info", g.SetGroupInfo) + groupRouterGroup.POST("/set_group_info_ex", g.SetGroupInfoEx) groupRouterGroup.POST("/join_group", g.JoinGroup) groupRouterGroup.POST("/quit_group", g.QuitGroup) groupRouterGroup.POST("/group_application_response", g.ApplicationGroupResponse) @@ -119,6 +143,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En groupRouterGroup.POST("/get_recv_group_applicationList", g.GetRecvGroupApplicationList) groupRouterGroup.POST("/get_user_req_group_applicationList", g.GetUserReqGroupApplicationList) groupRouterGroup.POST("/get_group_users_req_application_list", g.GetGroupUsersReqApplicationList) + groupRouterGroup.POST("/get_specified_user_group_request_info", g.GetSpecifiedUserGroupRequestInfo) 
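The CompressionLevel switch registered above maps the API config value onto the gin-contrib/gzip middleware. A standalone restatement of that mapping, with the enum values taken from the diff and the gzip.* constants coming from the gin-contrib/gzip package (the selectGzip helper name is illustrative, not part of the change):

    package main

    import (
        "github.com/gin-contrib/gzip"
        "github.com/gin-gonic/gin"
    )

    // Config enum added in internal/api/router.go (values as in the diff).
    const (
        NoCompression      = -1
        DefaultCompression = 0
        BestCompression    = 1
        BestSpeed          = 2
    )

    // selectGzip restates the switch in newGinRouter: -1 (or anything
    // unrecognised) registers no middleware, the other values pick a gzip level.
    func selectGzip(level int) gin.HandlerFunc {
        switch level {
        case DefaultCompression:
            return gzip.Gzip(gzip.DefaultCompression)
        case BestCompression:
            return gzip.Gzip(gzip.BestCompression)
        case BestSpeed:
            return gzip.Gzip(gzip.BestSpeed)
        default:
            return nil
        }
    }

    func main() {
        r := gin.New()
        if mw := selectGzip(BestSpeed); mw != nil {
            r.Use(mw)
        }
    }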
groupRouterGroup.POST("/get_groups_info", g.GetGroupsInfo) groupRouterGroup.POST("/kick_group", g.KickGroupMember) groupRouterGroup.POST("/get_group_members_info", g.GetGroupMembersInfo) @@ -144,7 +169,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En authRouterGroup := r.Group("/auth") { a := NewAuthApi(*authRpc) - authRouterGroup.POST("/user_token", a.UserToken) + authRouterGroup.POST("/get_admin_token", a.GetAdminToken) authRouterGroup.POST("/get_user_token", a.GetUserToken) authRouterGroup.POST("/parse_token", a.ParseToken) authRouterGroup.POST("/force_logout", a.ForceLogout) @@ -211,6 +236,8 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En conversationGroup.POST("/get_full_conversation_ids", c.GetFullOwnerConversationIDs) conversationGroup.POST("/get_incremental_conversations", c.GetIncrementalConversation) conversationGroup.POST("/get_owner_conversation", c.GetOwnerConversation) + conversationGroup.POST("/get_not_notify_conversation_ids", c.GetNotNotifyConversationIDs) + conversationGroup.POST("/get_pinned_conversation_ids", c.GetPinnedConversationIDs) } statisticsGroup := r.Group("/statistics") @@ -220,6 +247,11 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En statisticsGroup.POST("/group/create", g.GroupCreateCount) statisticsGroup.POST("/group/active", m.GetActiveGroup) } + + jssdk := r.Group("/jssdk") + jssdk.POST("/get_conversations", j.GetConversations) + jssdk.POST("/get_active_conversations", j.GetActiveConversations) + return r } @@ -256,7 +288,6 @@ func GinParseToken(authRPC *rpcclient.Auth) gin.HandlerFunc { // Whitelist api not parse token var Whitelist = []string{ - "/user/user_register", - "/auth/user_token", + "/auth/get_admin_token", "/auth/parse_token", } diff --git a/internal/api/user.go b/internal/api/user.go index d48111b9eb..b499f71dc2 100644 --- a/internal/api/user.go +++ b/internal/api/user.go @@ -36,9 +36,11 @@ func (u *UserApi) UserRegister(c *gin.Context) { a2r.Call(user.UserClient.UserRegister, u.Client, c) } +// UpdateUserInfo is deprecated. Use UpdateUserInfoEx func (u *UserApi) UpdateUserInfo(c *gin.Context) { a2r.Call(user.UserClient.UpdateUserInfo, u.Client, c) } + func (u *UserApi) UpdateUserInfoEx(c *gin.Context) { a2r.Call(user.UserClient.UpdateUserInfoEx, u.Client, c) } @@ -105,14 +107,14 @@ func (u *UserApi) GetUsersOnlineStatus(c *gin.Context) { if v2.UserID == v1 { flag = true res.UserID = v1 - res.Status = constant.OnlineStatus + res.Status = constant.Online res.DetailPlatformStatus = append(res.DetailPlatformStatus, v2.DetailPlatformStatus...) 
break } } if !flag { res.UserID = v1 - res.Status = constant.OfflineStatus + res.Status = constant.Offline } respResult = append(respResult, res) } @@ -151,26 +153,26 @@ func (u *UserApi) GetUsersOnlineTokenDetail(c *gin.Context) { } for _, v1 := range req.UserIDs { - m := make(map[string][]string, 10) + m := make(map[int32][]string, 10) flag = false temp := new(msggateway.SingleDetail) for _, v2 := range wsResult { if v2.UserID == v1 { flag = true temp.UserID = v1 - temp.Status = constant.OnlineStatus + temp.Status = constant.Online for _, status := range v2.DetailPlatformStatus { - if v, ok := m[status.Platform]; ok { - m[status.Platform] = append(v, status.Token) + if v, ok := m[status.PlatformID]; ok { + m[status.PlatformID] = append(v, status.Token) } else { - m[status.Platform] = []string{status.Token} + m[status.PlatformID] = []string{status.Token} } } } } for p, tokens := range m { t := new(msggateway.SinglePlatformToken) - t.Platform = p + t.PlatformID = p t.Token = tokens t.Total = int32(len(tokens)) temp.SinglePlatformToken = append(temp.SinglePlatformToken, t) diff --git a/internal/msggateway/client.go b/internal/msggateway/client.go index a4902570a6..bc06fa9507 100644 --- a/internal/msggateway/client.go +++ b/internal/msggateway/client.go @@ -22,6 +22,8 @@ import ( "sync/atomic" "time" + "google.golang.org/protobuf/proto" + "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/sdkws" @@ -30,7 +32,6 @@ import ( "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/utils/stringutil" - "google.golang.org/protobuf/proto" ) var ( @@ -220,6 +221,10 @@ func (c *Client) handleMessage(message []byte) error { resp, messageErr = c.longConnServer.SendSignalMessage(ctx, binaryReq) case WSPullMsgBySeqList: resp, messageErr = c.longConnServer.PullMessageBySeqList(ctx, binaryReq) + case WSPullMsg: + resp, messageErr = c.longConnServer.GetSeqMessage(ctx, binaryReq) + case WSGetConvMaxReadSeq: + resp, messageErr = c.longConnServer.GetConversationsHasReadAndMaxSeq(ctx, binaryReq) case WsLogoutMsg: resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq) case WsSetBackgroundStatus: @@ -271,11 +276,13 @@ func (c *Client) replyMessage(ctx context.Context, binaryReq *Req, err error, re ErrMsg: errResp.ErrMsg, Data: resp, } + t := time.Now() log.ZDebug(ctx, "gateway reply message", "resp", mReply.String()) err = c.writeBinaryMsg(mReply) if err != nil { log.ZWarn(ctx, "wireBinaryMsg replyMessage", err, "resp", mReply.String()) } + log.ZDebug(ctx, "wireBinaryMsg end", "time cost", time.Since(t)) if binaryReq.ReqIdentifier == WsLogoutMsg { return errs.New("user logout", "operationID", binaryReq.OperationID).Wrap() diff --git a/internal/msggateway/constant.go b/internal/msggateway/constant.go index dc5ad77861..584cebe1e1 100644 --- a/internal/msggateway/constant.go +++ b/internal/msggateway/constant.go @@ -39,6 +39,8 @@ const ( WSPullMsgBySeqList = 1002 WSSendMsg = 1003 WSSendSignalMsg = 1004 + WSPullMsg = 1005 + WSGetConvMaxReadSeq = 1006 WSPushMsg = 2001 WSKickOnlineMsg = 2002 WsLogoutMsg = 2003 diff --git a/internal/msggateway/context.go b/internal/msggateway/context.go index 6c80ece1ba..3909766b1b 100644 --- a/internal/msggateway/context.go +++ b/internal/msggateway/context.go @@ -66,12 +66,16 @@ func (c *UserConnContext) Value(key any) any { } func newContext(respWriter http.ResponseWriter, req *http.Request) *UserConnContext { + remoteAddr := req.RemoteAddr + if forwarded 
:= req.Header.Get("X-Forwarded-For"); forwarded != "" { + remoteAddr += "_" + forwarded + } return &UserConnContext{ RespWriter: respWriter, Req: req, Path: req.URL.Path, Method: req.Method, - RemoteAddr: req.RemoteAddr, + RemoteAddr: remoteAddr, ConnID: encrypt.Md5(req.RemoteAddr + "_" + strconv.Itoa(int(timeutil.GetCurrentTimestampByMill()))), } } diff --git a/internal/msggateway/hub_server.go b/internal/msggateway/hub_server.go index 28c227162a..e96ab4b0dc 100644 --- a/internal/msggateway/hub_server.go +++ b/internal/msggateway/hub_server.go @@ -16,6 +16,8 @@ package msggateway import ( "context" + "sync/atomic" + "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc" @@ -30,7 +32,6 @@ import ( "github.com/openimsdk/tools/mq/memamq" "github.com/openimsdk/tools/utils/datautil" "google.golang.org/grpc" - "sync/atomic" ) func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error { @@ -111,15 +112,14 @@ func (s *Server) GetUsersOnlineStatus( } ps := new(msggateway.GetUsersOnlineStatusResp_SuccessDetail) - ps.Platform = constant.PlatformIDToName(client.PlatformID) - ps.Status = constant.OnlineStatus + ps.PlatformID = int32(client.PlatformID) ps.ConnID = client.ctx.GetConnID() ps.Token = client.token ps.IsBackground = client.IsBackground - uresp.Status = constant.OnlineStatus + uresp.Status = constant.Online uresp.DetailPlatformStatus = append(uresp.DetailPlatformStatus, ps) } - if uresp.Status == constant.OnlineStatus { + if uresp.Status == constant.Online { resp.SuccessResult = append(resp.SuccessResult, uresp) } } diff --git a/internal/msggateway/init.go b/internal/msggateway/init.go index 44e79e4122..50da060976 100644 --- a/internal/msggateway/init.go +++ b/internal/msggateway/init.go @@ -58,7 +58,7 @@ func Start(ctx context.Context, index int, conf *Config) error { ) hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error { - longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges) + longServer.online, _ = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges) return nil }) diff --git a/internal/msggateway/message_handler.go b/internal/msggateway/message_handler.go index 8a11e6ab3c..4b78c10048 100644 --- a/internal/msggateway/message_handler.go +++ b/internal/msggateway/message_handler.go @@ -19,6 +19,8 @@ import ( "sync" "github.com/go-playground/validator/v10" + "google.golang.org/protobuf/proto" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/msg" @@ -27,7 +29,6 @@ import ( "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/utils/jsonutil" - "google.golang.org/protobuf/proto" ) type Req struct { @@ -94,6 +95,8 @@ type MessageHandler interface { SendMessage(context context.Context, data *Req) ([]byte, error) SendSignalMessage(context context.Context, data *Req) ([]byte, error) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) + GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error) + GetSeqMessage(context context.Context, data *Req) ([]byte, error) UserLogout(context context.Context, data *Req) ([]byte, error) SetUserDeviceBackground(context context.Context, data *Req) ([]byte, 
bool, error) } @@ -175,7 +178,7 @@ func (g GrpcHandler) SendSignalMessage(context context.Context, data *Req) ([]by func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) { req := sdkws.PullMessageBySeqsReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { - return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "PullMessageBySeqsReq") + return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "PullMessageBySeqsReq") } if err := g.validate.Struct(data); err != nil { return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "PullMessageBySeqsReq") @@ -191,6 +194,44 @@ func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([ return c, nil } +func (g GrpcHandler) GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error) { + req := msg.GetConversationsHasReadAndMaxSeqReq{} + if err := proto.Unmarshal(data.Data, &req); err != nil { + return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "GetConversationsHasReadAndMaxSeq") + } + if err := g.validate.Struct(data); err != nil { + return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetConversationsHasReadAndMaxSeq") + } + resp, err := g.msgRpcClient.GetConversationsHasReadAndMaxSeq(context, &req) + if err != nil { + return nil, err + } + c, err := proto.Marshal(resp) + if err != nil { + return nil, errs.WrapMsg(err, "error marshaling response", "action", "marshal", "dataType", "GetConversationsHasReadAndMaxSeq") + } + return c, nil +} + +func (g GrpcHandler) GetSeqMessage(context context.Context, data *Req) ([]byte, error) { + req := msg.GetSeqMessageReq{} + if err := proto.Unmarshal(data.Data, &req); err != nil { + return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "GetSeqMessage") + } + if err := g.validate.Struct(data); err != nil { + return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetSeqMessage") + } + resp, err := g.msgRpcClient.GetSeqMessage(context, &req) + if err != nil { + return nil, err + } + c, err := proto.Marshal(resp) + if err != nil { + return nil, errs.WrapMsg(err, "error marshaling response", "action", "marshal", "dataType", "GetSeqMessage") + } + return c, nil +} + func (g GrpcHandler) UserLogout(context context.Context, data *Req) ([]byte, error) { req := push.DelUserPushTokenReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { diff --git a/internal/msggateway/ws_server.go b/internal/msggateway/ws_server.go index 537b8c5f0a..7df2974885 100644 --- a/internal/msggateway/ws_server.go +++ b/internal/msggateway/ws_server.go @@ -265,7 +265,7 @@ func (ws *WsServer) registerClient(client *Client) { if clientOK { ws.clients.Set(client.UserID, client) // There is already a connection to the platform - log.ZInfo(client.ctx, "repeat login", "userID", client.UserID, "platformID", + log.ZDebug(client.ctx, "repeat login", "userID", client.UserID, "platformID", client.PlatformID, "old remote addr", getRemoteAdders(oldClients)) ws.onlineUserConnNum.Add(1) } else { @@ -275,7 +275,7 @@ func (ws *WsServer) registerClient(client *Client) { } wg := sync.WaitGroup{} - log.ZDebug(client.ctx, "ws.msgGatewayConfig.Discovery.Enable", ws.msgGatewayConfig.Discovery.Enable) + log.ZDebug(client.ctx, "ws.msgGatewayConfig.Discovery.Enable", "discoveryEnable", ws.msgGatewayConfig.Discovery.Enable) if 
ws.msgGatewayConfig.Discovery.Enable != "k8s" { wg.Add(1) @@ -293,7 +293,7 @@ func (ws *WsServer) registerClient(client *Client) { wg.Wait() - log.ZInfo( + log.ZDebug( client.ctx, "user online", "online user Num", @@ -321,7 +321,7 @@ func (ws *WsServer) KickUserConn(client *Client) error { } func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Client, newClient *Client) { - switch ws.msgGatewayConfig.MsgGateway.MultiLoginPolicy { + switch ws.msgGatewayConfig.Share.MultiLoginPolicy { case constant.DefalutNotKick: case constant.PCAndOther: if constant.PlatformIDToClass(newClient.PlatformID) == constant.TerminalPC { @@ -360,7 +360,7 @@ func (ws *WsServer) unregisterClient(client *Client) { ws.onlineUserConnNum.Add(-1) ws.subscription.DelClient(client) //ws.SetUserOnlineStatus(client.ctx, client, constant.Offline) - log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num", + log.ZDebug(client.ctx, "user offline", "close reason", client.closedErr, "online user Num", ws.onlineUserNum.Load(), "online user conn Num", ws.onlineUserConnNum.Load(), ) @@ -425,6 +425,7 @@ func (ws *WsServer) wsHandler(w http.ResponseWriter, r *http.Request) { return } + log.ZDebug(connContext, "new conn", "token", connContext.GetToken()) // Create a WebSocket long connection object wsLongConn := newGWebSocket(WebSocket, ws.handshakeTimeout, ws.writeBufferSize) if err := wsLongConn.GenerateLongConn(w, r); err != nil { diff --git a/internal/msgtransfer/init.go b/internal/msgtransfer/init.go index b4b2245eb0..7dc2ebeea0 100644 --- a/internal/msgtransfer/init.go +++ b/internal/msgtransfer/init.go @@ -16,20 +16,22 @@ package msgtransfer import ( "context" + "errors" "fmt" + "net/http" + "os" + "os/signal" + "syscall" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" "github.com/openimsdk/tools/db/mongoutil" "github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/tools/utils/datautil" - "net/http" - "os" - "os/signal" - "syscall" "github.com/openimsdk/open-im-server/v3/pkg/common/config" - kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" + discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/tools/errs" @@ -64,6 +66,7 @@ type Config struct { func Start(ctx context.Context, index int, config *Config) error { log.CInfo(ctx, "MSG-TRANSFER server is initializing", "prometheusPorts", config.MsgTransfer.Prometheus.Ports, "index", index) + mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) if err != nil { return err @@ -72,12 +75,13 @@ func Start(ctx context.Context, index int, config *Config) error { if err != nil { return err } - client, err := kdisc.NewDiscoveryRegister(&config.Discovery, &config.Share) + client, err := discRegister.NewDiscoveryRegister(&config.Discovery, &config.Share) if err != nil { return err } client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) + msgModel := redis.NewMsgCache(rdb) msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB()) if err != nil { @@ -93,20 +97,21 @@ func Start(ctx context.Context, index int, config *Config) error { 
return err } seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser) - msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig) + msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig) if err != nil { return err } conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation) groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) - historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgDatabase, &conversationRpcClient, &groupRpcClient) + historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgTransferDatabase, &conversationRpcClient, &groupRpcClient) if err != nil { return err } - historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgDatabase) + historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgTransferDatabase) if err != nil { return err } + msgTransfer := &MsgTransfer{ historyCH: historyCH, historyMongoCH: historyMongoCH, @@ -137,7 +142,7 @@ func (m *MsgTransfer) Start(index int, config *Config) error { return } - if err := prommetrics.TransferInit(prometheusPort); err != nil && err != http.ErrServerClosed { + if err := prommetrics.TransferInit(prometheusPort); err != nil && !errors.Is(err, http.ErrServerClosed) { netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort) netDone <- struct{}{} } diff --git a/internal/msgtransfer/online_history_msg_handler.go b/internal/msgtransfer/online_history_msg_handler.go index d671ec52a2..b0078649cb 100644 --- a/internal/msgtransfer/online_history_msg_handler.go +++ b/internal/msgtransfer/online_history_msg_handler.go @@ -16,6 +16,12 @@ package msgtransfer import ( "context" + "encoding/json" + "errors" + "strconv" + "strings" + "time" + "github.com/IBM/sarama" "github.com/go-redis/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/config" @@ -31,9 +37,6 @@ import ( "github.com/openimsdk/tools/mq/kafka" "github.com/openimsdk/tools/utils/stringutil" "google.golang.org/protobuf/proto" - "strconv" - "strings" - "time" ) const ( @@ -54,19 +57,19 @@ type OnlineHistoryRedisConsumerHandler struct { redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage] - msgDatabase controller.CommonMsgDatabase + msgTransferDatabase controller.MsgTransferDatabase conversationRpcClient *rpcclient.ConversationRpcClient groupRpcClient *rpcclient.GroupRpcClient } -func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.CommonMsgDatabase, +func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase, conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*OnlineHistoryRedisConsumerHandler, error) { historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false) if err != nil { return nil, err } var och OnlineHistoryRedisConsumerHandler - och.msgDatabase = database + och.msgTransferDatabase = database b := batcher.New[sarama.ConsumerMessage]( batcher.WithSize(size), @@ -88,6 +91,7 @@ func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database cont och.conversationRpcClient = conversationRpcClient och.groupRpcClient = groupRpcClient och.historyConsumerGroup = historyConsumerGroup 
+ return &och, err } func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[sarama.ConsumerMessage]) { @@ -96,6 +100,7 @@ func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID ctx = withAggregationCtx(ctx, ctxMessages) log.ZInfo(ctx, "msg arrived channel", "channel id", channelID, "msgList length", len(ctxMessages), "key", val.Key()) + och.doSetReadSeq(ctx, ctxMessages) storageMsgList, notStorageMsgList, storageNotificationList, notStorageNotificationList := och.categorizeMessageLists(ctxMessages) @@ -109,6 +114,60 @@ func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID och.handleNotification(ctx, val.Key(), conversationIDNotification, storageNotificationList, notStorageNotificationList) } +func (och *OnlineHistoryRedisConsumerHandler) doSetReadSeq(ctx context.Context, msgs []*ContextMsg) { + type seqKey struct { + conversationID string + userID string + } + var readSeq map[seqKey]int64 + for _, msg := range msgs { + if msg.message.ContentType != constant.HasReadReceipt { + continue + } + var elem sdkws.NotificationElem + if err := json.Unmarshal(msg.message.Content, &elem); err != nil { + log.ZError(ctx, "handlerConversationRead Unmarshal NotificationElem msg err", err, "msg", msg) + continue + } + var tips sdkws.MarkAsReadTips + if err := json.Unmarshal([]byte(elem.Detail), &tips); err != nil { + log.ZError(ctx, "handlerConversationRead Unmarshal MarkAsReadTips msg err", err, "msg", msg) + continue + } + if len(tips.Seqs) > 0 { + for _, seq := range tips.Seqs { + if tips.HasReadSeq < seq { + tips.HasReadSeq = seq + } + } + clear(tips.Seqs) + tips.Seqs = nil + } + if tips.HasReadSeq < 0 { + continue + } + if readSeq == nil { + readSeq = make(map[seqKey]int64) + } + key := seqKey{ + conversationID: tips.ConversationID, + userID: tips.MarkAsReadUserID, + } + if readSeq[key] > tips.HasReadSeq { + continue + } + readSeq[key] = tips.HasReadSeq + } + if readSeq == nil { + return + } + for key, seq := range readSeq { + if err := och.msgTransferDatabase.SetHasReadSeqToDB(ctx, key.userID, key.conversationID, seq); err != nil { + log.ZError(ctx, "set read seq to db error", err, "userID", key.userID, "conversationID", key.conversationID, "seq", seq) + } + } +} + func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*sarama.ConsumerMessage) []*ContextMsg { var ctxMessages []*ContextMsg for i := 0; i < len(consumerMessages); i++ { @@ -179,6 +238,11 @@ func (och *OnlineHistoryRedisConsumerHandler) categorizeMessageLists(totalMsgs [ } func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) { + log.ZInfo(ctx, "handle storage msg") + for _, storageMsg := range storageList { + log.ZDebug(ctx, "handle storage msg", "msg", storageMsg.message.String()) + } + och.toPushTopic(ctx, key, conversationID, notStorageList) var storageMessageList []*sdkws.MsgData for _, msg := range storageList { @@ -186,21 +250,25 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key } if len(storageMessageList) > 0 { msg := storageMessageList[0] - lastSeq, isNewConversation, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) - if err != nil && errs.Unwrap(err) != redis.Nil { + lastSeq, isNewConversation, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) + if err != nil && 
!errors.Is(errs.Unwrap(err), redis.Nil) { log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList) return } + log.ZInfo(ctx, "BatchInsertChat2Cache end") + if isNewConversation { switch msg.SessionType { case constant.ReadGroupChatType: - log.ZInfo(ctx, "group chat first create conversation", "conversationID", + log.ZDebug(ctx, "group chat first create conversation", "conversationID", conversationID) userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID) if err != nil { log.ZWarn(ctx, "get group member ids error", err, "conversationID", conversationID) } else { + log.ZInfo(ctx, "GetGroupMemberIDs end") + if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx, msg.GroupID, userIDs); err != nil { log.ZWarn(ctx, "single chat first create conversation error", err, @@ -219,13 +287,16 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key } } - log.ZDebug(ctx, "success incr to next topic") - err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) + log.ZInfo(ctx, "success incr to next topic") + err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) if err != nil { log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID", conversationID, "storageList", storageMessageList, "lastSeq", lastSeq) } + log.ZInfo(ctx, "MsgToMongoMQ end") + och.toPushTopic(ctx, key, conversationID, storageList) + log.ZInfo(ctx, "toPushTopic end") } } @@ -237,14 +308,14 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con storageMessageList = append(storageMessageList, msg.message) } if len(storageMessageList) > 0 { - lastSeq, _, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) + lastSeq, _, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) if err != nil { log.ZError(ctx, "notification batch insert to redis error", err, "conversationID", conversationID, "storageList", storageMessageList) return } log.ZDebug(ctx, "success to next topic", "conversationID", conversationID) - err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) + err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) if err != nil { log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID", conversationID, "storageList", storageMessageList, "lastSeq", lastSeq) @@ -253,9 +324,10 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con } } -func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(_ context.Context, key, conversationID string, msgs []*ContextMsg) { +func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) { for _, v := range msgs { - och.msgDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message) + log.ZDebug(ctx, "push msg to topic", "msg", v.message.String()) + _, _, _ = och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message) } } @@ -280,7 +352,7 @@ func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSess func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group - log.ZInfo(context.Background(), "online new session msg come", "highWaterMarkOffset", + log.ZDebug(context.Background(), "online new session msg come", 
"highWaterMarkOffset", claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition()) och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) { session.MarkMessage(lastMessage, "") diff --git a/internal/msgtransfer/online_msg_to_mongo_handler.go b/internal/msgtransfer/online_msg_to_mongo_handler.go index e5651012c6..82002c26b9 100644 --- a/internal/msgtransfer/online_msg_to_mongo_handler.go +++ b/internal/msgtransfer/online_msg_to_mongo_handler.go @@ -29,10 +29,10 @@ import ( type OnlineHistoryMongoConsumerHandler struct { historyConsumerGroup *kafka.MConsumerGroup - msgDatabase controller.CommonMsgDatabase + msgTransferDatabase controller.MsgTransferDatabase } -func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.CommonMsgDatabase) (*OnlineHistoryMongoConsumerHandler, error) { +func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase) (*OnlineHistoryMongoConsumerHandler, error) { historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToMongoGroupID, []string{kafkaConf.ToMongoTopic}, true) if err != nil { return nil, err @@ -40,7 +40,7 @@ func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database cont mc := &OnlineHistoryMongoConsumerHandler{ historyConsumerGroup: historyConsumerGroup, - msgDatabase: database, + msgTransferDatabase: database, } return mc, nil } @@ -57,8 +57,8 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg) return } - log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String()) - err = mc.msgDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq) + log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String()) + err = mc.msgTransferDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq) if err != nil { log.ZError( ctx, @@ -77,7 +77,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont for _, msg := range msgFromMQ.MsgData { seqs = append(seqs, msg.Seq) } - err = mc.msgDatabase.DeleteMessagesFromCache(ctx, msgFromMQ.ConversationID, seqs) + err = mc.msgTransferDatabase.DeleteMessagesFromCache(ctx, msgFromMQ.ConversationID, seqs) if err != nil { log.ZError( ctx, @@ -91,13 +91,13 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont } } -func (OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } -func (OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } +func (*OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } +func (*OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim( sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim, -) error { // a instance in the consumer group +) error { // an instance in the consumer group log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset", claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition()) for msg := range claim.Messages() { diff --git a/internal/push/a_test.go b/internal/push/a_test.go new file mode 100644 index 0000000000..8b2d864071 --- /dev/null +++ b/internal/push/a_test.go @@ -0,0 +1,29 @@ +package 
push + +import ( + "github.com/openimsdk/protocol/sdkws" + "testing" +) + +func TestName(t *testing.T) { + var c ConsumerHandler + c.readCh = make(chan *sdkws.MarkAsReadTips) + + go c.loopRead() + + go func() { + for i := 0; ; i++ { + seq := int64(i + 1) + if seq%3 == 0 { + seq = 1 + } + c.readCh <- &sdkws.MarkAsReadTips{ + ConversationID: "c100", + MarkAsReadUserID: "u100", + HasReadSeq: seq, + } + } + }() + + select {} +} diff --git a/internal/push/offlinepush/dummy/push.go b/internal/push/offlinepush/dummy/push.go index 028e7edd34..09831cabfa 100644 --- a/internal/push/offlinepush/dummy/push.go +++ b/internal/push/offlinepush/dummy/push.go @@ -17,6 +17,7 @@ package dummy import ( "context" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" + "github.com/openimsdk/tools/log" ) func NewClient() *Dummy { @@ -27,5 +28,6 @@ type Dummy struct { } func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error { + log.ZDebug(ctx, "dummy push") return nil } diff --git a/internal/push/offlinepush/fcm/push.go b/internal/push/offlinepush/fcm/push.go index f015ca4e51..6e8355af39 100644 --- a/internal/push/offlinepush/fcm/push.go +++ b/internal/push/offlinepush/fcm/push.go @@ -22,8 +22,8 @@ import ( "path/filepath" "strings" - firebase "firebase.google.com/go" - "firebase.google.com/go/messaging" + firebase "firebase.google.com/go/v4" + "firebase.google.com/go/v4/messaging" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/protocol/constant" @@ -99,7 +99,7 @@ func (f *Fcm) Push(ctx context.Context, userIDs []string, title, content string, apns := &messaging.APNSConfig{Payload: &messaging.APNSPayload{Aps: &messaging.Aps{Sound: opts.IOSPushSound}}} messageCount := len(messages) if messageCount >= SinglePushCountLimit { - response, err := f.fcmMsgCli.SendAll(ctx, messages) + response, err := f.fcmMsgCli.SendEach(ctx, messages) if err != nil { Fail = Fail + messageCount // Record push error @@ -154,7 +154,7 @@ func (f *Fcm) Push(ctx context.Context, userIDs []string, title, content string, } messageCount := len(messages) if messageCount > 0 { - response, err := f.fcmMsgCli.SendAll(ctx, messages) + response, err := f.fcmMsgCli.SendEach(ctx, messages) if err != nil { Fail = Fail + messageCount } else { diff --git a/internal/push/offlinepush/getui/push.go b/internal/push/offlinepush/getui/push.go index 27b19e8fe0..e266f9c464 100644 --- a/internal/push/offlinepush/getui/push.go +++ b/internal/push/offlinepush/getui/push.go @@ -18,11 +18,11 @@ import ( "context" "crypto/sha256" "encoding/hex" - "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "strconv" "sync" "time" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/tools/errs" @@ -91,6 +91,15 @@ func (g *Client) Push(ctx context.Context, userIDs []string, title, content stri for i, v := range s.GetSplitResult() { go func(index int, userIDs []string) { defer wg.Done() + for i := 0; i < len(userIDs); i += maxNum { + end := i + maxNum + if end > len(userIDs) { + end = len(userIDs) + } + if err = g.batchPush(ctx, token, userIDs[i:end], pushReq); err != nil { + log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq) + } + } if err = g.batchPush(ctx, token, userIDs, 
pushReq); err != nil { log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq) } diff --git a/internal/push/offlinepush_handler.go b/internal/push/offlinepush_handler.go new file mode 100644 index 0000000000..bf69aed3e2 --- /dev/null +++ b/internal/push/offlinepush_handler.go @@ -0,0 +1,122 @@ +package push + +import ( + "context" + + "github.com/IBM/sarama" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" + "github.com/openimsdk/protocol/constant" + pbpush "github.com/openimsdk/protocol/push" + "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/errs" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mq/kafka" + "github.com/openimsdk/tools/utils/jsonutil" + "google.golang.org/protobuf/proto" +) + +type OfflinePushConsumerHandler struct { + OfflinePushConsumerGroup *kafka.MConsumerGroup + offlinePusher offlinepush.OfflinePusher +} + +func NewOfflinePushConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher) (*OfflinePushConsumerHandler, error) { + var offlinePushConsumerHandler OfflinePushConsumerHandler + var err error + offlinePushConsumerHandler.offlinePusher = offlinePusher + offlinePushConsumerHandler.OfflinePushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToOfflineGroupID, + []string{config.KafkaConfig.ToOfflinePushTopic}, true) + if err != nil { + return nil, err + } + return &offlinePushConsumerHandler, nil +} + +func (*OfflinePushConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil } +func (*OfflinePushConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil } +func (o *OfflinePushConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + for msg := range claim.Messages() { + ctx := o.OfflinePushConsumerGroup.GetContextFromMsg(msg) + o.handleMsg2OfflinePush(ctx, msg.Value) + sess.MarkMessage(msg, "") + } + return nil +} + +func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context, msg []byte) { + offlinePushMsg := pbpush.PushMsgReq{} + if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil { + log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg)) + return + } + if offlinePushMsg.MsgData == nil || offlinePushMsg.UserIDs == nil { + log.ZError(ctx, "offline push msg is empty", errs.New("offlinePushMsg is empty"), "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData) + return + } + log.ZInfo(ctx, "receive to OfflinePush MQ", "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData) + + err := o.offlinePushMsg(ctx, offlinePushMsg.MsgData, offlinePushMsg.UserIDs) + if err != nil { + log.ZWarn(ctx, "offline push failed", err, "msg", offlinePushMsg.String()) + } +} + +func (o *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) { + type AtTextElem struct { + Text string `json:"text,omitempty"` + AtUserList []string `json:"atUserList,omitempty"` + IsAtSelf bool `json:"isAtSelf"` + } + + opts = &options.Opts{Signal: &options.Signal{}} + if msg.OfflinePushInfo != nil { + opts.IOSBadgeCount = msg.OfflinePushInfo.IOSBadgeCount + opts.IOSPushSound = msg.OfflinePushInfo.IOSPushSound + opts.Ex = msg.OfflinePushInfo.Ex + } + + if msg.OfflinePushInfo != nil { + title = msg.OfflinePushInfo.Title + 
content = msg.OfflinePushInfo.Desc + } + if title == "" { + switch msg.ContentType { + case constant.Text: + fallthrough + case constant.Picture: + fallthrough + case constant.Voice: + fallthrough + case constant.Video: + fallthrough + case constant.File: + title = constant.ContentType2PushContent[int64(msg.ContentType)] + case constant.AtText: + ac := AtTextElem{} + _ = jsonutil.JsonStringToStruct(string(msg.Content), &ac) + case constant.SignalingNotification: + title = constant.ContentType2PushContent[constant.SignalMsg] + default: + title = constant.ContentType2PushContent[constant.Common] + } + } + if content == "" { + content = title + } + return +} + +func (o *OfflinePushConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error { + title, content, opts, err := o.getOfflinePushInfos(msg) + if err != nil { + return err + } + err = o.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts) + if err != nil { + prommetrics.MsgOfflinePushFailedCounter.Inc() + return err + } + return nil +} diff --git a/internal/push/onlinepusher.go b/internal/push/onlinepusher.go index a61399fb6b..9521a84a07 100644 --- a/internal/push/onlinepusher.go +++ b/internal/push/onlinepusher.go @@ -19,20 +19,20 @@ type OnlinePusher interface { pushToUserIDs *[]string) []string } -type emptyOnlinePUsher struct{} +type emptyOnlinePusher struct{} -func newEmptyOnlinePUsher() *emptyOnlinePUsher { - return &emptyOnlinePUsher{} +func newEmptyOnlinePusher() *emptyOnlinePusher { + return &emptyOnlinePusher{} } -func (emptyOnlinePUsher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, +func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) { - log.ZWarn(ctx, "emptyOnlinePUsher GetConnsAndOnlinePush", nil) + log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil) return nil, nil } -func (u emptyOnlinePUsher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, +func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string { - log.ZWarn(ctx, "emptyOnlinePUsher GetOnlinePushFailedUserIDs", nil) + log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil) return nil } @@ -45,7 +45,7 @@ func NewOnlinePusher(disCov discovery.SvcDiscoveryRegistry, config *Config) Onli case "etcd": return NewDefaultAllNode(disCov, config) default: - return newEmptyOnlinePUsher() + return newEmptyOnlinePusher() } } diff --git a/internal/push/push.go b/internal/push/push.go index 1a04bbea26..850f91d22e 100644 --- a/internal/push/push.go +++ b/internal/push/push.go @@ -2,6 +2,7 @@ package push import ( "context" + "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" @@ -17,12 +18,12 @@ type pushServer struct { disCov discovery.SvcDiscoveryRegistry offlinePusher offlinepush.OfflinePusher pushCh *ConsumerHandler + offlinePushCh *OfflinePushConsumerHandler } type Config struct { RpcConfig config.Push RedisConfig config.Redis - MongodbConfig config.Mongo KafkaConfig config.Kafka NotificationConfig config.Notification Share config.Share @@ -55,18 +56,30 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg if err != nil { return err } - database := 
controller.NewPushDatabase(cacheModel) - consumer, err := NewConsumerHandler(config, offlinePusher, rdb, client) + database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig) + + consumer, err := NewConsumerHandler(config, database, offlinePusher, rdb, client) + if err != nil { + return err + } + + offlinePushConsumer, err := NewOfflinePushConsumerHandler(config, offlinePusher) if err != nil { return err } + pbpush.RegisterPushMsgServiceServer(server, &pushServer{ database: database, disCov: client, offlinePusher: offlinePusher, pushCh: consumer, + offlinePushCh: offlinePushConsumer, }) + go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer) + + go offlinePushConsumer.OfflinePushConsumerGroup.RegisterHandleAndConsumer(ctx, offlinePushConsumer) + return nil } diff --git a/internal/push/push_handler.go b/internal/push/push_handler.go index 249622a592..4ecf20de52 100644 --- a/internal/push/push_handler.go +++ b/internal/push/push_handler.go @@ -1,33 +1,20 @@ -// Copyright Β© 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package push import ( "context" "encoding/json" + "github.com/IBM/sarama" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/rpccache" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" "github.com/openimsdk/protocol/constant" - pbchat "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/msggateway" pbpush "github.com/openimsdk/protocol/push" "github.com/openimsdk/protocol/sdkws" @@ -40,12 +27,16 @@ import ( "github.com/openimsdk/tools/utils/timeutil" "github.com/redis/go-redis/v9" "google.golang.org/protobuf/proto" + "math/rand" + "strconv" + "time" ) type ConsumerHandler struct { pushConsumerGroup *kafka.MConsumerGroup offlinePusher offlinepush.OfflinePusher onlinePusher OnlinePusher + pushDatabase controller.PushDatabase onlineCache *rpccache.OnlineCache groupLocalCache *rpccache.GroupLocalCache conversationLocalCache *rpccache.ConversationLocalCache @@ -56,7 +47,7 @@ type ConsumerHandler struct { config *Config } -func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, +func NewConsumerHandler(config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) { var consumerHandler ConsumerHandler var err error @@ -65,7 +56,9 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, if err != nil { return nil, err } + userRpcClient := 
rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) + consumerHandler.offlinePusher = offlinePusher consumerHandler.onlinePusher = NewOnlinePusher(client, config) consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) @@ -75,42 +68,45 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb) consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL) consumerHandler.config = config - consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil) + consumerHandler.pushDatabase = database + consumerHandler.onlineCache, err = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, config.RpcConfig.FullUserCache, nil) + if err != nil { + return nil, err + } return &consumerHandler, nil } func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) { - msgFromMQ := pbchat.PushMsgDataToMQ{} + msgFromMQ := pbpush.PushMsgReq{} if err := proto.Unmarshal(msg, &msgFromMQ); err != nil { log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg)) return } - pbData := &pbpush.PushMsgReq{ - MsgData: msgFromMQ.MsgData, - ConversationID: msgFromMQ.ConversationID, - } + sec := msgFromMQ.MsgData.SendTime / 1000 nowSec := timeutil.GetCurrentTimestampBySecond() - log.ZDebug(ctx, "push msg", "msg", pbData.String(), "sec", sec, "nowSec", nowSec) + if nowSec-sec > 10 { - return + prommetrics.MsgLoneTimePushCounter.Inc() + log.ZWarn(ctx, "it's been a while since the message was sent", nil, "msg", msgFromMQ.String(), "sec", sec, "nowSec", nowSec, "nowSec-sec", nowSec-sec) } var err error + switch msgFromMQ.MsgData.SessionType { case constant.ReadGroupChatType: - err = c.Push2Group(ctx, pbData.MsgData.GroupID, pbData.MsgData) + err = c.Push2Group(ctx, msgFromMQ.MsgData.GroupID, msgFromMQ.MsgData) default: var pushUserIDList []string - isSenderSync := datautil.GetSwitchFromOptions(pbData.MsgData.Options, constant.IsSenderSync) - if !isSenderSync || pbData.MsgData.SendID == pbData.MsgData.RecvID { - pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID) + isSenderSync := datautil.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync) + if !isSenderSync || msgFromMQ.MsgData.SendID == msgFromMQ.MsgData.RecvID { + pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID) } else { - pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID, pbData.MsgData.SendID) + pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID, msgFromMQ.MsgData.SendID) } - err = c.Push2User(ctx, pushUserIDList, pbData.MsgData) + err = c.Push2User(ctx, pushUserIDList, msgFromMQ.MsgData) } if err != nil { - log.ZWarn(ctx, "push failed", err, "msg", pbData.String()) + log.ZWarn(ctx, "push failed", err, "msg", msgFromMQ.String()) } } @@ -119,6 +115,14 @@ func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil } func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil } func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + c.onlineCache.Lock.Lock() + for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver { + c.onlineCache.Cond.Wait() + } + c.onlineCache.Lock.Unlock() + ctx := mcontext.SetOperationID(context.TODO(),
strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10)) + log.ZInfo(ctx, "begin consume messages") + for msg := range claim.Messages() { ctx := c.pushConsumerGroup.GetContextFromMsg(msg) c.handleMs2PsChat(ctx, msg.Value) @@ -129,20 +133,27 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s // Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType. func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) { - log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String()) + log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String()) + defer func(duration time.Time) { + t := time.Since(duration) + log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "msg", msg.String(), "time cost", t) + }(time.Now()) if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil { return err } + log.ZInfo(ctx, "webhookBeforeOnlinePush end") + wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs) if err != nil { return err } - log.ZDebug(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs) + log.ZInfo(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs) if !c.shouldPushOffline(ctx, msg) { return nil } + log.ZInfo(ctx, "shouldPushOffline end") for _, v := range wsResults { //message sender do not need offline push @@ -154,17 +165,17 @@ func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg * return nil } } - offlinePUshUserID := []string{msg.RecvID} + offlinePushUserID := []string{msg.RecvID} //receiver offline push if err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, - offlinePUshUserID, msg, nil); err != nil { + offlinePushUserID, msg, nil); err != nil { return err } - - err = c.offlinePushMsg(ctx, msg, offlinePUshUserID) + log.ZInfo(ctx, "webhookBeforeOfflinePush end") + err = c.offlinePushMsg(ctx, msg, offlinePushUserID) if err != nil { - log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePUshUserID", offlinePUshUserID, "msg", msg) + log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePushUserID", offlinePushUserID, "msg", msg) return nil } @@ -183,21 +194,11 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat } func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) { - var ( - onlineUserIDs []string - offlineUserIDs []string - ) - for _, userID := range pushToUserIDs { - online, err := c.onlineCache.GetUserOnline(ctx, userID) - if err != nil { - return nil, err - } - if online { - onlineUserIDs = append(onlineUserIDs, userID) - } else { - offlineUserIDs = append(offlineUserIDs, userID) - } + onlineUserIDs, offlineUserIDs, err := c.onlineCache.GetUsersOnline(ctx, pushToUserIDs) + if err != nil { + return nil, err } + log.ZDebug(ctx, "GetConnsAndOnlinePush online cache", "sendID", msg.SendID, "recvID", msg.RecvID, "groupID", msg.GroupID, "sessionType", msg.SessionType, "clientMsgID", msg.ClientMsgID, "serverMsgID", msg.ServerMsgID, "offlineUserIDs", offlineUserIDs, "onlineUserIDs", onlineUserIDs) var result []*msggateway.SingleMsgToUserResults if len(onlineUserIDs) > 0 { @@ -216,57 +217,70 @@ func (c *ConsumerHandler) 
GetConnsAndOnlinePush(ctx context.Context, msg *sdkws. } func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) { - log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID) + log.ZInfo(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID) + defer func(duration time.Time) { + t := time.Since(duration) + log.ZInfo(ctx, "Get group msg from msg_transfer and push msg end", "msg", msg.String(), "groupID", groupID, "time cost", t) + }(time.Now()) var pushToUserIDs []string if err = c.webhookBeforeGroupOnlinePush(ctx, &c.config.WebhooksConfig.BeforeGroupOnlinePush, groupID, msg, &pushToUserIDs); err != nil { return err } + log.ZInfo(ctx, "webhookBeforeGroupOnlinePush end") err = c.groupMessagesHandler(ctx, groupID, &pushToUserIDs, msg) if err != nil { return err } + log.ZInfo(ctx, "groupMessagesHandler end") wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs) if err != nil { return err } - log.ZDebug(ctx, "group push result", "result", wsResults, "msg", msg) + log.ZInfo(ctx, "group push result", "result", wsResults, "msg", msg) if !c.shouldPushOffline(ctx, msg) { return nil } needOfflinePushUserIDs := c.onlinePusher.GetOnlinePushFailedUserIDs(ctx, msg, wsResults, &pushToUserIDs) - + log.ZInfo(ctx, "GetOnlinePushFailedUserIDs end") //filter some user, like don not disturb or don't need offline push etc. needOfflinePushUserIDs, err = c.filterGroupMessageOfflinePush(ctx, groupID, msg, needOfflinePushUserIDs) if err != nil { return err } + log.ZInfo(ctx, "filterGroupMessageOfflinePush end") + // Use offline push messaging if len(needOfflinePushUserIDs) > 0 { - var offlinePushUserIDs []string - err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs) - if err != nil { - return err - } - - if len(offlinePushUserIDs) > 0 { - needOfflinePushUserIDs = offlinePushUserIDs - } + c.asyncOfflinePush(ctx, needOfflinePushUserIDs, msg) + } - err = c.offlinePushMsg(ctx, msg, needOfflinePushUserIDs) - if err != nil { - log.ZWarn(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg) - return nil - } + return nil +} +func (c *ConsumerHandler) asyncOfflinePush(ctx context.Context, needOfflinePushUserIDs []string, msg *sdkws.MsgData) { + var offlinePushUserIDs []string + err := c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs) + if err != nil { + log.ZWarn(ctx, "webhookBeforeOfflinePush failed", err, "msg", msg) + return } - return nil + if len(offlinePushUserIDs) > 0 { + needOfflinePushUserIDs = offlinePushUserIDs + } + if err := c.pushDatabase.MsgToOfflinePushMQ(ctx, conversationutil.GenConversationUniqueKeyForSingle(msg.SendID, msg.RecvID), needOfflinePushUserIDs, msg); err != nil { + log.ZError(ctx, "Msg To OfflinePush MQ error", err, "needOfflinePushUserIDs", + needOfflinePushUserIDs, "msg", msg) + prommetrics.SingleChatMsgProcessFailedCounter.Inc() + return + } } + func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID string, pushToUserIDs *[]string, msg *sdkws.MsgData) (err error) { if len(*pushToUserIDs) == 0 { *pushToUserIDs, err = c.groupLocalCache.GetGroupMemberIDs(ctx, groupID) @@ -300,7 +314,7 @@ func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID stri if unmarshalNotificationElem(msg.Content, &tips) != nil { return err } - log.ZInfo(ctx, 
"GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs) + log.ZDebug(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs) if len(c.config.Share.IMAdminUserID) > 0 { ctx = mcontext.WithOpUserIDContext(ctx, c.config.Share.IMAdminUserID[0]) } @@ -384,6 +398,7 @@ func (c *ConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, conten } return } + func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID) @@ -392,6 +407,7 @@ func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, } return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq) } + func unmarshalNotificationElem(bytes []byte, t any) error { var notification sdkws.NotificationElem if err := json.Unmarshal(bytes, ¬ification); err != nil { diff --git a/internal/rpc/auth/auth.go b/internal/rpc/auth/auth.go index 804375e4fb..06ae89d971 100644 --- a/internal/rpc/auth/auth.go +++ b/internal/rpc/auth/auth.go @@ -20,6 +20,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/config" redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/tools/db/redisutil" + "github.com/openimsdk/tools/utils/datautil" "github.com/redis/go-redis/v9" "github.com/openimsdk/open-im-server/v3/pkg/authverify" @@ -64,24 +65,33 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire), config.Share.Secret, config.RpcConfig.TokenPolicy.Expire, + config.Share.MultiLoginPolicy, ), config: config, }) return nil } -func (s *authServer) UserToken(ctx context.Context, req *pbauth.UserTokenReq) (*pbauth.UserTokenResp, error) { - resp := pbauth.UserTokenResp{} +func (s *authServer) GetAdminToken(ctx context.Context, req *pbauth.GetAdminTokenReq) (*pbauth.GetAdminTokenResp, error) { + resp := pbauth.GetAdminTokenResp{} if req.Secret != s.config.Share.Secret { return nil, errs.ErrNoPermission.WrapMsg("secret invalid") } + + if !datautil.Contain(req.UserID, s.config.Share.IMAdminUserID...) { + return nil, errs.ErrArgs.WrapMsg("userID is error.", "userID", req.UserID, "adminUserID", s.config.Share.IMAdminUserID) + + } + if _, err := s.userRpcClient.GetUserInfo(ctx, req.UserID); err != nil { return nil, err } - token, err := s.authDatabase.CreateToken(ctx, req.UserID, int(req.PlatformID)) + + token, err := s.authDatabase.CreateToken(ctx, req.UserID, int(constant.AdminPlatformID)) if err != nil { return nil, err } + prommetrics.UserLoginCounter.Inc() resp.Token = token resp.ExpireTimeSeconds = s.config.RpcConfig.TokenPolicy.Expire * 24 * 60 * 60 @@ -92,6 +102,11 @@ func (s *authServer) GetUserToken(ctx context.Context, req *pbauth.GetUserTokenR if err := authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil { return nil, err } + + if req.PlatformID == constant.AdminPlatformID { + return nil, errs.ErrNoPermission.WrapMsg("platformID invalid. 
platformID must not be adminPlatformID") + } + resp := pbauth.GetUserTokenResp{} if authverify.IsManagerUserID(req.UserID, s.config.Share.IMAdminUserID) { diff --git a/internal/rpc/conversation/conversaion.go b/internal/rpc/conversation/conversation.go similarity index 82% rename from internal/rpc/conversation/conversaion.go rename to internal/rpc/conversation/conversation.go index 4cf20f919c..6f6ca1f674 100644 --- a/internal/rpc/conversation/conversaion.go +++ b/internal/rpc/conversation/conversation.go @@ -221,11 +221,11 @@ func (c *conversationServer) SetConversation(ctx context.Context, req *pbconvers return resp, nil } -// nolint func (c *conversationServer) SetConversations(ctx context.Context, req *pbconversation.SetConversationsReq) (*pbconversation.SetConversationsResp, error) { if req.Conversation == nil { return nil, errs.ErrArgs.WrapMsg("conversation must not be nil") } + if req.Conversation.ConversationType == constant.WriteGroupChatType { groupInfo, err := c.groupRpcClient.GetGroupInfo(ctx, req.Conversation.GroupID) if err != nil { @@ -235,98 +235,141 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver return nil, servererrs.ErrDismissedAlready.WrapMsg("group dismissed") } } - var unequal int - var conv dbModel.Conversation - if len(req.UserIDs) == 1 { - cs, err := c.conversationDatabase.FindConversations(ctx, req.UserIDs[0], []string{req.Conversation.ConversationID}) + + conversationMap := make(map[string]*dbModel.Conversation) + var needUpdateUsersList []string + + for _, userID := range req.UserIDs { + conversationList, err := c.conversationDatabase.FindConversations(ctx, userID, []string{req.Conversation.ConversationID}) if err != nil { return nil, err } - if len(cs) == 0 { - return nil, errs.ErrRecordNotFound.WrapMsg("conversation not found") + if len(conversationList) != 0 { + conversationMap[userID] = conversationList[0] + } else { + needUpdateUsersList = append(needUpdateUsersList, userID) } - conv = *cs[0] } + var conversation dbModel.Conversation conversation.ConversationID = req.Conversation.ConversationID conversation.ConversationType = req.Conversation.ConversationType conversation.UserID = req.Conversation.UserID conversation.GroupID = req.Conversation.GroupID + m := make(map[string]any) - if req.Conversation.RecvMsgOpt != nil { - m["recv_msg_opt"] = req.Conversation.RecvMsgOpt.Value - if req.Conversation.RecvMsgOpt.Value != conv.RecvMsgOpt { - unequal++ + + setConversationFieldsFunc := func() { + if req.Conversation.RecvMsgOpt != nil { + m["recv_msg_opt"] = req.Conversation.RecvMsgOpt.Value } - } - if req.Conversation.AttachedInfo != nil { - m["attached_info"] = req.Conversation.AttachedInfo.Value - if req.Conversation.AttachedInfo.Value != conv.AttachedInfo { - unequal++ + if req.Conversation.AttachedInfo != nil { + m["attached_info"] = req.Conversation.AttachedInfo.Value } - } - if req.Conversation.Ex != nil { - m["ex"] = req.Conversation.Ex.Value - if req.Conversation.Ex.Value != conv.Ex { - unequal++ + if req.Conversation.Ex != nil { + m["ex"] = req.Conversation.Ex.Value } - } - if req.Conversation.IsPinned != nil { - m["is_pinned"] = req.Conversation.IsPinned.Value - if req.Conversation.IsPinned.Value != conv.IsPinned { - unequal++ + if req.Conversation.IsPinned != nil { + m["is_pinned"] = req.Conversation.IsPinned.Value } - } - if req.Conversation.GroupAtType != nil { - m["group_at_type"] = req.Conversation.GroupAtType.Value - if req.Conversation.GroupAtType.Value != conv.GroupAtType { - unequal++ + if 
req.Conversation.GroupAtType != nil { + m["group_at_type"] = req.Conversation.GroupAtType.Value } - } - if req.Conversation.MsgDestructTime != nil { - m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value - if req.Conversation.MsgDestructTime.Value != conv.MsgDestructTime { - unequal++ + if req.Conversation.MsgDestructTime != nil { + m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value + } + if req.Conversation.IsMsgDestruct != nil { + m["is_msg_destruct"] = req.Conversation.IsMsgDestruct.Value + } + if req.Conversation.BurnDuration != nil { + m["burn_duration"] = req.Conversation.BurnDuration.Value } } - if req.Conversation.IsMsgDestruct != nil { - m["is_msg_destruct"] = req.Conversation.IsMsgDestruct.Value - if req.Conversation.IsMsgDestruct.Value != conv.IsMsgDestruct { - unequal++ + + // set need set field in conversation + setConversationFieldsFunc() + + for userID := range conversationMap { + unequal := len(m) + + if req.Conversation.RecvMsgOpt != nil { + if req.Conversation.RecvMsgOpt.Value == conversationMap[userID].RecvMsgOpt { + unequal-- + } + } + + if req.Conversation.AttachedInfo != nil { + if req.Conversation.AttachedInfo.Value == conversationMap[userID].AttachedInfo { + unequal-- + } + } + + if req.Conversation.Ex != nil { + if req.Conversation.Ex.Value == conversationMap[userID].Ex { + unequal-- + } + } + if req.Conversation.IsPinned != nil { + if req.Conversation.IsPinned.Value == conversationMap[userID].IsPinned { + unequal-- + } + } + + if req.Conversation.GroupAtType != nil { + if req.Conversation.GroupAtType.Value == conversationMap[userID].GroupAtType { + unequal-- + } + } + + if req.Conversation.MsgDestructTime != nil { + if req.Conversation.MsgDestructTime.Value == conversationMap[userID].MsgDestructTime { + unequal-- + } + } + + if req.Conversation.IsMsgDestruct != nil { + if req.Conversation.IsMsgDestruct.Value == conversationMap[userID].IsMsgDestruct { + unequal-- + } + } + + if req.Conversation.BurnDuration != nil { + if req.Conversation.BurnDuration.Value == conversationMap[userID].BurnDuration { + unequal-- + } + } + + if unequal > 0 { + needUpdateUsersList = append(needUpdateUsersList, userID) } } + if req.Conversation.IsPrivateChat != nil && req.Conversation.ConversationType != constant.ReadGroupChatType { var conversations []*dbModel.Conversation for _, ownerUserID := range req.UserIDs { - conversation2 := conversation - conversation2.OwnerUserID = ownerUserID - conversation2.IsPrivateChat = req.Conversation.IsPrivateChat.Value - conversations = append(conversations, &conversation2) + transConversation := conversation + transConversation.OwnerUserID = ownerUserID + transConversation.IsPrivateChat = req.Conversation.IsPrivateChat.Value + conversations = append(conversations, &transConversation) } if err := c.conversationDatabase.SyncPeerUserPrivateConversationTx(ctx, conversations); err != nil { return nil, err } + for _, userID := range req.UserIDs { c.conversationNotificationSender.ConversationSetPrivateNotification(ctx, userID, req.Conversation.UserID, req.Conversation.IsPrivateChat.Value, req.Conversation.ConversationID) } - } - - if req.Conversation.BurnDuration != nil { - m["burn_duration"] = req.Conversation.BurnDuration.Value - if req.Conversation.BurnDuration.Value != conv.BurnDuration { - unequal++ - } - } - - if err := c.conversationDatabase.SetUsersConversationFieldTx(ctx, req.UserIDs, &conversation, m); err != nil { - return nil, err - } + } else { + if len(m) != 0 && len(needUpdateUsersList) != 0 { + if err := 
c.conversationDatabase.SetUsersConversationFieldTx(ctx, needUpdateUsersList, &conversation, m); err != nil { + return nil, err + } - if unequal > 0 { - for _, v := range req.UserIDs { - c.conversationNotificationSender.ConversationChangeNotification(ctx, v, []string{req.Conversation.ConversationID}) + for _, v := range needUpdateUsersList { + c.conversationNotificationSender.ConversationChangeNotification(ctx, v, []string{req.Conversation.ConversationID}) + } } } @@ -392,6 +435,14 @@ func (c *conversationServer) SetConversationMaxSeq(ctx context.Context, req *pbc return &pbconversation.SetConversationMaxSeqResp{}, nil } +func (c *conversationServer) SetConversationMinSeq(ctx context.Context, req *pbconversation.SetConversationMinSeqReq) (*pbconversation.SetConversationMinSeqResp, error) { + if err := c.conversationDatabase.UpdateUsersConversationField(ctx, req.OwnerUserID, req.ConversationID, + map[string]any{"min_seq": req.MinSeq}); err != nil { + return nil, err + } + return &pbconversation.SetConversationMinSeqResp{}, nil +} + func (c *conversationServer) GetConversationIDs(ctx context.Context, req *pbconversation.GetConversationIDsReq) (*pbconversation.GetConversationIDsResp, error) { conversationIDs, err := c.conversationDatabase.GetConversationIDs(ctx, req.UserID) if err != nil { @@ -634,11 +685,11 @@ func (c *conversationServer) GetConversationsNeedDestructMsgs(ctx context.Contex conversationIDs, err := c.conversationDatabase.PageConversationIDs(ctx, pagination) if err != nil { - log.ZError(ctx, "PageConversationIDs failed", err, "pageNumber", pageNumber) + // log.ZError(ctx, "PageConversationIDs failed", err, "pageNumber", pageNumber) continue } - log.ZDebug(ctx, "PageConversationIDs success", "pageNumber", pageNumber, "conversationIDsNum", len(conversationIDs), "conversationIDs", conversationIDs) + // log.ZDebug(ctx, "PageConversationIDs success", "pageNumber", pageNumber, "conversationIDsNum", len(conversationIDs), "conversationIDs", conversationIDs) if len(conversationIDs) == 0 { continue } @@ -659,3 +710,19 @@ func (c *conversationServer) GetConversationsNeedDestructMsgs(ctx context.Contex return &pbconversation.GetConversationsNeedDestructMsgsResp{Conversations: convert.ConversationsDB2Pb(temp)}, nil } + +func (c *conversationServer) GetNotNotifyConversationIDs(ctx context.Context, req *pbconversation.GetNotNotifyConversationIDsReq) (*pbconversation.GetNotNotifyConversationIDsResp, error) { + conversationIDs, err := c.conversationDatabase.GetNotNotifyConversationIDs(ctx, req.UserID) + if err != nil { + return nil, err + } + return &pbconversation.GetNotNotifyConversationIDsResp{ConversationIDs: conversationIDs}, nil +} + +func (c *conversationServer) GetPinnedConversationIDs(ctx context.Context, req *pbconversation.GetPinnedConversationIDsReq) (*pbconversation.GetPinnedConversationIDsResp, error) { + conversationIDs, err := c.conversationDatabase.GetPinnedConversationIDs(ctx, req.UserID) + if err != nil { + return nil, err + } + return &pbconversation.GetPinnedConversationIDsResp{ConversationIDs: conversationIDs}, nil +} diff --git a/internal/rpc/group/callback.go b/internal/rpc/group/callback.go index f877aa64a8..559d64ff47 100644 --- a/internal/rpc/group/callback.go +++ b/internal/rpc/group/callback.go @@ -218,6 +218,7 @@ func (s *groupServer) webhookAfterKickGroupMember(ctx context.Context, after *co CallbackCommand: callbackstruct.CallbackAfterKickGroupCommand, GroupID: req.GroupID, KickedUserIDs: req.KickedUserIDs, + Reason: req.Reason, } s.webhookClient.AsyncPost(ctx, 
cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackKillGroupMemberResp{}, after) } @@ -358,3 +359,74 @@ func (s *groupServer) webhookAfterSetGroupInfo(ctx context.Context, after *confi } s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoResp{}, after) } + +func (s *groupServer) webhookBeforeSetGroupInfoEx(ctx context.Context, before *config.BeforeConfig, req *group.SetGroupInfoExReq) error { + return webhook.WithCondition(ctx, before, func(ctx context.Context) error { + cbReq := &callbackstruct.CallbackBeforeSetGroupInfoExReq{ + CallbackCommand: callbackstruct.CallbackBeforeSetGroupInfoExCommand, + GroupID: req.GroupID, + GroupName: req.GroupName, + Notification: req.Notification, + Introduction: req.Introduction, + FaceURL: req.FaceURL, + } + + if req.Ex != nil { + cbReq.Ex = req.Ex + } + log.ZDebug(ctx, "debug CallbackBeforeSetGroupInfoEx", "ex", cbReq.Ex) + + if req.NeedVerification != nil { + cbReq.NeedVerification = req.NeedVerification + } + if req.LookMemberInfo != nil { + cbReq.LookMemberInfo = req.LookMemberInfo + } + if req.ApplyMemberFriend != nil { + cbReq.ApplyMemberFriend = req.ApplyMemberFriend + } + + resp := &callbackstruct.CallbackBeforeSetGroupInfoExResp{} + + if err := s.webhookClient.SyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, before); err != nil { + return err + } + + datautil.NotNilReplace(&req.GroupID, &resp.GroupID) + datautil.NotNilReplace(&req.GroupName, &resp.GroupName) + datautil.NotNilReplace(&req.FaceURL, &resp.FaceURL) + datautil.NotNilReplace(&req.Introduction, &resp.Introduction) + datautil.NotNilReplace(&req.Ex, &resp.Ex) + datautil.NotNilReplace(&req.NeedVerification, &resp.NeedVerification) + datautil.NotNilReplace(&req.LookMemberInfo, &resp.LookMemberInfo) + datautil.NotNilReplace(&req.ApplyMemberFriend, &resp.ApplyMemberFriend) + + return nil + }) +} + +func (s *groupServer) webhookAfterSetGroupInfoEx(ctx context.Context, after *config.AfterConfig, req *group.SetGroupInfoExReq) { + cbReq := &callbackstruct.CallbackAfterSetGroupInfoExReq{ + CallbackCommand: callbackstruct.CallbackAfterSetGroupInfoExCommand, + GroupID: req.GroupID, + GroupName: req.GroupName, + Notification: req.Notification, + Introduction: req.Introduction, + FaceURL: req.FaceURL, + } + + if req.Ex != nil { + cbReq.Ex = req.Ex + } + if req.NeedVerification != nil { + cbReq.NeedVerification = req.NeedVerification + } + if req.LookMemberInfo != nil { + cbReq.LookMemberInfo = req.LookMemberInfo + } + if req.ApplyMemberFriend != nil { + cbReq.ApplyMemberFriend = req.ApplyMemberFriend + } + + s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoExResp{}, after) +} diff --git a/internal/rpc/group/db_map.go b/internal/rpc/group/db_map.go index b4b503b950..26c9a46255 100644 --- a/internal/rpc/group/db_map.go +++ b/internal/rpc/group/db_map.go @@ -20,6 +20,7 @@ import ( pbgroup "github.com/openimsdk/protocol/group" "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/mcontext" ) @@ -54,6 +55,43 @@ func UpdateGroupInfoMap(ctx context.Context, group *sdkws.GroupInfoForSet) map[s return m } +func UpdateGroupInfoExMap(ctx context.Context, group *pbgroup.SetGroupInfoExReq) (map[string]any, error) { + m := make(map[string]any) + + if group.GroupName != nil { + if group.GroupName.Value != "" { + m["group_name"] = group.GroupName.Value + } else { + return nil, errs.ErrArgs.WrapMsg("group name is empty") + } + } + if 
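
In webhookBeforeSetGroupInfoEx above, the request is posted synchronously and the response fields are then merged back onto the request with datautil.NotNilReplace, the intent being that an external service can rewrite individual fields before they are persisted. A generic sketch of that replace-only-when-provided idea follows; the helper below is illustrative only, and the real datautil.NotNilReplace in openimsdk/tools may differ in signature and nil handling:

package main

import "fmt"

// notNilReplace is an illustrative helper: a non-nil override overwrites the
// destination, while a nil override leaves the original value untouched.
func notNilReplace[T any](dst *T, override *T) {
    if override == nil {
        return
    }
    *dst = *override
}

func main() {
    groupName := "original name"

    var noOverride *string // the webhook response left this field unset
    notNilReplace(&groupName, noOverride)
    fmt.Println(groupName) // original name

    rewritten := "name rewritten by webhook"
    notNilReplace(&groupName, &rewritten)
    fmt.Println(groupName) // name rewritten by webhook
}
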
group.Notification != nil { + m["notification"] = group.Notification.Value + m["notification_update_time"] = time.Now() + m["notification_user_id"] = mcontext.GetOpUserID(ctx) + } + if group.Introduction != nil { + m["introduction"] = group.Introduction.Value + } + if group.FaceURL != nil { + m["face_url"] = group.FaceURL.Value + } + if group.NeedVerification != nil { + m["need_verification"] = group.NeedVerification.Value + } + if group.LookMemberInfo != nil { + m["look_member_info"] = group.LookMemberInfo.Value + } + if group.ApplyMemberFriend != nil { + m["apply_member_friend"] = group.ApplyMemberFriend.Value + } + if group.Ex != nil { + m["ex"] = group.Ex.Value + } + + return m, nil +} + func UpdateGroupStatusMap(status int) map[string]any { return map[string]any{ "status": status, diff --git a/internal/rpc/group/group.go b/internal/rpc/group/group.go index aa12c9d0f1..80d1c9b2f8 100644 --- a/internal/rpc/group/group.go +++ b/internal/rpc/group/group.go @@ -105,13 +105,20 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg database := controller.NewGroupDatabase(rdb, &config.LocalCacheConfig, groupDB, groupMemberDB, groupRequestDB, mgocli.GetTx(), grouphash.NewGroupHashFromGroupServer(&gs)) gs.db = database gs.user = userRpcClient - gs.notification = NewGroupNotificationSender(database, &msgRpcClient, &userRpcClient, config, func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error) { - users, err := userRpcClient.GetUsersInfo(ctx, userIDs) - if err != nil { - return nil, err - } - return datautil.Slice(users, func(e *sdkws.UserInfo) notification.CommonUser { return e }), nil - }) + gs.notification = NewGroupNotificationSender( + database, + &msgRpcClient, + &userRpcClient, + &conversationRpcClient, + config, + func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error) { + users, err := userRpcClient.GetUsersInfo(ctx, userIDs) + if err != nil { + return nil, err + } + return datautil.Slice(users, func(e *sdkws.UserInfo) notification.CommonUser { return e }), nil + }, + ) localcache.InitLocalCache(&config.LocalCacheConfig) gs.conversationRpcClient = conversationRpcClient gs.msgRpcClient = msgRpcClient @@ -121,8 +128,8 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg return nil } -func (s *groupServer) NotificationUserInfoUpdate(ctx context.Context, req *pbgroup.NotificationUserInfoUpdateReq) (*pbgroup.NotificationUserInfoUpdateResp, error) { - members, err := s.db.FindGroupMemberUser(ctx, nil, req.UserID) +func (g *groupServer) NotificationUserInfoUpdate(ctx context.Context, req *pbgroup.NotificationUserInfoUpdateReq) (*pbgroup.NotificationUserInfoUpdateResp, error) { + members, err := g.db.FindGroupMemberUser(ctx, nil, req.UserID) if err != nil { return nil, err } @@ -134,22 +141,22 @@ func (s *groupServer) NotificationUserInfoUpdate(ctx context.Context, req *pbgro groupIDs = append(groupIDs, member.GroupID) } for _, groupID := range groupIDs { - if err := s.db.MemberGroupIncrVersion(ctx, groupID, []string{req.UserID}, model.VersionStateUpdate); err != nil { + if err := g.db.MemberGroupIncrVersion(ctx, groupID, []string{req.UserID}, model.VersionStateUpdate); err != nil { return nil, err } } for _, groupID := range groupIDs { - s.notification.GroupMemberInfoSetNotification(ctx, groupID, req.UserID) + g.notification.GroupMemberInfoSetNotification(ctx, groupID, req.UserID) } - if err = s.db.DeleteGroupMemberHash(ctx, groupIDs); err != nil { + if err = g.db.DeleteGroupMemberHash(ctx, 
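
UpdateGroupInfoExMap above turns only the fields the caller actually sent into column updates, rejects an explicitly empty group name instead of writing it, and stamps the notification update time and operator whenever a notification is set. A small self-contained sketch of that pattern, again with simplified stand-in types rather than the real protobuf request:

package main

import (
    "errors"
    "fmt"
    "time"
)

// optStr stands in for *wrapperspb.StringValue: nil means "field not sent",
// while a non-nil (possibly empty) value means the caller set the field.
type optStr struct{ Value string }

// buildGroupInfoExUpdate mirrors the shape of UpdateGroupInfoExMap: build a
// column-to-value map from the optional fields, rejecting an explicitly empty
// group name and stamping notification metadata alongside the notification.
func buildGroupInfoExUpdate(groupName, notification *optStr, opUserID string) (map[string]any, error) {
    m := make(map[string]any)
    if groupName != nil {
        if groupName.Value == "" {
            return nil, errors.New("group name is empty")
        }
        m["group_name"] = groupName.Value
    }
    if notification != nil {
        m["notification"] = notification.Value
        m["notification_update_time"] = time.Now()
        m["notification_user_id"] = opUserID
    }
    return m, nil
}

func main() {
    m, err := buildGroupInfoExUpdate(&optStr{Value: "ops-team"}, nil, "op-1001")
    fmt.Println(m, err) // map[group_name:ops-team] <nil>

    _, err = buildGroupInfoExUpdate(&optStr{}, nil, "op-1001")
    fmt.Println(err) // group name is empty
}
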
groupIDs); err != nil { return nil, err } return &pbgroup.NotificationUserInfoUpdateResp{}, nil } -func (s *groupServer) CheckGroupAdmin(ctx context.Context, groupID string) error { - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { - groupMember, err := s.db.TakeGroupMember(ctx, groupID, mcontext.GetOpUserID(ctx)) +func (g *groupServer) CheckGroupAdmin(ctx context.Context, groupID string) error { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + groupMember, err := g.db.TakeGroupMember(ctx, groupID, mcontext.GetOpUserID(ctx)) if err != nil { return err } @@ -160,11 +167,11 @@ func (s *groupServer) CheckGroupAdmin(ctx context.Context, groupID string) error return nil } -func (s *groupServer) GetPublicUserInfoMap(ctx context.Context, userIDs []string, complete bool) (map[string]*sdkws.PublicUserInfo, error) { +func (g *groupServer) GetPublicUserInfoMap(ctx context.Context, userIDs []string) (map[string]*sdkws.PublicUserInfo, error) { if len(userIDs) == 0 { return map[string]*sdkws.PublicUserInfo{}, nil } - users, err := s.user.GetPublicUserInfos(ctx, userIDs, complete) + users, err := g.user.GetPublicUserInfos(ctx, userIDs) if err != nil { return nil, err } @@ -173,16 +180,16 @@ func (s *groupServer) GetPublicUserInfoMap(ctx context.Context, userIDs []string }), nil } -func (s *groupServer) IsNotFound(err error) bool { +func (g *groupServer) IsNotFound(err error) bool { return errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) } -func (s *groupServer) GenGroupID(ctx context.Context, groupID *string) error { +func (g *groupServer) GenGroupID(ctx context.Context, groupID *string) error { if *groupID != "" { - _, err := s.db.TakeGroup(ctx, *groupID) + _, err := g.db.TakeGroup(ctx, *groupID) if err == nil { return servererrs.ErrGroupIDExisted.WrapMsg("group id existed " + *groupID) - } else if s.IsNotFound(err) { + } else if g.IsNotFound(err) { return nil } else { return err @@ -193,10 +200,10 @@ func (s *groupServer) GenGroupID(ctx context.Context, groupID *string) error { bi := big.NewInt(0) bi.SetString(id[0:8], 16) id = bi.String() - _, err := s.db.TakeGroup(ctx, id) + _, err := g.db.TakeGroup(ctx, id) if err == nil { continue - } else if s.IsNotFound(err) { + } else if g.IsNotFound(err) { *groupID = id return nil } else { @@ -206,14 +213,14 @@ func (s *groupServer) GenGroupID(ctx context.Context, groupID *string) error { return servererrs.ErrData.WrapMsg("group id gen error") } -func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupReq) (*pbgroup.CreateGroupResp, error) { +func (g *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupReq) (*pbgroup.CreateGroupResp, error) { if req.GroupInfo.GroupType != constant.WorkingGroup { return nil, errs.ErrArgs.WrapMsg(fmt.Sprintf("group type only supports %d", constant.WorkingGroup)) } if req.OwnerUserID == "" { return nil, errs.ErrArgs.WrapMsg("no group owner") } - if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, s.config.Share.IMAdminUserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, g.config.Share.IMAdminUserID); err != nil { return nil, err } @@ -227,7 +234,7 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR return nil, errs.ErrArgs.WrapMsg("group member repeated") } - userMap, err := s.user.GetUsersInfoMap(ctx, userIDs) + userMap, err := g.user.GetUsersInfoMap(ctx, userIDs) if err != nil { return nil, err } @@ -236,13 +243,13 @@ func (s *groupServer) CreateGroup(ctx 
context.Context, req *pbgroup.CreateGroupR return nil, servererrs.ErrUserIDNotFound.WrapMsg("user not found") } - if err := s.webhookBeforeCreateGroup(ctx, &s.config.WebhooksConfig.BeforeCreateGroup, req); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeCreateGroup(ctx, &g.config.WebhooksConfig.BeforeCreateGroup, req); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } var groupMembers []*model.GroupMember group := convert.Pb2DBGroupInfo(req.GroupInfo) - if err := s.GenGroupID(ctx, &group.GroupID); err != nil { + if err := g.GenGroupID(ctx, &group.GroupID); err != nil { return nil, err } @@ -271,11 +278,11 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR joinGroupFunc(userID, constant.GroupOrdinaryUsers) } - if err := s.webhookBeforeMembersJoinGroup(ctx, &s.config.WebhooksConfig.BeforeMemberJoinGroup, groupMembers, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeMembersJoinGroup(ctx, &g.config.WebhooksConfig.BeforeMemberJoinGroup, groupMembers, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - if err := s.db.CreateGroup(ctx, []*model.Group{group}, groupMembers); err != nil { + if err := g.db.CreateGroup(ctx, []*model.Group{group}, groupMembers); err != nil { return nil, err } resp := &pbgroup.CreateGroupResp{GroupInfo: &sdkws.GroupInfo{}} @@ -285,17 +292,24 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR tips := &sdkws.GroupCreatedTips{ Group: resp.GroupInfo, OperationTime: group.CreateTime.UnixMilli(), - GroupOwnerUser: s.groupMemberDB2PB(groupMembers[0], userMap[groupMembers[0].UserID].AppMangerLevel), + GroupOwnerUser: g.groupMemberDB2PB(groupMembers[0], userMap[groupMembers[0].UserID].AppMangerLevel), } for _, member := range groupMembers { member.Nickname = userMap[member.UserID].Nickname - tips.MemberList = append(tips.MemberList, s.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel)) + tips.MemberList = append(tips.MemberList, g.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel)) if member.UserID == opUserID { - tips.OpUser = s.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel) + tips.OpUser = g.groupMemberDB2PB(member, userMap[member.UserID].AppMangerLevel) break } } - s.notification.GroupCreatedNotification(ctx, tips) + g.notification.GroupCreatedNotification(ctx, tips) + + if req.GroupInfo.Notification != "" { + g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{ + Group: tips.Group, + OpUser: tips.OpUser, + }) + } reqCallBackAfter := &pbgroup.CreateGroupReq{ MemberUserIDs: userIDs, @@ -304,16 +318,16 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR AdminUserIDs: req.AdminUserIDs, } - s.webhookAfterCreateGroup(ctx, &s.config.WebhooksConfig.AfterCreateGroup, reqCallBackAfter) + g.webhookAfterCreateGroup(ctx, &g.config.WebhooksConfig.AfterCreateGroup, reqCallBackAfter) return resp, nil } -func (s *groupServer) GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJoinedGroupListReq) (*pbgroup.GetJoinedGroupListResp, error) { - if err := authverify.CheckAccessV3(ctx, req.FromUserID, s.config.Share.IMAdminUserID); err != nil { +func (g *groupServer) GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJoinedGroupListReq) (*pbgroup.GetJoinedGroupListResp, error) { + if err := authverify.CheckAccessV3(ctx, req.FromUserID, 
g.config.Share.IMAdminUserID); err != nil { return nil, err } - total, members, err := s.db.PageGetJoinGroup(ctx, req.FromUserID, req.Pagination) + total, members, err := g.db.PageGetJoinGroup(ctx, req.FromUserID, req.Pagination) if err != nil { return nil, err } @@ -325,19 +339,19 @@ func (s *groupServer) GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJo groupIDs := datautil.Slice(members, func(e *model.GroupMember) string { return e.GroupID }) - groups, err := s.db.FindGroup(ctx, groupIDs) + groups, err := g.db.FindGroup(ctx, groupIDs) if err != nil { return nil, err } - groupMemberNum, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNum, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { @@ -355,14 +369,14 @@ func (s *groupServer) GetJoinedGroupList(ctx context.Context, req *pbgroup.GetJo return &resp, nil } -func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.InviteUserToGroupReq) (*pbgroup.InviteUserToGroupResp, error) { +func (g *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.InviteUserToGroupReq) (*pbgroup.InviteUserToGroupResp, error) { if len(req.InvitedUserIDs) == 0 { return nil, errs.ErrArgs.WrapMsg("user empty") } if datautil.Duplicate(req.InvitedUserIDs) { return nil, errs.ErrArgs.WrapMsg("userID duplicate") } - group, err := s.db.TakeGroup(ctx, req.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } @@ -371,7 +385,7 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite return nil, servererrs.ErrDismissedAlready.WrapMsg("group dismissed checking group status found it dismissed") } - userMap, err := s.user.GetUsersInfoMap(ctx, req.InvitedUserIDs) + userMap, err := g.user.GetUsersInfoMap(ctx, req.InvitedUserIDs) if err != nil { return nil, err } @@ -382,24 +396,24 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite var groupMember *model.GroupMember var opUserID string - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { opUserID = mcontext.GetOpUserID(ctx) var err error - groupMember, err = s.db.TakeGroupMember(ctx, req.GroupID, opUserID) + groupMember, err = g.db.TakeGroupMember(ctx, req.GroupID, opUserID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, groupMember); err != nil { + if err := g.PopulateGroupMember(ctx, groupMember); err != nil { return nil, err } } - if err := s.webhookBeforeInviteUserToGroup(ctx, &s.config.WebhooksConfig.BeforeInviteUserToGroup, req); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeInviteUserToGroup(ctx, &g.config.WebhooksConfig.BeforeInviteUserToGroup, req); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } if group.NeedVerification == constant.AllNeedVerification { - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { if !(groupMember.RoleLevel == constant.GroupOwner || groupMember.RoleLevel == constant.GroupAdmin) { var requests []*model.GroupRequest 
for _, userID := range req.InvitedUserIDs { @@ -412,11 +426,11 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite HandledTime: time.Unix(0, 0), }) } - if err := s.db.CreateGroupRequest(ctx, requests); err != nil { + if err := g.db.CreateGroupRequest(ctx, requests); err != nil { return nil, err } for _, request := range requests { - s.notification.JoinGroupApplicationNotification(ctx, &pbgroup.JoinGroupReq{ + g.notification.JoinGroupApplicationNotification(ctx, &pbgroup.JoinGroupReq{ GroupID: request.GroupID, ReqMessage: request.ReqMsg, JoinSource: request.JoinSource, @@ -443,26 +457,26 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite groupMembers = append(groupMembers, member) } - if err := s.webhookBeforeMembersJoinGroup(ctx, &s.config.WebhooksConfig.BeforeMemberJoinGroup, groupMembers, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeMembersJoinGroup(ctx, &g.config.WebhooksConfig.BeforeMemberJoinGroup, groupMembers, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - if err := s.db.CreateGroup(ctx, nil, groupMembers); err != nil { + if err := g.db.CreateGroup(ctx, nil, groupMembers); err != nil { return nil, err } - if err := s.conversationRpcClient.GroupChatFirstCreateConversation(ctx, req.GroupID, req.InvitedUserIDs); err != nil { + + if err = g.notification.MemberEnterNotification(ctx, req.GroupID, req.InvitedUserIDs...); err != nil { return nil, err } - s.notification.MemberInvitedNotification(ctx, req.GroupID, req.Reason, req.InvitedUserIDs) return &pbgroup.InviteUserToGroupResp{}, nil } -func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGroupAllMemberReq) (*pbgroup.GetGroupAllMemberResp, error) { - members, err := s.db.FindGroupMemberAll(ctx, req.GroupID) +func (g *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGroupAllMemberReq) (*pbgroup.GetGroupAllMemberResp, error) { + members, err := g.db.FindGroupMemberAll(ctx, req.GroupID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } var resp pbgroup.GetGroupAllMemberResp @@ -472,21 +486,21 @@ func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGro return &resp, nil } -func (s *groupServer) GetGroupMemberList(ctx context.Context, req *pbgroup.GetGroupMemberListReq) (*pbgroup.GetGroupMemberListResp, error) { +func (g *groupServer) GetGroupMemberList(ctx context.Context, req *pbgroup.GetGroupMemberListReq) (*pbgroup.GetGroupMemberListResp, error) { var ( total int64 members []*model.GroupMember err error ) if req.Keyword == "" { - total, members, err = s.db.PageGetGroupMember(ctx, req.GroupID, req.Pagination) + total, members, err = g.db.PageGetGroupMember(ctx, req.GroupID, req.Pagination) } else { - members, err = s.db.FindGroupMemberAll(ctx, req.GroupID) + members, err = g.db.FindGroupMemberAll(ctx, req.GroupID) } if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } if req.Keyword != "" { @@ -516,8 +530,8 @@ func (s *groupServer) GetGroupMemberList(ctx context.Context, req *pbgroup.GetGr }, nil } -func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGroupMemberReq) (*pbgroup.KickGroupMemberResp, error) { - group, 
err := s.db.TakeGroup(ctx, req.GroupID) +func (g *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGroupMemberReq) (*pbgroup.KickGroupMemberResp, error) { + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } @@ -531,7 +545,7 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou if datautil.Contain(opUserID, req.KickedUserIDs...) { return nil, errs.ErrArgs.WrapMsg("opUserID in KickedUserIDs") } - owner, err := s.db.TakeGroupOwner(ctx, req.GroupID) + owner, err := g.db.TakeGroupOwner(ctx, req.GroupID) if err != nil { return nil, err } @@ -539,18 +553,18 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou return nil, errs.ErrArgs.WrapMsg("ownerUID can not Kick") } - members, err := s.db.FindGroupMembers(ctx, req.GroupID, append(req.KickedUserIDs, opUserID)) + members, err := g.db.FindGroupMembers(ctx, req.GroupID, append(req.KickedUserIDs, opUserID)) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } memberMap := make(map[string]*model.GroupMember) for i, member := range members { memberMap[member.UserID] = members[i] } - isAppManagerUid := authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) + isAppManagerUid := authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) opMember := memberMap[opUserID] for _, userID := range req.KickedUserIDs { member, ok := memberMap[userID] @@ -574,11 +588,11 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou } } } - num, err := s.db.FindGroupMemberNum(ctx, req.GroupID) + num, err := g.db.FindGroupMemberNum(ctx, req.GroupID) if err != nil { return nil, err } - ownerUserIDs, err := s.db.GetGroupRoleLevelMemberIDs(ctx, req.GroupID, constant.GroupOwner) + ownerUserIDs, err := g.db.GetGroupRoleLevelMemberIDs(ctx, req.GroupID, constant.GroupOwner) if err != nil { return nil, err } @@ -586,7 +600,7 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou if len(ownerUserIDs) > 0 { ownerUserID = ownerUserIDs[0] } - if err := s.db.DeleteGroupMember(ctx, group.GroupID, req.KickedUserIDs); err != nil { + if err := g.db.DeleteGroupMember(ctx, group.GroupID, req.KickedUserIDs); err != nil { return nil, err } tips := &sdkws.MemberKickedTips{ @@ -617,23 +631,23 @@ func (s *groupServer) KickGroupMember(ctx context.Context, req *pbgroup.KickGrou for _, userID := range req.KickedUserIDs { tips.KickedUserList = append(tips.KickedUserList, convert.Db2PbGroupMember(memberMap[userID])) } - s.notification.MemberKickedNotification(ctx, tips) - if err := s.deleteMemberAndSetConversationSeq(ctx, req.GroupID, req.KickedUserIDs); err != nil { + g.notification.MemberKickedNotification(ctx, tips) + if err := g.deleteMemberAndSetConversationSeq(ctx, req.GroupID, req.KickedUserIDs); err != nil { return nil, err } - s.webhookAfterKickGroupMember(ctx, &s.config.WebhooksConfig.AfterKickGroupMember, req) + g.webhookAfterKickGroupMember(ctx, &g.config.WebhooksConfig.AfterKickGroupMember, req) return &pbgroup.KickGroupMemberResp{}, nil } -func (s *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetGroupMembersInfoReq) (*pbgroup.GetGroupMembersInfoResp, error) { +func (g *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetGroupMembersInfoReq) (*pbgroup.GetGroupMembersInfoResp, error) { if len(req.UserIDs) == 0 { return nil, 
errs.ErrArgs.WrapMsg("userIDs empty") } if req.GroupID == "" { return nil, errs.ErrArgs.WrapMsg("groupID empty") } - members, err := s.getGroupMembersInfo(ctx, req.GroupID, req.UserIDs) + members, err := g.getGroupMembersInfo(ctx, req.GroupID, req.UserIDs) if err != nil { return nil, err } @@ -642,15 +656,15 @@ func (s *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetG }, nil } -func (s *groupServer) getGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*sdkws.GroupMemberFullInfo, error) { +func (g *groupServer) getGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*sdkws.GroupMemberFullInfo, error) { if len(userIDs) == 0 { return nil, nil } - members, err := s.db.FindGroupMembers(ctx, groupID, userIDs) + members, err := g.db.FindGroupMembers(ctx, groupID, userIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } return datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo { @@ -659,8 +673,8 @@ func (s *groupServer) getGroupMembersInfo(ctx context.Context, groupID string, u } // GetGroupApplicationList handles functions that get a list of group requests. -func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup.GetGroupApplicationListReq) (*pbgroup.GetGroupApplicationListResp, error) { - groupIDs, err := s.db.FindUserManagedGroupID(ctx, req.FromUserID) +func (g *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup.GetGroupApplicationListReq) (*pbgroup.GetGroupApplicationListResp, error) { + groupIDs, err := g.db.FindUserManagedGroupID(ctx, req.FromUserID) if err != nil { return nil, err } @@ -668,7 +682,7 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup. if len(groupIDs) == 0 { return resp, nil } - total, groupRequests, err := s.db.PageGroupRequest(ctx, groupIDs, req.Pagination) + total, groupRequests, err := g.db.PageGroupRequest(ctx, groupIDs, req.Pagination) if err != nil { return nil, err } @@ -682,11 +696,11 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup. userIDs = append(userIDs, gr.UserID) } userIDs = datautil.Distinct(userIDs) - userMap, err := s.user.GetPublicUserInfoMap(ctx, userIDs, true) + userMap, err := g.user.GetPublicUserInfoMap(ctx, userIDs) if err != nil { return nil, err } - groups, err := s.db.FindGroup(ctx, datautil.Distinct(groupIDs)) + groups, err := g.db.FindGroup(ctx, datautil.Distinct(groupIDs)) if err != nil { return nil, err } @@ -696,15 +710,15 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup. if ids := datautil.Single(datautil.Keys(groupMap), groupIDs); len(ids) > 0 { return nil, servererrs.ErrGroupIDNotFound.WrapMsg(strings.Join(ids, ",")) } - groupMemberNumMap, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNumMap, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owners...); err != nil { + if err := g.PopulateGroupMember(ctx, owners...); err != nil { return nil, err } ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { @@ -720,11 +734,11 @@ func (s *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup. 
return resp, nil } -func (s *groupServer) GetGroupsInfo(ctx context.Context, req *pbgroup.GetGroupsInfoReq) (*pbgroup.GetGroupsInfoResp, error) { +func (g *groupServer) GetGroupsInfo(ctx context.Context, req *pbgroup.GetGroupsInfoReq) (*pbgroup.GetGroupsInfoResp, error) { if len(req.GroupIDs) == 0 { return nil, errs.ErrArgs.WrapMsg("groupID is empty") } - groups, err := s.getGroupsInfo(ctx, req.GroupIDs) + groups, err := g.getGroupsInfo(ctx, req.GroupIDs) if err != nil { return nil, err } @@ -733,23 +747,23 @@ func (s *groupServer) GetGroupsInfo(ctx context.Context, req *pbgroup.GetGroupsI }, nil } -func (s *groupServer) getGroupsInfo(ctx context.Context, groupIDs []string) ([]*sdkws.GroupInfo, error) { +func (g *groupServer) getGroupsInfo(ctx context.Context, groupIDs []string) ([]*sdkws.GroupInfo, error) { if len(groupIDs) == 0 { return nil, nil } - groups, err := s.db.FindGroup(ctx, groupIDs) + groups, err := g.db.FindGroup(ctx, groupIDs) if err != nil { return nil, err } - groupMemberNumMap, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNumMap, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owners...); err != nil { + if err := g.PopulateGroupMember(ctx, owners...); err != nil { return nil, err } ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { @@ -764,12 +778,12 @@ func (s *groupServer) getGroupsInfo(ctx context.Context, groupIDs []string) ([]* }), nil } -func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup.GroupApplicationResponseReq) (*pbgroup.GroupApplicationResponseResp, error) { +func (g *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup.GroupApplicationResponseReq) (*pbgroup.GroupApplicationResponseResp, error) { if !datautil.Contain(req.HandleResult, constant.GroupResponseAgree, constant.GroupResponseRefuse) { return nil, errs.ErrArgs.WrapMsg("HandleResult unknown") } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { - groupMember, err := s.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + groupMember, err := g.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) if err != nil { return nil, err } @@ -777,11 +791,11 @@ func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup return nil, errs.ErrNoPermission.WrapMsg("no group owner or admin") } } - group, err := s.db.TakeGroup(ctx, req.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } - groupRequest, err := s.db.TakeGroupRequest(ctx, req.GroupID, req.FromUserID) + groupRequest, err := g.db.TakeGroupRequest(ctx, req.GroupID, req.FromUserID) if err != nil { return nil, err } @@ -789,12 +803,12 @@ func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup return nil, servererrs.ErrGroupRequestHandled.WrapMsg("group request already processed") } var inGroup bool - if _, err := s.db.TakeGroupMember(ctx, req.GroupID, req.FromUserID); err == nil { + if _, err := g.db.TakeGroupMember(ctx, req.GroupID, req.FromUserID); err == nil { inGroup = true // Already in group - } else if !s.IsNotFound(err) { + } else if !g.IsNotFound(err) { return nil, err } - if _, err := s.user.GetPublicUserInfo(ctx, req.FromUserID); err != nil { + if _, err := 
g.user.GetPublicUserInfo(ctx, req.FromUserID); err != nil { return nil, err } var member *model.GroupMember @@ -812,38 +826,37 @@ func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup OperatorUserID: mcontext.GetOpUserID(ctx), } - if err := s.webhookBeforeMembersJoinGroup(ctx, &s.config.WebhooksConfig.BeforeMemberJoinGroup, []*model.GroupMember{member}, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeMembersJoinGroup(ctx, &g.config.WebhooksConfig.BeforeMemberJoinGroup, []*model.GroupMember{member}, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } } log.ZDebug(ctx, "GroupApplicationResponse", "inGroup", inGroup, "HandleResult", req.HandleResult, "member", member) - if err := s.db.HandlerGroupRequest(ctx, req.GroupID, req.FromUserID, req.HandledMsg, req.HandleResult, member); err != nil { + if err := g.db.HandlerGroupRequest(ctx, req.GroupID, req.FromUserID, req.HandledMsg, req.HandleResult, member); err != nil { return nil, err } switch req.HandleResult { case constant.GroupResponseAgree: - if err := s.conversationRpcClient.GroupChatFirstCreateConversation(ctx, req.GroupID, []string{req.FromUserID}); err != nil { - return nil, err - } - s.notification.GroupApplicationAcceptedNotification(ctx, req) + g.notification.GroupApplicationAcceptedNotification(ctx, req) if member == nil { log.ZDebug(ctx, "GroupApplicationResponse", "member is nil") } else { - s.notification.MemberEnterNotification(ctx, req.GroupID, req.FromUserID) + if err = g.notification.GroupApplicationAgreeMemberEnterNotification(ctx, req.GroupID, groupRequest.InviterUserID, req.FromUserID); err != nil { + return nil, err + } } case constant.GroupResponseRefuse: - s.notification.GroupApplicationRejectedNotification(ctx, req) + g.notification.GroupApplicationRejectedNotification(ctx, req) } return &pbgroup.GroupApplicationResponseResp{}, nil } -func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) (*pbgroup.JoinGroupResp, error) { - user, err := s.user.GetUserInfo(ctx, req.InviterUserID) +func (g *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) (*pbgroup.JoinGroupResp, error) { + user, err := g.user.GetUserInfo(ctx, req.InviterUserID) if err != nil { return nil, err } - group, err := s.db.TakeGroup(ctx, req.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } @@ -859,14 +872,14 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) Ex: req.Ex, } - if err := s.webhookBeforeApplyJoinGroup(ctx, &s.config.WebhooksConfig.BeforeApplyJoinGroup, reqCall); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeApplyJoinGroup(ctx, &g.config.WebhooksConfig.BeforeApplyJoinGroup, reqCall); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - _, err = s.db.TakeGroupMember(ctx, req.GroupID, req.InviterUserID) + _, err = g.db.TakeGroupMember(ctx, req.GroupID, req.InviterUserID) if err == nil { return nil, errs.ErrArgs.Wrap() - } else if !s.IsNotFound(err) && errs.Unwrap(err) != errs.ErrRecordNotFound { + } else if !g.IsNotFound(err) && errs.Unwrap(err) != errs.ErrRecordNotFound { return nil, err } log.ZDebug(ctx, "JoinGroup.groupInfo", "group", group, "eq", group.NeedVerification == constant.Directly) @@ -881,19 +894,18 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) MuteEndTime: time.UnixMilli(0), } - if err := 
s.webhookBeforeMembersJoinGroup(ctx, &s.config.WebhooksConfig.BeforeMemberJoinGroup, []*model.GroupMember{groupMember}, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeMembersJoinGroup(ctx, &g.config.WebhooksConfig.BeforeMemberJoinGroup, []*model.GroupMember{groupMember}, group.GroupID, group.Ex); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - if err := s.db.CreateGroup(ctx, nil, []*model.GroupMember{groupMember}); err != nil { + if err := g.db.CreateGroup(ctx, nil, []*model.GroupMember{groupMember}); err != nil { return nil, err } - if err := s.conversationRpcClient.GroupChatFirstCreateConversation(ctx, req.GroupID, []string{req.InviterUserID}); err != nil { + if err = g.notification.MemberEnterNotification(ctx, req.GroupID, req.InviterUserID); err != nil { return nil, err } - s.notification.MemberEnterNotification(ctx, req.GroupID, req.InviterUserID) - s.webhookAfterJoinGroup(ctx, &s.config.WebhooksConfig.AfterJoinGroup, req) + g.webhookAfterJoinGroup(ctx, &g.config.WebhooksConfig.AfterJoinGroup, req) return &pbgroup.JoinGroupResp{}, nil } @@ -907,74 +919,74 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) HandledTime: time.Unix(0, 0), Ex: req.Ex, } - if err = s.db.CreateGroupRequest(ctx, []*model.GroupRequest{&groupRequest}); err != nil { + if err = g.db.CreateGroupRequest(ctx, []*model.GroupRequest{&groupRequest}); err != nil { return nil, err } - s.notification.JoinGroupApplicationNotification(ctx, req) + g.notification.JoinGroupApplicationNotification(ctx, req) return &pbgroup.JoinGroupResp{}, nil } -func (s *groupServer) QuitGroup(ctx context.Context, req *pbgroup.QuitGroupReq) (*pbgroup.QuitGroupResp, error) { +func (g *groupServer) QuitGroup(ctx context.Context, req *pbgroup.QuitGroupReq) (*pbgroup.QuitGroupResp, error) { if req.UserID == "" { req.UserID = mcontext.GetOpUserID(ctx) } else { - if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.UserID, g.config.Share.IMAdminUserID); err != nil { return nil, err } } - member, err := s.db.TakeGroupMember(ctx, req.GroupID, req.UserID) + member, err := g.db.TakeGroupMember(ctx, req.GroupID, req.UserID) if err != nil { return nil, err } if member.RoleLevel == constant.GroupOwner { return nil, errs.ErrNoPermission.WrapMsg("group owner can't quit") } - if err := s.PopulateGroupMember(ctx, member); err != nil { + if err := g.PopulateGroupMember(ctx, member); err != nil { return nil, err } - err = s.db.DeleteGroupMember(ctx, req.GroupID, []string{req.UserID}) + err = g.db.DeleteGroupMember(ctx, req.GroupID, []string{req.UserID}) if err != nil { return nil, err } - s.notification.MemberQuitNotification(ctx, s.groupMemberDB2PB(member, 0)) - if err := s.deleteMemberAndSetConversationSeq(ctx, req.GroupID, []string{req.UserID}); err != nil { + g.notification.MemberQuitNotification(ctx, g.groupMemberDB2PB(member, 0)) + if err := g.deleteMemberAndSetConversationSeq(ctx, req.GroupID, []string{req.UserID}); err != nil { return nil, err } - s.webhookAfterQuitGroup(ctx, &s.config.WebhooksConfig.AfterQuitGroup, req) + g.webhookAfterQuitGroup(ctx, &g.config.WebhooksConfig.AfterQuitGroup, req) return &pbgroup.QuitGroupResp{}, nil } -func (s *groupServer) deleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { +func (g *groupServer) deleteMemberAndSetConversationSeq(ctx context.Context, groupID string, 
userIDs []string) error { conevrsationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) - maxSeq, err := s.msgRpcClient.GetConversationMaxSeq(ctx, conevrsationID) + maxSeq, err := g.msgRpcClient.GetConversationMaxSeq(ctx, conevrsationID) if err != nil { return err } - return s.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conevrsationID, maxSeq) + return g.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conevrsationID, maxSeq) } -func (s *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInfoReq) (*pbgroup.SetGroupInfoResp, error) { +func (g *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInfoReq) (*pbgroup.SetGroupInfoResp, error) { var opMember *model.GroupMember - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { var err error - opMember, err = s.db.TakeGroupMember(ctx, req.GroupInfoForSet.GroupID, mcontext.GetOpUserID(ctx)) + opMember, err = g.db.TakeGroupMember(ctx, req.GroupInfoForSet.GroupID, mcontext.GetOpUserID(ctx)) if err != nil { return nil, err } if !(opMember.RoleLevel == constant.GroupOwner || opMember.RoleLevel == constant.GroupAdmin) { return nil, errs.ErrNoPermission.WrapMsg("no group owner or admin") } - if err := s.PopulateGroupMember(ctx, opMember); err != nil { + if err := g.PopulateGroupMember(ctx, opMember); err != nil { return nil, err } } - if err := s.webhookBeforeSetGroupInfo(ctx, &s.config.WebhooksConfig.BeforeSetGroupInfo, req); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeSetGroupInfo(ctx, &g.config.WebhooksConfig.BeforeSetGroupInfo, req); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - group, err := s.db.TakeGroup(ctx, req.GroupInfoForSet.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupInfoForSet.GroupID) if err != nil { return nil, err } @@ -982,35 +994,35 @@ func (s *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInf return nil, servererrs.ErrDismissedAlready.Wrap() } - count, err := s.db.FindGroupMemberNum(ctx, group.GroupID) + count, err := g.db.FindGroupMemberNum(ctx, group.GroupID) if err != nil { return nil, err } - owner, err := s.db.TakeGroupOwner(ctx, group.GroupID) + owner, err := g.db.TakeGroupOwner(ctx, group.GroupID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owner); err != nil { + if err := g.PopulateGroupMember(ctx, owner); err != nil { return nil, err } update := UpdateGroupInfoMap(ctx, req.GroupInfoForSet) if len(update) == 0 { return &pbgroup.SetGroupInfoResp{}, nil } - if err := s.db.UpdateGroup(ctx, group.GroupID, update); err != nil { + if err := g.db.UpdateGroup(ctx, group.GroupID, update); err != nil { return nil, err } - group, err = s.db.TakeGroup(ctx, req.GroupInfoForSet.GroupID) + group, err = g.db.TakeGroup(ctx, req.GroupInfoForSet.GroupID) if err != nil { return nil, err } tips := &sdkws.GroupInfoSetTips{ - Group: s.groupDB2PB(group, owner.UserID, count), + Group: g.groupDB2PB(group, owner.UserID, count), MuteTime: 0, OpUser: &sdkws.GroupMemberFullInfo{}, } if opMember != nil { - tips.OpUser = s.groupMemberDB2PB(opMember, 0) + tips.OpUser = g.groupMemberDB2PB(opMember, 0) } num := len(update) if req.GroupInfoForSet.Notification != "" { @@ -1021,33 +1033,150 @@ func (s *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInf ConversationType: constant.ReadGroupChatType, GroupID: req.GroupInfoForSet.GroupID, } - 
resp, err := s.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupInfoForSet.GroupID}) + resp, err := g.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupInfoForSet.GroupID}) if err != nil { - log.ZWarn(ctx, "GetGroupMemberIDs", err) + log.ZWarn(ctx, "GetGroupMemberIDs is failed.", err) return } conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification} - if err := s.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { - log.ZWarn(ctx, "SetConversations", err, resp.UserIDs, conversation) + if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { + log.ZWarn(ctx, "SetConversations", err, "UserIDs", resp.UserIDs, "conversation", conversation) } }() - s.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser}) + g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser}) } if req.GroupInfoForSet.GroupName != "" { num-- - s.notification.GroupInfoSetNameNotification(ctx, &sdkws.GroupInfoSetNameTips{Group: tips.Group, OpUser: tips.OpUser}) + g.notification.GroupInfoSetNameNotification(ctx, &sdkws.GroupInfoSetNameTips{Group: tips.Group, OpUser: tips.OpUser}) } if num > 0 { - s.notification.GroupInfoSetNotification(ctx, tips) + g.notification.GroupInfoSetNotification(ctx, tips) } - s.webhookAfterSetGroupInfo(ctx, &s.config.WebhooksConfig.AfterSetGroupInfo, req) + g.webhookAfterSetGroupInfo(ctx, &g.config.WebhooksConfig.AfterSetGroupInfo, req) return &pbgroup.SetGroupInfoResp{}, nil } -func (s *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.TransferGroupOwnerReq) (*pbgroup.TransferGroupOwnerResp, error) { - group, err := s.db.TakeGroup(ctx, req.GroupID) +func (g *groupServer) SetGroupInfoEx(ctx context.Context, req *pbgroup.SetGroupInfoExReq) (*pbgroup.SetGroupInfoExResp, error) { + var opMember *model.GroupMember + + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + var err error + + opMember, err = g.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) + if err != nil { + return nil, err + } + + if !(opMember.RoleLevel == constant.GroupOwner || opMember.RoleLevel == constant.GroupAdmin) { + return nil, errs.ErrNoPermission.WrapMsg("no group owner or admin") + } + + if err := g.PopulateGroupMember(ctx, opMember); err != nil { + return nil, err + } + } + + if err := g.webhookBeforeSetGroupInfoEx(ctx, &g.config.WebhooksConfig.BeforeSetGroupInfoEx, req); err != nil && err != servererrs.ErrCallbackContinue { + return nil, err + } + + group, err := g.db.TakeGroup(ctx, req.GroupID) + if err != nil { + return nil, err + } + if group.Status == constant.GroupStatusDismissed { + return nil, servererrs.ErrDismissedAlready.Wrap() + } + + count, err := g.db.FindGroupMemberNum(ctx, group.GroupID) + if err != nil { + return nil, err + } + + owner, err := g.db.TakeGroupOwner(ctx, group.GroupID) + if err != nil { + return nil, err + } + + if err := g.PopulateGroupMember(ctx, owner); err != nil { + return nil, err + } + + updatedData, err := UpdateGroupInfoExMap(ctx, req) + if len(updatedData) == 0 { + return &pbgroup.SetGroupInfoExResp{}, nil + } + + if err != nil { + return nil, err + } + + if err := g.db.UpdateGroup(ctx, group.GroupID, updatedData); err != nil { + return nil, err + } + + group, err = g.db.TakeGroup(ctx, req.GroupID) + if err != nil { + return nil, 
err + } + + tips := &sdkws.GroupInfoSetTips{ + Group: g.groupDB2PB(group, owner.UserID, count), + MuteTime: 0, + OpUser: &sdkws.GroupMemberFullInfo{}, + } + + if opMember != nil { + tips.OpUser = g.groupMemberDB2PB(opMember, 0) + } + + num := len(updatedData) + if req.Notification != nil { + num-- + + if req.Notification.Value != "" { + func() { + conversation := &pbconversation.ConversationReq{ + ConversationID: msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, req.GroupID), + ConversationType: constant.ReadGroupChatType, + GroupID: req.GroupID, + } + + resp, err := g.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupID}) + if err != nil { + log.ZWarn(ctx, "GetGroupMemberIDs is failed.", err) + return + } + + conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification} + + if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { + log.ZWarn(ctx, "SetConversations", err, "UserIDs", resp.UserIDs, "conversation", conversation) + } + }() + + g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser}) + } + } + + if req.GroupName != nil { + num-- + g.notification.GroupInfoSetNameNotification(ctx, &sdkws.GroupInfoSetNameTips{Group: tips.Group, OpUser: tips.OpUser}) + } + + if num > 0 { + g.notification.GroupInfoSetNotification(ctx, tips) + } + + g.webhookAfterSetGroupInfoEx(ctx, &g.config.WebhooksConfig.AfterSetGroupInfoEx, req) + + return &pbgroup.SetGroupInfoExResp{}, nil +} + +func (g *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.TransferGroupOwnerReq) (*pbgroup.TransferGroupOwnerResp, error) { + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } @@ -1057,11 +1186,11 @@ func (s *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.Trans if req.OldOwnerUserID == req.NewOwnerUserID { return nil, errs.ErrArgs.WrapMsg("OldOwnerUserID == NewOwnerUserID") } - members, err := s.db.FindGroupMembers(ctx, req.GroupID, []string{req.OldOwnerUserID, req.NewOwnerUserID}) + members, err := g.db.FindGroupMembers(ctx, req.GroupID, []string{req.OldOwnerUserID, req.NewOwnerUserID}) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } memberMap := datautil.SliceToMap(members, func(e *model.GroupMember) string { return e.UserID }) @@ -1076,33 +1205,33 @@ func (s *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.Trans if newOwner == nil { return nil, errs.ErrArgs.WrapMsg("NewOwnerUser not in group " + req.NewOwnerUserID) } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { if !(mcontext.GetOpUserID(ctx) == oldOwner.UserID && oldOwner.RoleLevel == constant.GroupOwner) { return nil, errs.ErrNoPermission.WrapMsg("no permission transfer group owner") } } - if err := s.db.TransferGroupOwner(ctx, req.GroupID, req.OldOwnerUserID, req.NewOwnerUserID, newOwner.RoleLevel); err != nil { + if err := g.db.TransferGroupOwner(ctx, req.GroupID, req.OldOwnerUserID, req.NewOwnerUserID, newOwner.RoleLevel); err != nil { return nil, err } - s.webhookAfterTransferGroupOwner(ctx, &s.config.WebhooksConfig.AfterTransferGroupOwner, req) + g.webhookAfterTransferGroupOwner(ctx, &g.config.WebhooksConfig.AfterTransferGroupOwner, req) - 
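
Because every field of the new SetGroupInfoEx request is an optional wrapper, a nil field is left untouched, a non-empty Notification updates the announcement and triggers the announcement notification, and an empty Notification clears the stored announcement without announcing it. A hedged caller-side sketch, assuming the SetGroupInfoExReq message from github.com/openimsdk/protocol/group and the standard wrapperspb helpers:

package main

import (
    pbgroup "github.com/openimsdk/protocol/group"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

// buildSetGroupInfoExReqs shows three typical partial updates; any field left
// nil is not written to the database at all. (This assumes the generated
// SetGroupInfoExReq uses wrapperspb optional fields, as the handler above
// suggests.)
func buildSetGroupInfoExReqs(groupID string) []*pbgroup.SetGroupInfoExReq {
    return []*pbgroup.SetGroupInfoExReq{
        // rename only; announcement, introduction, and the rest stay unchanged
        {GroupID: groupID, GroupName: wrapperspb.String("ops-team")},
        // publish a new announcement (also stamps update time and operator)
        {GroupID: groupID, Notification: wrapperspb.String("release at 18:00 UTC")},
        // clear the announcement without sending an announcement notification
        {GroupID: groupID, Notification: wrapperspb.String("")},
    }
}

func main() {
    _ = buildSetGroupInfoExReqs("g-10001")
}

On the server side, the handler subtracts the GroupName and Notification entries from its update count and only sends the generic GroupInfoSetNotification when other fields remain.
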
s.notification.GroupOwnerTransferredNotification(ctx, req) + g.notification.GroupOwnerTransferredNotification(ctx, req) return &pbgroup.TransferGroupOwnerResp{}, nil } -func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) (*pbgroup.GetGroupsResp, error) { +func (g *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) (*pbgroup.GetGroupsResp, error) { var ( group []*model.Group err error ) var resp pbgroup.GetGroupsResp if req.GroupID != "" { - group, err = s.db.FindGroup(ctx, []string{req.GroupID}) + group, err = g.db.FindGroup(ctx, []string{req.GroupID}) resp.Total = uint32(len(group)) } else { var total int64 - total, group, err = s.db.SearchGroup(ctx, req.GroupName, req.Pagination) + total, group, err = g.db.SearchGroup(ctx, req.GroupName, req.Pagination) resp.Total = uint32(total) } @@ -1114,7 +1243,7 @@ func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) return e.GroupID }) - ownerMembers, err := s.db.FindGroupsOwner(ctx, groupIDs) + ownerMembers, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } @@ -1122,7 +1251,7 @@ func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) ownerMemberMap := datautil.SliceToMap(ownerMembers, func(e *model.GroupMember) string { return e.GroupID }) - groupMemberNumMap, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNumMap, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } @@ -1140,14 +1269,14 @@ func (s *groupServer) GetGroups(ctx context.Context, req *pbgroup.GetGroupsReq) return &resp, nil } -func (s *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGroupMembersCMSReq) (*pbgroup.GetGroupMembersCMSResp, error) { - total, members, err := s.db.SearchGroupMember(ctx, req.UserName, req.GroupID, req.Pagination) +func (g *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGroupMembersCMSReq) (*pbgroup.GetGroupMembersCMSResp, error) { + total, members, err := g.db.SearchGroupMember(ctx, req.UserName, req.GroupID, req.Pagination) if err != nil { return nil, err } var resp pbgroup.GetGroupMembersCMSResp resp.Total = uint32(total) - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } resp.Members = datautil.Slice(members, func(e *model.GroupMember) *sdkws.GroupMemberFullInfo { @@ -1156,12 +1285,12 @@ func (s *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGr return &resp, nil } -func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgroup.GetUserReqApplicationListReq) (*pbgroup.GetUserReqApplicationListResp, error) { - user, err := s.user.GetPublicUserInfo(ctx, req.UserID) +func (g *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgroup.GetUserReqApplicationListReq) (*pbgroup.GetUserReqApplicationListResp, error) { + user, err := g.user.GetPublicUserInfo(ctx, req.UserID) if err != nil { return nil, err } - total, requests, err := s.db.PageGroupRequestUser(ctx, req.UserID, req.Pagination) + total, requests, err := g.db.PageGroupRequestUser(ctx, req.UserID, req.Pagination) if err != nil { return nil, err } @@ -1171,24 +1300,24 @@ func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgrou groupIDs := datautil.Distinct(datautil.Slice(requests, func(e *model.GroupRequest) string { return e.GroupID })) - groups, err := s.db.FindGroup(ctx, groupIDs) + groups, err := g.db.FindGroup(ctx, groupIDs) if 
err != nil { return nil, err } groupMap := datautil.SliceToMap(groups, func(e *model.Group) string { return e.GroupID }) - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owners...); err != nil { + if err := g.PopulateGroupMember(ctx, owners...); err != nil { return nil, err } ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { return e.GroupID }) - groupMemberNum, err := s.db.MapGroupMemberNum(ctx, groupIDs) + groupMemberNum, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } @@ -1204,44 +1333,44 @@ func (s *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgrou }, nil } -func (s *groupServer) DismissGroup(ctx context.Context, req *pbgroup.DismissGroupReq) (*pbgroup.DismissGroupResp, error) { - owner, err := s.db.TakeGroupOwner(ctx, req.GroupID) +func (g *groupServer) DismissGroup(ctx context.Context, req *pbgroup.DismissGroupReq) (*pbgroup.DismissGroupResp, error) { + owner, err := g.db.TakeGroupOwner(ctx, req.GroupID) if err != nil { return nil, err } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { if owner.UserID != mcontext.GetOpUserID(ctx) { return nil, errs.ErrNoPermission.WrapMsg("not group owner") } } - if err := s.PopulateGroupMember(ctx, owner); err != nil { + if err := g.PopulateGroupMember(ctx, owner); err != nil { return nil, err } - group, err := s.db.TakeGroup(ctx, req.GroupID) + group, err := g.db.TakeGroup(ctx, req.GroupID) if err != nil { return nil, err } if !req.DeleteMember && group.Status == constant.GroupStatusDismissed { return nil, servererrs.ErrDismissedAlready.WrapMsg("group status is dismissed") } - if err := s.db.DismissGroup(ctx, req.GroupID, req.DeleteMember); err != nil { + if err := g.db.DismissGroup(ctx, req.GroupID, req.DeleteMember); err != nil { return nil, err } if !req.DeleteMember { - num, err := s.db.FindGroupMemberNum(ctx, req.GroupID) + num, err := g.db.FindGroupMemberNum(ctx, req.GroupID) if err != nil { return nil, err } tips := &sdkws.GroupDismissedTips{ - Group: s.groupDB2PB(group, owner.UserID, num), + Group: g.groupDB2PB(group, owner.UserID, num), OpUser: &sdkws.GroupMemberFullInfo{}, } if mcontext.GetOpUserID(ctx) == owner.UserID { - tips.OpUser = s.groupMemberDB2PB(owner, 0) + tips.OpUser = g.groupMemberDB2PB(owner, 0) } - s.notification.GroupDismissedNotification(ctx, tips) + g.notification.GroupDismissedNotification(ctx, tips) } - membersID, err := s.db.FindGroupMemberUserID(ctx, group.GroupID) + membersID, err := g.db.FindGroupMemberUserID(ctx, group.GroupID) if err != nil { return nil, err } @@ -1252,21 +1381,21 @@ func (s *groupServer) DismissGroup(ctx context.Context, req *pbgroup.DismissGrou GroupType: string(group.GroupType), } - s.webhookAfterDismissGroup(ctx, &s.config.WebhooksConfig.AfterDismissGroup, cbReq) + g.webhookAfterDismissGroup(ctx, &g.config.WebhooksConfig.AfterDismissGroup, cbReq) return &pbgroup.DismissGroupResp{}, nil } -func (s *groupServer) MuteGroupMember(ctx context.Context, req *pbgroup.MuteGroupMemberReq) (*pbgroup.MuteGroupMemberResp, error) { - member, err := s.db.TakeGroupMember(ctx, req.GroupID, req.UserID) +func (g *groupServer) MuteGroupMember(ctx context.Context, req *pbgroup.MuteGroupMemberReq) (*pbgroup.MuteGroupMemberResp, error) { + member, err := g.db.TakeGroupMember(ctx, req.GroupID, req.UserID) if err != nil { 
return nil, err } - if err := s.PopulateGroupMember(ctx, member); err != nil { + if err := g.PopulateGroupMember(ctx, member); err != nil { return nil, err } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { - opMember, err := s.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + opMember, err := g.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) if err != nil { return nil, err } @@ -1284,23 +1413,23 @@ func (s *groupServer) MuteGroupMember(ctx context.Context, req *pbgroup.MuteGrou } } data := UpdateGroupMemberMutedTimeMap(time.Now().Add(time.Second * time.Duration(req.MutedSeconds))) - if err := s.db.UpdateGroupMember(ctx, member.GroupID, member.UserID, data); err != nil { + if err := g.db.UpdateGroupMember(ctx, member.GroupID, member.UserID, data); err != nil { return nil, err } - s.notification.GroupMemberMutedNotification(ctx, req.GroupID, req.UserID, req.MutedSeconds) + g.notification.GroupMemberMutedNotification(ctx, req.GroupID, req.UserID, req.MutedSeconds) return &pbgroup.MuteGroupMemberResp{}, nil } -func (s *groupServer) CancelMuteGroupMember(ctx context.Context, req *pbgroup.CancelMuteGroupMemberReq) (*pbgroup.CancelMuteGroupMemberResp, error) { - member, err := s.db.TakeGroupMember(ctx, req.GroupID, req.UserID) +func (g *groupServer) CancelMuteGroupMember(ctx context.Context, req *pbgroup.CancelMuteGroupMemberReq) (*pbgroup.CancelMuteGroupMemberResp, error) { + member, err := g.db.TakeGroupMember(ctx, req.GroupID, req.UserID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, member); err != nil { + if err := g.PopulateGroupMember(ctx, member); err != nil { return nil, err } - if !authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) { - opMember, err := s.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) + if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { + opMember, err := g.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx)) if err != nil { return nil, err } @@ -1318,36 +1447,36 @@ func (s *groupServer) CancelMuteGroupMember(ctx context.Context, req *pbgroup.Ca } } data := UpdateGroupMemberMutedTimeMap(time.Unix(0, 0)) - if err := s.db.UpdateGroupMember(ctx, member.GroupID, member.UserID, data); err != nil { + if err := g.db.UpdateGroupMember(ctx, member.GroupID, member.UserID, data); err != nil { return nil, err } - s.notification.GroupMemberCancelMutedNotification(ctx, req.GroupID, req.UserID) + g.notification.GroupMemberCancelMutedNotification(ctx, req.GroupID, req.UserID) return &pbgroup.CancelMuteGroupMemberResp{}, nil } -func (s *groupServer) MuteGroup(ctx context.Context, req *pbgroup.MuteGroupReq) (*pbgroup.MuteGroupResp, error) { - if err := s.CheckGroupAdmin(ctx, req.GroupID); err != nil { +func (g *groupServer) MuteGroup(ctx context.Context, req *pbgroup.MuteGroupReq) (*pbgroup.MuteGroupResp, error) { + if err := g.CheckGroupAdmin(ctx, req.GroupID); err != nil { return nil, err } - if err := s.db.UpdateGroup(ctx, req.GroupID, UpdateGroupStatusMap(constant.GroupStatusMuted)); err != nil { + if err := g.db.UpdateGroup(ctx, req.GroupID, UpdateGroupStatusMap(constant.GroupStatusMuted)); err != nil { return nil, err } - s.notification.GroupMutedNotification(ctx, req.GroupID) + g.notification.GroupMutedNotification(ctx, req.GroupID) return &pbgroup.MuteGroupResp{}, nil } -func (s *groupServer) CancelMuteGroup(ctx context.Context, req *pbgroup.CancelMuteGroupReq) 
(*pbgroup.CancelMuteGroupResp, error) { - if err := s.CheckGroupAdmin(ctx, req.GroupID); err != nil { +func (g *groupServer) CancelMuteGroup(ctx context.Context, req *pbgroup.CancelMuteGroupReq) (*pbgroup.CancelMuteGroupResp, error) { + if err := g.CheckGroupAdmin(ctx, req.GroupID); err != nil { return nil, err } - if err := s.db.UpdateGroup(ctx, req.GroupID, UpdateGroupStatusMap(constant.GroupOk)); err != nil { + if err := g.db.UpdateGroup(ctx, req.GroupID, UpdateGroupStatusMap(constant.GroupOk)); err != nil { return nil, err } - s.notification.GroupCancelMutedNotification(ctx, req.GroupID) + g.notification.GroupCancelMutedNotification(ctx, req.GroupID) return &pbgroup.CancelMuteGroupResp{}, nil } -func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGroupMemberInfoReq) (*pbgroup.SetGroupMemberInfoResp, error) { +func (g *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGroupMemberInfoReq) (*pbgroup.SetGroupMemberInfoResp, error) { if len(req.Members) == 0 { return nil, errs.ErrArgs.WrapMsg("members empty") } @@ -1355,7 +1484,7 @@ func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGr if opUserID == "" { return nil, errs.ErrNoPermission.WrapMsg("no op user id") } - isAppManagerUid := authverify.IsAppManagerUid(ctx, s.config.Share.IMAdminUserID) + isAppManagerUid := authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) for i := range req.Members { req.Members[i].FaceURL = nil } @@ -1385,7 +1514,7 @@ func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGr if _, ok := temp[opUserID]; !ok { userIDs = append(userIDs, opUserID) } - dbMembers, err := s.db.FindGroupMembers(ctx, groupID, userIDs) + dbMembers, err := g.db.FindGroupMembers(ctx, groupID, userIDs) if err != nil { return nil, err } @@ -1440,12 +1569,12 @@ func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGr for i := 0; i < len(req.Members); i++ { - if err := s.webhookBeforeSetGroupMemberInfo(ctx, &s.config.WebhooksConfig.BeforeSetGroupMemberInfo, req.Members[i]); err != nil && err != servererrs.ErrCallbackContinue { + if err := g.webhookBeforeSetGroupMemberInfo(ctx, &g.config.WebhooksConfig.BeforeSetGroupMemberInfo, req.Members[i]); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } } - if err := s.db.UpdateGroupMembers(ctx, datautil.Slice(req.Members, func(e *pbgroup.SetGroupMemberInfo) *common.BatchUpdateGroupMember { + if err := g.db.UpdateGroupMembers(ctx, datautil.Slice(req.Members, func(e *pbgroup.SetGroupMemberInfo) *common.BatchUpdateGroupMember { return &common.BatchUpdateGroupMember{ GroupID: e.GroupID, UserID: e.UserID, @@ -1458,30 +1587,30 @@ func (s *groupServer) SetGroupMemberInfo(ctx context.Context, req *pbgroup.SetGr if member.RoleLevel != nil { switch member.RoleLevel.Value { case constant.GroupAdmin: - s.notification.GroupMemberSetToAdminNotification(ctx, member.GroupID, member.UserID) + g.notification.GroupMemberSetToAdminNotification(ctx, member.GroupID, member.UserID) case constant.GroupOrdinaryUsers: - s.notification.GroupMemberSetToOrdinaryUserNotification(ctx, member.GroupID, member.UserID) + g.notification.GroupMemberSetToOrdinaryUserNotification(ctx, member.GroupID, member.UserID) } } if member.Nickname != nil || member.FaceURL != nil || member.Ex != nil { - s.notification.GroupMemberInfoSetNotification(ctx, member.GroupID, member.UserID) + g.notification.GroupMemberInfoSetNotification(ctx, member.GroupID, member.UserID) } } for i := 0; i < len(req.Members); 
i++ { - s.webhookAfterSetGroupMemberInfo(ctx, &s.config.WebhooksConfig.AfterSetGroupMemberInfo, req.Members[i]) + g.webhookAfterSetGroupMemberInfo(ctx, &g.config.WebhooksConfig.AfterSetGroupMemberInfo, req.Members[i]) } return &pbgroup.SetGroupMemberInfoResp{}, nil } -func (s *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.GetGroupAbstractInfoReq) (*pbgroup.GetGroupAbstractInfoResp, error) { +func (g *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.GetGroupAbstractInfoReq) (*pbgroup.GetGroupAbstractInfoResp, error) { if len(req.GroupIDs) == 0 { return nil, errs.ErrArgs.WrapMsg("groupIDs empty") } if datautil.Duplicate(req.GroupIDs) { return nil, errs.ErrArgs.WrapMsg("groupIDs duplicate") } - groups, err := s.db.FindGroup(ctx, req.GroupIDs) + groups, err := g.db.FindGroup(ctx, req.GroupIDs) if err != nil { return nil, err } @@ -1490,7 +1619,7 @@ func (s *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.Get })); len(ids) > 0 { return nil, servererrs.ErrGroupIDNotFound.WrapMsg("not found group " + strings.Join(ids, ",")) } - groupUserMap, err := s.db.MapGroupMemberUserID(ctx, req.GroupIDs) + groupUserMap, err := g.db.MapGroupMemberUserID(ctx, req.GroupIDs) if err != nil { return nil, err } @@ -1505,15 +1634,15 @@ func (s *groupServer) GetGroupAbstractInfo(ctx context.Context, req *pbgroup.Get }, nil } -func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.GetUserInGroupMembersReq) (*pbgroup.GetUserInGroupMembersResp, error) { +func (g *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.GetUserInGroupMembersReq) (*pbgroup.GetUserInGroupMembersResp, error) { if len(req.GroupIDs) == 0 { return nil, errs.ErrArgs.WrapMsg("groupIDs empty") } - members, err := s.db.FindGroupMemberUser(ctx, req.GroupIDs, req.UserID) + members, err := g.db.FindGroupMemberUser(ctx, req.GroupIDs, req.UserID) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { return nil, err } return &pbgroup.GetUserInGroupMembersResp{ @@ -1523,8 +1652,8 @@ func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.Ge }, nil } -func (s *groupServer) GetGroupMemberUserIDs(ctx context.Context, req *pbgroup.GetGroupMemberUserIDsReq) (*pbgroup.GetGroupMemberUserIDsResp, error) { - userIDs, err := s.db.FindGroupMemberUserID(ctx, req.GroupID) +func (g *groupServer) GetGroupMemberUserIDs(ctx context.Context, req *pbgroup.GetGroupMemberUserIDsReq) (*pbgroup.GetGroupMemberUserIDsResp, error) { + userIDs, err := g.db.FindGroupMemberUserID(ctx, req.GroupID) if err != nil { return nil, err } @@ -1533,15 +1662,15 @@ func (s *groupServer) GetGroupMemberUserIDs(ctx context.Context, req *pbgroup.Ge }, nil } -func (s *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup.GetGroupMemberRoleLevelReq) (*pbgroup.GetGroupMemberRoleLevelResp, error) { +func (g *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup.GetGroupMemberRoleLevelReq) (*pbgroup.GetGroupMemberRoleLevelResp, error) { if len(req.RoleLevels) == 0 { return nil, errs.ErrArgs.WrapMsg("RoleLevels empty") } - members, err := s.db.FindGroupMemberRoleLevels(ctx, req.GroupID, req.RoleLevels) + members, err := g.db.FindGroupMemberRoleLevels(ctx, req.GroupID, req.RoleLevels) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, members...); err != nil { + if err := g.PopulateGroupMember(ctx, members...); err != nil { 
return nil, err } return &pbgroup.GetGroupMemberRoleLevelResp{ @@ -1551,41 +1680,56 @@ func (s *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup. }, nil } -func (s *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req *pbgroup.GetGroupUsersReqApplicationListReq) (*pbgroup.GetGroupUsersReqApplicationListResp, error) { - requests, err := s.db.FindGroupRequests(ctx, req.GroupID, req.UserIDs) +func (g *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req *pbgroup.GetGroupUsersReqApplicationListReq) (*pbgroup.GetGroupUsersReqApplicationListResp, error) { + requests, err := g.db.FindGroupRequests(ctx, req.GroupID, req.UserIDs) if err != nil { return nil, err } + if len(requests) == 0 { return &pbgroup.GetGroupUsersReqApplicationListResp{}, nil } + groupIDs := datautil.Distinct(datautil.Slice(requests, func(e *model.GroupRequest) string { return e.GroupID })) - groups, err := s.db.FindGroup(ctx, groupIDs) + + groups, err := g.db.FindGroup(ctx, groupIDs) if err != nil { return nil, err } + groupMap := datautil.SliceToMap(groups, func(e *model.Group) string { return e.GroupID }) + if ids := datautil.Single(groupIDs, datautil.Keys(groupMap)); len(ids) > 0 { return nil, servererrs.ErrGroupIDNotFound.WrapMsg(strings.Join(ids, ",")) } - owners, err := s.db.FindGroupsOwner(ctx, groupIDs) + + userMap, err := g.user.GetPublicUserInfoMap(ctx, req.UserIDs) + if err != nil { + return nil, err + } + + owners, err := g.db.FindGroupsOwner(ctx, groupIDs) if err != nil { return nil, err } - if err := s.PopulateGroupMember(ctx, owners...); err != nil { + + if err := g.PopulateGroupMember(ctx, owners...); err != nil { return nil, err } + ownerMap := datautil.SliceToMap(owners, func(e *model.GroupMember) string { return e.GroupID }) - groupMemberNum, err := s.db.MapGroupMemberNum(ctx, groupIDs) + + groupMemberNum, err := g.db.MapGroupMemberNum(ctx, groupIDs) if err != nil { return nil, err } + return &pbgroup.GetGroupUsersReqApplicationListResp{ Total: int64(len(requests)), GroupRequests: datautil.Slice(requests, func(e *model.GroupRequest) *sdkws.GroupRequest { @@ -1593,7 +1737,72 @@ func (s *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req * if owner, ok := ownerMap[e.GroupID]; ok { ownerUserID = owner.UserID } - return convert.Db2PbGroupRequest(e, nil, convert.Db2PbGroupInfo(groupMap[e.GroupID], ownerUserID, groupMemberNum[e.GroupID])) + + var userInfo *sdkws.PublicUserInfo + if user, ok := userMap[e.UserID]; !ok { + userInfo = user + } + + return convert.Db2PbGroupRequest(e, userInfo, convert.Db2PbGroupInfo(groupMap[e.GroupID], ownerUserID, groupMemberNum[e.GroupID])) }), }, nil } + +func (g *groupServer) GetSpecifiedUserGroupRequestInfo(ctx context.Context, req *pbgroup.GetSpecifiedUserGroupRequestInfoReq) (*pbgroup.GetSpecifiedUserGroupRequestInfoResp, error) { + opUserID := mcontext.GetOpUserID(ctx) + + owners, err := g.db.FindGroupsOwner(ctx, []string{req.GroupID}) + if err != nil { + return nil, err + } + + if req.UserID != opUserID { + req.UserID = mcontext.GetOpUserID(ctx) + adminIDs, err := g.db.GetGroupRoleLevelMemberIDs(ctx, req.GroupID, constant.GroupAdmin) + if err != nil { + return nil, err + } + + adminIDs = append(adminIDs, owners[0].UserID) + adminIDs = append(adminIDs, g.config.Share.IMAdminUserID...) + + if !datautil.Contain(req.UserID, adminIDs...) 
{ + return nil, errs.ErrNoPermission.WrapMsg("opUser no permission") + } + } + requests, err := g.db.FindGroupRequests(ctx, req.GroupID, []string{req.UserID}) + if err != nil { + return nil, err + } + + if len(requests) == 0 { + return &pbgroup.GetSpecifiedUserGroupRequestInfoResp{}, nil + } + + groups, err := g.db.FindGroup(ctx, []string{req.GroupID}) + if err != nil { + return nil, err + } + + userInfos, err := g.user.GetPublicUserInfos(ctx, []string{req.UserID}) + if err != nil { + return nil, err + } + + groupMemberNum, err := g.db.MapGroupMemberNum(ctx, []string{req.GroupID}) + if err != nil { + return nil, err + } + + resp := &pbgroup.GetSpecifiedUserGroupRequestInfoResp{ + GroupRequests: make([]*sdkws.GroupRequest, 0, len(requests)), + } + + for _, request := range requests { + resp.GroupRequests = append(resp.GroupRequests, convert.Db2PbGroupRequest(request, userInfos[0], convert.Db2PbGroupInfo(groups[0], owners[0].UserID, groupMemberNum[groups[0].GroupID]))) + } + + resp.Total = uint32(len(requests)) + + return resp, nil +} diff --git a/internal/rpc/group/notification.go b/internal/rpc/group/notification.go index 9815167e9b..64e922fe2b 100644 --- a/internal/rpc/group/notification.go +++ b/internal/rpc/group/notification.go @@ -16,25 +16,28 @@ package group import ( "context" + "errors" "fmt" + "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" + "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification" - - "github.com/openimsdk/open-im-server/v3/pkg/authverify" - "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" + "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification" "github.com/openimsdk/protocol/constant" pbgroup "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/stringutil" + "go.mongodb.org/mongo-driver/mongo" ) // GroupApplicationReceiver @@ -43,12 +46,22 @@ const ( adminReceiver ) -func NewGroupNotificationSender(db controller.GroupDatabase, msgRpcClient *rpcclient.MessageRpcClient, userRpcClient *rpcclient.UserRpcClient, config *Config, fn func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error)) *GroupNotificationSender { +func NewGroupNotificationSender( + db controller.GroupDatabase, + msgRpcClient *rpcclient.MessageRpcClient, + userRpcClient *rpcclient.UserRpcClient, + conversationRpcClient *rpcclient.ConversationRpcClient, + config *Config, + fn func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error), +) *GroupNotificationSender { return &GroupNotificationSender{ NotificationSender: rpcclient.NewNotificationSender(&config.NotificationConfig, rpcclient.WithRpcClient(msgRpcClient), rpcclient.WithUserRpcClient(userRpcClient)), getUsersInfo: fn, db: db, config: 
config, + + conversationRpcClient: conversationRpcClient, + msgRpcClient: msgRpcClient, } } @@ -57,6 +70,9 @@ type GroupNotificationSender struct { getUsersInfo func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error) db controller.GroupDatabase config *Config + + conversationRpcClient *rpcclient.ConversationRpcClient + msgRpcClient *rpcclient.MessageRpcClient } func (g *GroupNotificationSender) PopulateGroupMember(ctx context.Context, members ...*model.GroupMember) error { @@ -212,10 +228,13 @@ func (g *GroupNotificationSender) groupMemberDB2PB(member *model.GroupMember, ap } */ func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws.GroupMemberFullInfo, groupID string) (err error) { + return g.fillOpUserByUserID(ctx, mcontext.GetOpUserID(ctx), opUser, groupID) +} + +func (g *GroupNotificationSender) fillOpUserByUserID(ctx context.Context, userID string, opUser **sdkws.GroupMemberFullInfo, groupID string) error { if opUser == nil { return errs.ErrInternalServer.WrapMsg("**sdkws.GroupMemberFullInfo is nil") } - userID := mcontext.GetOpUserID(ctx) if groupID != "" { if authverify.IsManagerUserID(userID, g.config.Share.IMAdminUserID) { *opUser = &sdkws.GroupMemberFullInfo{ @@ -228,7 +247,7 @@ func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws member, err := g.db.TakeGroupMember(ctx, groupID, userID) if err == nil { *opUser = g.groupMemberDB2PB(member, 0) - } else if !errs.ErrRecordNotFound.Is(err) { + } else if !(errors.Is(err, mongo.ErrNoDocuments) || errs.ErrRecordNotFound.Is(err)) { return err } } @@ -494,50 +513,67 @@ func (g *GroupNotificationSender) MemberKickedNotification(ctx context.Context, g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.MemberKickedNotification, tips) } -func (g *GroupNotificationSender) MemberInvitedNotification(ctx context.Context, groupID, reason string, invitedUserIDList []string) { +func (g *GroupNotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, invitedOpUserID string, entrantUserID ...string) error { var err error defer func() { if err != nil { log.ZError(ctx, stringutil.GetFuncName(1)+" failed", err) } }() - var group *sdkws.GroupInfo - group, err = g.getGroupInfo(ctx, groupID) - if err != nil { - return + + if !g.config.RpcConfig.EnableHistoryForNewMembers { + conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) + maxSeq, err := g.msgRpcClient.GetConversationMaxSeq(ctx, conversationID) + if err != nil { + return err + } + if _, err = g.msgRpcClient.SetUserConversationsMinSeq(ctx, &msg.SetUserConversationsMinSeqReq{ + UserIDs: entrantUserID, + ConversationID: conversationID, + Seq: maxSeq, + }); err != nil { + return err + } } - var users []*sdkws.GroupMemberFullInfo - users, err = g.getGroupMembers(ctx, groupID, invitedUserIDList) - if err != nil { - return + if err := g.conversationRpcClient.GroupChatFirstCreateConversation(ctx, groupID, entrantUserID); err != nil { + return err } - tips := &sdkws.MemberInvitedTips{Group: group, InvitedUserList: users} - err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID) - g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID) - g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberInvitedNotification, tips) -} -func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, groupID string, entrantUserID 
string) { - var err error - defer func() { - if err != nil { - log.ZError(ctx, stringutil.GetFuncName(1)+" failed", err) - } - }() var group *sdkws.GroupInfo group, err = g.getGroupInfo(ctx, groupID) if err != nil { - return + return err } - var user *sdkws.GroupMemberFullInfo - user, err = g.getGroupMember(ctx, groupID, entrantUserID) + users, err := g.getGroupMembers(ctx, groupID, entrantUserID) if err != nil { - return + return err + } + + tips := &sdkws.MemberInvitedTips{ + Group: group, + InvitedUserList: users, + } + opUserID := mcontext.GetOpUserID(ctx) + if err = g.fillOpUserByUserID(ctx, opUserID, &tips.OpUser, tips.Group.GroupID); err != nil { + return nil + } + switch { + case invitedOpUserID == "": + case invitedOpUserID == opUserID: + tips.InviterUser = tips.OpUser + default: + if err = g.fillOpUserByUserID(ctx, invitedOpUserID, &tips.InviterUser, tips.Group.GroupID); err != nil { + return err + } } - tips := &sdkws.MemberEnterTips{Group: group, EntrantUser: user} g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID) - g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberEnterNotification, tips) + g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberInvitedNotification, tips) + return nil +} + +func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, groupID string, entrantUserID ...string) error { + return g.GroupApplicationAgreeMemberEnterNotification(ctx, groupID, "", entrantUserID...) } func (g *GroupNotificationSender) GroupDismissedNotification(ctx context.Context, tips *sdkws.GroupDismissedTips) { diff --git a/internal/rpc/msg/as_read.go b/internal/rpc/msg/as_read.go index bfba4824fe..03f35b42d5 100644 --- a/internal/rpc/msg/as_read.go +++ b/internal/rpc/msg/as_read.go @@ -55,7 +55,7 @@ func (m *msgServer) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *m conversationMaxSeqMap[conversation.ConversationID] = conversation.MaxSeq } } - maxSeqs, err := m.MsgDatabase.GetMaxSeqs(ctx, conversationIDs) + maxSeqs, err := m.MsgDatabase.GetMaxSeqsWithTime(ctx, conversationIDs) if err != nil { return nil, err } @@ -63,7 +63,8 @@ func (m *msgServer) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *m for conversationID, maxSeq := range maxSeqs { resp.Seqs[conversationID] = &msg.Seqs{ HasReadSeq: hasReadSeqs[conversationID], - MaxSeq: maxSeq, + MaxSeq: maxSeq.Seq, + MaxSeqTime: maxSeq.Time, } if v, ok := conversationMaxSeqMap[conversationID]; ok { resp.Seqs[conversationID].MaxSeq = v diff --git a/internal/rpc/msg/callback.go b/internal/rpc/msg/callback.go index be58d75047..c66dd6ca91 100644 --- a/internal/rpc/msg/callback.go +++ b/internal/rpc/msg/callback.go @@ -41,6 +41,7 @@ func toCommonCallback(ctx context.Context, msg *pbchat.SendMsgReq, command strin MsgFrom: msg.MsgData.MsgFrom, ContentType: msg.MsgData.ContentType, Status: msg.MsgData.Status, + SendTime: msg.MsgData.SendTime, CreateTime: msg.MsgData.CreateTime, AtUserIDList: msg.MsgData.AtUserIDList, SenderFaceURL: msg.MsgData.SenderFaceURL, @@ -66,6 +67,9 @@ func (m *msgServer) webhookBeforeSendSingleMsg(ctx context.Context, before *conf if msg.MsgData.ContentType == constant.Typing { return nil } + if !filterBeforeMsg(msg, before) { + return nil + } cbReq := &cbapi.CallbackBeforeSendSingleMsgReq{ CommonCallbackReq: toCommonCallback(ctx, msg, cbapi.CallbackBeforeSendSingleMsgCommand), RecvID: msg.MsgData.RecvID, @@ -83,9 +87,7 @@ func (m *msgServer) 
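// The GroupApplicationAgreeMemberEnterNotification hunk above hides pre-join history when
// RpcConfig.EnableHistoryForNewMembers is disabled: it reads the conversation's current max seq
// and raises each entrant's per-user min seq to that value, so later pulls start after the join
// point. A minimal sketch of that idea, assuming a generic seq store; the Store interface and
// names below are hypothetical, not the OpenIM controller API.
package seqdemo

import "context"

// Store is a hypothetical seq store used only for this sketch.
type Store interface {
	GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
	SetUserMinSeq(ctx context.Context, userID, conversationID string, seq int64) error
}

// HideHistoryForNewMembers raises each entrant's min seq to the current max seq,
// so subsequent pulls return only messages sent after the join point.
func HideHistoryForNewMembers(ctx context.Context, s Store, conversationID string, userIDs []string) error {
	maxSeq, err := s.GetMaxSeq(ctx, conversationID)
	if err != nil {
		return err
	}
	for _, userID := range userIDs {
		if err := s.SetUserMinSeq(ctx, userID, conversationID, maxSeq); err != nil {
			return err
		}
	}
	return nil
}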
webhookAfterSendSingleMsg(ctx context.Context, after *config if msg.MsgData.ContentType == constant.Typing { return } - // According to the attentionIds configuration, only some users are sent - attentionIds := after.AttentionIds - if attentionIds != nil && !datautil.Contain(msg.MsgData.RecvID, attentionIds...) && !datautil.Contain(msg.MsgData.SendID, attentionIds...) { + if !filterAfterMsg(msg, after) { return } cbReq := &cbapi.CallbackAfterSendSingleMsgReq{ @@ -97,6 +99,9 @@ func (m *msgServer) webhookAfterSendSingleMsg(ctx context.Context, after *config func (m *msgServer) webhookBeforeSendGroupMsg(ctx context.Context, before *config.BeforeConfig, msg *pbchat.SendMsgReq) error { return webhook.WithCondition(ctx, before, func(ctx context.Context) error { + if !filterBeforeMsg(msg, before) { + return nil + } if msg.MsgData.ContentType == constant.Typing { return nil } @@ -116,6 +121,9 @@ func (m *msgServer) webhookAfterSendGroupMsg(ctx context.Context, after *config. if msg.MsgData.ContentType == constant.Typing { return } + if !filterAfterMsg(msg, after) { + return + } cbReq := &cbapi.CallbackAfterSendGroupMsgReq{ CommonCallbackReq: toCommonCallback(ctx, msg, cbapi.CallbackAfterSendGroupMsgCommand), GroupID: msg.MsgData.GroupID, @@ -128,6 +136,9 @@ func (m *msgServer) webhookBeforeMsgModify(ctx context.Context, before *config.B if msg.MsgData.ContentType != constant.Text { return nil } + if !filterBeforeMsg(msg, before) { + return nil + } cbReq := &cbapi.CallbackMsgModifyCommandReq{ CommonCallbackReq: toCommonCallback(ctx, msg, cbapi.CallbackBeforeMsgModifyCommand), } diff --git a/internal/rpc/msg/clear.go b/internal/rpc/msg/clear.go index 6be551eada..c5bd36b445 100644 --- a/internal/rpc/msg/clear.go +++ b/internal/rpc/msg/clear.go @@ -30,8 +30,14 @@ func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg. msgNum int start = time.Now() ) + clearMsg := func(ctx context.Context) (bool, error) { - msgs, err := m.MsgDatabase.GetBeforeMsg(ctx, req.Timestamp, 100) + docIDs, err := m.MsgDatabase.GetDocIDs(ctx) + if err != nil { + return false, err + } + + msgs, err := m.MsgDatabase.GetBeforeMsg(ctx, req.Timestamp, docIDs, 5000) if err != nil { return false, err } @@ -55,19 +61,14 @@ func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg. 
return true, nil } - for { - keep, err := clearMsg(ctx) - if err != nil { - log.ZError(ctx, "clear msg failed", err, "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) - return nil, err - } - if !keep { - log.ZInfo(ctx, "clear msg success", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) - break - } - - log.ZInfo(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) + _, err = clearMsg(ctx) + if err != nil { + log.ZError(ctx, "clear msg failed", err, "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) + return nil, err } + + log.ZDebug(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) + return &msg.ClearMsgResp{}, nil } diff --git a/internal/rpc/msg/filter.go b/internal/rpc/msg/filter.go new file mode 100644 index 0000000000..ed1a488f12 --- /dev/null +++ b/internal/rpc/msg/filter.go @@ -0,0 +1,67 @@ +package msg + +import ( + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + pbchat "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/tools/utils/datautil" + "strconv" + "strings" +) + +const ( + separator = "-" +) + +func filterAfterMsg(msg *pbchat.SendMsgReq, after *config.AfterConfig) bool { + return filterMsg(msg, after.AttentionIds, after.AllowedTypes, after.DeniedTypes) +} + +func filterBeforeMsg(msg *pbchat.SendMsgReq, before *config.BeforeConfig) bool { + return filterMsg(msg, nil, before.AllowedTypes, before.DeniedTypes) +} + +func filterMsg(msg *pbchat.SendMsgReq, attentionIds, allowedTypes, deniedTypes []string) bool { + // According to the attentionIds configuration, only some users are sent + if len(attentionIds) != 0 && !datautil.Contains([]string{msg.MsgData.SendID, msg.MsgData.RecvID}, attentionIds...) { + return false + } + if len(allowedTypes) != 0 && !isInInterval(msg.MsgData.ContentType, allowedTypes) { + return false + } + if len(deniedTypes) != 0 && isInInterval(msg.MsgData.ContentType, deniedTypes) { + return false + } + return true +} + +func isInInterval(contentType int32, interval []string) bool { + for _, v := range interval { + if strings.Contains(v, separator) { + // is interval + bounds := strings.Split(v, separator) + if len(bounds) != 2 { + continue + } + bottom, err := strconv.Atoi(bounds[0]) + if err != nil { + continue + } + top, err := strconv.Atoi(bounds[1]) + if err != nil { + continue + } + if datautil.BetweenEq(int(contentType), bottom, top) { + return true + } + } else { + iv, err := strconv.Atoi(v) + if err != nil { + continue + } + if int(contentType) == iv { + return true + } + } + } + return false +} diff --git a/internal/rpc/msg/seq.go b/internal/rpc/msg/seq.go index 1ebec4a719..5d40160de5 100644 --- a/internal/rpc/msg/seq.go +++ b/internal/rpc/msg/seq.go @@ -16,10 +16,10 @@ package msg import ( "context" + pbmsg "github.com/openimsdk/protocol/msg" "github.com/openimsdk/tools/errs" "github.com/redis/go-redis/v9" - - pbmsg "github.com/openimsdk/protocol/msg" + "sort" ) func (m *msgServer) GetConversationMaxSeq(ctx context.Context, req *pbmsg.GetConversationMaxSeqReq) (*pbmsg.GetConversationMaxSeqResp, error) { @@ -53,3 +53,34 @@ func (m *msgServer) GetMsgByConversationIDs(ctx context.Context, req *pbmsg.GetM } return &pbmsg.GetMsgByConversationIDsResp{MsgDatas: Msgs}, nil } + +func (m *msgServer) SetUserConversationsMinSeq(ctx context.Context, req *pbmsg.SetUserConversationsMinSeqReq) (*pbmsg.SetUserConversationsMinSeqResp, error) { + for _, userID := range req.UserIDs { + if err := 
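// The new filter.go above gates webhooks by content type: allowedTypes/deniedTypes entries are
// either a single value ("1501") or an inclusive range ("101-120"). The helper below is a local,
// illustrative copy of that matching logic in plain Go (the package's own isInInterval is
// unexported and uses datautil.BetweenEq), shown with a small usage example.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// matchContentType reports whether contentType falls inside any interval entry.
func matchContentType(contentType int32, interval []string) bool {
	for _, v := range interval {
		if strings.Contains(v, "-") {
			bounds := strings.Split(v, "-")
			if len(bounds) != 2 {
				continue
			}
			low, err1 := strconv.Atoi(bounds[0])
			high, err2 := strconv.Atoi(bounds[1])
			if err1 != nil || err2 != nil {
				continue
			}
			if int(contentType) >= low && int(contentType) <= high {
				return true
			}
		} else if n, err := strconv.Atoi(v); err == nil && int(contentType) == n {
			return true
		}
	}
	return false
}

func main() {
	allowed := []string{"101-120", "1501"}
	fmt.Println(matchContentType(110, allowed))  // true: inside the 101-120 range
	fmt.Println(matchContentType(1501, allowed)) // true: exact value match
	fmt.Println(matchContentType(201, allowed))  // false: not listed
}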
m.MsgDatabase.SetUserConversationsMinSeqs(ctx, userID, map[string]int64{req.ConversationID: req.Seq}); err != nil { + return nil, err + } + } + return &pbmsg.SetUserConversationsMinSeqResp{}, nil +} + +func (m *msgServer) GetActiveConversation(ctx context.Context, req *pbmsg.GetActiveConversationReq) (*pbmsg.GetActiveConversationResp, error) { + res, err := m.MsgDatabase.GetCacheMaxSeqWithTime(ctx, req.ConversationIDs) + if err != nil { + return nil, err + } + conversations := make([]*pbmsg.ActiveConversation, 0, len(res)) + for conversationID, val := range res { + conversations = append(conversations, &pbmsg.ActiveConversation{ + MaxSeq: val.Seq, + LastTime: val.Time, + ConversationID: conversationID, + }) + } + if req.Limit > 0 { + sort.Sort(activeConversations(conversations)) + if len(conversations) > int(req.Limit) { + conversations = conversations[:req.Limit] + } + } + return &pbmsg.GetActiveConversationResp{Conversations: conversations}, nil +} diff --git a/internal/rpc/msg/sync_msg.go b/internal/rpc/msg/sync_msg.go index f5b5ebda53..2f77881673 100644 --- a/internal/rpc/msg/sync_msg.go +++ b/internal/rpc/msg/sync_msg.go @@ -16,16 +16,16 @@ package msg import ( "context" - "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" - "github.com/openimsdk/tools/utils/datautil" - "github.com/openimsdk/tools/utils/timeutil" "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" + "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/utils/datautil" + "github.com/openimsdk/tools/utils/timeutil" ) func (m *msgServer) PullMessageBySeqs(ctx context.Context, req *sdkws.PullMessageBySeqsReq) (*sdkws.PullMessageBySeqsResp, error) { @@ -86,6 +86,35 @@ func (m *msgServer) PullMessageBySeqs(ctx context.Context, req *sdkws.PullMessag return resp, nil } +func (m *msgServer) GetSeqMessage(ctx context.Context, req *msg.GetSeqMessageReq) (*msg.GetSeqMessageResp, error) { + resp := &msg.GetSeqMessageResp{ + Msgs: make(map[string]*sdkws.PullMsgs), + NotificationMsgs: make(map[string]*sdkws.PullMsgs), + } + for _, conv := range req.Conversations { + _, _, msgs, err := m.MsgDatabase.GetMsgBySeqs(ctx, req.UserID, conv.ConversationID, conv.Seqs) + if err != nil { + return nil, err + } + var pullMsgs *sdkws.PullMsgs + if ok := false; conversationutil.IsNotificationConversationID(conv.ConversationID) { + pullMsgs, ok = resp.NotificationMsgs[conv.ConversationID] + if !ok { + pullMsgs = &sdkws.PullMsgs{} + resp.NotificationMsgs[conv.ConversationID] = pullMsgs + } + } else { + pullMsgs, ok = resp.Msgs[conv.ConversationID] + if !ok { + pullMsgs = &sdkws.PullMsgs{} + resp.Msgs[conv.ConversationID] = pullMsgs + } + } + pullMsgs.Msgs = append(pullMsgs.Msgs, msgs...) 
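// GetSeqMessage and GetActiveConversation above build the "recent conversations" view by sorting
// cached max-seq entries by their last activity time (descending) and truncating to req.Limit.
// A self-contained sketch of that sort-then-truncate step using sort.Slice; conv is a stand-in
// for pbmsg.ActiveConversation, not the generated type.
package main

import (
	"fmt"
	"sort"
)

// conv is an illustrative stand-in for an active-conversation record.
type conv struct {
	ConversationID string
	MaxSeq         int64
	LastTime       int64
}

// topActive orders conversations by LastTime descending and keeps at most limit entries.
func topActive(cs []conv, limit int) []conv {
	sort.Slice(cs, func(i, j int) bool { return cs[i].LastTime > cs[j].LastTime })
	if limit > 0 && len(cs) > limit {
		cs = cs[:limit]
	}
	return cs
}

func main() {
	cs := []conv{
		{"si_a_b", 12, 1700000300},
		{"sg_group1", 40, 1700000900},
		{"si_a_c", 7, 1700000100},
	}
	for _, c := range topActive(cs, 2) {
		fmt.Println(c.ConversationID, c.LastTime)
	}
	// Prints sg_group1 first, then si_a_b: most recently active, capped at two.
}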
+ } + return resp, nil +} + func (m *msgServer) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sdkws.GetMaxSeqResp, error) { if err := authverify.CheckAccessV3(ctx, req.UserID, m.config.Share.IMAdminUserID); err != nil { return nil, err @@ -104,13 +133,20 @@ func (m *msgServer) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sd log.ZWarn(ctx, "GetMaxSeqs error", err, "conversationIDs", conversationIDs, "maxSeqs", maxSeqs) return nil, err } + // avoid pulling messages from sessions with a large number of max seq values of 0 + for conversationID, seq := range maxSeqs { + if seq == 0 { + delete(maxSeqs, conversationID) + } + } resp := new(sdkws.GetMaxSeqResp) resp.MaxSeqs = maxSeqs return resp, nil } func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (resp *msg.SearchMessageResp, err error) { - var chatLogs []*sdkws.MsgData + // var chatLogs []*sdkws.MsgData + var chatLogs []*msg.SearchedMsgData var total int64 resp = &msg.SearchMessageResp{} if total, chatLogs, err = m.MsgDatabase.SearchMessage(ctx, req); err != nil { @@ -125,17 +161,19 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq recvMap = make(map[string]string) groupMap = make(map[string]*sdkws.GroupInfo) ) + for _, chatLog := range chatLogs { - if chatLog.SenderNickname == "" { - sendIDs = append(sendIDs, chatLog.SendID) + if chatLog.MsgData.SenderNickname == "" { + sendIDs = append(sendIDs, chatLog.MsgData.SendID) } - switch chatLog.SessionType { + switch chatLog.MsgData.SessionType { case constant.SingleChatType, constant.NotificationChatType: - recvIDs = append(recvIDs, chatLog.RecvID) + recvIDs = append(recvIDs, chatLog.MsgData.RecvID) case constant.WriteGroupChatType, constant.ReadGroupChatType: - groupIDs = append(groupIDs, chatLog.GroupID) + groupIDs = append(groupIDs, chatLog.MsgData.GroupID) } } + // Retrieve sender and receiver information if len(sendIDs) != 0 { sendInfos, err := m.UserLocalCache.GetUsersInfo(ctx, sendIDs) @@ -146,6 +184,7 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq sendMap[sendInfo.UserID] = sendInfo.Nickname } } + if len(recvIDs) != 0 { recvInfos, err := m.UserLocalCache.GetUsersInfo(ctx, recvIDs) if err != nil { @@ -171,20 +210,21 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq } } } + // Construct response with updated information for _, chatLog := range chatLogs { pbchatLog := &msg.ChatLog{} - datautil.CopyStructFields(pbchatLog, chatLog) - pbchatLog.SendTime = chatLog.SendTime - pbchatLog.CreateTime = chatLog.CreateTime - if chatLog.SenderNickname == "" { - pbchatLog.SenderNickname = sendMap[chatLog.SendID] + datautil.CopyStructFields(pbchatLog, chatLog.MsgData) + pbchatLog.SendTime = chatLog.MsgData.SendTime + pbchatLog.CreateTime = chatLog.MsgData.CreateTime + if chatLog.MsgData.SenderNickname == "" { + pbchatLog.SenderNickname = sendMap[chatLog.MsgData.SendID] } - switch chatLog.SessionType { + switch chatLog.MsgData.SessionType { case constant.SingleChatType, constant.NotificationChatType: - pbchatLog.RecvNickname = recvMap[chatLog.RecvID] - case constant.WriteGroupChatType, constant.ReadGroupChatType: - groupInfo := groupMap[chatLog.GroupID] + pbchatLog.RecvNickname = recvMap[chatLog.MsgData.RecvID] + case constant.ReadGroupChatType: + groupInfo := groupMap[chatLog.MsgData.GroupID] pbchatLog.SenderFaceURL = groupInfo.FaceURL pbchatLog.GroupMemberCount = groupInfo.MemberCount // Reflects actual member count pbchatLog.RecvID = 
groupInfo.GroupID @@ -192,7 +232,9 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq pbchatLog.GroupOwner = groupInfo.OwnerUserID pbchatLog.GroupType = groupInfo.GroupType } - resp.ChatLogs = append(resp.ChatLogs, pbchatLog) + searchChatLog := &msg.SearchChatLog{ChatLog: pbchatLog, IsRevoked: chatLog.IsRevoked} + + resp.ChatLogs = append(resp.ChatLogs, searchChatLog) } resp.ChatLogsNum = int32(total) return resp, nil diff --git a/internal/rpc/msg/utils.go b/internal/rpc/msg/utils.go index 69b4d0bf6d..e3490848c9 100644 --- a/internal/rpc/msg/utils.go +++ b/internal/rpc/msg/utils.go @@ -15,6 +15,7 @@ package msg import ( + "github.com/openimsdk/protocol/msg" "github.com/openimsdk/tools/errs" "github.com/redis/go-redis/v9" "go.mongodb.org/mongo-driver/mongo" @@ -28,3 +29,63 @@ func IsNotFound(err error) bool { return false } } + +type activeConversations []*msg.ActiveConversation + +func (s activeConversations) Len() int { + return len(s) +} + +func (s activeConversations) Less(i, j int) bool { + return s[i].LastTime > s[j].LastTime +} + +func (s activeConversations) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +//type seqTime struct { +// ConversationID string +// Seq int64 +// Time int64 +// Unread int64 +// Pinned bool +//} +// +//func (s seqTime) String() string { +// return fmt.Sprintf("", s.Time, s.Unread, s.Pinned) +//} +// +//type seqTimes []seqTime +// +//func (s seqTimes) Len() int { +// return len(s) +//} +// +//// Less sticky priority, unread priority, time descending +//func (s seqTimes) Less(i, j int) bool { +// iv, jv := s[i], s[j] +// if iv.Pinned && (!jv.Pinned) { +// return true +// } +// if jv.Pinned && (!iv.Pinned) { +// return false +// } +// if iv.Unread > 0 && jv.Unread == 0 { +// return true +// } +// if jv.Unread > 0 && iv.Unread == 0 { +// return false +// } +// return iv.Time > jv.Time +//} +// +//func (s seqTimes) Swap(i, j int) { +// s[i], s[j] = s[j], s[i] +//} +// +//type conversationStatus struct { +// ConversationID string +// Pinned bool +// Recv bool +//} diff --git a/internal/rpc/relation/black.go b/internal/rpc/relation/black.go index e149e31653..d8d457dacc 100644 --- a/internal/rpc/relation/black.go +++ b/internal/rpc/relation/black.go @@ -23,13 +23,17 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/protocol/relation" + "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/mcontext" + "github.com/openimsdk/tools/utils/datautil" ) func (s *friendServer) GetPaginationBlacks(ctx context.Context, req *relation.GetPaginationBlacksReq) (resp *relation.GetPaginationBlacksResp, err error) { - if err := s.userRpcClient.Access(ctx, req.UserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil { return nil, err } + total, blacks, err := s.blackDatabase.FindOwnerBlacks(ctx, req.UserID, req.Pagination) if err != nil { return nil, err @@ -55,7 +59,7 @@ func (s *friendServer) IsBlack(ctx context.Context, req *relation.IsBlackReq) (* } func (s *friendServer) RemoveBlack(ctx context.Context, req *relation.RemoveBlackReq) (*relation.RemoveBlackResp, error) { - if err := s.userRpcClient.Access(ctx, req.OwnerUserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, s.config.Share.IMAdminUserID); err != nil { return nil, err } @@ -64,6 +68,7 @@ func (s *friendServer) RemoveBlack(ctx context.Context, req 
*relation.RemoveBlac } s.notificationSender.BlackDeletedNotification(ctx, req) + s.webhookAfterRemoveBlack(ctx, &s.config.WebhooksConfig.AfterRemoveBlack, req) return &relation.RemoveBlackResp{}, nil } @@ -72,6 +77,11 @@ func (s *friendServer) AddBlack(ctx context.Context, req *relation.AddBlackReq) if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, s.config.Share.IMAdminUserID); err != nil { return nil, err } + + if err := s.webhookBeforeAddBlack(ctx, &s.config.WebhooksConfig.BeforeAddBlack, req); err != nil { + return nil, err + } + _, err := s.userRpcClient.GetUsersInfo(ctx, []string{req.OwnerUserID, req.BlackUserID}) if err != nil { return nil, err @@ -90,3 +100,53 @@ func (s *friendServer) AddBlack(ctx context.Context, req *relation.AddBlackReq) s.notificationSender.BlackAddedNotification(ctx, req) return &relation.AddBlackResp{}, nil } + +func (s *friendServer) GetSpecifiedBlacks(ctx context.Context, req *relation.GetSpecifiedBlacksReq) (*relation.GetSpecifiedBlacksResp, error) { + if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, s.config.Share.IMAdminUserID); err != nil { + return nil, err + } + + if len(req.UserIDList) == 0 { + return nil, errs.ErrArgs.WrapMsg("userIDList is empty") + } + + if datautil.Duplicate(req.UserIDList) { + return nil, errs.ErrArgs.WrapMsg("userIDList repeated") + } + + userMap, err := s.userRpcClient.GetPublicUserInfoMap(ctx, req.UserIDList) + if err != nil { + return nil, err + } + + blacks, err := s.blackDatabase.FindBlackInfos(ctx, req.OwnerUserID, req.UserIDList) + if err != nil { + return nil, err + } + + blackMap := datautil.SliceToMap(blacks, func(e *model.Black) string { + return e.BlockUserID + }) + + resp := &relation.GetSpecifiedBlacksResp{ + Blacks: make([]*sdkws.BlackInfo, 0, len(req.UserIDList)), + } + + for _, userID := range req.UserIDList { + if black := blackMap[userID]; black != nil { + resp.Blacks = append(resp.Blacks, + &sdkws.BlackInfo{ + OwnerUserID: black.OwnerUserID, + CreateTime: black.CreateTime.UnixMilli(), + BlackUserInfo: userMap[userID], + AddSource: black.AddSource, + OperatorUserID: black.OperatorUserID, + Ex: black.Ex, + }) + } + } + + resp.Total = int32(len(resp.Blacks)) + + return resp, nil +} diff --git a/internal/rpc/relation/callback.go b/internal/rpc/relation/callback.go index 69c4c9e0ec..09debdea18 100644 --- a/internal/rpc/relation/callback.go +++ b/internal/rpc/relation/callback.go @@ -138,6 +138,18 @@ func (s *friendServer) webhookBeforeAddFriendAgree(ctx context.Context, before * }) } +func (s *friendServer) webhookAfterAddFriendAgree(ctx context.Context, after *config.AfterConfig, req *relation.RespondFriendApplyReq) { + cbReq := &cbapi.CallbackAfterAddFriendAgreeReq{ + CallbackCommand: cbapi.CallbackAfterAddFriendAgreeCommand, + FromUserID: req.FromUserID, + ToUserID: req.ToUserID, + HandleMsg: req.HandleMsg, + HandleResult: req.HandleResult, + } + resp := &cbapi.CallbackAfterAddFriendAgreeResp{} + s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, after) +} + func (s *friendServer) webhookBeforeImportFriends(ctx context.Context, before *config.BeforeConfig, req *relation.ImportFriendReq) error { return webhook.WithCondition(ctx, before, func(ctx context.Context) error { cbReq := &cbapi.CallbackBeforeImportFriendsReq{ diff --git a/internal/rpc/relation/friend.go b/internal/rpc/relation/friend.go index 3d29ad3379..9d55ba4d99 100644 --- a/internal/rpc/relation/friend.go +++ b/internal/rpc/relation/friend.go @@ -121,7 +121,7 @@ func Start(ctx context.Context, config *Config, 
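// GetSpecifiedBlacks above indexes the stored black records by blocked user ID and then walks the
// caller's requested ID list, emitting an entry only for users that are actually blocked, which
// preserves the request order. A minimal sketch of that pattern; sliceToMap mirrors
// datautil.SliceToMap, and the black struct is an illustrative stand-in for the storage model.
package main

import "fmt"

// black is a stand-in for the stored blacklist record.
type black struct {
	OwnerUserID string
	BlockUserID string
}

// sliceToMap indexes a slice by a key function, in the style of datautil.SliceToMap.
func sliceToMap[E any, K comparable](es []E, key func(E) K) map[K]E {
	m := make(map[K]E, len(es))
	for _, e := range es {
		m[key(e)] = e
	}
	return m
}

func main() {
	blacks := []black{{"u1", "u2"}, {"u1", "u9"}}
	blackMap := sliceToMap(blacks, func(b black) string { return b.BlockUserID })

	// Walk the requested IDs in order; users that are not blocked are simply skipped.
	for _, userID := range []string{"u2", "u3", "u9"} {
		if b, ok := blackMap[userID]; ok {
			fmt.Println("blocked:", b.BlockUserID)
		}
	}
}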
client discovery.SvcDiscoveryReg conversationRpcClient: rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation), config: config, webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL), - queue: memamq.NewMemoryQueue(128, 1024*8), + queue: memamq.NewMemoryQueue(16, 1024*1024), }) return nil } @@ -212,6 +212,7 @@ func (s *friendServer) RespondFriendApply(ctx context.Context, req *relation.Res if err != nil { return nil, err } + s.webhookAfterAddFriendAgree(ctx, &s.config.WebhooksConfig.AfterAddFriendAgree, req) s.notificationSender.FriendApplicationAgreedNotification(ctx, req) return resp, nil } @@ -228,20 +229,23 @@ func (s *friendServer) RespondFriendApply(ctx context.Context, req *relation.Res // ok. func (s *friendServer) DeleteFriend(ctx context.Context, req *relation.DeleteFriendReq) (resp *relation.DeleteFriendResp, err error) { - resp = &relation.DeleteFriendResp{} - if err := s.userRpcClient.Access(ctx, req.OwnerUserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, s.config.Share.IMAdminUserID); err != nil { return nil, err } + _, err = s.db.FindFriendsWithError(ctx, req.OwnerUserID, []string{req.FriendUserID}) if err != nil { return nil, err } + if err := s.db.Delete(ctx, req.OwnerUserID, []string{req.FriendUserID}); err != nil { return nil, err } + s.notificationSender.FriendDeletedNotification(ctx, req) s.webhookAfterDeleteFriend(ctx, &s.config.WebhooksConfig.AfterDeleteFriend, req) - return resp, nil + + return &relation.DeleteFriendResp{}, nil } // ok. @@ -249,20 +253,24 @@ func (s *friendServer) SetFriendRemark(ctx context.Context, req *relation.SetFri if err = s.webhookBeforeSetFriendRemark(ctx, &s.config.WebhooksConfig.BeforeSetFriendRemark, req); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - resp = &relation.SetFriendRemarkResp{} - if err := s.userRpcClient.Access(ctx, req.OwnerUserID); err != nil { + + if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, s.config.Share.IMAdminUserID); err != nil { return nil, err } + _, err = s.db.FindFriendsWithError(ctx, req.OwnerUserID, []string{req.FriendUserID}) if err != nil { return nil, err } + if err := s.db.UpdateRemark(ctx, req.OwnerUserID, req.FriendUserID, req.Remark); err != nil { return nil, err } + s.webhookAfterSetFriendRemark(ctx, &s.config.WebhooksConfig.AfterSetFriendRemark, req) s.notificationSender.FriendRemarkSetNotification(ctx, req.OwnerUserID, req.FriendUserID) - return resp, nil + + return &relation.SetFriendRemarkResp{}, nil } // ok. @@ -309,36 +317,45 @@ func (s *friendServer) GetDesignatedFriendsApply(ctx context.Context, // Get received friend requests (i.e., those initiated by others). 
func (s *friendServer) GetPaginationFriendsApplyTo(ctx context.Context, req *relation.GetPaginationFriendsApplyToReq) (resp *relation.GetPaginationFriendsApplyToResp, err error) { - if err := s.userRpcClient.Access(ctx, req.UserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil { return nil, err } + total, friendRequests, err := s.db.PageFriendRequestToMe(ctx, req.UserID, req.Pagination) if err != nil { return nil, err } + resp = &relation.GetPaginationFriendsApplyToResp{} resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap) if err != nil { return nil, err } + resp.Total = int32(total) + return resp, nil } func (s *friendServer) GetPaginationFriendsApplyFrom(ctx context.Context, req *relation.GetPaginationFriendsApplyFromReq) (resp *relation.GetPaginationFriendsApplyFromResp, err error) { resp = &relation.GetPaginationFriendsApplyFromResp{} - if err := s.userRpcClient.Access(ctx, req.UserID); err != nil { + + if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil { return nil, err } + total, friendRequests, err := s.db.PageFriendRequestFromMe(ctx, req.UserID, req.Pagination) if err != nil { return nil, err } + resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap) if err != nil { return nil, err } + resp.Total = int32(total) + return resp, nil } @@ -353,31 +370,37 @@ func (s *friendServer) IsFriend(ctx context.Context, req *relation.IsFriendReq) } func (s *friendServer) GetPaginationFriends(ctx context.Context, req *relation.GetPaginationFriendsReq) (resp *relation.GetPaginationFriendsResp, err error) { - if err := s.userRpcClient.Access(ctx, req.UserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil { return nil, err } + total, friends, err := s.db.PageOwnerFriends(ctx, req.UserID, req.Pagination) if err != nil { return nil, err } + resp = &relation.GetPaginationFriendsResp{} resp.FriendsInfo, err = convert.FriendsDB2Pb(ctx, friends, s.userRpcClient.GetUsersInfoMap) if err != nil { return nil, err } + resp.Total = int32(total) + return resp, nil } func (s *friendServer) GetFriendIDs(ctx context.Context, req *relation.GetFriendIDsReq) (resp *relation.GetFriendIDsResp, err error) { - if err := s.userRpcClient.Access(ctx, req.UserID); err != nil { + if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil { return nil, err } + resp = &relation.GetFriendIDsResp{} resp.FriendIDs, err = s.db.FindFriendUserIDs(ctx, req.UserID) if err != nil { return nil, err } + return resp, nil } @@ -385,35 +408,45 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *relatio if len(req.UserIDList) == 0 { return nil, errs.ErrArgs.WrapMsg("userIDList is empty") } + if datautil.Duplicate(req.UserIDList) { return nil, errs.ErrArgs.WrapMsg("userIDList repeated") } + userMap, err := s.userRpcClient.GetUsersInfoMap(ctx, req.UserIDList) if err != nil { return nil, err } + friends, err := s.db.FindFriendsWithError(ctx, req.OwnerUserID, req.UserIDList) if err != nil { return nil, err } + blacks, err := s.blackDatabase.FindBlackInfos(ctx, req.OwnerUserID, req.UserIDList) if err != nil { return nil, err } + friendMap := datautil.SliceToMap(friends, func(e *model.Friend) string { return e.FriendUserID }) + blackMap := datautil.SliceToMap(blacks, func(e *model.Black) string { return e.BlockUserID 
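// Several friend RPCs above swap s.userRpcClient.Access for authverify.CheckAccessV3, which keeps
// the permission check local: the operator may touch their own data, or must be one of the
// configured IM admin IDs. The sketch below is a simplified restatement of that rule, not the real
// helper; authverify.CheckAccessV3 derives the operator ID from the request context, whereas this
// version takes it as an explicit parameter.
package main

import (
	"errors"
	"fmt"
)

// checkAccess allows the owner themselves or any configured IM admin.
func checkAccess(opUserID, ownerUserID string, imAdminUserIDs []string) error {
	if opUserID == ownerUserID {
		return nil
	}
	for _, adminID := range imAdminUserIDs {
		if opUserID == adminID {
			return nil
		}
	}
	return errors.New("no permission")
}

func main() {
	admins := []string{"imAdmin"}
	fmt.Println(checkAccess("u1", "u1", admins))      // <nil>: acting on own data
	fmt.Println(checkAccess("imAdmin", "u1", admins)) // <nil>: admin override
	fmt.Println(checkAccess("u2", "u1", admins))      // error: no permission
}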
}) + resp := &relation.GetSpecifiedFriendsInfoResp{ Infos: make([]*relation.GetSpecifiedFriendsInfoInfo, 0, len(req.UserIDList)), } + for _, userID := range req.UserIDList { user := userMap[userID] + if user == nil { continue } + var friendInfo *sdkws.FriendInfo if friend := friendMap[userID]; friend != nil { friendInfo = &sdkws.FriendInfo{ @@ -426,6 +459,7 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *relatio IsPinned: friend.IsPinned, } } + var blackInfo *sdkws.BlackInfo if black := blackMap[userID]; black != nil { blackInfo = &sdkws.BlackInfo{ @@ -436,12 +470,14 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *relatio Ex: black.Ex, } } + resp.Infos = append(resp.Infos, &relation.GetSpecifiedFriendsInfoInfo{ UserInfo: user, FriendInfo: friendInfo, BlackInfo: blackInfo, }) } + return resp, nil } diff --git a/internal/rpc/third/log.go b/internal/rpc/third/log.go index 68d7088b0c..657ea16893 100644 --- a/internal/rpc/third/log.go +++ b/internal/rpc/third/log.go @@ -17,9 +17,10 @@ package third import ( "context" "crypto/rand" - relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "time" + relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/protocol/constant" diff --git a/internal/rpc/third/s3.go b/internal/rpc/third/s3.go index f96eb73905..fb6a1157e1 100644 --- a/internal/rpc/third/s3.go +++ b/internal/rpc/third/s3.go @@ -290,6 +290,7 @@ func (t *thirdServer) apiAddress(prefix, name string) string { func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) { var conf config.Third expireTime := time.UnixMilli(req.ExpireTime) + var deltotal int findPagination := &sdkws.RequestPagination{ PageNumber: 1, ShowNumber: 1000, @@ -311,10 +312,8 @@ func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteO return nil, errs.Wrap(err) } if int(count) < 1 && t.minio != nil { - thumbnailKey, err := t.getMinioImageThumbnailKey(ctx, key) - if err != nil { - return nil, errs.Wrap(err) - } + thumbnailKey, _ := t.getMinioImageThumbnailKey(ctx, key) + t.s3dataBase.DeleteObject(ctx, thumbnailKey) t.s3dataBase.DelS3Key(ctx, conf.Object.Enable, needDelObjectKeys...) 
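// DeleteOutdatedData above drains expired objects in fixed-size pages (ShowNumber 1000) and stops
// once a page comes back smaller than the page size, accumulating deltotal along the way. A sketch
// of that drain loop under generic names; fetchExpired and the callbacks here are illustrative
// stand-ins, not the thirdServer's actual storage methods.
package pagedemo

import "context"

// fetchExpired returns at most showNumber expired object keys plus how many it found.
type fetchExpired func(ctx context.Context, showNumber int64) (keys []string, total int64, err error)

// drainOutdated repeatedly fetches a page of expired keys and deletes them; because every fetched
// key is removed before the next query, re-reading the first page always yields the next batch.
// It stops when a page is smaller than the page size and reports how many keys were deleted.
func drainOutdated(ctx context.Context, fetch fetchExpired, del func(ctx context.Context, key string) error) (int, error) {
	const showNumber = 1000
	deleted := 0
	for {
		keys, total, err := fetch(ctx, showNumber)
		if err != nil {
			return deleted, err
		}
		for _, key := range keys {
			if err := del(ctx, key); err != nil {
				return deleted, err
			}
			deleted++
		}
		if total < showNumber {
			return deleted, nil
		}
	}
}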
t.s3dataBase.DeleteObject(ctx, key) @@ -329,7 +328,9 @@ func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteO if total < int64(findPagination.ShowNumber) { break } + deltotal += int(total) } + log.ZDebug(ctx, "DeleteOutdatedData", "delete Total", deltotal) return &third.DeleteOutdatedDataResp{}, nil } diff --git a/internal/rpc/user/callback.go b/internal/rpc/user/callback.go index 1bdf399d22..b58053c5ac 100644 --- a/internal/rpc/user/callback.go +++ b/internal/rpc/user/callback.go @@ -16,6 +16,7 @@ package user import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/tools/utils/datautil" @@ -88,7 +89,6 @@ func (s *userServer) webhookBeforeUserRegister(ctx context.Context, before *conf return webhook.WithCondition(ctx, before, func(ctx context.Context) error { cbReq := &cbapi.CallbackBeforeUserRegisterReq{ CallbackCommand: cbapi.CallbackBeforeUserRegisterCommand, - Secret: req.Secret, Users: req.Users, } @@ -108,7 +108,6 @@ func (s *userServer) webhookBeforeUserRegister(ctx context.Context, before *conf func (s *userServer) webhookAfterUserRegister(ctx context.Context, after *config.AfterConfig, req *pbuser.UserRegisterReq) { cbReq := &cbapi.CallbackAfterUserRegisterReq{ CallbackCommand: cbapi.CallbackAfterUserRegisterCommand, - Secret: req.Secret, Users: req.Users, } diff --git a/internal/rpc/user/online.go b/internal/rpc/user/online.go index 99b272006f..0e5365ed93 100644 --- a/internal/rpc/user/online.go +++ b/internal/rpc/user/online.go @@ -2,6 +2,9 @@ package user import ( "context" + + "github.com/openimsdk/tools/utils/datautil" + "github.com/openimsdk/protocol/constant" pbuser "github.com/openimsdk/protocol/user" ) @@ -59,7 +62,7 @@ func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatu case constant.Online: online = []int32{req.PlatformID} case constant.Offline: - online = []int32{req.PlatformID} + offline = []int32{req.PlatformID} } if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil { return nil, err @@ -80,3 +83,22 @@ func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUse } return &pbuser.SetUserOnlineStatusResp{}, nil } + +func (s *userServer) GetAllOnlineUsers(ctx context.Context, req *pbuser.GetAllOnlineUsersReq) (*pbuser.GetAllOnlineUsersResp, error) { + resMap, nextCursor, err := s.online.GetAllOnlineUsers(ctx, req.Cursor) + if err != nil { + return nil, err + } + resp := &pbuser.GetAllOnlineUsersResp{ + StatusList: make([]*pbuser.OnlineStatus, 0, len(resMap)), + NextCursor: nextCursor, + } + for userID, plats := range resMap { + resp.StatusList = append(resp.StatusList, &pbuser.OnlineStatus{ + UserID: userID, + Status: int32(datautil.If(len(plats) > 0, constant.Online, constant.Offline)), + PlatformIDs: plats, + }) + } + return resp, nil +} diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index a6952bd6db..4669ed513f 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -17,6 +17,11 @@ package user import ( "context" "errors" + "math/rand" + "strings" + "sync" + "time" + "github.com/openimsdk/open-im-server/v3/internal/rpc/relation" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" @@ -29,10 +34,6 @@ import ( "github.com/openimsdk/protocol/group" friendpb "github.com/openimsdk/protocol/relation" "github.com/openimsdk/tools/db/redisutil" - "math/rand" - "strings" - "sync" - "time" 
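// GetAllOnlineUsers above is cursor-paginated: each call returns a userID -> platformIDs map plus a
// NextCursor for the following page. The sketch below shows one way a caller could walk all pages,
// assuming the Redis SCAN convention that a zero cursor marks the end; the onlinePage/onlineStore
// types are hypothetical stand-ins for the cache layer behind s.online.
package onlinedemo

import "context"

// onlinePage is one page of online users keyed by userID.
type onlinePage struct {
	Users      map[string][]int32
	NextCursor uint64
}

type onlineStore interface {
	GetAllOnlineUsers(ctx context.Context, cursor uint64) (onlinePage, error)
}

// collectOnlineUsers pages through the store until the cursor wraps back to zero.
func collectOnlineUsers(ctx context.Context, store onlineStore) (map[string][]int32, error) {
	all := make(map[string][]int32)
	var cursor uint64
	for {
		page, err := store.GetAllOnlineUsers(ctx, cursor)
		if err != nil {
			return nil, err
		}
		for userID, platformIDs := range page.Users {
			all[userID] = platformIDs
		}
		if page.NextCursor == 0 {
			return all, nil
		}
		cursor = page.NextCursor
	}
}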
"github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" @@ -46,7 +47,6 @@ import ( "github.com/openimsdk/tools/db/pagination" registry "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/errs" - "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/utils/datautil" "google.golang.org/grpc" ) @@ -147,41 +147,35 @@ func (s *userServer) UpdateUserInfo(ctx context.Context, req *pbuser.UpdateUserI return nil, err } s.friendNotificationSender.UserInfoUpdatedNotification(ctx, req.UserInfo.UserID) - //friends, err := s.friendRpcClient.GetFriendIDs(ctx, req.UserInfo.UserID) - //if err != nil { - // return nil, err - //} - //if req.UserInfo.Nickname != "" || req.UserInfo.FaceURL != "" { - // if err = s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID,oldUser); err != nil { - // return nil, err - // } - //} - //for _, friendID := range friends { - // s.friendNotificationSender.FriendInfoUpdatedNotification(ctx, req.UserInfo.UserID, friendID) - //} + s.webhookAfterUpdateUserInfo(ctx, &s.config.WebhooksConfig.AfterUpdateUserInfo, req) if err = s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID, oldUser); err != nil { return nil, err } return resp, nil } + func (s *userServer) UpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUserInfoExReq) (resp *pbuser.UpdateUserInfoExResp, err error) { resp = &pbuser.UpdateUserInfoExResp{} err = authverify.CheckAccessV3(ctx, req.UserInfo.UserID, s.config.Share.IMAdminUserID) if err != nil { return nil, err } + if err = s.webhookBeforeUpdateUserInfoEx(ctx, &s.config.WebhooksConfig.BeforeUpdateUserInfoEx, req); err != nil { return nil, err } + oldUser, err := s.db.GetUserByID(ctx, req.UserInfo.UserID) if err != nil { return nil, err } + data := convert.UserPb2DBMapEx(req.UserInfo) if err = s.db.UpdateByMap(ctx, req.UserInfo.UserID, data); err != nil { return nil, err } + s.friendNotificationSender.UserInfoUpdatedNotification(ctx, req.UserInfo.UserID) //friends, err := s.friendRpcClient.GetFriendIDs(ctx, req.UserInfo.UserID) //if err != nil { @@ -199,6 +193,7 @@ func (s *userServer) UpdateUserInfoEx(ctx context.Context, req *pbuser.UpdateUse if err := s.NotificationUserInfoUpdate(ctx, req.UserInfo.UserID, oldUser); err != nil { return nil, err } + return resp, nil } func (s *userServer) SetGlobalRecvMessageOpt(ctx context.Context, req *pbuser.SetGlobalRecvMessageOptReq) (resp *pbuser.SetGlobalRecvMessageOptResp, err error) { @@ -267,10 +262,11 @@ func (s *userServer) UserRegister(ctx context.Context, req *pbuser.UserRegisterR if len(req.Users) == 0 { return nil, errs.ErrArgs.WrapMsg("users is empty") } - if req.Secret != s.config.Share.Secret { - log.ZDebug(ctx, "UserRegister", s.config.Share.Secret, req.Secret) - return nil, errs.ErrNoPermission.WrapMsg("secret invalid") + + if err = authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil { + return nil, err } + if datautil.DuplicateAny(req.Users, func(e *sdkws.UserInfo) string { return e.UserID }) { return nil, errs.ErrArgs.WrapMsg("userID repeated") } diff --git a/internal/tools/cron_task.go b/internal/tools/cron_task.go index b1d59800ce..dbb4e34f61 100644 --- a/internal/tools/cron_task.go +++ b/internal/tools/cron_task.go @@ -25,7 +25,6 @@ import ( pbconversation "github.com/openimsdk/protocol/conversation" "github.com/openimsdk/protocol/msg" - "github.com/openimsdk/protocol/third" "github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mw" "google.golang.org/grpc" @@ -59,10 +58,10 @@ func Start(ctx 
context.Context, config *CronTaskConfig) error { return err } - thirdConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third) - if err != nil { - return err - } + // thirdConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third) + // if err != nil { + // return err + // } conversationConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Conversation) if err != nil { @@ -71,7 +70,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error { msgClient := msg.NewMsgClient(msgConn) conversationClient := pbconversation.NewConversationClient(conversationConn) - thirdClient := third.NewThirdClient(thirdConn) + // thirdClient := third.NewThirdClient(thirdConn) crontab := cron.New() @@ -80,12 +79,13 @@ func Start(ctx context.Context, config *CronTaskConfig) error { now := time.Now() deltime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.RetainChatRecords)) ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deltime.UnixMilli())) - log.ZInfo(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli()) + log.ZDebug(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli()) + if _, err := msgClient.ClearMsg(ctx, &msg.ClearMsgReq{Timestamp: deltime.UnixMilli()}); err != nil { log.ZError(ctx, "cron clear chat records failed", err, "deltime", deltime, "cont", time.Since(now)) return } - log.ZInfo(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now)) + log.ZDebug(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now)) } if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearMsgFunc); err != nil { return errs.Wrap(err) @@ -95,7 +95,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error { msgDestructFunc := func() { now := time.Now() ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), now.UnixMilli())) - log.ZInfo(ctx, "msg destruct cron start", "now", now) + log.ZDebug(ctx, "msg destruct cron start", "now", now) conversations, err := conversationClient.GetConversationsNeedDestructMsgs(ctx, &pbconversation.GetConversationsNeedDestructMsgsReq{}) if err != nil { @@ -108,29 +108,29 @@ func Start(ctx context.Context, config *CronTaskConfig) error { return } } - log.ZInfo(ctx, "msg destruct cron task completed", "cont", time.Since(now)) + log.ZDebug(ctx, "msg destruct cron task completed", "cont", time.Since(now)) } if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, msgDestructFunc); err != nil { return errs.Wrap(err) } - // scheduled delete outdated file Objects and their datas in specific time. 
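// The cron_task.go changes above keep the same scheduling shape: build each job as a closure,
// register it with crontab.AddFunc under the configured spec, start the scheduler, and block on the
// context. A minimal sketch of that pattern, assuming github.com/robfig/cron/v3 (which matches the
// AddFunc/Start calls above); "0 2 * * *" and the 30-day retention window are example values
// standing in for config.CronTask.CronExecuteTime and RetainChatRecords.
package main

import (
	"context"
	"log"
	"os/signal"
	"syscall"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	crontab := cron.New()

	clearMsgFunc := func() {
		deltime := time.Now().Add(-time.Hour * 24 * 30) // keep 30 days of chat records
		log.Println("would clear chat records older than", deltime)
	}

	if _, err := crontab.AddFunc("0 2 * * *", clearMsgFunc); err != nil {
		log.Fatal(err)
	}

	crontab.Start()
	<-ctx.Done() // run until interrupted, as the real task does with its cancellable context
}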
- deleteObjectFunc := func() { - now := time.Now() - deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime)) - ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli())) - log.ZInfo(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli()) - if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil { - log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now)) - return - } - log.ZInfo(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now)) - } - if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteObjectFunc); err != nil { - return errs.Wrap(err) - } - - log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime) + // // scheduled delete outdated file Objects and their datas in specific time. + // deleteObjectFunc := func() { + // now := time.Now() + // deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime)) + // ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli())) + // log.ZDebug(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli()) + // if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil { + // log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now)) + // return + // } + // log.ZDebug(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now)) + // } + // if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteObjectFunc); err != nil { + // return errs.Wrap(err) + // } + + log.ZDebug(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime) crontab.Start() <-ctx.Done() return nil diff --git a/pkg/apistruct/msg.go b/pkg/apistruct/msg.go index f4a9f884c4..dc20b5104b 100644 --- a/pkg/apistruct/msg.go +++ b/pkg/apistruct/msg.go @@ -91,7 +91,7 @@ type OANotificationElem struct { NotificationType int32 `mapstructure:"notificationType" json:"notificationType" validate:"required"` Text string `mapstructure:"text" json:"text" validate:"required"` Url string `mapstructure:"url" json:"url"` - MixType int32 `mapstructure:"mixType" json:"mixType"` + MixType int32 `mapstructure:"mixType" json:"mixType" validate:"gte=0,lte=5"` PictureElem *PictureElem `mapstructure:"pictureElem" json:"pictureElem"` SoundElem *SoundElem `mapstructure:"soundElem" json:"soundElem"` VideoElem *VideoElem `mapstructure:"videoElem" json:"videoElem"` diff --git a/pkg/apistruct/msg_test.go b/pkg/apistruct/msg_test.go new file mode 100644 index 0000000000..28f878a9fd --- /dev/null +++ b/pkg/apistruct/msg_test.go @@ -0,0 +1 @@ +package apistruct diff --git a/pkg/callbackstruct/common.go b/pkg/callbackstruct/common.go index d6714f5f20..9d6a325a8d 100644 --- a/pkg/callbackstruct/common.go +++ b/pkg/callbackstruct/common.go @@ -35,6 +35,7 @@ type CommonCallbackReq struct { MsgFrom int32 `json:"msgFrom"` ContentType int32 `json:"contentType"` Status int32 `json:"status"` + SendTime int64 `json:"sendTime"` CreateTime int64 `json:"createTime"` Content string `json:"content"` Seq uint32 `json:"seq"` diff --git a/pkg/callbackstruct/constant.go b/pkg/callbackstruct/constant.go index ab393dd36e..73f89a7193 100644 --- a/pkg/callbackstruct/constant.go +++ 
b/pkg/callbackstruct/constant.go @@ -18,11 +18,14 @@ const ( CallbackBeforeInviteJoinGroupCommand = "callbackBeforeInviteJoinGroupCommand" CallbackAfterJoinGroupCommand = "callbackAfterJoinGroupCommand" CallbackAfterSetGroupInfoCommand = "callbackAfterSetGroupInfoCommand" + CallbackAfterSetGroupInfoExCommand = "callbackAfterSetGroupInfoExCommand" CallbackBeforeSetGroupInfoCommand = "callbackBeforeSetGroupInfoCommand" + CallbackBeforeSetGroupInfoExCommand = "callbackBeforeSetGroupInfoExCommand" CallbackAfterRevokeMsgCommand = "callbackBeforeAfterMsgCommand" CallbackBeforeAddBlackCommand = "callbackBeforeAddBlackCommand" CallbackAfterAddFriendCommand = "callbackAfterAddFriendCommand" CallbackBeforeAddFriendAgreeCommand = "callbackBeforeAddFriendAgreeCommand" + CallbackAfterAddFriendAgreeCommand = "callbackAfterAddFriendAgreeCommand" CallbackAfterDeleteFriendCommand = "callbackAfterDeleteFriendCommand" CallbackBeforeImportFriendsCommand = "callbackBeforeImportFriendsCommand" CallbackAfterImportFriendsCommand = "callbackAfterImportFriendsCommand" diff --git a/pkg/callbackstruct/friend.go b/pkg/callbackstruct/friend.go index 3674a34dab..a81746bfdd 100644 --- a/pkg/callbackstruct/friend.go +++ b/pkg/callbackstruct/friend.go @@ -90,6 +90,18 @@ type CallbackBeforeAddFriendAgreeResp struct { CommonCallbackResp } +type CallbackAfterAddFriendAgreeReq struct { + CallbackCommand `json:"callbackCommand"` + FromUserID string `json:"fromUserID" ` + ToUserID string `json:"toUserID"` + HandleResult int32 `json:"HandleResult"` + HandleMsg string `json:"HandleMsg"` +} + +type CallbackAfterAddFriendAgreeResp struct { + CommonCallbackResp +} + type CallbackAfterDeleteFriendReq struct { CallbackCommand `json:"callbackCommand"` OwnerUserID string `json:"ownerUserID" ` diff --git a/pkg/callbackstruct/group.go b/pkg/callbackstruct/group.go index 23a73ebd23..1f9b848cf9 100644 --- a/pkg/callbackstruct/group.go +++ b/pkg/callbackstruct/group.go @@ -17,6 +17,7 @@ package callbackstruct import ( "github.com/openimsdk/open-im-server/v3/pkg/apistruct" common "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/protocol/wrapperspb" ) type CallbackCommand string @@ -242,3 +243,48 @@ type CallbackAfterSetGroupInfoReq struct { type CallbackAfterSetGroupInfoResp struct { CommonCallbackResp } + +type CallbackBeforeSetGroupInfoExReq struct { + CallbackCommand `json:"callbackCommand"` + OperationID string `json:"operationID"` + GroupID string `json:"groupID"` + GroupName *wrapperspb.StringValue `json:"groupName"` + Notification *wrapperspb.StringValue `json:"notification"` + Introduction *wrapperspb.StringValue `json:"introduction"` + FaceURL *wrapperspb.StringValue `json:"faceURL"` + Ex *wrapperspb.StringValue `json:"ex"` + NeedVerification *wrapperspb.Int32Value `json:"needVerification"` + LookMemberInfo *wrapperspb.Int32Value `json:"lookMemberInfo"` + ApplyMemberFriend *wrapperspb.Int32Value `json:"applyMemberFriend"` +} + +type CallbackBeforeSetGroupInfoExResp struct { + CommonCallbackResp + GroupID string `json:"groupID"` + GroupName *wrapperspb.StringValue `json:"groupName"` + Notification *wrapperspb.StringValue `json:"notification"` + Introduction *wrapperspb.StringValue `json:"introduction"` + FaceURL *wrapperspb.StringValue `json:"faceURL"` + Ex *wrapperspb.StringValue `json:"ex"` + NeedVerification *wrapperspb.Int32Value `json:"needVerification"` + LookMemberInfo *wrapperspb.Int32Value `json:"lookMemberInfo"` + ApplyMemberFriend *wrapperspb.Int32Value `json:"applyMemberFriend"` +} + +type 
CallbackAfterSetGroupInfoExReq struct { + CallbackCommand `json:"callbackCommand"` + OperationID string `json:"operationID"` + GroupID string `json:"groupID"` + GroupName *wrapperspb.StringValue `json:"groupName"` + Notification *wrapperspb.StringValue `json:"notification"` + Introduction *wrapperspb.StringValue `json:"introduction"` + FaceURL *wrapperspb.StringValue `json:"faceURL"` + Ex *wrapperspb.StringValue `json:"ex"` + NeedVerification *wrapperspb.Int32Value `json:"needVerification"` + LookMemberInfo *wrapperspb.Int32Value `json:"lookMemberInfo"` + ApplyMemberFriend *wrapperspb.Int32Value `json:"applyMemberFriend"` +} + +type CallbackAfterSetGroupInfoExResp struct { + CommonCallbackResp +} diff --git a/pkg/callbackstruct/user.go b/pkg/callbackstruct/user.go index 504c7ffb7e..d8640da29a 100644 --- a/pkg/callbackstruct/user.go +++ b/pkg/callbackstruct/user.go @@ -72,7 +72,6 @@ type CallbackAfterUpdateUserInfoExResp struct { type CallbackBeforeUserRegisterReq struct { CallbackCommand `json:"callbackCommand"` - Secret string `json:"secret"` Users []*sdkws.UserInfo `json:"users"` } @@ -83,7 +82,6 @@ type CallbackBeforeUserRegisterResp struct { type CallbackAfterUserRegisterReq struct { CallbackCommand `json:"callbackCommand"` - Secret string `json:"secret"` Users []*sdkws.UserInfo `json:"users"` } diff --git a/pkg/common/cmd/msg_gateway_test.go b/pkg/common/cmd/msg_gateway_test.go index 2b68a3e3ab..929abcd863 100644 --- a/pkg/common/cmd/msg_gateway_test.go +++ b/pkg/common/cmd/msg_gateway_test.go @@ -15,13 +15,14 @@ package cmd import ( + "math" + "testing" + "github.com/openimsdk/protocol/auth" "github.com/openimsdk/tools/apiresp" "github.com/openimsdk/tools/utils/jsonutil" "github.com/stretchr/testify/mock" "go.mongodb.org/mongo-driver/bson/primitive" - "math" - "testing" ) // MockRootCmd is a mock type for the RootCmd type @@ -39,7 +40,7 @@ func TestName(t *testing.T) { ErrCode: 1234, ErrMsg: "test", ErrDlt: "4567", - Data: &auth.UserTokenResp{ + Data: &auth.GetUserTokenResp{ Token: "1234567", ExpireTimeSeconds: math.MaxInt64, }, @@ -51,7 +52,7 @@ func TestName(t *testing.T) { t.Log(string(data)) var rReso apiresp.ApiResponse - rReso.Data = &auth.UserTokenResp{} + rReso.Data = &auth.GetUserTokenResp{} if err := jsonutil.JsonUnmarshal(data, &rReso); err != nil { panic(err) diff --git a/pkg/common/cmd/push.go b/pkg/common/cmd/push.go index c9b8b1c245..ca22a697d2 100644 --- a/pkg/common/cmd/push.go +++ b/pkg/common/cmd/push.go @@ -37,7 +37,6 @@ func NewPushRpcCmd() *PushRpcCmd { ret.configMap = map[string]any{ OpenIMPushCfgFileName: &pushConfig.RpcConfig, RedisConfigFileName: &pushConfig.RedisConfig, - MongodbConfigFileName: &pushConfig.MongodbConfig, KafkaConfigFileName: &pushConfig.KafkaConfig, ShareFileName: &pushConfig.Share, NotificationFileName: &pushConfig.NotificationConfig, diff --git a/pkg/common/cmd/root.go b/pkg/common/cmd/root.go index b43f86557f..5edea43773 100644 --- a/pkg/common/cmd/root.go +++ b/pkg/common/cmd/root.go @@ -129,10 +129,11 @@ func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts { } func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error { - err := log.InitFromConfig( + err := log.InitLoggerFromConfig( cmdOpts.loggerPrefixName, r.processName, + "", "", r.log.RemainLogLevel, r.log.IsStdout, r.log.IsJson, diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index c6c672eb8d..77fcbb8aa1 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -73,18 +73,21 @@ type Mongo struct { MaxRetry int 
`mapstructure:"maxRetry"` } type Kafka struct { - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` - ProducerAck string `mapstructure:"producerAck"` - CompressType string `mapstructure:"compressType"` - Address []string `mapstructure:"address"` - ToRedisTopic string `mapstructure:"toRedisTopic"` - ToMongoTopic string `mapstructure:"toMongoTopic"` - ToPushTopic string `mapstructure:"toPushTopic"` - ToRedisGroupID string `mapstructure:"toRedisGroupID"` - ToMongoGroupID string `mapstructure:"toMongoGroupID"` - ToPushGroupID string `mapstructure:"toPushGroupID"` - Tls TLSConfig `mapstructure:"tls"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + ProducerAck string `mapstructure:"producerAck"` + CompressType string `mapstructure:"compressType"` + Address []string `mapstructure:"address"` + ToRedisTopic string `mapstructure:"toRedisTopic"` + ToMongoTopic string `mapstructure:"toMongoTopic"` + ToPushTopic string `mapstructure:"toPushTopic"` + ToOfflinePushTopic string `mapstructure:"toOfflinePushTopic"` + ToRedisGroupID string `mapstructure:"toRedisGroupID"` + ToMongoGroupID string `mapstructure:"toMongoGroupID"` + ToPushGroupID string `mapstructure:"toPushGroupID"` + ToOfflineGroupID string `mapstructure:"toOfflinePushGroupID"` + + Tls TLSConfig `mapstructure:"tls"` } type TLSConfig struct { EnableTLS bool `mapstructure:"enableTLS"` @@ -97,8 +100,9 @@ type TLSConfig struct { type API struct { Api struct { - ListenIP string `mapstructure:"listenIP"` - Ports []int `mapstructure:"ports"` + ListenIP string `mapstructure:"listenIP"` + Ports []int `mapstructure:"ports"` + CompressionLevel int `mapstructure:"compressionLevel"` } `mapstructure:"api"` Prometheus struct { Enable bool `mapstructure:"enable"` @@ -181,7 +185,6 @@ type MsgGateway struct { WebsocketMaxMsgLen int `mapstructure:"websocketMaxMsgLen"` WebsocketTimeout int `mapstructure:"websocketTimeout"` } `mapstructure:"longConnSvr"` - MultiLoginPolicy int `mapstructure:"multiLoginPolicy"` } type MsgTransfer struct { @@ -220,6 +223,7 @@ type Push struct { BadgeCount bool `mapstructure:"badgeCount"` Production bool `mapstructure:"production"` } `mapstructure:"iosPush"` + FullUserCache bool `mapstructure:"fullUserCache"` } type Auth struct { @@ -258,7 +262,8 @@ type Group struct { ListenIP string `mapstructure:"listenIP"` Ports []int `mapstructure:"ports"` } `mapstructure:"rpc"` - Prometheus Prometheus `mapstructure:"prometheus"` + Prometheus Prometheus `mapstructure:"prometheus"` + EnableHistoryForNewMembers bool `mapstructure:"enableHistoryForNewMembers"` } type Msg struct { @@ -335,25 +340,31 @@ type Redis struct { Password string `mapstructure:"password"` ClusterMode bool `mapstructure:"clusterMode"` DB int `mapstructure:"storage"` - MaxRetry int `mapstructure:"MaxRetry"` + MaxRetry int `mapstructure:"maxRetry"` + PoolSize int `mapstructure:"poolSize"` } type BeforeConfig struct { - Enable bool `mapstructure:"enable"` - Timeout int `mapstructure:"timeout"` - FailedContinue bool `mapstructure:"failedContinue"` + Enable bool `mapstructure:"enable"` + Timeout int `mapstructure:"timeout"` + FailedContinue bool `mapstructure:"failedContinue"` + AllowedTypes []string `mapstructure:"allowedTypes"` + DeniedTypes []string `mapstructure:"deniedTypes"` } type AfterConfig struct { Enable bool `mapstructure:"enable"` Timeout int `mapstructure:"timeout"` AttentionIds []string `mapstructure:"attentionIds"` + AllowedTypes []string `mapstructure:"allowedTypes"` + DeniedTypes 
[]string `mapstructure:"deniedTypes"` } type Share struct { - Secret string `mapstructure:"secret"` - RpcRegisterName RpcRegisterName `mapstructure:"rpcRegisterName"` - IMAdminUserID []string `mapstructure:"imAdminUserID"` + Secret string `mapstructure:"secret"` + RpcRegisterName RpcRegisterName `mapstructure:"rpcRegisterName"` + IMAdminUserID []string `mapstructure:"imAdminUserID"` + MultiLoginPolicy int `mapstructure:"multiLoginPolicy"` } type RpcRegisterName struct { User string `mapstructure:"user"` @@ -421,10 +432,13 @@ type Webhooks struct { BeforeInviteUserToGroup BeforeConfig `mapstructure:"beforeInviteUserToGroup"` AfterSetGroupInfo AfterConfig `mapstructure:"afterSetGroupInfo"` BeforeSetGroupInfo BeforeConfig `mapstructure:"beforeSetGroupInfo"` + AfterSetGroupInfoEx AfterConfig `mapstructure:"afterSetGroupInfoEx"` + BeforeSetGroupInfoEx BeforeConfig `mapstructure:"beforeSetGroupInfoEx"` AfterRevokeMsg AfterConfig `mapstructure:"afterRevokeMsg"` BeforeAddBlack BeforeConfig `mapstructure:"beforeAddBlack"` AfterAddFriend AfterConfig `mapstructure:"afterAddFriend"` BeforeAddFriendAgree BeforeConfig `mapstructure:"beforeAddFriendAgree"` + AfterAddFriendAgree AfterConfig `mapstructure:"afterAddFriendAgree"` AfterDeleteFriend AfterConfig `mapstructure:"afterDeleteFriend"` BeforeImportFriends BeforeConfig `mapstructure:"beforeImportFriends"` AfterImportFriends AfterConfig `mapstructure:"afterImportFriends"` @@ -471,6 +485,7 @@ func (r *Redis) Build() *redisutil.Config { Password: r.Password, DB: r.DB, MaxRetry: r.MaxRetry, + PoolSize: r.PoolSize, } } diff --git a/pkg/common/config/load_config_test.go b/pkg/common/config/load_config_test.go index 256214565b..a0345fc7a5 100644 --- a/pkg/common/config/load_config_test.go +++ b/pkg/common/config/load_config_test.go @@ -36,3 +36,26 @@ func TestLoadOpenIMRpcUserConfig(t *testing.T) { //export IMENV_OPENIM_RPC_USER_RPC_PORTS="10110,10111,10112" assert.Equal(t, []int{10110, 10111, 10112}, user.RPC.Ports) } + +func TestLoadNotificationConfig(t *testing.T) { + var noti Notification + err := LoadConfig("../../../config/notification.yml", "IMENV_NOTIFICATION", ¬i) + assert.Nil(t, err) + assert.Equal(t, "Your friend's profile has been changed", noti.FriendRemarkSet.OfflinePush.Title) +} + +func TestLoadOpenIMThirdConfig(t *testing.T) { + var third Third + err := LoadConfig("../../../config/openim-rpc-third.yml", "IMENV_OPENIM_RPC_THIRD", &third) + assert.Nil(t, err) + assert.Equal(t, "enabled", third.Object.Enable) + assert.Equal(t, "https://oss-cn-chengdu.aliyuncs.com", third.Object.Oss.Endpoint) + assert.Equal(t, "my_bucket_name", third.Object.Oss.Bucket) + assert.Equal(t, "https://my_bucket_name.oss-cn-chengdu.aliyuncs.com", third.Object.Oss.BucketURL) + assert.Equal(t, "AKID1234567890", third.Object.Oss.AccessKeyID) + assert.Equal(t, "abc123xyz789", third.Object.Oss.AccessKeySecret) + assert.Equal(t, "session_token_value", third.Object.Oss.SessionToken) // Uncomment if session token is needed + assert.Equal(t, true, third.Object.Oss.PublicRead) + + // Environment: 
IMENV_OPENIM_RPC_THIRD_OBJECT_ENABLE=enabled;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ENDPOINT=https://oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKET=my_bucket_name;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKETURL=https://my_bucket_name.oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYID=AKID1234567890;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYSECRET=abc123xyz789;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_SESSIONTOKEN=session_token_value;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_PUBLICREAD=true +} diff --git a/pkg/common/prommetrics/grpc_push.go b/pkg/common/prommetrics/grpc_push.go index 0b6c3e76f3..5c966310f7 100644 --- a/pkg/common/prommetrics/grpc_push.go +++ b/pkg/common/prommetrics/grpc_push.go @@ -23,4 +23,8 @@ var ( Name: "msg_offline_push_failed_total", Help: "The number of msg failed offline pushed", }) + MsgLoneTimePushCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "msg_long_time_push_total", + Help: "The number of messages with a push time exceeding 10 seconds", + }) ) diff --git a/pkg/common/prommetrics/rpc.go b/pkg/common/prommetrics/rpc.go index dc16322dab..7162fa7e80 100644 --- a/pkg/common/prommetrics/rpc.go +++ b/pkg/common/prommetrics/rpc.go @@ -47,9 +47,17 @@ func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Co case share.RpcRegisterName.MessageGateway: return []prometheus.Collector{OnlineUserGauge} case share.RpcRegisterName.Msg: - return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter} + return []prometheus.Collector{ + SingleChatMsgProcessSuccessCounter, + SingleChatMsgProcessFailedCounter, + GroupChatMsgProcessSuccessCounter, + GroupChatMsgProcessFailedCounter, + } case share.RpcRegisterName.Push: - return []prometheus.Collector{MsgOfflinePushFailedCounter} + return []prometheus.Collector{ + MsgOfflinePushFailedCounter, + MsgLoneTimePushCounter, + } case share.RpcRegisterName.Auth: return []prometheus.Collector{UserLoginCounter} case share.RpcRegisterName.User: diff --git a/pkg/common/startrpc/start.go b/pkg/common/startrpc/start.go index 4091a5f6e6..fb8782d304 100644 --- a/pkg/common/startrpc/start.go +++ b/pkg/common/startrpc/start.go @@ -25,7 +25,6 @@ import ( "os" "os/signal" "strconv" - "sync" "syscall" "time" @@ -35,7 +34,6 @@ import ( "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mw" - "github.com/openimsdk/tools/system/program" "github.com/openimsdk/tools/utils/network" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -54,6 +52,7 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", rpcPort, "prometheusPorts", prometheusConfig.Ports) rpcTcpAddr := net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort)) + listener, err := net.Listen( "tcp", rpcTcpAddr, @@ -61,7 +60,6 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo if err != nil { return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr) } - defer listener.Close() client, err := kdisc.NewDiscoveryRegister(discovery, share) if err != nil { @@ -92,10 +90,6 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo } srv := grpc.NewServer(options...) 
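// Editor's note: an illustrative sketch only, not part of this patch. The signal-handling change
// later in this hunk drops the deferred sync.Once/GracefulStop and httpServer shutdown in favor of
// gracefulStopWithCtx(ctx, srv.GracefulStop) under a 10-second timeout; the helper's body is not
// shown in this diff. Assuming a signature of func(ctx context.Context, f func()) error, one
// plausible shape, given the errs package already imported in start.go, is:
func gracefulStopWithCtx(ctx context.Context, f func()) error {
	done := make(chan struct{})
	go func() {
		f() // srv.GracefulStop blocks until in-flight RPCs drain
		close(done)
	}()
	select {
	case <-ctx.Done(): // caller's shutdown deadline expired
		return errs.New("graceful stop timeout")
	case <-done:
		return nil
	}
}
// Bounding GracefulStop with a context keeps shutdown from hanging indefinitely on stuck streams.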
- once := sync.Once{} - defer func() { - once.Do(srv.GracefulStop) - }() err = rpcFn(ctx, config, client, srv) if err != nil { @@ -113,9 +107,8 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo } var ( - netDone = make(chan struct{}, 2) - netErr error - httpServer *http.Server + netDone = make(chan struct{}, 2) + netErr error ) if prometheusConfig.Enable { go func() { @@ -152,18 +145,11 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo signal.Notify(sigs, syscall.SIGTERM) select { case <-sigs: - program.SIGTERMExit() - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if err := gracefulStopWithCtx(ctx, srv.GracefulStop); err != nil { return err } - ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - err := httpServer.Shutdown(ctx) - if err != nil { - return errs.WrapMsg(err, "shutdown err") - } return nil case <-netDone: return netErr diff --git a/pkg/common/storage/cache/cachekey/conversation.go b/pkg/common/storage/cache/cachekey/conversation.go index d19fcc5767..909774288b 100644 --- a/pkg/common/storage/cache/cachekey/conversation.go +++ b/pkg/common/storage/cache/cachekey/conversation.go @@ -17,6 +17,8 @@ package cachekey const ( ConversationKey = "CONVERSATION:" ConversationIDsKey = "CONVERSATION_IDS:" + NotNotifyConversationIDsKey = "NOT_NOTIFY_CONVERSATION_IDS:" + PinnedConversationIDsKey = "PINNED_CONVERSATION_IDS:" ConversationIDsHashKey = "CONVERSATION_IDS_HASH:" ConversationHasReadSeqKey = "CONVERSATION_HAS_READ_SEQ:" RecvMsgOptKey = "RECV_MSG_OPT:" @@ -34,6 +36,14 @@ func GetConversationIDsKey(ownerUserID string) string { return ConversationIDsKey + ownerUserID } +func GetNotNotifyConversationIDsKey(ownerUserID string) string { + return NotNotifyConversationIDsKey + ownerUserID +} + +func GetPinnedConversationIDs(ownerUserID string) string { + return PinnedConversationIDsKey + ownerUserID +} + func GetSuperGroupRecvNotNotifyUserIDsKey(groupID string) string { return SuperGroupRecvMsgNotNotifyUserIDsKey + groupID } diff --git a/pkg/common/storage/cache/cachekey/group.go b/pkg/common/storage/cache/cachekey/group.go index 2ef42c0ff4..72eb7c2950 100644 --- a/pkg/common/storage/cache/cachekey/group.go +++ b/pkg/common/storage/cache/cachekey/group.go @@ -20,16 +20,17 @@ import ( ) const ( - groupExpireTime = time.Second * 60 * 60 * 12 - GroupInfoKey = "GROUP_INFO:" - GroupMemberIDsKey = "GROUP_MEMBER_IDS:" - GroupMembersHashKey = "GROUP_MEMBERS_HASH2:" - GroupMemberInfoKey = "GROUP_MEMBER_INFO:" - JoinedGroupsKey = "JOIN_GROUPS_KEY:" - GroupMemberNumKey = "GROUP_MEMBER_NUM_CACHE:" - GroupRoleLevelMemberIDsKey = "GROUP_ROLE_LEVEL_MEMBER_IDS:" - GroupMemberMaxVersionKey = "GROUP_MEMBER_MAX_VERSION:" - GroupJoinMaxVersionKey = "GROUP_JOIN_MAX_VERSION:" + groupExpireTime = time.Second * 60 * 60 * 12 + GroupInfoKey = "GROUP_INFO:" + GroupMemberIDsKey = "GROUP_MEMBER_IDS:" + GroupMembersHashKey = "GROUP_MEMBERS_HASH2:" + GroupMemberInfoKey = "GROUP_MEMBER_INFO:" + JoinedGroupsKey = "JOIN_GROUPS_KEY:" + GroupMemberNumKey = "GROUP_MEMBER_NUM_CACHE:" + GroupRoleLevelMemberIDsKey = "GROUP_ROLE_LEVEL_MEMBER_IDS:" + GroupAdminLevelMemberIDsKey = "GROUP_ADMIN_LEVEL_MEMBER_IDS:" + GroupMemberMaxVersionKey = "GROUP_MEMBER_MAX_VERSION:" + GroupJoinMaxVersionKey = "GROUP_JOIN_MAX_VERSION:" ) func GetGroupInfoKey(groupID string) string { diff --git 
a/pkg/common/storage/cache/cachekey/online.go b/pkg/common/storage/cache/cachekey/online.go index 164e5f2f46..40f09cb5ae 100644 --- a/pkg/common/storage/cache/cachekey/online.go +++ b/pkg/common/storage/cache/cachekey/online.go @@ -1,6 +1,9 @@ package cachekey -import "time" +import ( + "strings" + "time" +) const ( OnlineKey = "ONLINE:" @@ -11,3 +14,7 @@ const ( func GetOnlineKey(userID string) string { return OnlineKey + userID } + +func GetOnlineKeyUserID(key string) string { + return strings.TrimPrefix(key, OnlineKey) +} diff --git a/pkg/common/storage/cache/conversation.go b/pkg/common/storage/cache/conversation.go index bc17614836..ac3011107c 100644 --- a/pkg/common/storage/cache/conversation.go +++ b/pkg/common/storage/cache/conversation.go @@ -25,6 +25,8 @@ type ConversationCache interface { CloneConversationCache() ConversationCache // get user's conversationIDs from msgCache GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) + GetUserNotNotifyConversationIDs(ctx context.Context, userID string) ([]string, error) + GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error) DelConversationIDs(userIDs ...string) ConversationCache GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error) @@ -54,7 +56,8 @@ type ConversationCache interface { GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache - + DelConversationNotNotifyMessageUserIDs(userIDs ...string) ConversationCache + DelConversationPinnedMessageUserIDs(userIDs ...string) ConversationCache DelConversationVersionUserIDs(userIDs ...string) ConversationCache FindMaxConversationUserVersion(ctx context.Context, userID string) (*relationtb.VersionLog, error) diff --git a/pkg/common/storage/cache/group.go b/pkg/common/storage/cache/group.go index 1ec0462956..05b75745a1 100644 --- a/pkg/common/storage/cache/group.go +++ b/pkg/common/storage/cache/group.go @@ -16,6 +16,7 @@ package cache import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/common" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" ) diff --git a/pkg/common/storage/cache/online.go b/pkg/common/storage/cache/online.go index 7669c8a118..d21ae616a6 100644 --- a/pkg/common/storage/cache/online.go +++ b/pkg/common/storage/cache/online.go @@ -5,4 +5,5 @@ import "context" type OnlineCache interface { GetOnline(ctx context.Context, userID string) ([]int32, error) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error + GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) } diff --git a/pkg/common/storage/cache/redis/batch.go b/pkg/common/storage/cache/redis/batch.go index 4d65c59298..1810ac9939 100644 --- a/pkg/common/storage/cache/redis/batch.go +++ b/pkg/common/storage/cache/redis/batch.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "github.com/dtm-labs/rockscache" + "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" "golang.org/x/sync/singleflight" @@ -65,6 +66,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac } bs, err := json.Marshal(value) if err != nil { + log.ZError(ctx, "marshal failed", err) return nil, err } cacheIndex[index] = string(bs) @@ -72,7 +74,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac return cacheIndex, nil }) if 
err != nil { - return nil, err + return nil, errs.WrapMsg(err, "FetchBatch2 failed") } for index, data := range indexCache { if data == "" { @@ -80,7 +82,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac } var value V if err := json.Unmarshal([]byte(data), &value); err != nil { - return nil, err + return nil, errs.WrapMsg(err, "Unmarshal failed") } if cb, ok := any(&value).(BatchCacheCallback[K]); ok { cb.BatchCache(keyId[keys[index]]) diff --git a/pkg/common/storage/cache/redis/batch_handler.go b/pkg/common/storage/cache/redis/batch_handler.go index f9923e198e..420ebdf777 100644 --- a/pkg/common/storage/cache/redis/batch_handler.go +++ b/pkg/common/storage/cache/redis/batch_handler.go @@ -28,6 +28,10 @@ import ( "time" ) +const ( + rocksCacheTimeout = 11 * time.Second +) + // BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache. type BatchDeleterRedis struct { redisClient redis.UniversalClient @@ -106,6 +110,8 @@ func (c *BatchDeleterRedis) AddKeys(keys ...string) { // GetRocksCacheOptions returns the default configuration options for RocksCache. func GetRocksCacheOptions() *rockscache.Options { opts := rockscache.NewDefaultOptions() + opts.LockExpire = rocksCacheTimeout + opts.WaitReplicasTimeout = rocksCacheTimeout opts.StrongConsistency = true opts.RandomExpireAdjustment = 0.2 @@ -118,7 +124,7 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) { t, err = fn(ctx) if err != nil { - log.ZError(ctx, "getCache query database failed", err, "key", key) + //log.ZError(ctx, "getCache query database failed", err, "key", key) return "", err } bs, err := json.Marshal(t) diff --git a/pkg/common/storage/cache/redis/conversation.go b/pkg/common/storage/cache/redis/conversation.go index 95e680afb4..326f60b96a 100644 --- a/pkg/common/storage/cache/redis/conversation.go +++ b/pkg/common/storage/cache/redis/conversation.go @@ -71,6 +71,14 @@ func (c *ConversationRedisCache) getConversationIDsKey(ownerUserID string) strin return cachekey.GetConversationIDsKey(ownerUserID) } +func (c *ConversationRedisCache) getNotNotifyConversationIDsKey(ownerUserID string) string { + return cachekey.GetNotNotifyConversationIDsKey(ownerUserID) +} + +func (c *ConversationRedisCache) getPinnedConversationIDsKey(ownerUserID string) string { + return cachekey.GetPinnedConversationIDs(ownerUserID) +} + func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsKey(groupID string) string { return cachekey.GetSuperGroupRecvNotNotifyUserIDsKey(groupID) } @@ -105,6 +113,18 @@ func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, own }) } +func (c *ConversationRedisCache) GetUserNotNotifyConversationIDs(ctx context.Context, userID string) ([]string, error) { + return getCache(ctx, c.rcClient, c.getNotNotifyConversationIDsKey(userID), c.expireTime, func(ctx context.Context) ([]string, error) { + return c.conversationDB.FindUserIDAllNotNotifyConversationID(ctx, userID) + }) +} + +func (c *ConversationRedisCache) GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error) { + return getCache(ctx, c.rcClient, c.getPinnedConversationIDsKey(userID), c.expireTime, func(ctx context.Context) ([]string, error) { + return c.conversationDB.FindUserIDAllPinnedConversationID(ctx, userID) + }) +} + func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) cache.ConversationCache { keys := 
make([]string, 0, len(userIDs)) for _, userID := range userIDs { @@ -242,6 +262,22 @@ func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(convers return cache } +func (c *ConversationRedisCache) DelConversationNotNotifyMessageUserIDs(userIDs ...string) cache.ConversationCache { + cache := c.CloneConversationCache() + for _, userID := range userIDs { + cache.AddKeys(c.getNotNotifyConversationIDsKey(userID)) + } + return cache +} + +func (c *ConversationRedisCache) DelConversationPinnedMessageUserIDs(userIDs ...string) cache.ConversationCache { + cache := c.CloneConversationCache() + for _, userID := range userIDs { + cache.AddKeys(c.getPinnedConversationIDsKey(userID)) + } + return cache +} + func (c *ConversationRedisCache) DelConversationVersionUserIDs(userIDs ...string) cache.ConversationCache { cache := c.CloneConversationCache() for _, userID := range userIDs { diff --git a/pkg/common/storage/cache/redis/online.go b/pkg/common/storage/cache/redis/online.go index a012e1cd2d..b6c90264e1 100644 --- a/pkg/common/storage/cache/redis/online.go +++ b/pkg/common/storage/cache/redis/online.go @@ -2,12 +2,15 @@ package redis import ( "context" + "fmt" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/protocol/constant" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" "strconv" + "strings" "time" ) @@ -48,6 +51,36 @@ func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, err return platformIDs, nil } +func (s *userOnline) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) { + result := make(map[string][]int32) + + keys, nextCursor, err := s.rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result() + if err != nil { + return nil, 0, err + } + + for _, key := range keys { + userID := cachekey.GetOnlineKeyUserID(key) + strValues, err := s.rdb.ZRange(ctx, key, 0, -1).Result() + if err != nil { + return nil, 0, err + } + + values := make([]int32, 0, len(strValues)) + for _, value := range strValues { + intValue, err := strconv.Atoi(value) + if err != nil { + return nil, 0, errs.Wrap(err) + } + values = append(values, int32(intValue)) + } + + result[userID] = values + } + + return result, nextCursor, nil +} + func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error { script := ` local key = KEYS[1] @@ -66,11 +99,10 @@ func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, o local change = (num1 ~= num2) or (num2 ~= num3) if change then local members = redis.call("ZRANGE", key, 0, -1) - table.insert(members, KEYS[2]) - redis.call("PUBLISH", KEYS[3], table.concat(members, ":")) - return 1 + table.insert(members, "1") + return members else - return 0 + return {"0"} end ` now := time.Now() @@ -82,12 +114,24 @@ func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, o for _, platformID := range online { argv = append(argv, platformID) } - keys := []string{s.getUserOnlineKey(userID), userID, s.channelName} - status, err := s.rdb.Eval(ctx, script, keys, argv).Result() + keys := []string{s.getUserOnlineKey(userID)} + platformIDs, err := s.rdb.Eval(ctx, script, keys, argv).StringSlice() if err != nil { log.ZError(ctx, "redis SetUserOnline", err, "userID", userID, "online", online, "offline", offline) return err } - log.ZDebug(ctx, "redis 
SetUserOnline", "userID", userID, "online", online, "offline", offline, "status", status) + if len(platformIDs) == 0 { + return errs.ErrInternalServer.WrapMsg("SetUserOnline redis lua invalid return value") + } + if platformIDs[len(platformIDs)-1] != "0" { + log.ZDebug(ctx, "redis SetUserOnline push", "userID", userID, "online", online, "offline", offline, "platformIDs", platformIDs[:len(platformIDs)-1]) + platformIDs[len(platformIDs)-1] = userID + msg := strings.Join(platformIDs, ":") + if err := s.rdb.Publish(ctx, s.channelName, msg).Err(); err != nil { + return errs.Wrap(err) + } + } else { + log.ZDebug(ctx, "redis SetUserOnline not push", "userID", userID, "online", online, "offline", offline) + } return nil } diff --git a/pkg/common/storage/cache/redis/online_test.go b/pkg/common/storage/cache/redis/online_test.go new file mode 100644 index 0000000000..0306f6f5d7 --- /dev/null +++ b/pkg/common/storage/cache/redis/online_test.go @@ -0,0 +1,51 @@ +package redis + +import ( + "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/tools/db/redisutil" + "testing" + "time" +) + +/* +address: [ 172.16.8.48:7001, 172.16.8.48:7002, 172.16.8.48:7003, 172.16.8.48:7004, 172.16.8.48:7005, 172.16.8.48:7006 ] +username: +password: passwd123 +clusterMode: true +db: 0 +maxRetry: 10 +*/ +func TestName111111(t *testing.T) { + conf := config.Redis{ + Address: []string{ + "172.16.8.124:7001", + "172.16.8.124:7002", + "172.16.8.124:7003", + "172.16.8.124:7004", + "172.16.8.124:7005", + "172.16.8.124:7006", + }, + ClusterMode: true, + Password: "passwd123", + //Address: []string{"localhost:16379"}, + //Password: "openIM123", + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second*1000) + defer cancel() + rdb, err := redisutil.NewRedisClient(ctx, conf.Build()) + if err != nil { + panic(err) + } + online := NewUserOnline(rdb) + + userID := "a123456" + t.Log(online.GetOnline(ctx, userID)) + t.Log(online.SetUserOnline(ctx, userID, []int32{1, 2, 3, 4}, nil)) + t.Log(online.GetOnline(ctx, userID)) + +} + +func TestName111(t *testing.T) { + +} diff --git a/pkg/common/storage/cache/redis/seq_conversation.go b/pkg/common/storage/cache/redis/seq_conversation.go index 7fe849193e..71705cef7e 100644 --- a/pkg/common/storage/cache/redis/seq_conversation.go +++ b/pkg/common/storage/cache/redis/seq_conversation.go @@ -12,6 +12,7 @@ import ( "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" + "strconv" "time" ) @@ -57,6 +58,14 @@ func (s *seqConversationCacheRedis) getSingleMaxSeq(ctx context.Context, convers return map[string]int64{conversationID: seq}, nil } +func (s *seqConversationCacheRedis) getSingleMaxSeqWithTime(ctx context.Context, conversationID string) (map[string]database.SeqTime, error) { + seq, err := s.GetMaxSeqWithTime(ctx, conversationID) + if err != nil { + return nil, err + } + return map[string]database.SeqTime{conversationID: seq}, nil +} + func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error { result := make([]*redis.StringCmd, len(keys)) pipe := s.rdb.Pipeline() @@ -88,6 +97,46 @@ func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []s return nil } +func (s *seqConversationCacheRedis) batchGetMaxSeqWithTime(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]database.SeqTime) error { + result := make([]*redis.SliceCmd, len(keys)) + pipe 
:= s.rdb.Pipeline() + for i, key := range keys { + result[i] = pipe.HMGet(ctx, key, "CURR", "TIME") + } + if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) { + return errs.Wrap(err) + } + var notFoundKey []string + for i, r := range result { + val, err := r.Result() + if len(val) != 2 { + return errs.WrapMsg(err, "batchGetMaxSeqWithTime invalid result", "key", keys[i], "res", val) + } + if val[0] == nil { + notFoundKey = append(notFoundKey, keys[i]) + continue + } + seq, err := s.parseInt64(val[0]) + if err != nil { + return err + } + mill, err := s.parseInt64(val[1]) + if err != nil { + return err + } + seqs[keyConversationID[keys[i]]] = database.SeqTime{Seq: seq, Time: mill} + } + for _, key := range notFoundKey { + conversationID := keyConversationID[key] + seq, err := s.GetMaxSeqWithTime(ctx, conversationID) + if err != nil { + return err + } + seqs[conversationID] = seq + } + return nil +} + func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) { switch len(conversationIDs) { case 0: @@ -121,11 +170,44 @@ func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversation return seqs, nil } +func (s *seqConversationCacheRedis) GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) { + switch len(conversationIDs) { + case 0: + return map[string]database.SeqTime{}, nil + case 1: + return s.getSingleMaxSeqWithTime(ctx, conversationIDs[0]) + } + keys := make([]string, 0, len(conversationIDs)) + keyConversationID := make(map[string]string, len(conversationIDs)) + for _, conversationID := range conversationIDs { + key := s.getSeqMallocKey(conversationID) + if _, ok := keyConversationID[key]; ok { + continue + } + keys = append(keys, key) + keyConversationID[key] = conversationID + } + if len(keys) == 1 { + return s.getSingleMaxSeqWithTime(ctx, conversationIDs[0]) + } + slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys) + if err != nil { + return nil, err + } + seqs := make(map[string]database.SeqTime, len(conversationIDs)) + for _, keys := range slotKeys { + if err := s.batchGetMaxSeqWithTime(ctx, keys, keyConversationID, seqs); err != nil { + return nil, err + } + } + return seqs, nil +} + func (s *seqConversationCacheRedis) getSeqMallocKey(conversationID string) string { return cachekey.GetMallocSeqKey(conversationID) } -func (s *seqConversationCacheRedis) setSeq(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) (int64, error) { +func (s *seqConversationCacheRedis) setSeq(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64, mill int64) (int64, error) { if lastSeq < currSeq { return 0, errs.New("lastSeq must be greater than currSeq") } @@ -138,8 +220,9 @@ local lockValue = ARGV[1] local dataSecond = ARGV[2] local curr_seq = tonumber(ARGV[3]) local last_seq = tonumber(ARGV[4]) +local mallocTime = ARGV[5] if redis.call("EXISTS", key) == 0 then - redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq) + redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq, "TIME", mallocTime) redis.call("EXPIRE", key, dataSecond) return 1 end @@ -147,11 +230,11 @@ if redis.call("HGET", key, "LOCK") ~= lockValue then return 2 end redis.call("HDEL", key, "LOCK") -redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq) +redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq, "TIME", mallocTime) redis.call("EXPIRE", key, dataSecond) return 0 ` - result, err := s.rdb.Eval(ctx, script, []string{key}, 
owner, int64(s.dataTime/time.Second), currSeq, lastSeq).Int64() + result, err := s.rdb.Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq, mill).Int64() if err != nil { return 0, errs.Wrap(err) } @@ -169,6 +252,7 @@ local key = KEYS[1] local size = tonumber(ARGV[1]) local lockSecond = ARGV[2] local dataSecond = ARGV[3] +local mallocTime = ARGV[4] local result = {} if redis.call("EXISTS", key) == 0 then local lockValue = math.random(0, 999999999) @@ -176,6 +260,7 @@ if redis.call("EXISTS", key) == 0 then redis.call("EXPIRE", key, lockSecond) table.insert(result, 1) table.insert(result, lockValue) + table.insert(result, mallocTime) return result end if redis.call("HEXISTS", key, "LOCK") == 1 then @@ -189,6 +274,12 @@ if size == 0 then table.insert(result, 0) table.insert(result, curr_seq) table.insert(result, last_seq) + local setTime = redis.call("HGET", key, "TIME") + if setTime then + table.insert(result, setTime) + else + table.insert(result, 0) + end return result end local max_seq = curr_seq + size @@ -196,21 +287,25 @@ if max_seq > last_seq then local lockValue = math.random(0, 999999999) redis.call("HSET", key, "LOCK", lockValue) redis.call("HSET", key, "CURR", last_seq) + redis.call("HSET", key, "TIME", mallocTime) redis.call("EXPIRE", key, lockSecond) table.insert(result, 3) table.insert(result, curr_seq) table.insert(result, last_seq) table.insert(result, lockValue) + table.insert(result, mallocTime) return result end redis.call("HSET", key, "CURR", max_seq) +redis.call("HSET", key, "TIME", ARGV[4]) redis.call("EXPIRE", key, dataSecond) table.insert(result, 0) table.insert(result, curr_seq) table.insert(result, last_seq) +table.insert(result, mallocTime) return result ` - result, err := s.rdb.Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second)).Int64Slice() + result, err := s.rdb.Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second), time.Now().UnixMilli()).Int64Slice() if err != nil { return nil, errs.Wrap(err) } @@ -228,9 +323,9 @@ func (s *seqConversationCacheRedis) wait(ctx context.Context) error { } } -func (s *seqConversationCacheRedis) setSeqRetry(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) { +func (s *seqConversationCacheRedis) setSeqRetry(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64, mill int64) { for i := 0; i < 10; i++ { - state, err := s.setSeq(ctx, key, owner, currSeq, lastSeq) + state, err := s.setSeq(ctx, key, owner, currSeq, lastSeq, mill) if err != nil { log.ZError(ctx, "set seq cache failed", err, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq, "count", i+1) if err := s.wait(ctx); err != nil { @@ -267,60 +362,74 @@ func (s *seqConversationCacheRedis) getMallocSize(conversationID string, size in } func (s *seqConversationCacheRedis) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) { + seq, _, err := s.mallocTime(ctx, conversationID, size) + return seq, err +} + +func (s *seqConversationCacheRedis) mallocTime(ctx context.Context, conversationID string, size int64) (int64, int64, error) { if size < 0 { - return 0, errs.New("size must be greater than 0") + return 0, 0, errs.New("size must be greater than 0") } key := s.getSeqMallocKey(conversationID) for i := 0; i < 10; i++ { states, err := s.malloc(ctx, key, size) if err != nil { - return 0, err + return 0, 0, err } switch states[0] { case 0: // success - return states[1], nil + 
return states[1], states[3], nil case 1: // not found mallocSize := s.getMallocSize(conversationID, size) seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize) if err != nil { - return 0, err + return 0, 0, err } - s.setSeqRetry(ctx, key, states[1], seq+size, seq+mallocSize) - return seq, nil + s.setSeqRetry(ctx, key, states[1], seq+size, seq+mallocSize, states[2]) + return seq, 0, nil case 2: // locked if err := s.wait(ctx); err != nil { - return 0, err + return 0, 0, err } continue case 3: // exceeded cache max value currSeq := states[1] lastSeq := states[2] + mill := states[4] mallocSize := s.getMallocSize(conversationID, size) seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize) if err != nil { - return 0, err + return 0, 0, err } if lastSeq == seq { - s.setSeqRetry(ctx, key, states[3], currSeq+size, seq+mallocSize) - return currSeq, nil + s.setSeqRetry(ctx, key, states[3], currSeq+size, seq+mallocSize, mill) + return currSeq, states[4], nil } else { log.ZWarn(ctx, "malloc seq not equal cache last seq", nil, "conversationID", conversationID, "currSeq", currSeq, "lastSeq", lastSeq, "mallocSeq", seq) - s.setSeqRetry(ctx, key, states[3], seq+size, seq+mallocSize) - return seq, nil + s.setSeqRetry(ctx, key, states[3], seq+size, seq+mallocSize, mill) + return seq, mill, nil } default: log.ZError(ctx, "malloc seq unknown state", nil, "state", states[0], "conversationID", conversationID, "size", size) - return 0, errs.New(fmt.Sprintf("unknown state: %d", states[0])) + return 0, 0, errs.New(fmt.Sprintf("unknown state: %d", states[0])) } } log.ZError(ctx, "malloc seq retrying still failed", nil, "conversationID", conversationID, "size", size) - return 0, errs.New("malloc seq waiting for lock timeout", "conversationID", conversationID, "size", size) + return 0, 0, errs.New("malloc seq waiting for lock timeout", "conversationID", conversationID, "size", size) } func (s *seqConversationCacheRedis) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) { return s.Malloc(ctx, conversationID, 0) } +func (s *seqConversationCacheRedis) GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) { + seq, mill, err := s.mallocTime(ctx, conversationID, 0) + if err != nil { + return database.SeqTime{}, err + } + return database.SeqTime{Seq: seq, Time: mill}, nil +} + func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[string]int64) error { keys := make([]string, 0, len(seqs)) for conversationID, seq := range seqs { @@ -331,3 +440,80 @@ func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[str } return DeleteCacheBySlot(ctx, s.rocks, keys) } + +// GetCacheMaxSeqWithTime only reads the existing cache; if a conversation has no cache entry, none is generated for it +func (s *seqConversationCacheRedis) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) { + if len(conversationIDs) == 0 { + return map[string]database.SeqTime{}, nil + } + key2conversationID := make(map[string]string) + keys := make([]string, 0, len(conversationIDs)) + for _, conversationID := range conversationIDs { + key := s.getSeqMallocKey(conversationID) + if _, ok := key2conversationID[key]; ok { + continue + } + key2conversationID[key] = conversationID + keys = append(keys, key) + } + slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys) + if err != nil { + return nil, err + } + res := make(map[string]database.SeqTime) + for _, keys := range slotKeys { + if len(keys) == 0 { + continue + } + pipe := 
s.rdb.Pipeline() + cmds := make([]*redis.SliceCmd, 0, len(keys)) + for _, key := range keys { + cmds = append(cmds, pipe.HMGet(ctx, key, "CURR", "TIME")) + } + if _, err := pipe.Exec(ctx); err != nil { + return nil, errs.Wrap(err) + } + for i, cmd := range cmds { + val, err := cmd.Result() + if err != nil { + return nil, err + } + if len(val) != 2 { + return nil, errs.WrapMsg(err, "GetCacheMaxSeqWithTime invalid result", "key", keys[i], "res", val) + } + if val[0] == nil { + continue + } + seq, err := s.parseInt64(val[0]) + if err != nil { + return nil, err + } + mill, err := s.parseInt64(val[1]) + if err != nil { + return nil, err + } + conversationID := key2conversationID[keys[i]] + res[conversationID] = database.SeqTime{Seq: seq, Time: mill} + } + } + return res, nil +} + +func (s *seqConversationCacheRedis) parseInt64(val any) (int64, error) { + switch v := val.(type) { + case nil: + return 0, nil + case int: + return int64(v), nil + case int64: + return v, nil + case string: + res, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, errs.WrapMsg(err, "invalid string not int64", "value", v) + } + return res, nil + default: + return 0, errs.New("invalid result not int64", "resType", fmt.Sprintf("%T", v), "value", v) + } +} diff --git a/pkg/common/storage/cache/redis/seq_conversation_test.go b/pkg/common/storage/cache/redis/seq_conversation_test.go index 1a40624b8c..d8bfdfbfbf 100644 --- a/pkg/common/storage/cache/redis/seq_conversation_test.go +++ b/pkg/common/storage/cache/redis/seq_conversation_test.go @@ -14,7 +14,7 @@ import ( ) func newTestSeq() *seqConversationCacheRedis { - mgocli, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)) + mgocli, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://openIM:openIM123@127.0.0.1:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)) if err != nil { panic(err) } @@ -23,7 +23,7 @@ func newTestSeq() *seqConversationCacheRedis { panic(err) } opt := &redis.Options{ - Addr: "172.16.8.48:16379", + Addr: "127.0.0.1:16379", Password: "openIM123", DB: 1, } @@ -107,3 +107,37 @@ func TestMinSeq(t *testing.T) { ts := newTestSeq() t.Log(ts.GetMinSeq(context.Background(), "10000000")) } + +func TestMalloc(t *testing.T) { + ts := newTestSeq() + t.Log(ts.mallocTime(context.Background(), "10000000", 100)) +} + +func TestHMGET(t *testing.T) { + ts := newTestSeq() + res, err := ts.GetCacheMaxSeqWithTime(context.Background(), []string{"10000000", "123456"}) + if err != nil { + panic(err) + } + t.Log(res) +} + +func TestGetMaxSeqWithTime(t *testing.T) { + ts := newTestSeq() + t.Log(ts.GetMaxSeqWithTime(context.Background(), "10000000")) +} + +func TestGetMaxSeqWithTime1(t *testing.T) { + ts := newTestSeq() + t.Log(ts.GetMaxSeqsWithTime(context.Background(), []string{"10000000", "12345", "111"})) +} + +// +//func TestHMGET(t *testing.T) { +// ts := newTestSeq() +// res, err := ts.rdb.HMGet(context.Background(), "MALLOC_SEQ:1", "CURR", "TIME1").Result() +// if err != nil { +// panic(err) +// } +// t.Log(res) +//} diff --git a/pkg/common/storage/cache/redis/seq_user.go b/pkg/common/storage/cache/redis/seq_user.go index edbc66b21b..0cedfeee12 100644 --- a/pkg/common/storage/cache/redis/seq_user.go +++ b/pkg/common/storage/cache/redis/seq_user.go @@ -74,17 +74,22 @@ func (s *seqUserCacheRedis) GetUserReadSeq(ctx context.Context, conversationID s } func (s *seqUserCacheRedis) 
SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error { - if seq%s.readSeqWriteRatio == 0 { - if err := s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil { - return err - } + dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID) + if err != nil { + return err } - if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil { - return errs.Wrap(err) + if dbSeq < seq { + if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil { + return errs.Wrap(err) + } } return nil } +func (s *seqUserCacheRedis) SetUserReadSeqToDB(ctx context.Context, conversationID string, userID string, seq int64) error { + return s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq) +} + func (s *seqUserCacheRedis) SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error { keys := make([]string, 0, len(seqs)) for conversationID, seq := range seqs { @@ -128,13 +133,6 @@ func (s *seqUserCacheRedis) SetUserReadSeqs(ctx context.Context, userID string, if err := s.setUserRedisReadSeqs(ctx, userID, seqs); err != nil { return err } - for conversationID, seq := range seqs { - if seq%s.readSeqWriteRatio == 0 { - if err := s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil { - return err - } - } - } return nil } diff --git a/pkg/common/storage/cache/redis/token.go b/pkg/common/storage/cache/redis/token.go index b822596582..24e9c30050 100644 --- a/pkg/common/storage/cache/redis/token.go +++ b/pkg/common/storage/cache/redis/token.go @@ -19,8 +19,8 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/tools/errs" - "github.com/openimsdk/tools/utils/stringutil" "github.com/redis/go-redis/v9" + "strconv" "time" ) @@ -58,9 +58,12 @@ func (c *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, p } mm := make(map[string]int) for k, v := range m { - mm[k] = stringutil.StringToInt(v) + state, err := strconv.Atoi(v) + if err != nil { + return nil, errs.WrapMsg(err, "redis token value is not int", "value", v, "userID", userID, "platformID", platformID) + } + mm[k] = state } - return mm, nil } diff --git a/pkg/common/storage/cache/seq_conversation.go b/pkg/common/storage/cache/seq_conversation.go index 2c893a5e86..f35d7bf528 100644 --- a/pkg/common/storage/cache/seq_conversation.go +++ b/pkg/common/storage/cache/seq_conversation.go @@ -1,6 +1,9 @@ package cache -import "context" +import ( + "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" +) type SeqConversationCache interface { Malloc(ctx context.Context, conversationID string, size int64) (int64, error) @@ -9,4 +12,7 @@ type SeqConversationCache interface { GetMinSeq(ctx context.Context, conversationID string) (int64, error) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) SetMinSeqs(ctx context.Context, seqs map[string]int64) error + GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) + GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) + GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) } diff --git a/pkg/common/storage/cache/seq_user.go b/pkg/common/storage/cache/seq_user.go index 
61dbc0ab45..cef414e16e 100644 --- a/pkg/common/storage/cache/seq_user.go +++ b/pkg/common/storage/cache/seq_user.go @@ -9,6 +9,7 @@ type SeqUser interface { SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error + SetUserReadSeqToDB(ctx context.Context, conversationID string, userID string, seq int64) error SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error SetUserReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error GetUserReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) diff --git a/pkg/common/storage/controller/auth.go b/pkg/common/storage/controller/auth.go index b725513d9e..94f18b3ae3 100644 --- a/pkg/common/storage/controller/auth.go +++ b/pkg/common/storage/controller/auth.go @@ -16,6 +16,7 @@ package controller import ( "context" + "github.com/openimsdk/tools/log" "github.com/golang-jwt/jwt/v4" "github.com/openimsdk/open-im-server/v3/pkg/authverify" @@ -35,13 +36,14 @@ type AuthDatabase interface { } type authDatabase struct { - cache cache.TokenModel - accessSecret string - accessExpire int64 + cache cache.TokenModel + accessSecret string + accessExpire int64 + multiLoginPolicy int } -func NewAuthDatabase(cache cache.TokenModel, accessSecret string, accessExpire int64) AuthDatabase { - return &authDatabase{cache: cache, accessSecret: accessSecret, accessExpire: accessExpire} +func NewAuthDatabase(cache cache.TokenModel, accessSecret string, accessExpire int64, policy int) AuthDatabase { + return &authDatabase{cache: cache, accessSecret: accessSecret, accessExpire: accessExpire, multiLoginPolicy: policy} } // If the result is empty. @@ -55,15 +57,19 @@ func (a *authDatabase) SetTokenMapByUidPid(ctx context.Context, userID string, p // Create Token. 
func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformID int) (string, error) { + // todo: get all platform token tokens, err := a.cache.GetTokensWithoutError(ctx, userID, platformID) if err != nil { return "", err } var deleteTokenKey []string + var kickedTokenKey []string for k, v := range tokens { - _, err = tokenverify.GetClaimFromToken(k, authverify.Secret(a.accessSecret)) + t, err := tokenverify.GetClaimFromToken(k, authverify.Secret(a.accessSecret)) if err != nil || v != constant.NormalToken { deleteTokenKey = append(deleteTokenKey, k) + } else if a.checkKickToken(ctx, platformID, t) { + kickedTokenKey = append(kickedTokenKey, k) } } if len(deleteTokenKey) != 0 { @@ -73,6 +79,25 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI } } + const adminTokenMaxNum = 30 + if platformID == constant.AdminPlatformID { + if len(kickedTokenKey) > adminTokenMaxNum { + kickedTokenKey = kickedTokenKey[:len(kickedTokenKey)-adminTokenMaxNum] + } else { + kickedTokenKey = nil + } + } + + if len(kickedTokenKey) != 0 { + for _, k := range kickedTokenKey { + err := a.cache.SetTokenFlagEx(ctx, userID, platformID, k, constant.KickedToken) + if err != nil { + return "", err + } + log.ZDebug(ctx, "kicked token in create token", "token", k) + } + } + claims := tokenverify.BuildClaims(userID, platformID, a.accessExpire) token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) tokenString, err := token.SignedString([]byte(a.accessSecret)) @@ -85,3 +110,23 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI } return tokenString, nil } + +func (a *authDatabase) checkKickToken(ctx context.Context, platformID int, token *tokenverify.Claims) bool { + switch a.multiLoginPolicy { + case constant.DefalutNotKick: + return false + case constant.PCAndOther: + if constant.PlatformIDToClass(platformID) == constant.TerminalPC || + constant.PlatformIDToClass(token.PlatformID) == constant.TerminalPC { + return false + } + return true + case constant.AllLoginButSameTermKick: + if platformID == token.PlatformID { + return true + } + return false + default: + return false + } +} diff --git a/pkg/common/storage/controller/conversation.go b/pkg/common/storage/controller/conversation.go index c804d1cc51..06a0733658 100644 --- a/pkg/common/storage/controller/conversation.go +++ b/pkg/common/storage/controller/conversation.go @@ -69,6 +69,10 @@ type ConversationDatabase interface { FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*relationtb.VersionLog, error) FindMaxConversationUserVersionCache(ctx context.Context, userID string) (*relationtb.VersionLog, error) GetOwnerConversation(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (int64, []*relationtb.Conversation, error) + // GetNotNotifyConversationIDs gets not notify conversationIDs by userID + GetNotNotifyConversationIDs(ctx context.Context, userID string) ([]string, error) + // GetPinnedConversationIDs gets pinned conversationIDs by userID + GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error) } func NewConversationDatabase(conversation database.Conversation, cache cache.ConversationCache, tx tx.Tx) ConversationDatabase { @@ -108,6 +112,10 @@ func (c *conversationDatabase) SetUsersConversationFieldTx(ctx context.Context, } if _, ok := fieldMap["recv_msg_opt"]; ok { cache = cache.DelConversationNotReceiveMessageUserIDs(conversation.ConversationID) + cache = 
cache.DelConversationNotNotifyMessageUserIDs(userIDs...) + } + if _, ok := fieldMap["is_pinned"]; ok { + cache = cache.DelConversationPinnedMessageUserIDs(userIDs...) } cache = cache.DelConversationVersionUserIDs(haveUserIDs...) } @@ -144,6 +152,10 @@ func (c *conversationDatabase) UpdateUsersConversationField(ctx context.Context, cache = cache.DelUsersConversation(conversationID, userIDs...).DelConversationVersionUserIDs(userIDs...) if _, ok := args["recv_msg_opt"]; ok { cache = cache.DelConversationNotReceiveMessageUserIDs(conversationID) + cache = cache.DelConversationNotNotifyMessageUserIDs(userIDs...) + } + if _, ok := args["is_pinned"]; ok { + cache = cache.DelConversationPinnedMessageUserIDs(userIDs...) } return cache.ChainExecDel(ctx) } @@ -152,14 +164,30 @@ func (c *conversationDatabase) CreateConversation(ctx context.Context, conversat if err := c.conversationDB.Create(ctx, conversations); err != nil { return err } - var userIDs []string + var ( + userIDs []string + notNotifyUserIDs []string + pinnedUserIDs []string + ) + cache := c.cache.CloneConversationCache() for _, conversation := range conversations { cache = cache.DelConversations(conversation.OwnerUserID, conversation.ConversationID) cache = cache.DelConversationNotReceiveMessageUserIDs(conversation.ConversationID) userIDs = append(userIDs, conversation.OwnerUserID) + if conversation.RecvMsgOpt == constant.ReceiveNotNotifyMessage { + notNotifyUserIDs = append(notNotifyUserIDs, conversation.OwnerUserID) + } + if conversation.IsPinned == true { + pinnedUserIDs = append(pinnedUserIDs, conversation.OwnerUserID) + } } - return cache.DelConversationIDs(userIDs...).DelUserConversationIDsHash(userIDs...).DelConversationVersionUserIDs(userIDs...).ChainExecDel(ctx) + return cache.DelConversationIDs(userIDs...). + DelUserConversationIDsHash(userIDs...). + DelConversationVersionUserIDs(userIDs...). + DelConversationNotNotifyMessageUserIDs(notNotifyUserIDs...). + DelConversationPinnedMessageUserIDs(pinnedUserIDs...). + ChainExecDel(ctx) } func (c *conversationDatabase) SyncPeerUserPrivateConversationTx(ctx context.Context, conversations []*relationtb.Conversation) error { @@ -212,7 +240,10 @@ func (c *conversationDatabase) GetUserAllConversation(ctx context.Context, owner func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUserID string, conversations []*relationtb.Conversation) error { return c.tx.Transaction(ctx, func(ctx context.Context) error { cache := c.cache.CloneConversationCache() - cache = cache.DelConversationVersionUserIDs(ownerUserID) + cache = cache.DelConversationVersionUserIDs(ownerUserID). + DelConversationNotNotifyMessageUserIDs(ownerUserID). 
+ DelConversationPinnedMessageUserIDs(ownerUserID) + groupIDs := datautil.Distinct(datautil.Filter(conversations, func(e *relationtb.Conversation) (string, bool) { return e.GroupID, e.GroupID != "" })) @@ -353,3 +384,19 @@ func (c *conversationDatabase) GetOwnerConversation(ctx context.Context, ownerUs } return int64(len(conversationIDs)), conversations, nil } + +func (c *conversationDatabase) GetNotNotifyConversationIDs(ctx context.Context, userID string) ([]string, error) { + conversationIDs, err := c.cache.GetUserNotNotifyConversationIDs(ctx, userID) + if err != nil { + return nil, err + } + return conversationIDs, nil +} + +func (c *conversationDatabase) GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error) { + conversationIDs, err := c.cache.GetPinnedConversationIDs(ctx, userID) + if err != nil { + return nil, err + } + return conversationIDs, nil +} diff --git a/pkg/common/storage/controller/friend.go b/pkg/common/storage/controller/friend.go index 636371198a..88a5fc863d 100644 --- a/pkg/common/storage/controller/friend.go +++ b/pkg/common/storage/controller/friend.go @@ -160,7 +160,7 @@ func (f *friendDatabase) BecomeFriends(ctx context.Context, ownerUserID string, if err != nil { return err } - opUserID := mcontext.GetOperationID(ctx) + opUserID := mcontext.GetOpUserID(ctx) friends := make([]*model.Friend, 0, len(friendUserIDs)*2) myFriendsSet := datautil.SliceSetAny(myFriends, func(friend *model.Friend) string { return friend.FriendUserID @@ -192,6 +192,7 @@ func (f *friendDatabase) BecomeFriends(ctx context.Context, ownerUserID string, if err != nil { return err } + cache = cache.DelFriendIDs(ownerUserID).DelMaxFriendVersion(ownerUserID) if len(newMyFriendIDs) > 0 { cache = cache.DelFriendIDs(newMyFriendIDs...) cache = cache.DelFriends(ownerUserID, newMyFriendIDs).DelMaxFriendVersion(newMyFriendIDs...) diff --git a/pkg/common/storage/controller/msg.go b/pkg/common/storage/controller/msg.go index 49268e0493..d579069b69 100644 --- a/pkg/common/storage/controller/msg.go +++ b/pkg/common/storage/controller/msg.go @@ -18,14 +18,14 @@ import ( "context" "encoding/json" "errors" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "strings" "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" - "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/protocol/constant" pbmsg "github.com/openimsdk/protocol/msg" @@ -46,16 +46,10 @@ const ( // CommonMsgDatabase defines the interface for message database operations. type CommonMsgDatabase interface { - // BatchInsertChat2DB inserts a batch of messages into the database for a specific conversation. - BatchInsertChat2DB(ctx context.Context, conversationID string, msgs []*sdkws.MsgData, currentMaxSeq int64) error // RevokeMsg revokes a message in a conversation. RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error // MarkSingleChatMsgsAsRead marks messages as read for a single chat by sequence numbers. MarkSingleChatMsgsAsRead(ctx context.Context, userID string, conversationID string, seqs []int64) error - // DeleteMessagesFromCache deletes message caches from Redis by sequence numbers. 
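The conversation changes above introduce two per-user ID lists (not-notify and pinned conversations) that are served from cache and dropped on every write that can change recv_msg_opt or is_pinned. A minimal read-through sketch of that invalidation pattern, with an in-memory map standing in for ConversationCache and for the FindUserIDAllPinnedConversationID query:

package main

import (
	"fmt"
	"sync"
)

type pinnedCache struct {
	mu    sync.Mutex
	byUID map[string][]string
}

func newPinnedCache() *pinnedCache { return &pinnedCache{byUID: map[string][]string{}} }

// Get returns the cached list for userID, loading it once via load
// (load stands in for the MongoDB query behind GetPinnedConversationIDs).
func (c *pinnedCache) Get(userID string, load func(string) []string) []string {
	c.mu.Lock()
	defer c.mu.Unlock()
	if ids, ok := c.byUID[userID]; ok {
		return ids
	}
	ids := load(userID)
	c.byUID[userID] = ids
	return ids
}

// Del mirrors the idea behind DelConversationPinnedMessageUserIDs: every write
// path that can change is_pinned drops the cached list so readers never see stale IDs.
func (c *pinnedCache) Del(userIDs ...string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, id := range userIDs {
		delete(c.byUID, id)
	}
}

func main() {
	c := newPinnedCache()
	load := func(string) []string { return []string{"si_userA_userB"} }
	fmt.Println(c.Get("u1", load)) // cached on first read
	c.Del("u1")                    // invalidate after a pin/unpin write
	fmt.Println(c.Get("u1", load)) // reloaded from the source of truth
}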
- DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error - // BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache. - BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error) // GetMsgBySeqsRange retrieves messages from MongoDB by a range of sequence numbers. GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (minSeq int64, maxSeq int64, seqMsg []*sdkws.MsgData, err error) // GetMsgBySeqs retrieves messages for large groups from MongoDB by sequence numbers. @@ -80,25 +74,29 @@ type CommonMsgDatabase interface { GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error) UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error + GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) + GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) + GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) + //GetMongoMaxAndMinSeq(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo int64, err error) //GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error) SetSendMsgStatus(ctx context.Context, id string, status int32) error GetSendMsgStatus(ctx context.Context, id string) (int32, error) - SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*sdkws.MsgData, err error) + SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*pbmsg.SearchedMsgData, err error) FindOneByDocIDs(ctx context.Context, docIDs []string, seqs map[string]int64) (map[string]*sdkws.MsgData, error) // to mq MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error - MsgToPushMQ(ctx context.Context, key, conversarionID string, msg2mq *sdkws.MsgData) (int32, int64, error) - MsgToMongoMQ(ctx context.Context, key, conversarionID string, msgs []*sdkws.MsgData, lastSeq int64) error RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error) ConvertMsgsDocLen(ctx context.Context, conversationIDs []string) // clear msg - GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) + GetBeforeMsg(ctx context.Context, ts int64, docIds []string, limit int) ([]*model.MsgDocModel, error) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) + + GetDocIDs(ctx context.Context) ([]string, error) } func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) { @@ -110,22 +108,12 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser if err != nil { return nil, err } - producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, 
kafkaConf.ToMongoTopic) - if err != nil { - return nil, err - } - producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic) - if err != nil { - return nil, err - } return &commonMsgDatabase{ msgDocDatabase: msgDocModel, msg: msg, seqUser: seqUser, seqConversation: seqConversation, producer: producerToRedis, - producerToMongo: producerToMongo, - producerToPush: producerToPush, }, nil } @@ -136,8 +124,6 @@ type commonMsgDatabase struct { seqConversation cache.SeqConversationCache seqUser cache.SeqUser producer *kafka.Producer - producerToMongo *kafka.Producer - producerToPush *kafka.Producer } func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error { @@ -145,23 +131,6 @@ func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sd return err } -func (db *commonMsgDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) { - partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID}) - if err != nil { - log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq) - return 0, 0, err - } - return partition, offset, nil -} - -func (db *commonMsgDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error { - if len(messages) > 0 { - _, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages}) - return err - } - return nil -} - func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error { if len(fields) == 0 { return nil @@ -263,52 +232,6 @@ func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationI return nil } -func (db *commonMsgDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error { - if len(msgList) == 0 { - return errs.ErrArgs.WrapMsg("msgList is empty") - } - msgs := make([]any, len(msgList)) - for i, msg := range msgList { - if msg == nil { - continue - } - var offlinePushModel *model.OfflinePushModel - if msg.OfflinePushInfo != nil { - offlinePushModel = &model.OfflinePushModel{ - Title: msg.OfflinePushInfo.Title, - Desc: msg.OfflinePushInfo.Desc, - Ex: msg.OfflinePushInfo.Ex, - IOSPushSound: msg.OfflinePushInfo.IOSPushSound, - IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount, - } - } - msgs[i] = &model.MsgDataModel{ - SendID: msg.SendID, - RecvID: msg.RecvID, - GroupID: msg.GroupID, - ClientMsgID: msg.ClientMsgID, - ServerMsgID: msg.ServerMsgID, - SenderPlatformID: msg.SenderPlatformID, - SenderNickname: msg.SenderNickname, - SenderFaceURL: msg.SenderFaceURL, - SessionType: msg.SessionType, - MsgFrom: msg.MsgFrom, - ContentType: msg.ContentType, - Content: string(msg.Content), - Seq: msg.Seq, - SendTime: msg.SendTime, - CreateTime: msg.CreateTime, - Status: msg.Status, - Options: msg.Options, - OfflinePush: offlinePushModel, - AtUserIDList: msg.AtUserIDList, - AttachedInfo: msg.AttachedInfo, - Ex: msg.Ex, - } - } - return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq) -} - func (db *commonMsgDatabase) RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error { return db.BatchInsertBlock(ctx, conversationID, []any{revoke}, updateKeyRevoke, seq) } @@ -328,56 +251,6 @@ 
func (db *commonMsgDatabase) MarkSingleChatMsgsAsRead(ctx context.Context, userI return nil } -func (db *commonMsgDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error { - return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs) -} - -func (db *commonMsgDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error { - for userID, seq := range userSeqMap { - if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil { - return err - } - } - return nil -} - -func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) { - lenList := len(msgs) - if int64(lenList) > db.msgTable.GetSingleGocMsgNum() { - return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap() - } - if lenList < 1 { - return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap() - } - currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs))) - if err != nil { - log.ZError(ctx, "storage.seq.Malloc", err) - return 0, false, err - } - isNew = currentMaxSeq == 0 - lastMaxSeq := currentMaxSeq - userSeqMap := make(map[string]int64) - for _, m := range msgs { - currentMaxSeq++ - m.Seq = currentMaxSeq - userSeqMap[m.SendID] = m.Seq - } - - failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs) - if err != nil { - prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum)) - log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID) - } else { - prommetrics.MsgInsertRedisSuccessCounter.Inc() - } - err = db.setHasReadSeqs(ctx, conversationID, userSeqMap) - if err != nil { - log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID) - prommetrics.SeqSetFailedCounter.Inc() - } - return lastMaxSeq, isNew, errs.Wrap(err) -} - func (db *commonMsgDatabase) getMsgBySeqs(ctx context.Context, userID, conversationID string, seqs []int64) (totalMsgs []*sdkws.MsgData, err error) { for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, seqs) { // log.ZDebug(ctx, "getMsgBySeqs", "docID", docID, "seqs", seqs) @@ -577,7 +450,7 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) (int64, int64, []*sdkws.MsgData, error) { userMinSeq, err := db.seqUser.GetUserMinSeq(ctx, conversationID, userID) - if err != nil && errs.Unwrap(err) != redis.Nil { + if err != nil { return 0, 0, nil, err } minSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID) @@ -588,15 +461,28 @@ func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, co if err != nil { return 0, 0, nil, err } - if userMinSeq < minSeq { + userMaxSeq, err := db.seqUser.GetUserMaxSeq(ctx, conversationID, userID) + if err != nil { + return 0, 0, nil, err + } + if userMinSeq > minSeq { minSeq = userMinSeq } - var newSeqs []int64 + if userMaxSeq > 0 && userMaxSeq < maxSeq { + maxSeq = userMaxSeq + } + newSeqs := make([]int64, 0, len(seqs)) for _, seq := range seqs { + if seq <= 0 { + continue + } if seq >= minSeq && seq <= maxSeq { newSeqs = append(newSeqs, seq) } } + if len(newSeqs) == 0 { + return minSeq, maxSeq, nil, nil + } successMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, newSeqs) if err != nil { 
if err != redis.Nil { @@ -878,8 +764,8 @@ func (db *commonMsgDatabase) RangeGroupSendCount( return db.msgDocDatabase.RangeGroupSendCount(ctx, start, end, ase, pageNumber, showNumber) } -func (db *commonMsgDatabase) SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*sdkws.MsgData, err error) { - var totalMsgs []*sdkws.MsgData +func (db *commonMsgDatabase) SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*pbmsg.SearchedMsgData, err error) { + var totalMsgs []*pbmsg.SearchedMsgData total, msgs, err := db.msgDocDatabase.SearchMessage(ctx, req) if err != nil { return 0, nil, err @@ -888,7 +774,13 @@ func (db *commonMsgDatabase) SearchMessage(ctx context.Context, req *pbmsg.Searc if msg.IsRead { msg.Msg.IsRead = true } - totalMsgs = append(totalMsgs, convert.MsgDB2Pb(msg.Msg)) + searchedMsgData := &pbmsg.SearchedMsgData{MsgData: convert.MsgDB2Pb(msg.Msg)} + + if msg.Revoke != nil { + searchedMsgData.IsRevoked = true + } + + totalMsgs = append(totalMsgs, searchedMsgData) } return total, totalMsgs, nil } @@ -912,8 +804,25 @@ func (db *commonMsgDatabase) ConvertMsgsDocLen(ctx context.Context, conversation db.msgDocDatabase.ConvertMsgsDocLen(ctx, conversationIDs) } -func (db *commonMsgDatabase) GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) { - return db.msgDocDatabase.GetBeforeMsg(ctx, ts, limit) +func (db *commonMsgDatabase) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) { + var msgs []*model.MsgDocModel + for i := 0; i < len(docIDs); i += 1000 { + end := i + 1000 + if end > len(docIDs) { + end = len(docIDs) + } + + res, err := db.msgDocDatabase.GetBeforeMsg(ctx, ts, docIDs[i:end], limit) + if err != nil { + return nil, err + } + msgs = append(msgs, res...) 
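The rewritten GetBeforeMsg walks the doc IDs in windows of at most 1000 so a single MongoDB $in filter stays bounded and the scan can stop once limit documents are collected. A generic version of that chunking loop, shown here as a standalone helper (forEachChunk is illustrative and not part of the repository):

package main

import "fmt"

// forEachChunk calls fn on consecutive windows of at most size elements.
func forEachChunk[T any](items []T, size int, fn func([]T) error) error {
	for i := 0; i < len(items); i += size {
		end := i + size
		if end > len(items) {
			end = len(items)
		}
		if err := fn(items[i:end]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ids := make([]string, 0, 2500)
	for i := 0; i < 2500; i++ {
		ids = append(ids, fmt.Sprintf("doc:%d", i))
	}
	_ = forEachChunk(ids, 1000, func(batch []string) error {
		fmt.Println("query batch of", len(batch)) // prints 1000, 1000, 500
		return nil
	})
}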
+ + if len(msgs) >= limit { + return msgs[:limit], nil + } + } + return msgs, nil } func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) { @@ -936,8 +845,10 @@ func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, d return index, err } if len(index) == notNull { + log.ZDebug(ctx, "Delete db in Doc", "DocID", doc.DocID, "index", index, "maxSeq", maxSeq) return index, db.msgDocDatabase.DeleteDoc(ctx, doc.DocID) } else { + log.ZDebug(ctx, "delete db in index", "DocID", doc.DocID, "index", index, "maxSeq", maxSeq) return index, db.msgDocDatabase.DeleteMsgByIndex(ctx, doc.DocID, index) } } @@ -955,3 +866,20 @@ func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID strin } return db.seqConversation.SetMinSeq(ctx, conversationID, seq) } + +func (db *commonMsgDatabase) GetDocIDs(ctx context.Context) ([]string, error) { + return db.msgDocDatabase.GetDocIDs(ctx) +} + +func (db *commonMsgDatabase) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) { + return db.seqConversation.GetCacheMaxSeqWithTime(ctx, conversationIDs) +} + +func (db *commonMsgDatabase) GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) { + return db.seqConversation.GetMaxSeqWithTime(ctx, conversationID) +} + +func (db *commonMsgDatabase) GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) { + // todo: only the time in the redis cache will be taken, not the message time + return db.seqConversation.GetMaxSeqsWithTime(ctx, conversationIDs) +} diff --git a/pkg/common/storage/controller/msg_transfer.go b/pkg/common/storage/controller/msg_transfer.go new file mode 100644 index 0000000000..5e540a2c33 --- /dev/null +++ b/pkg/common/storage/controller/msg_transfer.go @@ -0,0 +1,286 @@ +package controller + +import ( + "context" + + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + pbmsg "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/errs" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mq/kafka" + "go.mongodb.org/mongo-driver/mongo" +) + +type MsgTransferDatabase interface { + // BatchInsertChat2DB inserts a batch of messages into the database for a specific conversation. + BatchInsertChat2DB(ctx context.Context, conversationID string, msgs []*sdkws.MsgData, currentMaxSeq int64) error + // DeleteMessagesFromCache deletes message caches from Redis by sequence numbers. + DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error + + // BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache. 
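BatchInsertChat2Cache, declared next, reserves a contiguous block of sequence numbers for the whole batch before writing anything: seqConversation.Malloc returns the previous maximum, each message takes the next value, and a previous maximum of zero marks a brand-new conversation. A toy allocator demonstrating the same Malloc-then-assign pattern (the in-memory allocator is a stand-in, not the Redis/Mongo implementation):

package main

import "fmt"

type seqAlloc struct{ max map[string]int64 }

// Malloc reserves size sequence numbers for conversationID and returns the
// previous maximum; 0 means the conversation did not exist yet.
func (a *seqAlloc) Malloc(conversationID string, size int64) int64 {
	prev := a.max[conversationID]
	a.max[conversationID] = prev + size
	return prev
}

type msg struct{ Seq int64 }

// assignSeqs mirrors the assignment loop in BatchInsertChat2Cache.
func assignSeqs(a *seqAlloc, conversationID string, msgs []*msg) (lastMax int64, isNew bool) {
	cur := a.Malloc(conversationID, int64(len(msgs)))
	isNew = cur == 0
	lastMax = cur
	for _, m := range msgs {
		cur++
		m.Seq = cur
	}
	return lastMax, isNew
}

func main() {
	a := &seqAlloc{max: map[string]int64{}}
	batch := []*msg{{}, {}, {}}
	last, isNew := assignSeqs(a, "si_userA_userB", batch)
	fmt.Println(last, isNew, batch[0].Seq, batch[2].Seq) // 0 true 1 3
}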
+ BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error) + SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error + + // to mq + MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) + MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData, lastSeq int64) error +} + +func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (MsgTransferDatabase, error) { + conf, err := kafka.BuildProducerConfig(*kafkaConf.Build()) + if err != nil { + return nil, err + } + producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToMongoTopic) + if err != nil { + return nil, err + } + producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic) + if err != nil { + return nil, err + } + return &msgTransferDatabase{ + msgDocDatabase: msgDocModel, + msg: msg, + seqUser: seqUser, + seqConversation: seqConversation, + producerToMongo: producerToMongo, + producerToPush: producerToPush, + }, nil +} + +type msgTransferDatabase struct { + msgDocDatabase database.Msg + msgTable model.MsgDocModel + msg cache.MsgCache + seqConversation cache.SeqConversationCache + seqUser cache.SeqUser + producerToMongo *kafka.Producer + producerToPush *kafka.Producer +} + +func (db *msgTransferDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error { + if len(msgList) == 0 { + return errs.ErrArgs.WrapMsg("msgList is empty") + } + msgs := make([]any, len(msgList)) + for i, msg := range msgList { + if msg == nil { + continue + } + var offlinePushModel *model.OfflinePushModel + if msg.OfflinePushInfo != nil { + offlinePushModel = &model.OfflinePushModel{ + Title: msg.OfflinePushInfo.Title, + Desc: msg.OfflinePushInfo.Desc, + Ex: msg.OfflinePushInfo.Ex, + IOSPushSound: msg.OfflinePushInfo.IOSPushSound, + IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount, + } + } + msgs[i] = &model.MsgDataModel{ + SendID: msg.SendID, + RecvID: msg.RecvID, + GroupID: msg.GroupID, + ClientMsgID: msg.ClientMsgID, + ServerMsgID: msg.ServerMsgID, + SenderPlatformID: msg.SenderPlatformID, + SenderNickname: msg.SenderNickname, + SenderFaceURL: msg.SenderFaceURL, + SessionType: msg.SessionType, + MsgFrom: msg.MsgFrom, + ContentType: msg.ContentType, + Content: string(msg.Content), + Seq: msg.Seq, + SendTime: msg.SendTime, + CreateTime: msg.CreateTime, + Status: msg.Status, + Options: msg.Options, + OfflinePush: offlinePushModel, + AtUserIDList: msg.AtUserIDList, + AttachedInfo: msg.AttachedInfo, + Ex: msg.Ex, + } + } + return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq) +} + +func (db *msgTransferDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error { + if len(fields) == 0 { + return nil + } + num := db.msgTable.GetSingleGocMsgNum() + // num = 100 + for i, field := range fields { // Check the type of the field + var ok bool + switch key { + case updateKeyMsg: + var msg *model.MsgDataModel + msg, ok = field.(*model.MsgDataModel) + if msg != nil && msg.Seq != firstSeq+int64(i) { + return errs.ErrInternalServer.WrapMsg("seq is invalid") + } + case updateKeyRevoke: + _, ok = field.(*model.RevokeModel) + default: + return 
errs.ErrInternalServer.WrapMsg("key is invalid") + } + if !ok { + return errs.ErrInternalServer.WrapMsg("field type is invalid") + } + } + // Returns true if the document exists in the database, false if the document does not exist in the database + updateMsgModel := func(seq int64, i int) (bool, error) { + var ( + res *mongo.UpdateResult + err error + ) + docID := db.msgTable.GetDocID(conversationID, seq) + index := db.msgTable.GetMsgIndex(seq) + field := fields[i] + switch key { + case updateKeyMsg: + res, err = db.msgDocDatabase.UpdateMsg(ctx, docID, index, "msg", field) + case updateKeyRevoke: + res, err = db.msgDocDatabase.UpdateMsg(ctx, docID, index, "revoke", field) + } + if err != nil { + return false, err + } + return res.MatchedCount > 0, nil + } + tryUpdate := true + for i := 0; i < len(fields); i++ { + seq := firstSeq + int64(i) // Current sequence number + if tryUpdate { + matched, err := updateMsgModel(seq, i) + if err != nil { + return err + } + if matched { + continue // The current data has been updated, skip the current data + } + } + doc := model.MsgDocModel{ + DocID: db.msgTable.GetDocID(conversationID, seq), + Msg: make([]*model.MsgInfoModel, num), + } + var insert int // Inserted data number + for j := i; j < len(fields); j++ { + seq = firstSeq + int64(j) + if db.msgTable.GetDocID(conversationID, seq) != doc.DocID { + break + } + insert++ + switch key { + case updateKeyMsg: + doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{ + Msg: fields[j].(*model.MsgDataModel), + } + case updateKeyRevoke: + doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{ + Revoke: fields[j].(*model.RevokeModel), + } + } + } + for i, msgInfo := range doc.Msg { + if msgInfo == nil { + msgInfo = &model.MsgInfoModel{} + doc.Msg[i] = msgInfo + } + if msgInfo.DelList == nil { + doc.Msg[i].DelList = []string{} + } + } + if err := db.msgDocDatabase.Create(ctx, &doc); err != nil { + if mongo.IsDuplicateKeyError(err) { + i-- // already inserted + tryUpdate = true // next block use update mode + continue + } + return err + } + tryUpdate = false // The current block is inserted successfully, and the next block is inserted preferentially + i += insert - 1 // Skip the inserted data + } + return nil +} + +func (db *msgTransferDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error { + return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs) +} + +func (db *msgTransferDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) { + lenList := len(msgs) + if int64(lenList) > db.msgTable.GetSingleGocMsgNum() { + return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap() + } + if lenList < 1 { + return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap() + } + currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs))) + if err != nil { + log.ZError(ctx, "storage.seq.Malloc", err) + return 0, false, err + } + isNew = currentMaxSeq == 0 + lastMaxSeq := currentMaxSeq + userSeqMap := make(map[string]int64) + for _, m := range msgs { + currentMaxSeq++ + m.Seq = currentMaxSeq + userSeqMap[m.SendID] = m.Seq + } + + failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs) + if err != nil { + prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum)) + log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID) + } else { + 
prommetrics.MsgInsertRedisSuccessCounter.Inc() + } + err = db.setHasReadSeqs(ctx, conversationID, userSeqMap) + if err != nil { + log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID) + prommetrics.SeqSetFailedCounter.Inc() + } + return lastMaxSeq, isNew, errs.Wrap(err) +} + +func (db *msgTransferDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error { + for userID, seq := range userSeqMap { + if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil { + return err + } + } + return nil +} + +func (db *msgTransferDatabase) SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error { + return db.seqUser.SetUserReadSeqToDB(ctx, conversationID, userID, hasReadSeq) +} + +func (db *msgTransferDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) { + partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID}) + if err != nil { + log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq) + return 0, 0, err + } + return partition, offset, nil +} + +func (db *msgTransferDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error { + if len(messages) > 0 { + _, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages}) + if err != nil { + log.ZError(ctx, "MsgToMongoMQ", err, "key", key, "conversationID", conversationID, "lastSeq", lastSeq) + return err + } + } + return nil +} diff --git a/pkg/common/storage/controller/push.go b/pkg/common/storage/controller/push.go index 199a0ba678..91ef126fe5 100644 --- a/pkg/common/storage/controller/push.go +++ b/pkg/common/storage/controller/push.go @@ -17,21 +17,45 @@ package controller import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/protocol/push" + "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mq/kafka" ) type PushDatabase interface { DelFcmToken(ctx context.Context, userID string, platformID int) error + MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error } type pushDataBase struct { - cache cache.ThirdCache + cache cache.ThirdCache + producerToOfflinePush *kafka.Producer } -func NewPushDatabase(cache cache.ThirdCache) PushDatabase { - return &pushDataBase{cache: cache} +func NewPushDatabase(cache cache.ThirdCache, kafkaConf *config.Kafka) PushDatabase { + conf, err := kafka.BuildProducerConfig(*kafkaConf.Build()) + if err != nil { + return nil + } + producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic) + if err != nil { + return nil + } + return &pushDataBase{ + cache: cache, + producerToOfflinePush: producerToOfflinePush, + } } func (p *pushDataBase) DelFcmToken(ctx context.Context, userID string, platformID int) error { return p.cache.DelFcmToken(ctx, userID, platformID) } + +func (p *pushDataBase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error { + _, _, err := p.producerToOfflinePush.SendMessage(ctx, key, &push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs}) + log.ZInfo(ctx, 
"message is push to offlinePush topic", "key", key, "userIDs", userIDs, "msg", msg2mq.String()) + return err +} diff --git a/pkg/common/storage/controller/user.go b/pkg/common/storage/controller/user.go index 533eac78f1..3f34481a36 100644 --- a/pkg/common/storage/controller/user.go +++ b/pkg/common/storage/controller/user.go @@ -16,12 +16,13 @@ package controller import ( "context" + "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/tools/db/pagination" "github.com/openimsdk/tools/db/tx" "github.com/openimsdk/tools/utils/datautil" - "time" "github.com/openimsdk/protocol/user" "github.com/openimsdk/tools/errs" @@ -111,10 +112,14 @@ func (u *userDatabase) InitOnce(ctx context.Context, users []*model.User) error // FindWithError Get the information of the specified user and return an error if the userID is not found. func (u *userDatabase) FindWithError(ctx context.Context, userIDs []string) (users []*model.User, err error) { userIDs = datautil.Distinct(userIDs) + + // TODO: Add logic to identify which user IDs are distinct and which user IDs were not found. + users, err = u.cache.GetUsersInfo(ctx, userIDs) if err != nil { return } + if len(users) != len(userIDs) { err = errs.ErrRecordNotFound.WrapMsg("userID not found") } diff --git a/pkg/common/storage/database/conversation.go b/pkg/common/storage/database/conversation.go index 85f3dd668a..5a9b19035d 100644 --- a/pkg/common/storage/database/conversation.go +++ b/pkg/common/storage/database/conversation.go @@ -27,6 +27,8 @@ type Conversation interface { Find(ctx context.Context, ownerUserID string, conversationIDs []string) (conversations []*model.Conversation, err error) FindUserID(ctx context.Context, userIDs []string, conversationIDs []string) ([]string, error) FindUserIDAllConversationID(ctx context.Context, userID string) ([]string, error) + FindUserIDAllNotNotifyConversationID(ctx context.Context, userID string) ([]string, error) + FindUserIDAllPinnedConversationID(ctx context.Context, userID string) ([]string, error) Take(ctx context.Context, userID, conversationID string) (conversation *model.Conversation, err error) FindConversationID(ctx context.Context, userID string, conversationIDs []string) (existConversationID []string, err error) FindUserIDAllConversations(ctx context.Context, userID string) (conversations []*model.Conversation, err error) diff --git a/pkg/common/storage/database/mgo/conversation.go b/pkg/common/storage/database/mgo/conversation.go index 3d505f1d34..f7ced1c2cf 100644 --- a/pkg/common/storage/database/mgo/conversation.go +++ b/pkg/common/storage/database/mgo/conversation.go @@ -124,6 +124,20 @@ func (c *ConversationMgo) FindUserIDAllConversationID(ctx context.Context, userI return mongoutil.Find[string](ctx, c.coll, bson.M{"owner_user_id": userID}, options.Find().SetProjection(bson.M{"_id": 0, "conversation_id": 1})) } +func (c *ConversationMgo) FindUserIDAllNotNotifyConversationID(ctx context.Context, userID string) ([]string, error) { + return mongoutil.Find[string](ctx, c.coll, bson.M{ + "owner_user_id": userID, + "recv_msg_opt": constant.ReceiveNotNotifyMessage, + }, options.Find().SetProjection(bson.M{"_id": 0, "conversation_id": 1})) +} + +func (c *ConversationMgo) FindUserIDAllPinnedConversationID(ctx context.Context, userID string) ([]string, error) { + return mongoutil.Find[string](ctx, c.coll, bson.M{ + "owner_user_id": userID, + "is_pinned": true, + }, 
options.Find().SetProjection(bson.M{"_id": 0, "conversation_id": 1})) +} + func (c *ConversationMgo) Take(ctx context.Context, userID, conversationID string) (conversation *model.Conversation, err error) { return mongoutil.FindOne[*model.Conversation](ctx, c.coll, bson.M{"owner_user_id": userID, "conversation_id": conversationID}) } diff --git a/pkg/common/storage/database/mgo/msg.go b/pkg/common/storage/database/mgo/msg.go index 7dc308a7c4..fc1fe47eab 100644 --- a/pkg/common/storage/database/mgo/msg.go +++ b/pkg/common/storage/database/mgo/msg.go @@ -8,6 +8,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/tools/utils/datautil" + "golang.org/x/exp/rand" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msg" @@ -117,9 +118,9 @@ func (m *MsgMgo) GetMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID strin } func (m *MsgMgo) getMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID string, seqs []int64) ([]*model.MsgInfoModel, error) { - indexs := make([]int64, 0, len(seqs)) + indexes := make([]int64, 0, len(seqs)) for _, seq := range seqs { - indexs = append(indexs, m.model.GetMsgIndex(seq)) + indexes = append(indexes, m.model.GetMsgIndex(seq)) } pipeline := mongo.Pipeline{ bson.D{{Key: "$match", Value: bson.D{ @@ -130,7 +131,7 @@ func (m *MsgMgo) getMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID strin {Key: "doc_id", Value: 1}, {Key: "msgs", Value: bson.D{ {Key: "$map", Value: bson.D{ - {Key: "input", Value: indexs}, + {Key: "input", Value: indexes}, {Key: "as", Value: "index"}, {Key: "in", Value: bson.D{ {Key: "$arrayElemAt", Value: bson.A{"$msgs", "$$index"}}, @@ -1226,10 +1227,53 @@ func (m *MsgMgo) ConvertMsgsDocLen(ctx context.Context, conversationIDs []string } } -func (m *MsgMgo) GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) { +func (m *MsgMgo) GetDocIDs(ctx context.Context) ([]string, error) { + limit := 5000 + var skip int + var docIDs []string + var offset int + + count, err := m.coll.CountDocuments(ctx, bson.M{}) + if err != nil { + return nil, err + } + + if count < int64(limit) { + skip = 0 + } else { + rand.Seed(uint64(time.Now().UnixMilli())) + skip = rand.Intn(int(count / int64(limit))) + offset = skip * limit + } + log.ZDebug(ctx, "offset", "skip", skip, "offset", offset) + res, err := mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, []bson.M{ + { + "$project": bson.M{ + "doc_id": 1, + }, + }, + { + "$skip": offset, + }, + { + "$limit": limit, + }, + }) + + for _, doc := range res { + docIDs = append(docIDs, doc.DocID) + } + + return docIDs, errs.Wrap(err) +} + +func (m *MsgMgo) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) { return mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, []bson.M{ { "$match": bson.M{ + "doc_id": bson.M{ + "$in": docIDs, + }, "msgs.msg.send_time": bson.M{ "$lt": ts, }, diff --git a/pkg/common/storage/database/mgo/seq_user.go b/pkg/common/storage/database/mgo/seq_user.go index 9faad416ae..244de30000 100644 --- a/pkg/common/storage/database/mgo/seq_user.go +++ b/pkg/common/storage/database/mgo/seq_user.go @@ -115,5 +115,12 @@ func (s *seqUserMongo) GetUserReadSeqs(ctx context.Context, userID string, conve } func (s *seqUserMongo) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error { + dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID) + if err != nil 
{ + return err + } + if dbSeq > seq { + return nil + } return s.setSeq(ctx, conversationID, userID, seq, "read_seq") } diff --git a/pkg/common/storage/database/mgo/user.go b/pkg/common/storage/database/mgo/user.go index 8978e64ebf..ee92b75544 100644 --- a/pkg/common/storage/database/mgo/user.go +++ b/pkg/common/storage/database/mgo/user.go @@ -167,6 +167,10 @@ func (u *UserMgo) DeleteUserCommand(ctx context.Context, userID string, Type int filter := bson.M{"userID": userID, "type": Type, "uuid": UUID} result, err := collection.DeleteOne(ctx, filter) + // when err is not nil, result might be nil + if err != nil { + return errs.Wrap(err) + } if result.DeletedCount == 0 { // No records found to update return errs.Wrap(errs.ErrRecordNotFound) diff --git a/pkg/common/storage/database/msg.go b/pkg/common/storage/database/msg.go index 84f3a9e3e2..23a99f5b96 100644 --- a/pkg/common/storage/database/msg.go +++ b/pkg/common/storage/database/msg.go @@ -16,10 +16,11 @@ package database import ( "context" + "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/protocol/msg" "go.mongodb.org/mongo-driver/mongo" - "time" ) type Msg interface { @@ -44,5 +45,7 @@ type Msg interface { DeleteDoc(ctx context.Context, docID string) error DeleteMsgByIndex(ctx context.Context, docID string, index []int) error - GetBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) + GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) + + GetDocIDs(ctx context.Context) ([]string, error) } diff --git a/pkg/common/storage/database/seq.go b/pkg/common/storage/database/seq.go index cf93b795f4..a97ca2d1f9 100644 --- a/pkg/common/storage/database/seq.go +++ b/pkg/common/storage/database/seq.go @@ -2,6 +2,11 @@ package database import "context" +type SeqTime struct { + Seq int64 + Time int64 +} + type SeqConversation interface { Malloc(ctx context.Context, conversationID string, size int64) (int64, error) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) diff --git a/pkg/common/storage/model/msg.go b/pkg/common/storage/model/msg.go index 8095665d2f..e16233973b 100644 --- a/pkg/common/storage/model/msg.go +++ b/pkg/common/storage/model/msg.go @@ -92,15 +92,15 @@ type GroupCount struct { Count int64 `bson:"count"` } -func (MsgDocModel) TableName() string { +func (*MsgDocModel) TableName() string { return MsgTableName } -func (MsgDocModel) GetSingleGocMsgNum() int64 { +func (*MsgDocModel) GetSingleGocMsgNum() int64 { return singleGocMsgNum } -func (MsgDocModel) GetSingleGocMsgNum5000() int64 { +func (*MsgDocModel) GetSingleGocMsgNum5000() int64 { return singleGocMsgNum5000 } @@ -108,12 +108,12 @@ func (m *MsgDocModel) IsFull() bool { return m.Msg[len(m.Msg)-1].Msg != nil } -func (m MsgDocModel) GetDocID(conversationID string, seq int64) string { +func (m *MsgDocModel) GetDocID(conversationID string, seq int64) string { seqSuffix := (seq - 1) / singleGocMsgNum return m.indexGen(conversationID, seqSuffix) } -func (m MsgDocModel) GetDocIDSeqsMap(conversationID string, seqs []int64) map[string][]int64 { +func (m *MsgDocModel) GetDocIDSeqsMap(conversationID string, seqs []int64) map[string][]int64 { t := make(map[string][]int64) for i := 0; i < len(seqs); i++ { docID := m.GetDocID(conversationID, seqs[i]) @@ -127,15 +127,15 @@ func (m MsgDocModel) GetDocIDSeqsMap(conversationID string, seqs []int64) map[st return t } -func (MsgDocModel) GetMsgIndex(seq int64) int64 { +func (*MsgDocModel) GetMsgIndex(seq 
int64) int64 { return (seq - 1) % singleGocMsgNum } -func (MsgDocModel) indexGen(conversationID string, seqSuffix int64) string { +func (*MsgDocModel) indexGen(conversationID string, seqSuffix int64) string { return conversationID + ":" + strconv.FormatInt(seqSuffix, 10) } -func (MsgDocModel) GenExceptionMessageBySeqs(seqs []int64) (exceptionMsg []*sdkws.MsgData) { +func (*MsgDocModel) GenExceptionMessageBySeqs(seqs []int64) (exceptionMsg []*sdkws.MsgData) { for _, v := range seqs { msgModel := new(sdkws.MsgData) msgModel.Seq = v diff --git a/pkg/localcache/lru/lru.go b/pkg/localcache/lru/lru.go index 2fedffc48b..726535c48c 100644 --- a/pkg/localcache/lru/lru.go +++ b/pkg/localcache/lru/lru.go @@ -20,7 +20,9 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V] type LRU[K comparable, V any] interface { Get(key K, fetch func() (V, error)) (V, error) + Set(key K, value V) SetHas(key K, value V) bool + GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) Del(key K) bool Stop() } diff --git a/pkg/localcache/lru/lru_expiration.go b/pkg/localcache/lru/lru_expiration.go index d27e670574..df6bacbf43 100644 --- a/pkg/localcache/lru/lru_expiration.go +++ b/pkg/localcache/lru/lru_expiration.go @@ -51,6 +51,11 @@ type ExpirationLRU[K comparable, V any] struct { target Target } +func (x *ExpirationLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) { + //TODO implement me + panic("implement me") +} + func (x *ExpirationLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { x.lock.Lock() v, ok := x.core.Get(key) @@ -99,5 +104,11 @@ func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool { return false } +func (x *ExpirationLRU[K, V]) Set(key K, value V) { + x.lock.Lock() + defer x.lock.Unlock() + x.core.Add(key, &expirationLruItem[V]{value: value}) +} + func (x *ExpirationLRU[K, V]) Stop() { } diff --git a/pkg/localcache/lru/lru_lazy.go b/pkg/localcache/lru/lru_lazy.go index e935c687c4..b4f0377a70 100644 --- a/pkg/localcache/lru/lru_lazy.go +++ b/pkg/localcache/lru/lru_lazy.go @@ -88,18 +88,75 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { return v.value, v.err } -//func (x *LayLRU[K, V]) Set(key K, value V) { -// x.lock.Lock() -// x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()}) -// x.lock.Unlock() -//} -// +func (x *LayLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) { + var ( + err error + once sync.Once + ) + + res := make(map[K]V) + queries := make([]K, 0) + setVs := make(map[K]*layLruItem[V]) + for _, key := range keys { + x.lock.Lock() + v, ok := x.core.Get(key) + x.lock.Unlock() + if ok { + v.lock.Lock() + expires, value, err1 := v.expires, v.value, v.err + v.lock.Unlock() + if expires != 0 && expires > time.Now().UnixMilli() { + x.target.IncrGetHit() + res[key] = value + if err1 != nil { + once.Do(func() { + err = err1 + }) + } + continue + } + } + queries = append(queries, key) + } + values, err1 := fetch(queries) + if err1 != nil { + once.Do(func() { + err = err1 + }) + } + for key, val := range values { + v := &layLruItem[V]{} + v.value = val + + if err == nil { + v.expires = time.Now().Add(x.successTTL).UnixMilli() + x.target.IncrGetSuccess() + } else { + v.expires = time.Now().Add(x.failedTTL).UnixMilli() + x.target.IncrGetFailed() + } + setVs[key] = v + x.lock.Lock() + x.core.Add(key, v) + x.lock.Unlock() + res[key] = val + } + + return res, err +} + //func (x *LayLRU[K, V]) Has(key K) bool { // 
x.lock.Lock() // defer x.lock.Unlock() // return x.core.Contains(key) //} +func (x *LayLRU[K, V]) Set(key K, value V) { + x.lock.Lock() + defer x.lock.Unlock() + x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()}) +} + func (x *LayLRU[K, V]) SetHas(key K, value V) bool { x.lock.Lock() defer x.lock.Unlock() diff --git a/pkg/localcache/lru/lru_slot.go b/pkg/localcache/lru/lru_slot.go index 4538ca20e4..077219b75f 100644 --- a/pkg/localcache/lru/lru_slot.go +++ b/pkg/localcache/lru/lru_slot.go @@ -32,6 +32,29 @@ type slotLRU[K comparable, V any] struct { hash func(k K) uint64 } +func (x *slotLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) { + var ( + slotKeys = make(map[uint64][]K) + vs = make(map[K]V) + ) + + for _, k := range keys { + index := x.getIndex(k) + slotKeys[index] = append(slotKeys[index], k) + } + + for k, v := range slotKeys { + batches, err := x.slots[k].GetBatch(v, fetch) + if err != nil { + return nil, err + } + for key, value := range batches { + vs[key] = value + } + } + return vs, nil +} + func (x *slotLRU[K, V]) getIndex(k K) uint64 { return x.hash(k) % x.n } @@ -40,6 +63,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) { return x.slots[x.getIndex(key)].Get(key, fetch) } +func (x *slotLRU[K, V]) Set(key K, value V) { + x.slots[x.getIndex(key)].Set(key, value) +} + func (x *slotLRU[K, V]) SetHas(key K, value V) bool { return x.slots[x.getIndex(key)].SetHas(key, value) } diff --git a/pkg/msgprocessor/conversation.go b/pkg/msgprocessor/conversation.go index f8140cc7df..04d772d161 100644 --- a/pkg/msgprocessor/conversation.go +++ b/pkg/msgprocessor/conversation.go @@ -39,7 +39,9 @@ func GetNotificationConversationIDByMsg(msg *sdkws.MsgData) string { case constant.ReadGroupChatType: return "n_" + msg.GroupID case constant.NotificationChatType: - return "n_" + msg.SendID + "_" + msg.RecvID + l := []string{msg.SendID, msg.RecvID} + sort.Strings(l) + return "n_" + strings.Join(l, "_") } return "" } @@ -55,21 +57,11 @@ func GetChatConversationIDByMsg(msg *sdkws.MsgData) string { case constant.ReadGroupChatType: return "sg_" + msg.GroupID case constant.NotificationChatType: - return "sn_" + msg.SendID + "_" + msg.RecvID - } - - return "" -} - -func GenConversationUniqueKey(msg *sdkws.MsgData) string { - switch msg.SessionType { - case constant.SingleChatType, constant.NotificationChatType: l := []string{msg.SendID, msg.RecvID} sort.Strings(l) - return strings.Join(l, "_") - case constant.ReadGroupChatType: - return msg.GroupID + return "sn_" + strings.Join(l, "_") } + return "" } @@ -94,10 +86,12 @@ func GetConversationIDByMsg(msg *sdkws.MsgData) string { } return "sg_" + msg.GroupID // super group chat case constant.NotificationChatType: + l := []string{msg.SendID, msg.RecvID} + sort.Strings(l) if !options.IsNotNotification() { - return "n_" + msg.SendID + "_" + msg.RecvID // super group chat + return "n_" + strings.Join(l, "_") } - return "sn_" + msg.SendID + "_" + msg.RecvID // server notification chat + return "sn_" + strings.Join(l, "_") } return "" } @@ -120,30 +114,6 @@ func GetConversationIDBySessionType(sessionType int, ids ...string) string { return "" } -func GetNotificationConversationIDByConversationID(conversationID string) string { - l := strings.Split(conversationID, "_") - if len(l) > 1 { - l[0] = "n" - return strings.Join(l, "_") - } - - return "" -} - -func GetNotificationConversationID(sessionType int, ids ...string) string { - sort.Strings(ids) - if 
len(ids) > 2 || len(ids) < 1 { - return "" - } - switch sessionType { - case constant.SingleChatType: - return "n_" + strings.Join(ids, "_") // single chat - case constant.ReadGroupChatType: - return "n_" + ids[0] // super group chat - } - return "" -} - func IsNotification(conversationID string) bool { return strings.HasPrefix(conversationID, "n_") } @@ -152,30 +122,6 @@ func IsNotificationByMsg(msg *sdkws.MsgData) bool { return !Options(msg.Options).IsNotNotification() } -func ParseConversationID(msg *sdkws.MsgData) (isNotification bool, conversationID string) { - options := Options(msg.Options) - switch msg.SessionType { - case constant.SingleChatType: - l := []string{msg.SendID, msg.RecvID} - sort.Strings(l) - if !options.IsNotNotification() { - return true, "n_" + strings.Join(l, "_") - } - return false, "si_" + strings.Join(l, "_") // single chat - case constant.ReadGroupChatType: - if !options.IsNotNotification() { - return true, "n_" + msg.GroupID // super group chat - } - return false, "sg_" + msg.GroupID // super group chat - case constant.NotificationChatType: - if !options.IsNotNotification() { - return true, "n_" + msg.SendID + "_" + msg.RecvID // super group chat - } - return false, "sn_" + msg.SendID + "_" + msg.RecvID // server notification chat - } - return false, "" -} - type MsgBySeq []*sdkws.MsgData func (s MsgBySeq) Len() int { diff --git a/pkg/msgprocessor/conversation_test.go b/pkg/msgprocessor/conversation_test.go deleted file mode 100644 index 32601baec4..0000000000 --- a/pkg/msgprocessor/conversation_test.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright Β© 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package msgprocessor - -import ( - "testing" - - "github.com/openimsdk/protocol/sdkws" - "google.golang.org/protobuf/proto" -) - -func TestGetNotificationConversationIDByMsg(t *testing.T) { - type args struct { - msg *sdkws.MsgData - } - tests := []struct { - name string - args args - want string - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetNotificationConversationIDByMsg(tt.args.msg); got != tt.want { - t.Errorf("GetNotificationConversationIDByMsg() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetChatConversationIDByMsg(t *testing.T) { - type args struct { - msg *sdkws.MsgData - } - tests := []struct { - name string - args args - want string - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetChatConversationIDByMsg(tt.args.msg); got != tt.want { - t.Errorf("GetChatConversationIDByMsg() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGenConversationUniqueKey(t *testing.T) { - type args struct { - msg *sdkws.MsgData - } - tests := []struct { - name string - args args - want string - }{ - // TODO: Add test cases. 
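With the changes above, notification conversation IDs are always built from the sorted pair of user IDs, so a message from A to B and one from B to A resolve to the same conversation. A small check of that property, using a local helper that mirrors the sorted-join now used by GetNotificationConversationIDByMsg:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// notificationConvID is a local stand-in that joins the sorted user IDs
// under the "n_" prefix, the pattern applied for NotificationChatType messages.
func notificationConvID(sendID, recvID string) string {
	l := []string{sendID, recvID}
	sort.Strings(l)
	return "n_" + strings.Join(l, "_")
}

func main() {
	a := notificationConvID("userA", "userB")
	b := notificationConvID("userB", "userA")
	fmt.Println(a, a == b) // n_userA_userB true
}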
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GenConversationUniqueKey(tt.args.msg); got != tt.want { - t.Errorf("GenConversationUniqueKey() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetConversationIDByMsg(t *testing.T) { - type args struct { - msg *sdkws.MsgData - } - tests := []struct { - name string - args args - want string - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetConversationIDByMsg(tt.args.msg); got != tt.want { - t.Errorf("GetConversationIDByMsg() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetConversationIDBySessionType(t *testing.T) { - type args struct { - sessionType int - ids []string - } - tests := []struct { - name string - args args - want string - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetConversationIDBySessionType(tt.args.sessionType, tt.args.ids...); got != tt.want { - t.Errorf("GetConversationIDBySessionType() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetNotificationConversationIDByConversationID(t *testing.T) { - type args struct { - conversationID string - } - tests := []struct { - name string - args args - want string - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetNotificationConversationIDByConversationID(tt.args.conversationID); got != tt.want { - t.Errorf("GetNotificationConversationIDByConversationID() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetNotificationConversationID(t *testing.T) { - type args struct { - sessionType int - ids []string - } - tests := []struct { - name string - args args - want string - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetNotificationConversationID(tt.args.sessionType, tt.args.ids...); got != tt.want { - t.Errorf("GetNotificationConversationID() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestIsNotification(t *testing.T) { - type args struct { - conversationID string - } - tests := []struct { - name string - args args - want bool - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := IsNotification(tt.args.conversationID); got != tt.want { - t.Errorf("IsNotification() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestIsNotificationByMsg(t *testing.T) { - type args struct { - msg *sdkws.MsgData - } - tests := []struct { - name string - args args - want bool - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := IsNotificationByMsg(tt.args.msg); got != tt.want { - t.Errorf("IsNotificationByMsg() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestParseConversationID(t *testing.T) { - type args struct { - msg *sdkws.MsgData - } - tests := []struct { - name string - args args - wantIsNotification bool - wantConversationID string - }{ - // TODO: Add test cases. 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotIsNotification, gotConversationID := ParseConversationID(tt.args.msg) - if gotIsNotification != tt.wantIsNotification { - t.Errorf("ParseConversationID() gotIsNotification = %v, want %v", gotIsNotification, tt.wantIsNotification) - } - if gotConversationID != tt.wantConversationID { - t.Errorf("ParseConversationID() gotConversationID = %v, want %v", gotConversationID, tt.wantConversationID) - } - }) - } -} - -func TestMsgBySeq_Len(t *testing.T) { - tests := []struct { - name string - s MsgBySeq - want int - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tt.s.Len(); got != tt.want { - t.Errorf("MsgBySeq.Len() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestMsgBySeq_Less(t *testing.T) { - type args struct { - i int - j int - } - tests := []struct { - name string - s MsgBySeq - args args - want bool - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tt.s.Less(tt.args.i, tt.args.j); got != tt.want { - t.Errorf("MsgBySeq.Less() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestMsgBySeq_Swap(t *testing.T) { - type args struct { - i int - j int - } - tests := []struct { - name string - s MsgBySeq - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.s.Swap(tt.args.i, tt.args.j) - }) - } -} - -func TestPb2String(t *testing.T) { - type args struct { - pb proto.Message - } - tests := []struct { - name string - args args - want string - wantErr bool - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := Pb2String(tt.args.pb) - if (err != nil) != tt.wantErr { - t.Errorf("Pb2String() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("Pb2String() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestString2Pb(t *testing.T) { - type args struct { - s string - pb proto.Message - } - tests := []struct { - name string - args args - wantErr bool - }{ - // TODO: Add test cases. 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := String2Pb(tt.args.s, tt.args.pb); (err != nil) != tt.wantErr { - t.Errorf("String2Pb() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/pkg/rpccache/conversation.go b/pkg/rpccache/conversation.go index 2a62c7bbd5..925d2a37ca 100644 --- a/pkg/rpccache/conversation.go +++ b/pkg/rpccache/conversation.go @@ -86,7 +86,7 @@ func (c *ConversationLocalCache) GetConversation(ctx context.Context, userID, co if err == nil { log.ZDebug(ctx, "ConversationLocalCache GetConversation return", "userID", userID, "conversationID", conversationID, "value", val) } else { - log.ZError(ctx, "ConversationLocalCache GetConversation return", err, "userID", userID, "conversationID", conversationID) + log.ZWarn(ctx, "ConversationLocalCache GetConversation return", err, "userID", userID, "conversationID", conversationID) } }() var cache cacheProto[pbconversation.Conversation] diff --git a/pkg/rpccache/online.go b/pkg/rpccache/online.go index 2ffa1f1577..a02a0662d4 100644 --- a/pkg/rpccache/online.go +++ b/pkg/rpccache/online.go @@ -2,60 +2,197 @@ package rpccache import ( "context" + "fmt" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/user" + "math/rand" + "strconv" + "sync" + "sync/atomic" + "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/open-im-server/v3/pkg/localcache/lru" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/util/useronline" + "github.com/openimsdk/tools/db/cacheutil" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mcontext" "github.com/redis/go-redis/v9" - "math/rand" - "strconv" - "time" ) -func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache { +func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fullUserCache bool, fn func(ctx context.Context, userID string, platformIDs []int32)) (*OnlineCache, error) { + l := &sync.Mutex{} x := &OnlineCache{ - user: user, - group: group, - local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] { + user: user, + group: group, + fullUserCache: fullUserCache, + Lock: l, + Cond: sync.NewCond(l), + } + + ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10)) + + switch x.fullUserCache { + case true: + log.ZDebug(ctx, "fullUserCache is true") + x.mapCache = cacheutil.NewCache[string, []int32]() + go func() { + if err := x.initUsersOnlineStatus(ctx); err != nil { + log.ZError(ctx, "initUsersOnlineStatus failed", err) + } + }() + case false: + log.ZDebug(ctx, "fullUserCache is false") + x.lruCache = lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] { return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {}) - }), + }) + x.CurrentPhase.Store(DoSubscribeOver) + x.Cond.Broadcast() } + go func() { - ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10)) - for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() { - userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload) + x.doSubscribe(ctx, rdb, 
fn) + }() + return x, nil +} + +const ( + Begin uint32 = iota + DoOnlineStatusOver + DoSubscribeOver +) + +type OnlineCache struct { + user rpcclient.UserRpcClient + group *GroupLocalCache + + // fullUserCache if enabled, caches the online status of all users using mapCache; + // otherwise, only a portion of users' online statuses (regardless of whether they are online) will be cached using lruCache. + fullUserCache bool + + lruCache lru.LRU[string, []int32] + mapCache *cacheutil.Cache[string, []int32] + + Lock *sync.Mutex + Cond *sync.Cond + CurrentPhase atomic.Uint32 +} + +func (o *OnlineCache) initUsersOnlineStatus(ctx context.Context) (err error) { + log.ZDebug(ctx, "init users online status begin") + + var ( + totalSet atomic.Int64 + maxTries = 5 + retryInterval = time.Second * 5 + + resp *user.GetAllOnlineUsersResp + ) + + defer func(t time.Time) { + log.ZInfo(ctx, "init users online status end", "cost", time.Since(t), "totalSet", totalSet.Load()) + o.CurrentPhase.Store(DoOnlineStatusOver) + o.Cond.Broadcast() + }(time.Now()) + + retryOperation := func(operation func() error, operationName string) error { + for i := 0; i < maxTries; i++ { + if err = operation(); err != nil { + log.ZWarn(ctx, fmt.Sprintf("initUsersOnlineStatus: %s failed", operationName), err) + time.Sleep(retryInterval) + } else { + return nil + } + } + return err + } + + cursor := uint64(0) + for resp == nil || resp.NextCursor != 0 { + if err = retryOperation(func() error { + resp, err = o.user.GetAllOnlineUsers(ctx, cursor) if err != nil { - log.ZError(ctx, "OnlineCache setUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel) - continue + return err + } + + for _, u := range resp.StatusList { + if u.Status == constant.Online { + o.setUserOnline(u.UserID, u.PlatformIDs) + } + totalSet.Add(1) } - storageCache := x.setUserOnline(userID, platformIDs) - log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache) + cursor = resp.NextCursor + return nil + }, "getAllOnlineUsers"); err != nil { + return err + } + } + + return nil +} + +func (o *OnlineCache) doSubscribe(ctx context.Context, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) { + o.Lock.Lock() + ch := rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() + for o.CurrentPhase.Load() < DoOnlineStatusOver { + o.Cond.Wait() + } + o.Lock.Unlock() + log.ZInfo(ctx, "begin doSubscribe") + + doMessage := func(message *redis.Message) { + userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload) + if err != nil { + log.ZError(ctx, "OnlineCache setHasUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel) + return + } + log.ZDebug(ctx, fmt.Sprintf("get subscribe %s message", cachekey.OnlineChannel), "useID", userID, "platformIDs", platformIDs) + switch o.fullUserCache { + case true: + if len(platformIDs) == 0 { + // offline + o.mapCache.Delete(userID) + } else { + o.mapCache.Store(userID, platformIDs) + } + case false: + storageCache := o.setHasUserOnline(userID, platformIDs) + log.ZDebug(ctx, "OnlineCache setHasUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache) if fn != nil { fn(ctx, userID, platformIDs) } } - }() - return x -} + } -type OnlineCache struct { - user rpcclient.UserRpcClient - group *GroupLocalCache - local lru.LRU[string, []int32] + 
if o.CurrentPhase.Load() == DoOnlineStatusOver { + for done := false; !done; { + select { + case message := <-ch: + doMessage(message) + default: + o.CurrentPhase.Store(DoSubscribeOver) + o.Cond.Broadcast() + done = true + } + } + } + + for message := range ch { + doMessage(message) + } } func (o *OnlineCache) getUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) { - platformIDs, err := o.local.Get(userID, func() ([]int32, error) { + platformIDs, err := o.lruCache.Get(userID, func() ([]int32, error) { return o.user.GetUserOnlinePlatform(ctx, userID) }) if err != nil { log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userID) return nil, err } - log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs) + //log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs) return platformIDs, nil } @@ -69,6 +206,16 @@ func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string) return platformIDs, nil } +// func (o *OnlineCache) GetUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string]int32, error) { +// platformIDs, err := o.getUserOnlinePlatform(ctx, userIDs) +// if err != nil { +// return nil, err +// } +// tmp := make([]int32, len(platformIDs)) +// copy(tmp, platformIDs) +// return platformIDs, nil +// } + func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) { platformIDs, err := o.getUserOnlinePlatform(ctx, userID) if err != nil { @@ -77,10 +224,68 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e return len(platformIDs) > 0, nil } +func (o *OnlineCache) getUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string][]int32, error) { + platformIDsMap, err := o.lruCache.GetBatch(userIDs, func(missingUsers []string) (map[string][]int32, error) { + platformIDsMap := make(map[string][]int32) + + usersStatus, err := o.user.GetUsersOnlinePlatform(ctx, missingUsers) + if err != nil { + return nil, err + } + + for _, u := range usersStatus { + platformIDsMap[u.UserID] = u.PlatformIDs + } + + return platformIDsMap, nil + }) + if err != nil { + log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userIDs) + return nil, err + } + return platformIDsMap, nil +} + +func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, []string, error) { + t := time.Now() + + var ( + onlineUserIDs = make([]string, 0, len(userIDs)) + offlineUserIDs = make([]string, 0, len(userIDs)) + ) + + switch o.fullUserCache { + case true: + for _, userID := range userIDs { + if _, ok := o.mapCache.Load(userID); ok { + onlineUserIDs = append(onlineUserIDs, userID) + } else { + offlineUserIDs = append(offlineUserIDs, userID) + } + } + case false: + userOnlineMap, err := o.getUserOnlinePlatformBatch(ctx, userIDs) + if err != nil { + return nil, nil, err + } + + for key, value := range userOnlineMap { + if len(value) > 0 { + onlineUserIDs = append(onlineUserIDs, key) + } else { + offlineUserIDs = append(offlineUserIDs, key) + } + } + } + + log.ZInfo(ctx, "get users online", "online users length", len(userIDs), "offline users length", len(offlineUserIDs), "cost", time.Since(t)) + return userIDs, offlineUserIDs, nil +} + //func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) { // onlineUserIDs := make([]string, 0, len(userIDs)) // for _, userID := range userIDs { -// online, err := o.GetUserOnline(ctx, userID) +// 
online, err := o.GetUserOnline(ctx, userID) // if err != nil { // return nil, err // } @@ -111,6 +316,15 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e // return onlineUserIDs, nil //} -func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool { - return o.local.SetHas(userID, platformIDs) +func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) { + switch o.fullUserCache { + case true: + o.mapCache.Store(userID, platformIDs) + case false: + o.lruCache.Set(userID, platformIDs) + } +} + +func (o *OnlineCache) setHasUserOnline(userID string, platformIDs []int32) bool { + return o.lruCache.SetHas(userID, platformIDs) } diff --git a/pkg/rpcclient/conversation.go b/pkg/rpcclient/conversation.go index 8f95f86a6c..ccca856194 100644 --- a/pkg/rpcclient/conversation.go +++ b/pkg/rpcclient/conversation.go @@ -77,6 +77,11 @@ func (c *ConversationRpcClient) SetConversationMaxSeq(ctx context.Context, owner return err } +func (c *ConversationRpcClient) SetConversationMinSeq(ctx context.Context, ownerUserIDs []string, conversationID string, minSeq int64) error { + _, err := c.Client.SetConversationMinSeq(ctx, &pbconversation.SetConversationMinSeqReq{OwnerUserID: ownerUserIDs, ConversationID: conversationID, MinSeq: minSeq}) + return err +} + func (c *ConversationRpcClient) SetConversations(ctx context.Context, userIDs []string, conversation *pbconversation.ConversationReq) error { _, err := c.Client.SetConversations(ctx, &pbconversation.SetConversationsReq{UserIDs: userIDs, Conversation: conversation}) return err diff --git a/pkg/rpcclient/friend.go b/pkg/rpcclient/friend.go index fd00be3292..359ed3a8b8 100644 --- a/pkg/rpcclient/friend.go +++ b/pkg/rpcclient/friend.go @@ -78,8 +78,8 @@ func (f *FriendRpcClient) GetFriendIDs(ctx context.Context, ownerUserID string) return resp.FriendIDs, nil } -func (b *FriendRpcClient) IsBlack(ctx context.Context, possibleBlackUserID, userID string) (bool, error) { - r, err := b.Client.IsBlack(ctx, &relation.IsBlackReq{UserID1: possibleBlackUserID, UserID2: userID}) +func (f *FriendRpcClient) IsBlack(ctx context.Context, possibleBlackUserID, userID string) (bool, error) { + r, err := f.Client.IsBlack(ctx, &relation.IsBlackReq{UserID1: possibleBlackUserID, UserID2: userID}) if err != nil { return false, err } diff --git a/pkg/rpcclient/msg.go b/pkg/rpcclient/msg.go index 124cc49af3..9b26a7abda 100644 --- a/pkg/rpcclient/msg.go +++ b/pkg/rpcclient/msg.go @@ -17,21 +17,22 @@ package rpcclient import ( "context" "encoding/json" + "time" + + "google.golang.org/grpc" + "google.golang.org/protobuf/proto" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/log" - "github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mq/memamq" "github.com/openimsdk/tools/system/program" "github.com/openimsdk/tools/utils/idutil" "github.com/openimsdk/tools/utils/jsonutil" "github.com/openimsdk/tools/utils/timeutil" - "google.golang.org/grpc" - "google.golang.org/protobuf/proto" - "time" ) func newContentTypeConf(conf *config.Notification) map[int32]config.NotificationConfig { @@ -159,6 +160,15 @@ func (m *MessageRpcClient) SendMsg(ctx context.Context, req *msg.SendMsgReq) (*m return resp, nil } +// SetUserConversationsMinSeq set min seq +func (m *MessageRpcClient) SetUserConversationsMinSeq(ctx context.Context, req 
*msg.SetUserConversationsMinSeqReq) (*msg.SetUserConversationsMinSeqResp, error) { + resp, err := m.Client.SetUserConversationsMinSeq(ctx, req) + if err != nil { + return nil, err + } + return resp, nil +} + // GetMaxSeq retrieves the maximum sequence number from the gRPC client. // Errors during the gRPC call are wrapped to provide additional context. func (m *MessageRpcClient) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sdkws.GetMaxSeqResp, error) { @@ -174,6 +184,9 @@ func (m *MessageRpcClient) GetMaxSeqs(ctx context.Context, conversationIDs []str resp, err := m.Client.GetMaxSeqs(ctx, &msg.GetMaxSeqsReq{ ConversationIDs: conversationIDs, }) + if err != nil { + return nil, err + } return resp.MaxSeqs, err } @@ -182,6 +195,9 @@ func (m *MessageRpcClient) GetHasReadSeqs(ctx context.Context, userID string, co UserID: userID, ConversationIDs: conversationIDs, }) + if err != nil { + return nil, err + } return resp.MaxSeqs, err } @@ -190,6 +206,9 @@ func (m *MessageRpcClient) GetMsgByConversationIDs(ctx context.Context, docIDs [ ConversationIDs: docIDs, MaxSeqs: seqs, }) + if err != nil { + return nil, err + } return resp.MsgDatas, err } @@ -204,6 +223,19 @@ func (m *MessageRpcClient) PullMessageBySeqList(ctx context.Context, req *sdkws. return resp, nil } +func (m *MessageRpcClient) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *msg.GetConversationsHasReadAndMaxSeqReq) (*msg.GetConversationsHasReadAndMaxSeqResp, error) { + resp, err := m.Client.GetConversationsHasReadAndMaxSeq(ctx, req) + if err != nil { + // Wrap the error to provide more context if the gRPC call fails. + return nil, err + } + return resp, nil +} + +func (m *MessageRpcClient) GetSeqMessage(ctx context.Context, req *msg.GetSeqMessageReq) (*msg.GetSeqMessageResp, error) { + return m.Client.GetSeqMessage(ctx, req) +} + func (m *MessageRpcClient) GetConversationMaxSeq(ctx context.Context, conversationID string) (int64, error) { resp, err := m.Client.GetConversationMaxSeq(ctx, &msg.GetConversationMaxSeqReq{ConversationID: conversationID}) if err != nil { @@ -252,8 +284,8 @@ func WithUserRpcClient(userRpcClient *UserRpcClient) NotificationSenderOptions { } const ( - notificationWorkerCount = 2 - notificationBufferSize = 200 + notificationWorkerCount = 16 + notificationBufferSize = 1024 * 1024 * 2 ) func NewNotificationSender(conf *config.Notification, opts ...NotificationSenderOptions) *NotificationSender { @@ -280,7 +312,8 @@ func WithRpcGetUserName() NotificationOptions { } func (s *NotificationSender) send(ctx context.Context, sendID, recvID string, contentType, sessionType int32, m proto.Message, opts ...NotificationOptions) { - ctx = mcontext.WithMustInfoCtx([]string{mcontext.GetOperationID(ctx), mcontext.GetOpUserID(ctx), mcontext.GetOpUserPlatform(ctx), mcontext.GetConnID(ctx)}) + //ctx = mcontext.WithMustInfoCtx([]string{mcontext.GetOperationID(ctx), mcontext.GetOpUserID(ctx), mcontext.GetOpUserPlatform(ctx), mcontext.GetConnID(ctx)}) + ctx = context.WithoutCancel(ctx) ctx, cancel := context.WithTimeout(ctx, time.Second*time.Duration(5)) defer cancel() n := sdkws.NotificationElem{Detail: jsonutil.StructToJsonString(m)} @@ -337,7 +370,9 @@ func (s *NotificationSender) send(ctx context.Context, sendID, recvID string, co } func (s *NotificationSender) NotificationWithSessionType(ctx context.Context, sendID, recvID string, contentType, sessionType int32, m proto.Message, opts ...NotificationOptions) { - s.queue.Push(func() { s.send(ctx, sendID, recvID, contentType, sessionType, m, opts...) 
}) + if err := s.queue.Push(func() { s.send(ctx, sendID, recvID, contentType, sessionType, m, opts...) }); err != nil { + log.ZWarn(ctx, "Push to queue failed", err, "sendID", sendID, "recvID", recvID, "msg", jsonutil.StructToJsonString(m)) + } } func (s *NotificationSender) Notification(ctx context.Context, sendID, recvID string, contentType int32, m proto.Message, opts ...NotificationOptions) { diff --git a/pkg/rpcclient/user.go b/pkg/rpcclient/user.go index eabe77b942..bdc1a2e012 100644 --- a/pkg/rpcclient/user.go +++ b/pkg/rpcclient/user.go @@ -109,12 +109,12 @@ func (u *UserRpcClient) GetUsersInfoMap(ctx context.Context, userIDs []string) ( func (u *UserRpcClient) GetPublicUserInfos( ctx context.Context, userIDs []string, - complete bool, ) ([]*sdkws.PublicUserInfo, error) { users, err := u.GetUsersInfo(ctx, userIDs) if err != nil { return nil, err } + return datautil.Slice(users, func(e *sdkws.UserInfo) *sdkws.PublicUserInfo { return &sdkws.PublicUserInfo{ UserID: e.UserID, @@ -127,10 +127,11 @@ func (u *UserRpcClient) GetPublicUserInfos( // GetPublicUserInfo retrieves public information for a single user based on the provided user ID. func (u *UserRpcClient) GetPublicUserInfo(ctx context.Context, userID string) (*sdkws.PublicUserInfo, error) { - users, err := u.GetPublicUserInfos(ctx, []string{userID}, true) + users, err := u.GetPublicUserInfos(ctx, []string{userID}) if err != nil { return nil, err } + return users[0], nil } @@ -138,12 +139,12 @@ func (u *UserRpcClient) GetPublicUserInfo(ctx context.Context, userID string) (* func (u *UserRpcClient) GetPublicUserInfoMap( ctx context.Context, userIDs []string, - complete bool, ) (map[string]*sdkws.PublicUserInfo, error) { - users, err := u.GetPublicUserInfos(ctx, userIDs, complete) + users, err := u.GetPublicUserInfos(ctx, userIDs) if err != nil { return nil, err } + return datautil.SliceToMap(users, func(e *sdkws.PublicUserInfo) string { return e.UserID }), nil @@ -169,6 +170,15 @@ func (u *UserRpcClient) Access(ctx context.Context, ownerUserID string) error { return authverify.CheckAccessV3(ctx, ownerUserID, u.imAdminUserID) } +// GetAllUserID retrieves all user IDs with pagination options. +func (u *UserRpcClient) GetAllUserID(ctx context.Context, pageNumber, showNumber int32) (*user.GetAllUserIDResp, error) { + resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}}) + if err != nil { + return nil, err + } + return resp, nil +} + // GetAllUserIDs retrieves all user IDs with pagination options. 
func (u *UserRpcClient) GetAllUserIDs(ctx context.Context, pageNumber, showNumber int32) ([]string, error) { resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}}) @@ -215,3 +225,7 @@ func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string } return resp[0].PlatformIDs, nil } + +func (u *UserRpcClient) GetAllOnlineUsers(ctx context.Context, cursor uint64) (*user.GetAllOnlineUsersResp, error) { + return u.Client.GetAllOnlineUsers(ctx, &user.GetAllOnlineUsersReq{Cursor: cursor}) +} diff --git a/pkg/util/conversationutil/conversationutil.go b/pkg/util/conversationutil/conversationutil.go index 5683d8df89..f0a44ab1e1 100644 --- a/pkg/util/conversationutil/conversationutil.go +++ b/pkg/util/conversationutil/conversationutil.go @@ -19,6 +19,14 @@ func GenGroupConversationID(groupID string) string { return "sg_" + groupID } +func IsGroupConversationID(conversationID string) bool { + return strings.HasPrefix(conversationID, "sg_") +} + +func IsNotificationConversationID(conversationID string) bool { + return strings.HasPrefix(conversationID, "n_") +} + func GenConversationUniqueKeyForSingle(sendID, recvID string) string { l := []string{sendID, recvID} sort.Strings(l) diff --git a/scripts/create-topic.sh b/scripts/create-topic.sh index 206075fb83..bbc739287f 100755 --- a/scripts/create-topic.sh +++ b/scripts/create-topic.sh @@ -35,7 +35,7 @@ done echo "Kafka is ready. Creating topics..." -topics=("toRedis" "toMongo" "toPush") +topics=("toRedis" "toMongo" "toPush" "toOfflinePush") partitions=8 replicationFactor=1 diff --git a/scripts/githooks/commit-msg.sh b/scripts/githooks/commit-msg.sh deleted file mode 100644 index d2d96645bc..0000000000 --- a/scripts/githooks/commit-msg.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env bash -# Copyright Β© 2023 OpenIMSDK. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# ============================================================================== -# -# Store this file as .git/hooks/commit-msg in your repository in order to -# enforce checking for proper commit message format before actual commits. -# You may need to make the scripts executable by 'chmod +x .git/hooks/commit-msg'. - -# commit-msg use go-gitlint tool, install go-gitlint via `go get github.com/llorllale/go-gitlint/cmd/go-gitlint` -# go-gitlint --msg-file="$1" - -# An example hook scripts to check the commit log message. -# Called by "git commit" with one argument, the name of the file -# that has the commit message. The hook should exit with non-zero -# status after issuing an appropriate message if it wants to stop the -# commit. The hook is allowed to edit the commit message file. 
- -YELLOW="\e[93m" -GREEN="\e[32m" -RED="\e[31m" -ENDCOLOR="\e[0m" - -printMessage() { - printf "${YELLOW}OpenIM : $1${ENDCOLOR}\n" -} - -printSuccess() { - printf "${GREEN}OpenIM : $1${ENDCOLOR}\n" -} - -printError() { - printf "${RED}OpenIM : $1${ENDCOLOR}\n" -} - -printMessage "Running the OpenIM commit-msg hook." - -# This example catches duplicate Signed-off-by lines. - -test "" = "$(grep '^Signed-off-by: ' "$1" | -sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { -echo >&2 Duplicate Signed-off-by lines. -exit 1 -} - -# TODO: go-gitlint dir set -OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. -GITLINT_DIR="$OPENIM_ROOT/_output/tools/go-gitlint" - -$GITLINT_DIR \ ---msg-file=$1 \ ---subject-regex="^(build|chore|ci|docs|feat|feature|fix|perf|refactor|revert|style|bot|test)(.*)?:\s?.*" \ ---subject-maxlen=150 \ ---subject-minlen=10 \ ---body-regex=".*" \ ---max-parents=1 - -if [ $? -ne 0 ] -then -if ! command -v $GITLINT_DIR &>/dev/null; then - printError "$GITLINT_DIR not found. Please run 'make tools' OR 'make tools.verify.go-gitlint' make verto install it." -fi -printError "Please fix your commit message to match kubecub coding standards" -printError "https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694#file-githook-md" -exit 1 -fi - -### Add Sign-off-by line to the end of the commit message -# Get local git config -NAME=$(git config user.name) -EMAIL=$(git config user.email) - -# Check if the commit message contains a sign-off line -grep -qs "^Signed-off-by: " "$1" -SIGNED_OFF_BY_EXISTS=$? - -# Add "Signed-off-by" line if it doesn't exist -if [ $SIGNED_OFF_BY_EXISTS -ne 0 ]; then -echo -e "\nSigned-off-by: $NAME <$EMAIL>" >> "$1" -fi \ No newline at end of file diff --git a/scripts/githooks/pre-commit.sh b/scripts/githooks/pre-commit.sh deleted file mode 100644 index d8396b5605..0000000000 --- a/scripts/githooks/pre-commit.sh +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env bash -# Copyright Β© 2023 OpenIMSDK. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# ============================================================================== -# This is a pre-commit hook that ensures attempts to commit files that are -# are larger than $limit to your _local_ repo fail, with a helpful error message. 
- -# You can override the default limit of 2MB by supplying the environment variable: -# GIT_FILE_SIZE_LIMIT=50000000 git commit -m "test: this commit is allowed file sizes up to 50MB" -# -# ============================================================================== -# - -LC_ALL=C - -local_branch="$(git rev-parse --abbrev-ref HEAD)" -valid_branch_regex="^(main|master|develop|release(-[a-zA-Z0-9._-]+)?)$|(feature|feat|openim|hotfix|test|bug|bot|refactor|revert|ci|cicd|style|)\/[a-z0-9._-]+$|^HEAD$" - -YELLOW="\e[93m" -GREEN="\e[32m" -RED="\e[31m" -ENDCOLOR="\e[0m" - -printMessage() { - printf "${YELLOW}openim : $1${ENDCOLOR}\n" -} - -printSuccess() { - printf "${GREEN}openim : $1${ENDCOLOR}\n" -} - -printError() { - printf "${RED}openim : $1${ENDCOLOR}\n" -} - -printMessage "Running local openim pre-commit hook." - -# flutter format . -# https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694#file-githook-md -# TODO! GIT_FILE_SIZE_LIMIT=50000000 git commit -m "test: this commit is allowed file sizes up to 50MB" -# Maximum file size limit in bytes -limit=${GIT_FILE_SIZE_LIMIT:-2000000} # Default 2MB -limitInMB=$(( $limit / 1000000 )) - -function file_too_large(){ - filename=$0 - filesize=$(( $1 / 2**20 )) - - cat < /dev/null 2>&1 -then - against=HEAD -else - against="$empty_tree" -fi - -# Set split so that for loop below can handle spaces in file names by splitting on line breaks -IFS=' -' - -shouldFail=false -for file in $( git diff-index --cached --name-only $against ); do - file_size=$(([ ! -f $file ] && echo 0) || (ls -la $file | awk '{ print $5 }')) - if [ "$file_size" -gt "$limit" ]; then - printError "File $file is $(( $file_size / 10**6 )) MB, which is larger than our configured limit of $limitInMB MB" - shouldFail=true - fi -done - -if $shouldFail -then - printMessage "If you really need to commit this file, you can override the size limit by setting the GIT_FILE_SIZE_LIMIT environment variable, e.g. GIT_FILE_SIZE_LIMIT=42000000 for 42MB. Or, commit with the --no-verify switch to skip the check entirely." - printError "Commit aborted" - exit 1; -fi - -if [[ ! $local_branch =~ $valid_branch_regex ]] -then - printError "There is something wrong with your branch name. Branch names in this project must adhere to this contract: $valid_branch_regex. -Your commit will be rejected. You should rename your branch to a valid name(feat/name OR fix/name) and try again." - printError "For more on this, read on: https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694" - exit 1 -fi \ No newline at end of file diff --git a/scripts/githooks/pre-push.sh b/scripts/githooks/pre-push.sh deleted file mode 100644 index 9bd9389158..0000000000 --- a/scripts/githooks/pre-push.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env bash -# Copyright Β© 2023 OpenIMSDK. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# ============================================================================== -# - -YELLOW="\e[93m" -GREEN="\e[32m" -RED="\e[31m" -ENDCOLOR="\e[0m" - -local_branch="$(git rev-parse --abbrev-ref HEAD)" -valid_branch_regex="^(main|master|develop|release(-[a-zA-Z0-9._-]+)?)$|(feature|feat|openim|hotfix|test|bug|ci|cicd|style|)\/[a-z0-9._-]+$|^HEAD$" - -printMessage() { - printf "${YELLOW}OpenIM : $1${ENDCOLOR}\n" -} - -printSuccess() { - printf "${GREEN}OpenIM : $1${ENDCOLOR}\n" -} - -printError() { - printf "${RED}OpenIM : $1${ENDCOLOR}\n" -} - -printMessage "Running local OpenIM pre-push hook." - -if [[ $(git status --porcelain) ]]; then - printError "This scripts needs to run against committed code only. Please commit or stash you changes." - exit 1 -fi - -COLOR_SUFFIX="\033[0m" - -BLACK_PREFIX="\033[30m" -RED_PREFIX="\033[31m" -GREEN_PREFIX="\033[32m" -BACKGROUND_GREEN="\033[33m" -BLUE_PREFIX="\033[34m" -PURPLE_PREFIX="\033[35m" -SKY_BLUE_PREFIX="\033[36m" -WHITE_PREFIX="\033[37m" -BOLD_PREFIX="\033[1m" -UNDERLINE_PREFIX="\033[4m" -ITALIC_PREFIX="\033[3m" - -# Function to print colored text -print_color() { - local text=$1 - local color=$2 - echo -e "${color}${text}${COLOR_SUFFIX}" -} - -# Function to print section separator -print_separator() { - print_color "==========================================================" ${PURPLE_PREFIX} -} - -# Get current time -time=$(date +"%Y-%m-%d %H:%M:%S") - -# Print section separator -print_separator - -# Print time of submission -print_color "PTIME: ${time}" "${BOLD_PREFIX}${CYAN_PREFIX}" -echo "" -author=$(git config user.name) -repository=$(basename -s .git $(git config --get remote.origin.url)) - -# Print additional information if needed -print_color "Repository: ${repository}" "${BLUE_PREFIX}" -echo "" - -print_color "Author: ${author}" "${PURPLE_PREFIX}" - -# Print section separator -print_separator - -file_list=$(git diff --name-status HEAD @{u}) -added_files=$(grep -c '^A' <<< "$file_list") -modified_files=$(grep -c '^M' <<< "$file_list") -deleted_files=$(grep -c '^D' <<< "$file_list") - -print_color "Added Files: ${added_files}" "${BACKGROUND_GREEN}" -print_color "Modified Files: ${modified_files}" "${BACKGROUND_GREEN}" -print_color "Deleted Files: ${deleted_files}" "${BACKGROUND_GREEN}" - -if [[ ! $local_branch =~ $valid_branch_regex ]] -then - printError "There is something wrong with your branch name. Branch names in this project must adhere to this contract: $valid_branch_regex. -Your commit will be rejected. You should rename your branch to a valid name(feat/name OR fix/name) and try again." - printError "For more on this, read on: https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694" - exit 1 -fi - -# -#printMessage "Running the Flutter analyzer" -#flutter analyze -# -#if [ $? 
-ne 0 ]; then -# printError "Flutter analyzer error" -# exit 1 -#fi -# -#printMessage "Finished running the Flutter analyzer" diff --git a/start-config.yml b/start-config.yml index 21436d7a9a..1231b5d0d4 100644 --- a/start-config.yml +++ b/start-config.yml @@ -3,8 +3,8 @@ serviceBinaries: openim-crontask: 1 openim-rpc-user: 1 openim-msggateway: 1 - openim-push: 1 - openim-msgtransfer: 4 + openim-push: 8 + openim-msgtransfer: 8 openim-rpc-conversation: 1 openim-rpc-auth: 1 openim-rpc-group: 1 diff --git a/test/e2e/api/token/token.go b/test/e2e/api/token/token.go index 908f9b8a0f..c862dc6db4 100644 --- a/test/e2e/api/token/token.go +++ b/test/e2e/api/token/token.go @@ -53,8 +53,7 @@ type User struct { // UserRegisterRequest represents a request to register a user. type UserRegisterRequest struct { - Secret string `json:"secret"` - Users []User `json:"users"` + Users []User `json:"users"` } /* func main() { @@ -109,8 +108,7 @@ func RegisterUser(token, userID, nickname, faceURL string) error { FaceURL: faceURL, } reqBody := UserRegisterRequest{ - Secret: SecretKey, - Users: []User{user}, + Users: []User{user}, } reqBytes, err := json.Marshal(reqBody) if err != nil { diff --git a/tools/check-component/main.go b/tools/check-component/main.go index 5fa84ac36f..4f4c08c16a 100644 --- a/tools/check-component/main.go +++ b/tools/check-component/main.go @@ -18,6 +18,12 @@ import ( "context" "flag" "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/cmd" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/tools/db/mongoutil" @@ -27,11 +33,6 @@ import ( "github.com/openimsdk/tools/mq/kafka" "github.com/openimsdk/tools/s3/minio" "github.com/openimsdk/tools/system/program" - "io/ioutil" - "log" - "os" - "path/filepath" - "time" ) const maxRetry = 180 @@ -65,7 +66,7 @@ func CheckMinIO(ctx context.Context, config *config.Minio) error { } func CheckKafka(ctx context.Context, conf *config.Kafka) error { - return kafka.Check(ctx, conf.Build(), []string{conf.ToMongoTopic, conf.ToRedisTopic, conf.ToPushTopic}) + return kafka.Check(ctx, conf.Build(), []string{conf.ToMongoTopic, conf.ToRedisTopic, conf.ToPushTopic, conf.ToOfflinePushTopic}) } func initConfig(configDir string) (*config.Mongo, *config.Redis, *config.Kafka, *config.Minio, *config.Discovery, error) { diff --git a/tools/url2im/pkg/api.go b/tools/url2im/pkg/api.go index 5bf48c4ea5..0ef8c1db77 100644 --- a/tools/url2im/pkg/api.go +++ b/tools/url2im/pkg/api.go @@ -23,7 +23,6 @@ import ( "net/http" "github.com/openimsdk/protocol/auth" - "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/third" "github.com/openimsdk/tools/errs" ) @@ -88,14 +87,13 @@ func (a *Api) apiPost(ctx context.Context, path string, req any, resp any) error return nil } -func (a *Api) GetToken(ctx context.Context) (string, error) { - req := auth.UserTokenReq{ - UserID: a.UserID, - Secret: a.Secret, - PlatformID: constant.AdminPlatformID, - } - var resp auth.UserTokenResp - if err := a.apiPost(ctx, "/auth/user_token", &req, &resp); err != nil { +func (a *Api) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + UserID: a.UserID, + Secret: a.Secret, + } + var resp auth.GetAdminTokenResp + if err := a.apiPost(ctx, "/auth/get_admin_token", &req, &resp); err != nil { return "", err } return resp.Token, nil diff --git a/tools/url2im/pkg/manage.go b/tools/url2im/pkg/manage.go index 5e1626da9f..9dc1de8010 100644 --- a/tools/url2im/pkg/manage.go +++ 
b/tools/url2im/pkg/manage.go @@ -21,7 +21,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/openimsdk/tools/errs" "io" "log" "net/http" @@ -34,6 +33,8 @@ import ( "sync/atomic" "time" + "github.com/openimsdk/tools/errs" + "github.com/openimsdk/protocol/third" ) @@ -95,7 +96,7 @@ func (m *Manage) Run() error { } var err error ctx := context.WithValue(m.ctx, "operationID", fmt.Sprintf("%s_init", m.prefix)) - m.api.Token, err = m.api.GetToken(ctx) + m.api.Token, err = m.api.GetAdminToken(ctx) if err != nil { return err } @@ -234,7 +235,7 @@ func (m *Manage) RunTask(ctx context.Context, task Task) (string, error) { } for i, currentPartSize := range part.PartSizes { md5Reader := NewMd5Reader(io.LimitReader(reader, currentPartSize)) - if m.doPut(ctx, m.api.Client, initiateMultipartUploadResp.Upload.Sign, uploadParts[i], md5Reader, currentPartSize); err != nil { + if err := m.doPut(ctx, m.api.Client, initiateMultipartUploadResp.Upload.Sign, uploadParts[i], md5Reader, currentPartSize); err != nil { return "", err } if md5val := md5Reader.Md5(); md5val != part.PartMd5s[i] { diff --git a/version/version b/version/version index 0be1fc7d24..aaaff91926 100644 --- a/version/version +++ b/version/version @@ -1 +1 @@ -3.8.0 \ No newline at end of file +3.8.1 \ No newline at end of file
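The three sketches below are editorial additions, not part of the patch: each is a small, self-contained Go program illustrating a pattern the diff introduces, written with invented names rather than the repository's types.

The new slotLRU.GetBatch groups the requested keys by hash slot and delegates each group to that slot's own GetBatch, merging the per-slot maps into a single result; judging from its use in getUserOnlinePlatformBatch (whose callback parameter is named missingUsers), the per-slot lookup serves cached keys locally and hands only the misses to the fetch callback in one call. A rough sketch of that shape, with a plain mutex-guarded map standing in for the LRU — slotBatcher, slot, and newSlotBatcher are invented for illustration:

package main

import (
	"fmt"
	"hash/fnv"
	"sync"
)

// slotBatcher sketches the partition-then-merge batch lookup: keys are hashed
// into slots, each slot resolves its own keys, and the results are merged.
type slotBatcher struct {
	n     uint64
	slots []*slot
}

type slot struct {
	mu    sync.Mutex
	cache map[string][]int32
}

func newSlotBatcher(n uint64) *slotBatcher {
	b := &slotBatcher{n: n, slots: make([]*slot, n)}
	for i := range b.slots {
		b.slots[i] = &slot{cache: make(map[string][]int32)}
	}
	return b
}

func (b *slotBatcher) index(key string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(key))
	return h.Sum64() % b.n
}

// GetBatch groups keys by slot and lets each slot fetch its misses in one call.
func (b *slotBatcher) GetBatch(keys []string, fetch func(missing []string) (map[string][]int32, error)) (map[string][]int32, error) {
	bySlot := make(map[uint64][]string)
	for _, k := range keys {
		i := b.index(k)
		bySlot[i] = append(bySlot[i], k)
	}
	out := make(map[string][]int32, len(keys))
	for i, slotKeys := range bySlot {
		got, err := b.slots[i].getBatch(slotKeys, fetch)
		if err != nil {
			return nil, err
		}
		for k, v := range got {
			out[k] = v
		}
	}
	return out, nil
}

// getBatch serves hits from the slot's cache and fetches the remaining keys together.
func (s *slot) getBatch(keys []string, fetch func(missing []string) (map[string][]int32, error)) (map[string][]int32, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	out := make(map[string][]int32, len(keys))
	var missing []string
	for _, k := range keys {
		if v, ok := s.cache[k]; ok {
			out[k] = v
		} else {
			missing = append(missing, k)
		}
	}
	if len(missing) == 0 {
		return out, nil
	}
	fetched, err := fetch(missing)
	if err != nil {
		return nil, err
	}
	for k, v := range fetched {
		s.cache[k] = v
		out[k] = v
	}
	return out, nil
}

func main() {
	b := newSlotBatcher(8)
	online, _ := b.GetBatch([]string{"u1", "u2", "u3"}, func(missing []string) (map[string][]int32, error) {
		m := make(map[string][]int32, len(missing))
		for _, id := range missing {
			m[id] = []int32{1} // pretend every missing user is online on platform 1
		}
		return m, nil
	})
	fmt.Println(online)
}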
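In pkg/msgprocessor/conversation.go, the patch makes notification conversation IDs direction-independent: the NotificationChatType branches now sort the pair [SendID, RecvID] before joining, the same normalization already applied to single-chat keys, so (A, B) and (B, A) resolve to one conversation. A minimal sketch of that pattern — notificationConversationID is an illustrative name, not a function in the codebase:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// notificationConversationID sorts the two participant IDs so that both
// message directions map to the same conversation, then joins them under
// the "n_" prefix used for notification conversations.
func notificationConversationID(sendID, recvID string) string {
	ids := []string{sendID, recvID}
	sort.Strings(ids)
	return "n_" + strings.Join(ids, "_")
}

func main() {
	// Both directions yield the same ID.
	fmt.Println(notificationConversationID("userB", "userA")) // n_userA_userB
	fmt.Println(notificationConversationID("userA", "userB")) // n_userA_userB
}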
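initUsersOnlineStatus in pkg/rpccache/online.go walks GetAllOnlineUsers with a cursor, stopping once NextCursor comes back as 0, and wraps each page fetch in a bounded retry (5 attempts, 5-second interval) before giving up. The standalone sketch below reproduces that control flow with a fake fetch in place of the RPC; page and walkAllPages are invented names:

package main

import (
	"errors"
	"fmt"
	"time"
)

// page is a stand-in for a cursor-paginated response: a batch of user IDs
// plus the cursor for the next call (0 means the walk is finished).
type page struct {
	UserIDs    []string
	NextCursor uint64
}

// walkAllPages follows the cursor until it returns to 0, retrying each page a
// bounded number of times before giving up.
func walkAllPages(fetch func(cursor uint64) (*page, error), maxTries int, retryInterval time.Duration) ([]string, error) {
	var (
		all    []string
		cursor uint64
		resp   *page
	)
	for resp == nil || resp.NextCursor != 0 {
		var err error
		for i := 0; i < maxTries; i++ {
			resp, err = fetch(cursor)
			if err == nil {
				break
			}
			time.Sleep(retryInterval)
		}
		if err != nil {
			return nil, err
		}
		all = append(all, resp.UserIDs...)
		cursor = resp.NextCursor
	}
	return all, nil
}

func main() {
	pages := map[uint64]*page{
		0:  {UserIDs: []string{"u1", "u2"}, NextCursor: 10},
		10: {UserIDs: []string{"u3"}, NextCursor: 0},
	}
	calls := 0
	got, err := walkAllPages(func(cursor uint64) (*page, error) {
		calls++
		if calls == 1 {
			return nil, errors.New("transient error") // first attempt fails, the retry succeeds
		}
		return pages[cursor], nil
	}, 3, time.Millisecond)
	fmt.Println(got, err) // [u1 u2 u3] <nil>
}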