diff --git a/.github/workflows/composite/docker-build/action.yml b/.github/workflows/composite/docker-build/action.yml
new file mode 100644
index 000000000..1765044f9
--- /dev/null
+++ b/.github/workflows/composite/docker-build/action.yml
@@ -0,0 +1,86 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Source: https://github.com/intel/ai-containers/blob/main/.github/action.yml
+
+---
+name: Build Container Group
+description: Given the inputs found below, build all containers found in a docker-compose.yaml file for a given configuration
+author: tyler.titsworth@intel.com
+inputs:
+  group_dir:
+    description: Directory with docker-compose.yaml to build
+    required: true
+    type: string
+  env_overrides:
+    description: Bash Env Variable Overrides in `KEY=VAL && KEY2=VAL2` format
+    required: false
+    type: string
+  no-push:
+    description: Do not push to Registry
+    required: false
+    default: 'false'
+    type: boolean
+  registry:
+    description: Container Registry URL
+    required: false
+    default: 'opea-project'
+    type: string
+outputs:
+  container-group:
+    description: "Container Group"
+    value: ${{ steps.container-output.outputs.group }}
+runs:
+  using: composite
+  steps:
+    # This step generates a random number to use as the project number
+    # which can help avoid collisions with parallel builds on the same system
+    - name: Generate Project Number
+      shell: bash
+      run: echo "project-number=$(shuf -i 0-10000 -n1)" >> $GITHUB_ENV
+    - name: Build Containers
+      shell: bash
+      run: |
+        REGISTRY=${{ inputs.registry }} \
+        COMPOSE_PROJECT_NAME=${{ env.project-number }} \
+        ${{ inputs.env_overrides }} docker compose -p ${{ env.project-number }} up --build --force-recreate --always-recreate-deps --no-start
+      working-directory: ${{ inputs.group_dir }}
+    - name: Print Containers
+      id: container-output
+      shell: bash
+      run: |
+        mkdir matrix
+        images=$(REGISTRY=${{ inputs.registry }} \
+        COMPOSE_PROJECT_NAME=${{ env.project-number }} \
+        ${{ inputs.env_overrides }} docker compose -p ${{ env.project-number }} images --format json)
+        for image in $(echo $images | jq -r --arg registry "${{ inputs.registry }}" '.[] | select(.Repository | contains($registry)) | .Tag'); do
+          echo "$image" > matrix/$image.txt
+        done
+        echo "group=${{ inputs.group_dir }}" | tr '/' '_' >> $GITHUB_OUTPUT
+      working-directory: ${{ inputs.group_dir }}
+    - uses: actions/upload-artifact@v4
+      with:
+        name: ${{ env.project-number }}-${{ steps.container-output.outputs.group }}
+        path: ${{ inputs.group_dir }}/matrix/*
+        retention-days: 1
+        overwrite: true
+    - name: Push Containers
+      shell: bash
+      if: ${{ !fromJson(inputs.no-push) }}
+      run: |
+        REGISTRY=${{ inputs.registry }} \
+        COMPOSE_PROJECT_NAME=${{ env.project-number }} \
+        ${{ inputs.env_overrides }} docker compose -p ${{ env.project-number }} push
+      working-directory: ${{ inputs.group_dir }}
+    - name: Un-Tag Containers
+      if: ${{ always() }}
+      shell: bash
+      run: |
+        REGISTRY=${{ inputs.registry }} \
+        COMPOSE_PROJECT_NAME=${{ env.project-number }} \
+        ${{ inputs.env_overrides }} docker compose -p ${{ env.project-number }} down --rmi all
+      working-directory: ${{ inputs.group_dir }}
+    - name: Remove Containers
+      if: ${{ always() }}
+      shell: bash
+      run: docker system prune --force
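Outside of Actions, the five steps of this composite action reduce to roughly the following shell sequence. This is a minimal sketch, assuming a directory containing a `docker-compose.yaml`; the registry value is an illustrative stand-in for the `registry` input:

```bash
# Pick a random project name so parallel builds on one host don't collide
# (this is what the Generate Project Number step does via $GITHUB_ENV).
project=$(shuf -i 0-10000 -n1)

# Build every service in the group's docker-compose.yaml without starting it.
REGISTRY=opea-project COMPOSE_PROJECT_NAME=$project \
  docker compose -p "$project" up --build --force-recreate --always-recreate-deps --no-start

# List the tags that were produced, then push and tear everything down.
REGISTRY=opea-project COMPOSE_PROJECT_NAME=$project docker compose -p "$project" images --format json
REGISTRY=opea-project COMPOSE_PROJECT_NAME=$project docker compose -p "$project" push
REGISTRY=opea-project COMPOSE_PROJECT_NAME=$project docker compose -p "$project" down --rmi all
docker system prune --force
```
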
diff --git a/.github/workflows/composite/scan/action.yml b/.github/workflows/composite/scan/action.yml
new file mode 100644
index 000000000..704787d6c
--- /dev/null
+++ b/.github/workflows/composite/scan/action.yml
@@ -0,0 +1,26 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Source: https://github.com/intel/ai-containers/blob/main/.github/scan/action.yml
+
+name: 'Aqua Security Trivy'
+description: 'Scans container images for vulnerabilities with Trivy without building the image. For use behind firewalls.'
+author: 'tyler.titsworth@intel.com'
+inputs:
+  image-ref:
+    description: 'image reference (for backward compatibility)'
+    required: true
+  output:
+    description: 'writes results to a file with the specified file name'
+    required: true
+runs:
+  using: 'docker'
+  image: "docker://ghcr.io/aquasecurity/trivy"
+  entrypoint: trivy
+  args:
+    - '--timeout=30m'
+    - image
+    - '--format=sarif'
+    - '--no-progress'
+    - '--output=${{ inputs.output }}'
+    - ${{ inputs.image-ref }}
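Run by hand, the same scan is a single `docker run` of the Trivy image with the arguments this action passes. A sketch, assuming a locally built image named `example/image:tag` and that local images are reachable through the Docker socket:

```bash
# Mirrors the composite action's trivy invocation and writes SARIF to scan.sarif.
docker run --rm \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v "$PWD:/work" -w /work \
  ghcr.io/aquasecurity/trivy \
  --timeout=30m image --format=sarif --no-progress --output=scan.sarif example/image:tag
```
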
diff --git a/.github/workflows/container-ci.yml b/.github/workflows/container-ci.yml
index 0f93aa6ca..69ed7543b 100644
--- a/.github/workflows/container-ci.yml
+++ b/.github/workflows/container-ci.yml
@@ -1,20 +1,9 @@
-# Copyright (c) 2024 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
 name: Container Integration Tests
 on:
-  pull_request: null
+  pull_request
 permissions: read-all
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
diff --git a/.github/workflows/reuse-container-ci.yaml b/.github/workflows/reuse-container-ci.yaml
index 40c929762..665f26dfe 100644
--- a/.github/workflows/reuse-container-ci.yaml
+++ b/.github/workflows/reuse-container-ci.yaml
@@ -1,16 +1,5 @@
-# Copyright (c) 2024 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
 name: Container CI
 permissions: read-all
@@ -30,56 +19,16 @@ on:
         default: test-runner
         required: true
         type: string
-      no_build:
-        description: No build
-        default: false
-        required: true
-        type: boolean
-      no_push:
-        description: Do not push to Registry
-        required: true
-        default: false
-        type: boolean
-      no_start:
-        description: No Start
-        default: false
-        required: true
-        type: boolean
   workflow_call:
     inputs:
       group_dir:
         required: true
         type: string
-      no_push:
-        required: false
-        type: boolean
-      no_start:
-        required: false
-        type: boolean
 jobs:
-  ####################################################################################################
-  # Compose Build
-  ####################################################################################################
-  setup-build:
-    outputs:
-      matrix: ${{ steps.build-matrix.outputs.matrix }}
-    runs-on: ubuntu-latest # ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }}
-    steps:
-      - name: Harden Runner
-        uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
-        with:
-          egress-policy: audit
-      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
-      - name: Set Matrix
-        id: build-matrix
-        run: echo "matrix=$(jq -c . < ${{ inputs.group_dir }}/.actions.json)" >> $GITHUB_OUTPUT
-      - name: Print Inputs
-        if: ${{ inputs.env_overrides }}
-        run: echo "Overrides - ${{ inputs.env_overrides }}" >> $GITHUB_STEP_SUMMARY
+####################################################################################################
+# Compose Build
+####################################################################################################
   build-containers:
-    needs: [setup-build]
-    env: ${{ matrix }}
-    runs-on: ubuntu-latest # ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }}
+    runs-on: ubuntu-latest
    strategy:
-      matrix: ${{ fromJson(needs.setup-build.outputs.matrix) }}
      fail-fast: false
@@ -87,7 +36,6 @@
     outputs:
       group: ${{ steps.build-group.outputs.container-group }}
     steps:
       - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
-        if: ${{ !inputs.no_build }}
       - uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
         with:
           registry: ${{ secrets.REGISTRY }}
@@ -97,23 +45,20 @@
           username: ${{ secrets.REGISTRY_USER }}
           password: ${{ secrets.REGISTRY_TOKEN }}
       - name: Build Container Group
         if: ${{ !inputs.no_build }}
         id: build-group
-        uses: intel/ai-containers/.github@main
+        uses: ./.github/workflows/composite/docker-build
         with:
           group_dir: ${{ inputs.group_dir }}
           env_overrides: ${{ inputs.env_overrides || env.env_overrides || '' }}
           registry: ${{ secrets.REGISTRY }}
-          repo: ${{ secrets.REPO }}
-          no-push: ${{ inputs.no_push }}
-          no-start: ${{ inputs.no_start }}
-  ####################################################################################################
-  # Trivy Scan
-  ####################################################################################################
+####################################################################################################
+# Trivy Scan
+####################################################################################################
   setup-scan:
     needs: [build-containers]
     if: ${{ github.event_name == 'pull_request' }}
-    runs-on: ubuntu-latest # ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }}
+    runs-on: ubuntu-latest
     outputs:
       matrix: ${{ steps.scan-matrix.outputs.matrix }}
     steps:
@@ -128,7 +73,7 @@
   scan-containers:
     needs: [setup-scan]
     if: ${{ !inputs.no_build && github.event_name == 'pull_request' }}
-    runs-on: ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }}
+    runs-on: ubuntu-latest
     env:
       GROUP_DIR: ${{ inputs.group_dir }}
     strategy:
@@ -153,7 +98,7 @@
         env:
           GROUP_DIR: ${{ inputs.group_dir }}
       - name: Scan Container
-        uses: intel/ai-containers/.github/scan@main
+        uses: ./.github/workflows/composite/scan
         with:
          image-ref: ${{ secrets.REGISTRY }}/${{ env.group_dir }}:${{ matrix.container }}
          output: ${{ matrix.container }}-${{ env.group_dir }}-scan.sarif
@@ -165,48 +110,45 @@
           sarif_file: '${{ matrix.container }}-${{ env.group_dir }}-scan.sarif'
           category: '${{ matrix.container }}'
         continue-on-error: true
-  ####################################################################################################
-  # Generic Test Runner
-  ####################################################################################################
-  setup-test:
-    needs: [build-containers]
-    runs-on: ubuntu-latest # ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }}
-    outputs:
-      matrix: ${{ steps.test-matrix.outputs.matrix }}
-    steps:
-      - name: Harden Runner
-        uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
-        with:
-          egress-policy: audit
-      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
-      - name: Get Recipes
-        id: test-matrix
-        run: echo "matrix=$(find ${{ inputs.group_dir }} -type f -name 'tests.yaml' -exec dirname {} \; | jq -R -s -c 'split("\n")[:-1]')" >> $GITHUB_OUTPUT
-  test-containers:
-    needs: [setup-build, setup-test]
-    if: ${{ needs.setup-test.outputs.matrix != '[]' }}
-    runs-on: ${{ fromJson(needs.setup-build.outputs.matrix).runner_label }}
-    strategy:
-      matrix:
-        tests: ${{ fromJson(needs.setup-test.outputs.matrix) }}
-        experimental: [true]
-      fail-fast: false
-    steps:
-      - uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
-        with:
-          egress-policy: audit
-      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
-      - uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
-        with:
-          registry: ${{ secrets.REGISTRY }}
-          username: ${{ secrets.REGISTRY_USER }}
-          password: ${{ secrets.REGISTRY_TOKEN }}
-      - name: Test Container Group
-        uses: intel/ai-containers/test-runner@main
-        with:
-          cache_registry: ${{ secrets.CACHE_REGISTRY }}
-          recipe_dir: ${{ inputs.group_dir }}
-          registry: ${{ secrets.REGISTRY }}
-          repo: ${{ secrets.REPO }}
-          test_dir: ${{ matrix.tests }}
-          token: ${{ secrets.GITHUB_TOKEN }}
+####################################################################################################
+# Generic Test Runner
+####################################################################################################
+  # setup-test:
+  #   needs: [build-containers]
+  #   runs-on: ubuntu-latest
+  #   outputs:
+  #     matrix: ${{ steps.test-matrix.outputs.matrix }}
+  #   steps:
+  #     - name: Harden Runner
+  #       uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
+  #       with:
+  #         egress-policy: audit
+  #     - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+  #     - name: Get Recipes
+  #       id: test-matrix
+  #       run: echo "matrix=$(find ${{ inputs.group_dir }} -type f -name 'tests.yaml' -exec dirname {} \; | jq -R -s -c 'split("\n")[:-1]')" >> $GITHUB_OUTPUT
+  # test-containers:
+  #   needs: [setup-build, setup-test]
+  #   if: ${{ needs.setup-test.outputs.matrix != '[]' }}
+  #   runs-on: ${{ fromJson(needs.setup-build.outputs.matrix).runner_label }}
+  #   strategy:
+  #     matrix:
+  #       tests: ${{ fromJson(needs.setup-test.outputs.matrix) }}
+  #       experimental: [true]
+  #     fail-fast: false
+  #   steps:
+  #     - uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
+  #       with:
+  #         egress-policy: audit
+  #     - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+  #     - uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
+  #       with:
+  #         registry: ${{ secrets.REGISTRY }}
+  #         username: ${{ secrets.REGISTRY_USER }}
+  #         password: ${{ secrets.REGISTRY_TOKEN }}
+  #     - name: Test Container Group
+  #       uses: ./.github/workflows/composite/test
+  #       with:
+  #         registry: ${{ secrets.REGISTRY }}
+  #         test_dir: ${{ matrix.tests }}
+  #         token: ${{ secrets.GITHUB_TOKEN }}
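For reference, the matrix one-liner in the commented-out `setup-test` job emits a JSON array of directories, one per `tests.yaml` it finds. A sketch of what it produces, assuming a checkout of this repository with the test recipes still present:

```bash
# Prints e.g. ["ChatQnA/tests"] -- jq -R -s reads raw lines into one string,
# split("\n")[:-1] drops the trailing empty element, -c emits compact JSON.
find ChatQnA -type f -name 'tests.yaml' -exec dirname {} \; | jq -R -s -c 'split("\n")[:-1]'
```
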
diff --git a/AudioQnA/.actions.json b/AudioQnA/.actions.json
deleted file mode 100644
index 72b9b599d..000000000
--- a/AudioQnA/.actions.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "experimental": [true],
-  "runner_label": ["ubuntu-latest"]
-}
diff --git a/AudioQnA/docker/gaudi/docker_compose.yaml b/AudioQnA/docker/gaudi/docker-compose.yaml
similarity index 97%
rename from AudioQnA/docker/gaudi/docker_compose.yaml
rename to AudioQnA/docker/gaudi/docker-compose.yaml
index b7fb8426b..155c697f5 100644
--- a/AudioQnA/docker/gaudi/docker_compose.yaml
+++ b/AudioQnA/docker/gaudi/docker-compose.yaml
@@ -90,7 +90,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   audioqna-gaudi-backend-server:
-    image: opea/audioqna:latest
+    image: ${REGISTRY:-opea}/audioqna:${GITHUB_RUN_NUMBER:-latest}
     container_name: audioqna-gaudi-backend-server
     depends_on:
       - asr
diff --git a/AudioQnA/docker/xeon/docker_compose.yaml b/AudioQnA/docker/xeon/docker-compose.yaml
similarity index 97%
rename from AudioQnA/docker/xeon/docker_compose.yaml
rename to AudioQnA/docker/xeon/docker-compose.yaml
index b8966c719..846e46f4a 100644
--- a/AudioQnA/docker/xeon/docker_compose.yaml
+++ b/AudioQnA/docker/xeon/docker-compose.yaml
@@ -2,8 +2,6 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-version: "3.8"
-
 services:
   whisper-service:
     image: opea/whisper:latest
@@ -73,7 +71,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   audioqna-xeon-backend-server:
-    image: ${REGISTRY:-opea}/audioqna:${GITHUB_RUN_NUMBER:-latest}
     container_name: audioqna-xeon-backend-server
     depends_on:
      - asr
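The `${REGISTRY:-opea}/audioqna:${GITHUB_RUN_NUMBER:-latest}` references use standard Compose variable interpolation: CI sets `REGISTRY` and `GITHUB_RUN_NUMBER` to get uniquely tagged images, while local users fall back to `opea/...:latest`. A quick way to check the effective values (a sketch; the registry name is illustrative):

```bash
# With nothing set, the image resolves to the defaults:
docker compose config | grep 'image:'        # -> opea/audioqna:latest

# In CI, the overrides take effect:
REGISTRY=my.registry/opea GITHUB_RUN_NUMBER=123 \
  docker compose config | grep 'image:'      # -> my.registry/opea/audioqna:123
```
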
diff --git a/AudioQnA/tests/tests.yaml b/AudioQnA/tests/tests.yaml
deleted file mode 100644
index 7b0749c0d..000000000
--- a/AudioQnA/tests/tests.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# https://github.com/intel/ai-containers/tree/main/test-runner
-import-comps:
-  img: ${REGISTRY}/audioqna:${GITHUB_RUN_NUMBER:-0}
-  cmd: -c "from comps import AudioQnAGateway, MicroService, ServiceOrchestrator, ServiceType"
-  entrypoint: "python"
diff --git a/ChatQnA/.actions.json b/ChatQnA/.actions.json
deleted file mode 100644
index 72b9b599d..000000000
--- a/ChatQnA/.actions.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "experimental": [true],
-  "runner_label": ["ubuntu-latest"]
-}
diff --git a/ChatQnA/docker-compose.yaml b/ChatQnA/docker-compose.yaml
deleted file mode 100644
index 1fa9e7b04..000000000
--- a/ChatQnA/docker-compose.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# Container CI for ChatQnA
-services:
-  chatqna:
-    build:
-      args:
-        http_proxy: ${http_proxy}
-        https_proxy: ${https_proxy}
-        no_proxy: ""
-      context: ./docker
-      dockerfile: Dockerfile
-    image: ${REGISTRY:-opea}/chatqna:${GITHUB_RUN_NUMBER:-latest}
-  chatqna-ui:
-    build:
-      context: ./docker/ui
-      dockerfile: ./docker/Dockerfile
-    extends: chatqna
-    image: ${REGISTRY:-opea}/chatqna-ui:${GITHUB_RUN_NUMBER:-latest}
-  chatqna-conversation-ui:
-    build:
-      args:
-        BACKEND_SERVING_ENDPOINT: "http://${HOST_IP:-127.0.0.1}:8888/v1/chatqna"
-        DATAPREP_SERVICE_ENDPOINT: "http://${HOST_IP:-127.0.0.1}:6007/v1/dataprep"
-        DATAPREP_GET_FILE_ENDPOINT: "http://${HOST_IP:-127.0.0.1}:6008/v1/dataprep/get_file"
-      context: ./docker/ui
-      dockerfile: ./docker/Dockerfile.react
-    extends: chatqna
-    image: ${REGISTRY:-opea}/chatqna-conversation-ui:${GITHUB_RUN_NUMBER:-latest}
diff --git a/ChatQnA/docker/aipc/README.md b/ChatQnA/docker/aipc/README.md
index 06bac75ea..9f5a6fddd 100644
--- a/ChatQnA/docker/aipc/README.md
+++ b/ChatQnA/docker/aipc/README.md
@@ -44,14 +44,25 @@ docker build --no-cache -t opea/dataprep-redis:latest -f comps/dataprep/redis/la
 cd ..
 ```
 
-### 6. Build MegaService Docker Images
+### 6. Build MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `chatqna.py` Python script. Build MegaService Docker images via below command:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `chatqna.py` Python script. Build MegaService Docker image via below command:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/ChatQnA
-docker compose build chatqna chatqna-ui
+cd GenAIExamples/ChatQnA/docker
+docker build --no-cache -t opea/chatqna:latest -f Dockerfile .
+cd ../../..
+```
+
+### 7. Build UI Docker Image
+
+Build frontend Docker image via below command:
+
+```bash
+cd GenAIExamples/ChatQnA/docker/ui/
+docker build --no-cache -t opea/chatqna-ui:latest -f ./docker/Dockerfile .
+cd ../../../..
 ```
 
 Then run the command `docker images`, you will have the following 7 Docker Images:
diff --git a/ChatQnA/docker/gaudi/README.md b/ChatQnA/docker/gaudi/README.md
index b2bb2f0a5..2ef7b8e64 100644
--- a/ChatQnA/docker/gaudi/README.md
+++ b/ChatQnA/docker/gaudi/README.md
@@ -54,14 +54,25 @@ docker build --no-cache -f Dockerfile-hpu -t opea/tei-gaudi:latest .
 cd ../..
 ```
 
-### 8. Build MegaService Docker Images
+### 8. Build MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `chatqna.py` Python script. Build the MegaService Docker images using the command below:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `chatqna.py` Python script. Build the MegaService Docker image using the command below:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/ChatQnA
-docker compose build chatqna chatqna-ui
+cd GenAIExamples/ChatQnA/docker
+docker build --no-cache -t opea/chatqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+cd ../../..
+```
+
+### 9. Build UI Docker Image
+
+Construct the frontend Docker image using the command below:
+
+```bash
+cd GenAIExamples/ChatQnA/docker/ui/
+docker build --no-cache -t opea/chatqna-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
+cd ../../../..
 ```
 
 ### 10. Build Conversational React UI Docker Image (Optional)
diff --git a/ChatQnA/docker/gpu/README.md b/ChatQnA/docker/gpu/README.md
index 76a04a990..d00a7ce14 100644
--- a/ChatQnA/docker/gpu/README.md
+++ b/ChatQnA/docker/gpu/README.md
@@ -43,14 +43,25 @@ docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_pr
 docker build --no-cache -t opea/dataprep-redis:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/dataprep/redis/langchain/docker/Dockerfile .
 ```
 
-### 7. Build MegaService Docker Images
+### 7. Build MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `chatqna.py` Python script. Build the MegaService Docker images using the command below:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `chatqna.py` Python script. Build the MegaService Docker image using the command below:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/ChatQnA
-docker compose build chatqna chatqna-ui
+cd GenAIExamples/ChatQnA/docker
+docker build --no-cache -t opea/chatqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+cd ../../..
+```
+
+### 8. Build UI Docker Image
+
+Construct the frontend Docker image using the command below:
+
+```bash
+cd GenAIExamples/ChatQnA/docker/ui/
+docker build --no-cache -t opea/chatqna-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
+cd ../../../..
 ```
 
 Then run the command `docker images`, you will have the following 7 Docker Images:
diff --git a/ChatQnA/docker/xeon/README.md b/ChatQnA/docker/xeon/README.md
index eba791189..c74070bc0 100644
--- a/ChatQnA/docker/xeon/README.md
+++ b/ChatQnA/docker/xeon/README.md
@@ -97,20 +97,40 @@ docker build --no-cache -t opea/dataprep-redis:latest --build-arg https_proxy=$h
 cd ..
 ```
 
-### 6. Build MegaService Docker Images
+### 6. Build MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `chatqna.py` Python script. Build MegaService Docker images via below command:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `chatqna.py` Python script. Build MegaService Docker image via below command:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/ChatQnA
-docker compose build chatqna chatqna-ui
+cd GenAIExamples/ChatQnA/docker
+docker build --no-cache -t opea/chatqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+cd ../../..
 ```
 
-Alternatively, you can build the ChatQnA Conversational UI image:
+### 7. Build UI Docker Image
+
+Build frontend Docker image via below command:
+
+```bash
+cd GenAIExamples/ChatQnA/docker/ui/
+docker build --no-cache -t opea/chatqna-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
+cd ../../../..
+```
+
+### 8. Build Conversational React UI Docker Image (Optional)
+
+Build frontend Docker image that enables Conversational experience with ChatQnA megaservice via below command:
+
+**Export the value of the public IP address of your Xeon server to the `host_ip` environment variable**
 
 ```bash
-HOST_IP= docker compose build chatqna chatqna-conversation-ui
+cd GenAIExamples/ChatQnA/docker/ui/
+export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/chatqna"
+export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep"
+export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6008/v1/dataprep/get_file"
+docker build --no-cache -t opea/chatqna-conversation-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy --build-arg BACKEND_SERVICE_ENDPOINT=$BACKEND_SERVICE_ENDPOINT --build-arg DATAPREP_SERVICE_ENDPOINT=$DATAPREP_SERVICE_ENDPOINT --build-arg DATAPREP_GET_FILE_ENDPOINT=$DATAPREP_GET_FILE_ENDPOINT -f ./docker/Dockerfile.react .
+cd ../../../..
 ```
 
 Then run the command `docker images`, you will have the following 7 Docker Images:
@@ -121,7 +141,7 @@ Then run the command `docker images`, you will have the following 7 Docker Image
 4. `opea/reranking-tei:latest`
 5. `opea/llm-tgi:latest`
 6. `opea/chatqna:latest`
-7. `opea/chatqna-ui:latest` or `opea/chatqna-conversation-ui:latest`
+7. `opea/chatqna-ui:latest`
 
 ## 🚀 Start Microservices
 
@@ -129,12 +149,12 @@ Then run the command `docker images`, you will have the following 7 Docker Image
 
 Since the `docker_compose.yaml` will consume some environment variables, you need to setup them in advance as below.
 
-**Export the value of the public IP address of your Xeon server to the `HOST_IP` environment variable**
+**Export the value of the public IP address of your Xeon server to the `host_ip` environment variable**
 
 > Change the External_Public_IP below with the actual IPV4 value
 
 ```
-export HOST_IP="External_Public_IP"
+export host_ip="External_Public_IP"
 ```
 
 **Export the value of your Huggingface API token to the `your_hf_api_token` environment variable**
@@ -158,24 +178,24 @@ export https_proxy=${your_http_proxy}
 export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
 export RERANK_MODEL_ID="BAAI/bge-reranker-base"
 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
-export TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:6006"
-export TEI_RERANKING_ENDPOINT="http://${HOST_IP}:8808"
-export TGI_LLM_ENDPOINT="http://${HOST_IP}:9009"
-export REDIS_URL="redis://${HOST_IP}:6379"
+export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"
+export TEI_RERANKING_ENDPOINT="http://${host_ip}:8808"
+export TGI_LLM_ENDPOINT="http://${host_ip}:9009"
+export REDIS_URL="redis://${host_ip}:6379"
 export INDEX_NAME="rag-redis"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
-export MEGA_SERVICE_HOST_IP=${HOST_IP}
-export EMBEDDING_SERVICE_HOST_IP=${HOST_IP}
-export RETRIEVER_SERVICE_HOST_IP=${HOST_IP}
-export RERANK_SERVICE_HOST_IP=${HOST_IP}
-export LLM_SERVICE_HOST_IP=${HOST_IP}
-export BACKEND_SERVICE_ENDPOINT="http://${HOST_IP}:8888/v1/chatqna"
-export DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP}:6007/v1/dataprep"
-export DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP}:6008/v1/dataprep/get_file"
-export DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP}:6009/v1/dataprep/delete_file"
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export EMBEDDING_SERVICE_HOST_IP=${host_ip}
+export RETRIEVER_SERVICE_HOST_IP=${host_ip}
+export RERANK_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP=${host_ip}
+export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/chatqna"
+export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep"
+export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6008/v1/dataprep/get_file"
+export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:6009/v1/dataprep/delete_file"
 ```
 
-Note: Please replace with `HOST_IP` with you external IP address, do not use localhost.
+Note: Please replace `host_ip` with your external IP address, do not use localhost.
 
 ### Start all the services Docker Containers
 
@@ -191,7 +211,7 @@ docker compose -f docker_compose.yaml up -d
 1. TEI Embedding Service
 
 ```bash
-curl ${HOST_IP}:6006/embed \
+curl ${host_ip}:6006/embed \
   -X POST \
   -d '{"inputs":"What is Deep Learning?"}' \
   -H 'Content-Type: application/json'
@@ -200,7 +220,7 @@
 2. Embedding Microservice
 
 ```bash
-curl http://${HOST_IP}:6000/v1/embeddings\
+curl http://${host_ip}:6000/v1/embeddings\
   -X POST \
   -d '{"text":"hello"}' \
   -H 'Content-Type: application/json'
@@ -218,7 +238,7 @@
 ```python
 import random
 embedding = [random.uniform(-1, 1) for _ in range(768)]
 print(embedding)
 ```
 
 Then substitute your mock embedding vector for the `${your_embedding}` in the following cURL command:
 
 ```bash
-curl http://${HOST_IP}:7000/v1/retrieval \
+curl http://${host_ip}:7000/v1/retrieval \
   -X POST \
   -d '{"text":"What is the revenue of Nike in 2023?","embedding":"'"${your_embedding}"'"}' \
   -H 'Content-Type: application/json'
@@ -227,7 +247,7 @@
 4. TEI Reranking Service
 
 ```bash
-curl http://${HOST_IP}:8808/rerank \
+curl http://${host_ip}:8808/rerank \
   -X POST \
   -d '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}' \
   -H 'Content-Type: application/json'
@@ -236,7 +256,7 @@
 5. Reranking Microservice
 
 ```bash
-curl http://${HOST_IP}:8000/v1/reranking\
+curl http://${host_ip}:8000/v1/reranking\
   -X POST \
   -d '{"initial_query":"What is Deep Learning?", "retrieved_docs": [{"text":"Deep Learning is not..."}, {"text":"Deep learning is..."}]}' \
   -H 'Content-Type: application/json'
@@ -245,7 +265,7 @@
 6. TGI Service
 
 ```bash
-curl http://${HOST_IP}:9009/generate \
+curl http://${host_ip}:9009/generate \
   -X POST \
   -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \
   -H 'Content-Type: application/json'
@@ -254,7 +274,7 @@
 7. LLM Microservice
 
 ```bash
-curl http://${HOST_IP}:9000/v1/chat/completions\
+curl http://${host_ip}:9000/v1/chat/completions\
   -X POST \
   -d '{"query":"What is Deep Learning?","max_new_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"streaming":true}' \
   -H 'Content-Type: application/json'
@@ -263,7 +283,7 @@
 8. MegaService
 
 ```bash
-curl http://${HOST_IP}:8888/v1/chatqna -H "Content-Type: application/json" -d '{
+curl http://${host_ip}:8888/v1/chatqna -H "Content-Type: application/json" -d '{
      "messages": "What is the revenue of Nike in 2023?"
      }'
 ```
@@ -275,7 +295,7 @@
 If you want to update the default knowledge base, you can use the following commands:
 
 Update Knowledge Base via Local File Upload:
 
 ```bash
-curl -X POST "http://${HOST_IP}:6007/v1/dataprep" \
+curl -X POST "http://${host_ip}:6007/v1/dataprep" \
   -H "Content-Type: multipart/form-data" \
   -F "files=@./nke-10k-2023.pdf"
 ```
@@ -285,7 +305,7 @@
 This command updates a knowledge base by uploading a local file for processing.
 
 Add Knowledge Base via HTTP Links:
 
 ```bash
-curl -X POST "http://${HOST_IP}:6007/v1/dataprep" \
+curl -X POST "http://${host_ip}:6007/v1/dataprep" \
   -H "Content-Type: multipart/form-data" \
   -F 'link_list=["https://opea.dev"]'
 ```
@@ -295,7 +315,7 @@
 This command updates a knowledge base by submitting a list of HTTP links for processing.
 
 Also, you are able to get the file list that you uploaded:
 
 ```bash
-curl -X POST "http://${HOST_IP}:6008/v1/dataprep/get_file" \
+curl -X POST "http://${host_ip}:6008/v1/dataprep/get_file" \
   -H "Content-Type: application/json"
 ```
@@ -303,17 +323,17 @@
 To delete the file/link you uploaded:
 
 ```bash
 # delete link
-curl -X POST "http://${HOST_IP}:6009/v1/dataprep/delete_file" \
+curl -X POST "http://${host_ip}:6009/v1/dataprep/delete_file" \
   -d '{"file_path": "https://opea.dev"}' \
   -H "Content-Type: application/json"
 
 # delete file
-curl -X POST "http://${HOST_IP}:6009/v1/dataprep/delete_file" \
+curl -X POST "http://${host_ip}:6009/v1/dataprep/delete_file" \
   -d '{"file_path": "nke-10k-2023.pdf"}' \
   -H "Content-Type: application/json"
 
 # delete all uploaded files and links
-curl -X POST "http://${HOST_IP}:6009/v1/dataprep/delete_file" \
+curl -X POST "http://${host_ip}:6009/v1/dataprep/delete_file" \
   -d '{"file_path": "all"}' \
   -H "Content-Type: application/json"
 ```
@@ -339,7 +359,7 @@ export LANGCHAIN_API_KEY=ls_...
 
 ## 🚀 Launch the UI
 
-To access the frontend, open the following URL in your browser: http://{HOST_IP}:5173. By default, the UI runs on port 5173 internally. If you prefer to use a different host port to access the frontend, you can modify the port mapping in the `docker_compose.yaml` file as shown below:
+To access the frontend, open the following URL in your browser: http://{host_ip}:5173. By default, the UI runs on port 5173 internally. If you prefer to use a different host port to access the frontend, you can modify the port mapping in the `docker_compose.yaml` file as shown below:
 
 ```yaml
   chaqna-gaudi-ui-server:
     image: opea/chatqna-ui:latest
     ...
@@ -351,7 +371,7 @@
 
 ## 🚀 Launch the Conversational UI (react)
 
-To access the Conversational UI frontend, open the following URL in your browser: http://{HOST_IP}:5174. By default, the UI runs on port 80 internally. If you prefer to use a different host port to access the frontend, you can modify the port mapping in the `docker_compose.yaml` file as shown below:
+To access the Conversational UI frontend, open the following URL in your browser: http://{host_ip}:5174. By default, the UI runs on port 80 internally. If you prefer to use a different host port to access the frontend, you can modify the port mapping in the `docker_compose.yaml` file as shown below:
 
 ```yaml
   chaqna-xeon-conversation-ui-server:
diff --git a/ChatQnA/tests/tests.yaml b/ChatQnA/tests/tests.yaml
deleted file mode 100644
index ffe53707c..000000000
--- a/ChatQnA/tests/tests.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# https://github.com/intel/ai-containers/tree/main/test-runner
-import-comps:
-  img: ${REGISTRY}/chatqna:${GITHUB_RUN_NUMBER:-0}
-  cmd: -c "from comps import ChatQnAGateway, MicroService, ServiceOrchestrator, ServiceType"
-  entrypoint: "python"
diff --git a/CodeGen/.actions.json b/CodeGen/.actions.json
deleted file mode 100644
index 72b9b599d..000000000
--- a/CodeGen/.actions.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "experimental": [true],
-  "runner_label": ["ubuntu-latest"]
-}
diff --git a/CodeGen/docker-compose.yaml b/CodeGen/docker-compose.yaml
deleted file mode 100644
index b63b4cdee..000000000
--- a/CodeGen/docker-compose.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# Container CI for CodeGen
-services:
-  codegen:
-    build:
-      args:
-        http_proxy: ${http_proxy}
-        https_proxy: ${https_proxy}
-        no_proxy: ""
-      context: ./docker
-      dockerfile: Dockerfile
-    image: ${REGISTRY:-opea}/codegen:${GITHUB_RUN_NUMBER:-latest}
-  codegen-ui:
-    build:
-      context: ./docker/ui
-      dockerfile: ./docker/Dockerfile
-    extends: codegen
-    image: ${REGISTRY:-opea}/codegen-ui:${GITHUB_RUN_NUMBER:-latest}
diff --git a/CodeGen/docker/gaudi/README.md b/CodeGen/docker/gaudi/README.md
index e00eb6509..05f0d2056 100644
--- a/CodeGen/docker/gaudi/README.md
+++ b/CodeGen/docker/gaudi/README.md
@@ -19,14 +19,23 @@ cd GenAIComps
 docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
 ```
 
-### 3. Build the MegaService Docker Images
+### 3. Build the MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `codegen.py` Python script. Build the MegaService Docker images via the command below:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `codegen.py` Python script. Build the MegaService Docker image via the command below:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples
-cd GenAIExamples/CodeGen
-docker compose build codegen codegen-ui
+cd GenAIExamples/CodeGen/docker
+docker build -t opea/codegen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 4. Build the UI Docker Image
+
+Construct the frontend Docker image via the command below:
+
+```bash
+cd GenAIExamples/CodeGen/docker/ui/
+docker build -t opea/codegen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
 ```
 
 Then run the command `docker images`, you will have the following 3 Docker images:
diff --git a/CodeGen/docker/xeon/README.md b/CodeGen/docker/xeon/README.md
index 9269bd897..7ed57c671 100644
--- a/CodeGen/docker/xeon/README.md
+++ b/CodeGen/docker/xeon/README.md
@@ -27,14 +27,23 @@ cd GenAIComps
 docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
 ```
 
-### 3. Build the MegaService Docker Images
+### 3. Build the MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `codegen.py` Python script. Build MegaService Docker images via the command below:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `codegen.py` Python script. Build MegaService Docker image via the command below:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples
-cd GenAIExamples/CodeGen
-docker compose build codegen codegen-ui
+cd GenAIExamples/CodeGen/docker
+docker build -t opea/codegen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 4. Build the UI Docker Image
+
+Build the frontend Docker image via the command below:
+
+```bash
+cd GenAIExamples/CodeGen/docker/ui/
+docker build -t opea/codegen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
 ```
 
 Then run the command `docker images`, you will have the following 3 Docker Images:
diff --git a/CodeGen/tests/test_manifest_on_xeon.sh b/CodeGen/tests/test_manifest_on_xeon.sh
deleted file mode 100755
index 09a507c65..000000000
--- a/CodeGen/tests/test_manifest_on_xeon.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-set -xe
-USER_ID=$(whoami)
-LOG_PATH=/home/$(whoami)/logs
-MOUNT_DIR=/home/$USER_ID/charts-mnt
-IMAGE_REPO=${IMAGE_REPO:-}
-IMAGE_TAG=${IMAGE_TAG:-latest}
-
-function init_codegen() {
-    # executed under path manifest/codegen/xeon
-    # replace the mount dir "path: /mnt/model" with "path: $CHART_MOUNT"
-    find . -name '*.yaml' -type f -exec sed -i "s#path: /mnt#path: $MOUNT_DIR#g" {} \;
-    # replace megaservice image tag
-    find . -name '*.yaml' -type f -exec sed -i "s#image: opea/codegen:latest#image: opea/codegen:${IMAGE_TAG}#g" {} \;
-    # replace the repository "image: opea/*" with "image: $IMAGE_REPO/opea/"
-    find . -name '*.yaml' -type f -exec sed -i "s#image: \"opea/*#image: \"${IMAGE_REPO}opea/#g" {} \;
-    # set huggingface token
-    find . -name '*.yaml' -type f -exec sed -i "s#insert-your-huggingface-token-here#$(cat /home/$USER_ID/.cache/huggingface/token)#g" {} \;
-}
-
-function install_codegen {
-    echo "namespace is $NAMESPACE"
-    kubectl apply -f . -n $NAMESPACE
-}
-
-function validate_codegen() {
-    ip_address=$(kubectl get svc $SERVICE_NAME -n $NAMESPACE -o jsonpath='{.spec.clusterIP}')
-    port=$(kubectl get svc $SERVICE_NAME -n $NAMESPACE -o jsonpath='{.spec.ports[0].port}')
-    echo "try to curl http://${ip_address}:${port}/v1/codegen..."
-
-    # generate a random logfile name to avoid conflict among multiple runners
-    LOGFILE=$LOG_PATH/curlmega_$NAMESPACE.log
-    # Curl the Mega Service
-    curl http://${ip_address}:${port}/v1/codegen -H "Content-Type: application/json" \
-    -d '{"messages": "def print_hello_world():"}' > $LOGFILE
-    exit_code=$?
-    if [ $exit_code -ne 0 ]; then
-        echo "Megaservice codegen failed, please check the logs in $LOGFILE!"
-        exit 1
-    fi
-
-    echo "Checking response results, make sure the output is reasonable. "
-    local status=false
-    if [[ -f $LOGFILE ]] && \
-    [[ $(grep -c "print" $LOGFILE) != 0 ]]; then
-        status=true
-    fi
-
-    if [ $status == false ]; then
-        echo "Response check failed, please check the logs in artifacts!"
-    else
-        echo "Response check succeed!"
-    fi
-}
-
-if [ $# -eq 0 ]; then
-    echo "Usage: $0 "
-    exit 1
-fi
-
-case "$1" in
-    init_CodeGen)
-        pushd CodeGen/kubernetes/manifests/xeon
-        init_codegen
-        popd
-        ;;
-    install_CodeGen)
-        pushd CodeGen/kubernetes/manifests/xeon
-        NAMESPACE=$2
-        install_codegen
-        popd
-        ;;
-    validate_CodeGen)
-        NAMESPACE=$2
-        SERVICE_NAME=codegen
-        validate_codegen
-        ;;
-    *)
-        echo "Unknown function: $1"
-        ;;
-esac
diff --git a/CodeTrans/.actions.json b/CodeTrans/.actions.json
deleted file mode 100644
index 72b9b599d..000000000
--- a/CodeTrans/.actions.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "experimental": [true],
-  "runner_label": ["ubuntu-latest"]
-}
diff --git a/CodeTrans/docker-compose.yaml b/CodeTrans/docker-compose.yaml
deleted file mode 100644
index bd0f70fce..000000000
--- a/CodeTrans/docker-compose.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# Container CI for CodeTrans
-services:
-  codetrans:
-    build:
-      args:
-        http_proxy: ${http_proxy}
-        https_proxy: ${https_proxy}
-        no_proxy: ""
-      context: ./docker
-      dockerfile: Dockerfile
-    image: ${REGISTRY:-opea}/codetrans:${GITHUB_RUN_NUMBER:-latest}
-  codetrans-ui:
-    build:
-      context: ./docker/ui
-      dockerfile: ./docker/Dockerfile
-    extends: codetrans
-    image: ${REGISTRY:-opea}/codetrans-ui:${GITHUB_RUN_NUMBER:-latest}
diff --git a/CodeTrans/docker/gaudi/README.md b/CodeTrans/docker/gaudi/README.md
index cb54175dd..87cd9f3ea 100755
--- a/CodeTrans/docker/gaudi/README.md
+++ b/CodeTrans/docker/gaudi/README.md
@@ -19,12 +19,19 @@ cd GenAIComps
 docker build -t opea/llm-tgi:latest --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
 ```
 
-### 3. Build MegaService Docker Images
+### 3. Build MegaService Docker Image
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/CodeTrans
-docker compose build codetrans codetrans-ui
+cd GenAIExamples/CodeTrans/docker
+docker build -t opea/codetrans:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 4. Build UI Docker Image
+
+```bash
+cd GenAIExamples/CodeTrans/docker/ui
+docker build -t opea/codetrans-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
 ```
 
 Then run the command `docker images`, you will have the following Docker Images:
diff --git a/CodeTrans/docker/xeon/README.md b/CodeTrans/docker/xeon/README.md
index 0a0a2281c..ac13ea71f 100755
--- a/CodeTrans/docker/xeon/README.md
+++ b/CodeTrans/docker/xeon/README.md
@@ -27,12 +27,19 @@ cd GenAIComps
 docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
 ```
 
-### 3. Build MegaService Docker Images
+### 3. Build MegaService Docker Image
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/CodeTrans
-docker compose build codetrans codetrans-ui
+cd GenAIExamples/CodeTrans/docker
+docker build -t opea/codetrans:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 4. Build UI Docker Image
+
+```bash
+cd GenAIExamples/CodeTrans/docker/ui
+docker build -t opea/codetrans-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
 ```
 
 Then run the command `docker images`, you will have the following Docker Images:
diff --git a/CodeTrans/tests/tests.yaml b/CodeTrans/tests/tests.yaml
deleted file mode 100644
index 2322b3a6e..000000000
--- a/CodeTrans/tests/tests.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# https://github.com/intel/ai-containers/tree/main/test-runner
-import-comps:
-  img: ${REGISTRY}/codetrans:${GITHUB_RUN_NUMBER:-0}
-  cmd: -c "from comps import CodeTransGateway, MicroService, ServiceOrchestrator"
-  entrypoint: "python"
diff --git a/DocSum/.actions.json b/DocSum/.actions.json
deleted file mode 100644
index 72b9b599d..000000000
--- a/DocSum/.actions.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "experimental": [true],
-  "runner_label": ["ubuntu-latest"]
-}
diff --git a/DocSum/docker-compose.yaml b/DocSum/docker-compose.yaml
deleted file mode 100644
index a9f8f93b0..000000000
--- a/DocSum/docker-compose.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# Container CI for DocSum
-services:
-  docsum:
-    build:
-      args:
-        http_proxy: ${http_proxy}
-        https_proxy: ${https_proxy}
-        no_proxy: ""
-      context: ./docker
-      dockerfile: Dockerfile
-    image: ${REGISTRY:-opea}/docsum:${GITHUB_RUN_NUMBER:-latest}
-  docsum-ui:
-    build:
-      context: ./docker/ui
-      dockerfile: ./docker/Dockerfile
-    extends: docsum
-    image: ${REGISTRY:-opea}/docsum-ui:${GITHUB_RUN_NUMBER:-latest}
diff --git a/DocSum/docker/gaudi/README.md b/DocSum/docker/gaudi/README.md
index 5504edbb2..c495df4cd 100644
--- a/DocSum/docker/gaudi/README.md
+++ b/DocSum/docker/gaudi/README.md
@@ -25,14 +25,23 @@ docker pull ghcr.io/huggingface/tgi-gaudi:1.2.1
 docker build -t opea/llm-docsum-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/summarization/tgi/Dockerfile .
 ```
 
-### 3. Build MegaService Docker Images
+### 3. Build MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `docsum.py` Python script. Build the MegaService Docker images using the command below:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `docsum.py` Python script. Build the MegaService Docker image using the command below:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples
-cd GenAIExamples/DocSum
-docker compose build docsum docsum-ui
+cd GenAIExamples/DocSum/docker
+docker build -t opea/docsum:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 4. Build UI Docker Image
+
+Construct the frontend Docker image using the command below:
+
+```bash
+cd GenAIExamples/DocSum/docker/ui/
+docker build -t opea/docsum-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
 ```
 
 Then run the command `docker images`, you will have the following Docker Images:
diff --git a/DocSum/docker/xeon/README.md b/DocSum/docker/xeon/README.md
index f3cfa3b07..d473ef818 100644
--- a/DocSum/docker/xeon/README.md
+++ b/DocSum/docker/xeon/README.md
@@ -27,14 +27,23 @@ docker build -t opea/llm-docsum-tgi:latest --build-arg https_proxy=$https_proxy
 
 Then run the command `docker images`, you will have the following four Docker Images:
 
-### 2. Build MegaService Docker Images
+### 2. Build MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `docsum.py` Python script. Build the MegaService Docker images via below command:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `docsum.py` Python script. Build the MegaService Docker image via below command:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples
-cd GenAIExamples/DocSum
-docker compose build docsum docsum-ui
+cd GenAIExamples/DocSum/docker
+docker build -t opea/docsum:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 3. Build UI Docker Image
+
+Build the frontend Docker image via below command:
+
+```bash
+cd GenAIExamples/DocSum/docker/ui/
+docker build -t opea/docsum-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
 ```
 
 Then run the command `docker images`, you will have the following Docker Images:
diff --git a/DocSum/tests/tests.yaml b/DocSum/tests/tests.yaml
deleted file mode 100644
index 9ff4feac1..000000000
--- a/DocSum/tests/tests.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# https://github.com/intel/ai-containers/tree/main/test-runner
-import-comps:
-  img: ${REGISTRY}/docsum:${GITHUB_RUN_NUMBER:-0}
-  cmd: -c "from comps import DocSumGateway, MicroService, ServiceOrchestrator, ServiceType"
-  entrypoint: "python"
diff --git a/SearchQnA/.actions.json b/SearchQnA/.actions.json
deleted file mode 100644
index 72b9b599d..000000000
--- a/SearchQnA/.actions.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "experimental": [true],
-  "runner_label": ["ubuntu-latest"]
-}
diff --git a/SearchQnA/docker-compose.yaml b/SearchQnA/docker-compose.yaml
deleted file mode 100644
index 7c12965ae..000000000
--- a/SearchQnA/docker-compose.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# Container CI for Translation
-services:
-  translation:
-    build:
-      args:
-        http_proxy: ${http_proxy}
-        https_proxy: ${https_proxy}
-        no_proxy: ""
-      context: ./docker
-      dockerfile: Dockerfile
-    image: ${REGISTRY:-opea}/translation:${GITHUB_RUN_NUMBER:-latest}
-  translation-ui: # This image isn't being utilized, but does exist.
-    build:
-      context: ./docker/ui
-      dockerfile: ./docker/Dockerfile
-    extends: translation
-    image: ${REGISTRY:-opea}/translation-ui:${GITHUB_RUN_NUMBER:-latest}
diff --git a/SearchQnA/docker/gaudi/README.md b/SearchQnA/docker/gaudi/README.md
index 073cf8bec..5073c4725 100644
--- a/SearchQnA/docker/gaudi/README.md
+++ b/SearchQnA/docker/gaudi/README.md
@@ -48,14 +48,15 @@ docker build --no-cache -f Dockerfile-hpu -t opea/tei-gaudi:latest .
 cd ../..
 ```
 
-### 7. Build MegaService Docker Images
+### 7. Build MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `searchqna.py` Python script. Build the MegaService Docker images using the command below:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `searchqna.py` Python script. Build the MegaService Docker image using the command below:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/SearchQnA
-docker compose build searchqna searchqna-ui
+cd GenAIExamples/SearchQnA/docker
+docker build --no-cache -t opea/searchqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+cd ../../..
 ```
 
 Then run the command `docker images`, you will have
@@ -66,7 +67,6 @@ Then run the command `docker images`, you will have
 4. `opea/reranking-tei:latest`
 5. `opea/llm-tgi:latest`
 6. `opea/searchqna:latest`
-7. `opea/searchqna-ui:latest`
 
 ## 🚀 Set the environment variables
 
diff --git a/SearchQnA/docker/xeon/README.md b/SearchQnA/docker/xeon/README.md
index 9dd20c030..b615da255 100644
--- a/SearchQnA/docker/xeon/README.md
+++ b/SearchQnA/docker/xeon/README.md
@@ -35,14 +35,15 @@ docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$ht
 docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
 ```
 
-### 6. Build MegaService Docker Images
+### 6. Build MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `searchqna.py` Python script. Build the MegaService Docker images using the command below:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `searchqna.py` Python script. Build the MegaService Docker image using the command below:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples.git
-cd GenAIExamples/SearchQnA
-docker compose build searchqna searchqna-ui
+cd GenAIExamples/SearchQnA/docker
+docker build --no-cache -t opea/searchqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+cd ../../..
 ```
 
 Then run the command `docker images`, you will have following images ready:
diff --git a/SearchQnA/tests/tests.yaml b/SearchQnA/tests/tests.yaml
deleted file mode 100644
index d64773513..000000000
--- a/SearchQnA/tests/tests.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# https://github.com/intel/ai-containers/tree/main/test-runner
-import-comps:
-  img: ${REGISTRY}/searchqna:${GITHUB_RUN_NUMBER:-0}
-  cmd: -c "from comps import MicroService, SearchQnAGateway, ServiceOrchestrator, ServiceType"
-  entrypoint: "python"
diff --git a/Translation/.actions.json b/Translation/.actions.json
deleted file mode 100644
index 72b9b599d..000000000
--- a/Translation/.actions.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "experimental": [true],
-  "runner_label": ["ubuntu-latest"]
-}
diff --git a/Translation/docker-compose.yaml b/Translation/docker-compose.yaml
deleted file mode 100644
index 60a68010d..000000000
--- a/Translation/docker-compose.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-# Container CI for SearchQnA
-services:
-  searchqna:
-    build:
-      args:
-        http_proxy: ${http_proxy}
-        https_proxy: ${https_proxy}
-        no_proxy: ""
-      context: ./docker
-      dockerfile: Dockerfile
-    image: ${REGISTRY:-opea}/searchqna:${GITHUB_RUN_NUMBER:-latest}
-  searchqna-ui:
-    build:
-      context: ./docker/ui
-      dockerfile: ./docker/Dockerfile
-    extends: searchqna
-    image: ${REGISTRY:-opea}/searchqna-ui:${GITHUB_RUN_NUMBER:-latest}
diff --git a/Translation/docker/gaudi/README.md b/Translation/docker/gaudi/README.md
index 6f361d8b0..b16fcaa8a 100644
--- a/Translation/docker/gaudi/README.md
+++ b/Translation/docker/gaudi/README.md
@@ -17,14 +17,23 @@ cd GenAIComps
 docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
 ```
 
-### 2. Build MegaService Docker Images
+### 2. Build MegaService Docker Image
 
-To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `translation.py` Python script. Build the MegaService Docker images using the command below:
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `translation.py` Python script. Build the MegaService Docker image using the command below:
 
 ```bash
 git clone https://github.com/opea-project/GenAIExamples
-cd GenAIExamples/Translation
-docker compose build translation translation-ui
+cd GenAIExamples/Translation/docker
+docker build -t opea/translation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 3. Build UI Docker Image
+
+Construct the frontend Docker image using the command below:
+
+```bash
+cd GenAIExamples/Translation/docker/ui/
+docker build -t opea/translation-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile .
 ```
 
 Then run the command `docker images`, you will have the following four Docker Images:
@@ -33,8 +42,6 @@ Then run the command `docker images`, you will have the following four Docker Im
 2. `opea/gen-ai-comps:llm-tgi-gaudi-server`
 3. `opea/gen-ai-comps:translation-megaservice-server`
 4. `opea/gen-ai-comps:translation-ui-server`
-5. `opea/translation:latest`
-6. `opea/trsaslation-ui:latest`
 
 ## 🚀 Start Microservices
 
`opea/trsaslation-ui:latest` ## 🚀 Start Microservices diff --git a/Translation/tests/tests.yaml b/Translation/tests/tests.yaml deleted file mode 100644 index 51df659b7..000000000 --- a/Translation/tests/tests.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (C) 2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# https://github.com/intel/ai-containers/tree/main/test-runner -import-comps: - img: ${REGISTRY}/translation:${GITHUB_RUN_NUMBER:-0} - cmd: -c "from comps import MicroService, ServiceOrchestrator, ServiceType, TranslationGateway" - entrypoint: "python" diff --git a/VisualQnA/.actions.json b/VisualQnA/.actions.json deleted file mode 100644 index 72b9b599d..000000000 --- a/VisualQnA/.actions.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "experimental": [true], - "runner_label": ["ubuntu-latest"] -} diff --git a/VisualQnA/README.md b/VisualQnA/README.md index da88ffc55..c00b0d081 100644 --- a/VisualQnA/README.md +++ b/VisualQnA/README.md @@ -23,13 +23,14 @@ This example guides you through how to deploy a [LLaVA](https://llava-vl.github. 1. Build the Docker image needed for starting the service ``` -docker compose build +cd serving/ +docker build . --build-arg http_proxy=${http_proxy} --build-arg https_proxy=${https_proxy} -t intel/gen-ai-examples:llava-gaudi ``` 2. Start the LLaVA service on Intel Gaudi2 ``` -docker run -d -p 8085:8000 -v ./data:/root/.cache/huggingface/hub/ -e http_proxy=$http_proxy -e https_proxy=$http_proxy --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host opea/llava-gaudi:latest +docker run -d -p 8085:8000 -v ./data:/root/.cache/huggingface/hub/ -e http_proxy=$http_proxy -e https_proxy=$https_proxy --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host intel/gen-ai-examples:llava-gaudi ``` Here are some explanation about the above parameters: diff --git a/VisualQnA/docker-compose.yaml b/VisualQnA/docker-compose.yaml deleted file mode 100644 index e6a420ef2..000000000 --- a/VisualQnA/docker-compose.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# Container CI for VisualQnA -services: - visualqna: - build: - args: - http_proxy: ${http_proxy} - https_proxy: ${https_proxy} - no_proxy: "" - context: ./serving - dockerfile: Dockerfile - image: ${REGISTRY:-opea}/visualqna:${GITHUB_RUN_NUMBER:-latest} diff --git a/VisualQnA/tests/test_basic_inference.sh b/VisualQnA/tests/test_basic_inference.sh index 92c279445..4fdf6969c 100644 --- a/VisualQnA/tests/test_basic_inference.sh +++ b/VisualQnA/tests/test_basic_inference.sh @@ -7,15 +7,17 @@ set -xe function test_env_setup() { WORKPATH=$(dirname "$PWD") LOG_PATH="$WORKPATH/tests/inference.log" + CONTAINER_NAME="test-llava-gaudi-service" cd $WORKPATH } function launch_llava_service() { cd ${WORKPATH} + cd serving/ local port=8855 - docker compose build - docker run -d --name=visualqna -p ${port}:8000 -v ~/.cache/huggingface/hub/:/root/.cache/huggingface/hub/ -e http_proxy=$http_proxy -e https_proxy=$http_proxy \ - --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host opea/visualqna:latest + docker build . 
--build-arg http_proxy=${http_proxy} --build-arg https_proxy=${https_proxy} -t intel/gen-ai-examples:${CONTAINER_NAME} + docker run -d --name=${CONTAINER_NAME} -p ${port}:8000 -v ~/.cache/huggingface/hub/:/root/.cache/huggingface/hub/ -e http_proxy=$http_proxy -e https_proxy=$https_proxy \ + --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host intel/gen-ai-examples:${CONTAINER_NAME} sleep 3m } @@ -43,20 +45,21 @@ function check_response() { } function docker_stop() { - cid=$(docker ps -aq --filter "name=visualqna") + local container_name=$1 + cid=$(docker ps -aq --filter "name=$container_name") if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi } function main() { test_env_setup - docker_stop visualqna && sleep 5s + docker_stop $CONTAINER_NAME && sleep 5s launch_llava_service run_tests check_response - docker_stop visualqna && sleep 5s + docker_stop $CONTAINER_NAME && sleep 5s echo y | docker system prune } diff --git a/VisualQnA/tests/tests.yaml b/VisualQnA/tests/tests.yaml deleted file mode 100644 index f8ff3b1b6..000000000 --- a/VisualQnA/tests/tests.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (C) 2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# https://github.com/intel/ai-containers/tree/main/test-runner -import-comps: - img: ${REGISTRY}/visualqna:${GITHUB_RUN_NUMBER:-0} - cmd: llava_server.py -h - entrypoint: "python"
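For anyone validating this migration away from `docker compose build`, below is a minimal smoke-test sketch. It uses only the commands, paths, and image tags introduced in the README changes above (`opea/translation:latest`, `opea/translation-ui:latest`) and assumes `http_proxy`/`https_proxy` are exported in the shell; the final `grep` verification steps are illustrative additions, not part of the repository.

```bash
#!/usr/bin/env bash
# Sketch: rebuild the Translation images with the plain `docker build`
# flow from the updated READMEs, then verify the expected tags exist.
# Assumes http_proxy/https_proxy are exported; grep checks are illustrative.
set -xe

git clone https://github.com/opea-project/GenAIExamples
cd GenAIExamples/Translation/docker

# MegaService image (replaces `docker compose build translation`)
docker build -t opea/translation:latest \
  --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy \
  -f Dockerfile .

# UI image (replaces `docker compose build translation-ui`)
cd ui/
docker build -t opea/translation-ui:latest \
  --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy \
  -f ./docker/Dockerfile .

# Fail (via set -e) if either expected tag is missing.
docker images --format '{{.Repository}}:{{.Tag}}' | grep -qx 'opea/translation:latest'
docker images --format '{{.Repository}}:{{.Tag}}' | grep -qx 'opea/translation-ui:latest'
```

This mirrors the build steps now documented in `Translation/docker/gaudi/README.md` and `Translation/docker/xeon/README.md`, so it doubles as a quick check that those instructions remain runnable after the `docker-compose.yaml` CI files are removed.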