diff --git a/.github/workflows/build-and-push-to-ghcr.yml b/.github/workflows/build-and-push-to-ghcr.yml
new file mode 100644
index 0000000..02e3835
--- /dev/null
+++ b/.github/workflows/build-and-push-to-ghcr.yml
@@ -0,0 +1,50 @@
+name: Build and push to GHCR
+on:
+  push:
+    branches: [main]
+  pull_request:
+env:
+  GHCR_URL: ghcr.io/coqui-ai/xtts-streaming-server
+jobs:
+  build-and-push-to-ghcr:
+    runs-on: ubuntu-22.04
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: 'Login to GitHub Container Registry'
+        run: |
+          set -xe
+          docker login --username ${{ github.actor }} --password ${{ secrets.GITHUB_TOKEN }} ghcr.io
+
+      - name: Build only for PR
+        if: github.ref != 'refs/heads/main'
+        uses: docker/build-push-action@v5
+        with:
+          context: "{{defaultContext}}:server"
+          file: Dockerfile
+          push: false # Do not push image for PR
+          # cache-from takes a newline-separated list; a single ';'-joined
+          # string would be passed to buildx as one malformed --cache-from value
+          cache-from: |
+            type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-latest
+            type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-pr-${{ github.event.number }}
+          cache-to: type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-pr-${{ github.event.number }}
+
+      - name: Build and Push image
+        if: github.ref == 'refs/heads/main'
+        uses: docker/build-push-action@v5
+        with:
+          context: "{{defaultContext}}:server"
+          file: Dockerfile
+          push: true # Push if merged
+          cache-from: type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-latest
+          cache-to: type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-latest
+          tags: ${{ env.GHCR_URL }}:latest,${{ env.GHCR_URL }}:${{ github.sha }} # tags need the ghcr.io prefix to land in GHCR
+          #build-args:
+
diff --git a/server/Dockerfile b/server/Dockerfile
index ea97d9d..422ff53 100644
--- a/server/Dockerfile
+++ b/server/Dockerfile
@@ -5,5 +5,6 @@ RUN python -m pip install -r requirements.txt --use-deprecated=legacy-resolver
 COPY main.py .
 
+ENV NUM_THREADS=8
 EXPOSE 80
 
 CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80"]
diff --git a/server/main.py b/server/main.py
index 56bd94d..f293728 100644
--- a/server/main.py
+++ b/server/main.py
@@ -18,18 +18,22 @@
 from TTS.utils.generic_utils import get_user_data_dir
 from TTS.utils.manage import ModelManager
 
-torch.set_num_threads(8)
+torch.set_num_threads(int(os.environ.get("NUM_THREADS", "8")))
 device = torch.device("cuda")
 
 model_name = "tts_models/multilingual/multi-dataset/xtts_v1.1"
+print("Downloading XTTS Model:",model_name)
 ModelManager().download_model(model_name)
 model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
+print("XTTS Model downloaded")
 
+print("Loading XTTS")
 config = XttsConfig()
 config.load_json(os.path.join(model_path, "config.json"))
 model = Xtts.init_from_config(config)
 model.load_checkpoint(config, checkpoint_dir=model_path, eval=True, use_deepspeed=True)
 model.to(device)
+print("XTTS Loaded.")
 
 ##### Run fastapi #####
 app = FastAPI(
diff --git a/server/requirements.txt b/server/requirements.txt
index 4895c2e..8a3ad11 100644
--- a/server/requirements.txt
+++ b/server/requirements.txt
@@ -1,6 +1,8 @@
-TTS==0.18.2
+TTS==0.19.1
 uvicorn[standard]==0.23.2
-fastapi==0.104.0
-deepspeed==0.8.3
+fastapi==0.95.2
+deepspeed==0.10.3
 pydantic==1.10.13
 python-multipart==0.0.6
+typing-extensions>=4.8.0
+numpy==1.24.3
\ No newline at end of file