
Commit b9914d0
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Oct 9, 2024
1 parent 1d437cc commit b9914d0
Showing 2 changed files with 7 additions and 4 deletions.
9 changes: 6 additions & 3 deletions comps/cores/mega/micro_service.py
@@ -3,18 +3,21 @@

 import asyncio
 import multiprocessing
-import requests
 import time
 from typing import Any, List, Optional, Type

+import requests
+
 from ..proto.docarray import TextDoc
 from .constants import ServiceRoleType, ServiceType
-from .utils import check_ports_availability
 from .logger import CustomLogger
+from .utils import check_ports_availability

 opea_microservices = {}

 logger = CustomLogger("micro-service")


 class MicroService:
     """MicroService class to create a microservice."""
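
The regrouping above is the standard layout that import-sorting hooks such as isort enforce (which hook ran is an assumption; the commit message only says "auto fixes"): standard-library imports first, then third-party packages, then local modules, with the groups separated by blank lines and each group alphabetized. A small runnable sketch of the same fix using isort's Python API:

# Requires: pip install isort
import isort

messy = (
    "import asyncio\n"
    "import requests\n"  # third-party, mixed into the stdlib group
    "import time\n"
)

# isort.code() returns the source with imports regrouped and sorted:
# stdlib first, then a blank line, then third-party packages.
print(isort.code(messy))
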
@@ -56,7 +59,7 @@ def __init__(
             else:
                 logger.info(f"LLM endpoint is ready - but error status code - {llm_endpoint}")
             success = True

         except requests.exceptions.RequestException as e:
             logger.info(f"Error: {e} - {llm_endpoint}")
             time.sleep(2.5)
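
The change in this hunk is not visible in the flattened text; given the identical line counts (-56,7 +59,7), it is most likely a whitespace-only fix on the blank line, which is an inference, not something the page states. For context, the except branch shown here belongs to a readiness check that polls the LLM endpoint until it responds. A minimal self-contained sketch of that loop, reconstructed from the visible lines; the while-not-success structure, the standalone wait_for_llm_endpoint() function, and the use of print() in place of the module's CustomLogger are all assumptions, not the verbatim upstream code:

import time

import requests


def wait_for_llm_endpoint(llm_endpoint: str, retry_interval: float = 2.5) -> None:
    """Poll llm_endpoint until it answers. Hypothetical standalone version."""
    success = False
    while not success:
        try:
            response = requests.get(llm_endpoint)
            if response.status_code == 200:
                print(f"LLM endpoint is ready - {llm_endpoint}")
            else:
                # The endpoint answered, but with a non-200 status code.
                print(f"LLM endpoint is ready - but error status code - {llm_endpoint}")
            success = True
        except requests.exceptions.RequestException as e:
            # Not reachable yet: log the error and retry after a short pause.
            print(f"Error: {e} - {llm_endpoint}")
            time.sleep(retry_interval)
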
2 changes: 1 addition & 1 deletion comps/llms/text-generation/vllm/llama_index/llm.py
@@ -38,7 +38,7 @@ def post_process_text(text: str):
     endpoint="/v1/chat/completions",
     host="0.0.0.0",
     port=9000,
-    llm_endpoint = os.getenv("vLLM_ENDPOINT", "http://localhost:8008"),
+    llm_endpoint=os.getenv("vLLM_ENDPOINT", "http://localhost:8008"),
 )
 def llm_generate(input: LLMParamsDoc):
     if logflag:
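
The one-line change above is pure style: PEP 8 disallows spaces around "=" when it marks a keyword argument at a call site, and autoformatters run by pre-commit (black or autopep8, for instance; which one ran here is an assumption) rewrite it automatically. A tiny illustration with a hypothetical connect() helper:

import os


def connect(llm_endpoint: str = "http://localhost:8008") -> str:
    """Return the endpoint this hypothetical helper would talk to."""
    return llm_endpoint


# Idiomatic: no spaces around "=" in the keyword argument (matches the fix above).
connect(llm_endpoint=os.getenv("vLLM_ENDPOINT", "http://localhost:8008"))

# The pre-fix form is flagged by pycodestyle as E251:
# connect(llm_endpoint = os.getenv("vLLM_ENDPOINT", "http://localhost:8008"))
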
