🐛 Bug: Fix the bug where groq could not switch models.
✨ Feature: Add support for groq's llama3-70b-8192 model.

💻 Code: Upgrade tiktoken to version 0.6.0
yym68686 committed Apr 22, 2024
1 parent b085fb0 commit 75d270f
Showing 4 changed files with 16 additions and 15 deletions.
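
The fix named in the commit title is visible in the utils/chatgpt2api.py hunks below: the request body resolved the model as os.environ.get("MODEL_NAME") (or os.environ.get("GPT_ENGINE") in the groq path) before the per-call model and the bot's configured engine, so an engine chosen at runtime, for example via the Telegram model buttons, was silently overridden by the environment variable set at startup. A minimal sketch of the precedence change; resolve_model_old and resolve_model_new are illustrative helpers, not functions from this repository:

import os

# Old behaviour: the environment variable always wins, so a model
# switched at runtime never reaches the request body.
def resolve_model_old(model, engine):
    return os.environ.get("GPT_ENGINE") or model or engine

# New behaviour (this commit): the per-call model, then the configured engine.
def resolve_model_new(model, engine):
    return model or engine

os.environ["GPT_ENGINE"] = "mixtral-8x7b-32768"
print(resolve_model_old("llama3-70b-8192", "llama3-70b-8192"))  # mixtral-8x7b-32768
print(resolve_model_new("llama3-70b-8192", "llama3-70b-8192"))  # llama3-70b-8192
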
5 changes: 3 additions & 2 deletions bot.py
@@ -124,6 +124,7 @@ async def command_bot(update, context, language=None, prompt=translator_prompt,
}
}
)
# print("robot", robot)
await context.bot.send_chat_action(chat_id=chatid, action=ChatAction.TYPING)
await getChatGPT(update, context, title, robot, message, chatid, messageid)
else:
@@ -170,11 +171,10 @@ async def getChatGPT(update, context, title, robot, message, chatid, messageid):
reply_to_message_id=messageid,
)
messageid = message.message_id
- get_answer = robot.ask_stream
pass_history = config.PASS_HISTORY

try:
- for data in get_answer(text, convo_id=str(chatid), pass_history=pass_history):
+ for data in robot.ask_stream(text, convo_id=str(chatid), pass_history=pass_history):
if "🌐" not in data:
result = result + data
tmpresult = result
@@ -310,6 +310,7 @@ async def button_press(update, context):
data = callback_query.data
if "gpt-" in data or "claude" in data or "mixtral" in data or "llama" in data or "gemini" in data or (config.CUSTOM_MODELS and data in config.CUSTOM_MODELS):
config.GPT_ENGINE = data
# print("config.GPT_ENGINE", config.GPT_ENGINE)
if (config.API and "gpt-" in data) or (config.API and not config.ClaudeAPI) or (config.API and config.CUSTOM_MODELS and data in config.CUSTOM_MODELS):
config.ChatGPTbot = GPT(api_key=f"{config.API}", engine=config.GPT_ENGINE, system_prompt=config.systemprompt, temperature=config.temperature)
config.ChatGPTbot.reset(convo_id=str(update.effective_chat.id), system_prompt=config.systemprompt)
7 changes: 2 additions & 5 deletions config.py
@@ -37,10 +37,7 @@
from utils.chatgpt2api import Chatbot as GPT
from utils.chatgpt2api import Imagebot, claudebot, groqbot, claude3bot, gemini_bot
if API:
- try:
-     ChatGPTbot = GPT(api_key=f"{API}", engine=GPT_ENGINE, system_prompt=systemprompt, temperature=temperature)
- except:
-     ChatGPTbot = GPT(api_key=f"{API}", engine="gpt-3.5-turbo-1106", system_prompt=systemprompt, temperature=temperature)
+ ChatGPTbot = GPT(api_key=f"{API}", engine=GPT_ENGINE, system_prompt=systemprompt, temperature=temperature)

translate_bot = GPT(api_key=f"{API}", engine=GPT_ENGINE, system_prompt=systemprompt, temperature=temperature)
copilot_bot = GPT(api_key=f"{API}", engine=GPT_ENGINE, system_prompt=prompt.search_system_prompt.format(LANGUAGE), temperature=temperature)
@@ -150,7 +147,7 @@ def create_buttons(strings):
if GROQ_API_KEY:
initial_model.extend([
"mixtral-8x7b-32768",
"llama2-70b-4096",
"llama3-70b-8192",
])
if GOOGLE_AI_API_KEY:
initial_model.extend([
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,6 +1,6 @@
--index-url https://pypi.python.org/simple/
- tiktoken
requests
+ tiktoken==0.6.0
md2tgmd==0.1.2
# jieba
python-dotenv
17 changes: 10 additions & 7 deletions utils/chatgpt2api.py
@@ -56,6 +56,7 @@ def get_filtered_keys_from_object(obj: object, *keys: str) -> Set[str]:
"gpt-4-turbo-2024-04-09",
"mixtral-8x7b-32768",
"llama2-70b-4096",
"llama3-70b-8192",
"claude-2.1",
"claude-3-sonnet-20240229",
"claude-3-haiku-20240307",
@@ -177,7 +178,7 @@ def ask_stream(
}

json_post = {
"model": os.environ.get("MODEL_NAME") or model or self.engine,
"model": model or self.engine,
"prompt": self.conversation.Conversation(convo_id) if pass_history else f"\n\nHuman:{prompt}\n\nAssistant:",
"stream": True,
"temperature": kwargs.get("temperature", self.temperature),
@@ -335,7 +336,7 @@ def ask_stream(
}

json_post = {
"model": os.environ.get("MODEL_NAME") or model or self.engine,
"model": model or self.engine,
"messages": self.conversation[convo_id] if pass_history else [{
"role": "user",
"content": prompt
@@ -711,7 +712,7 @@ def get_post_body(
**kwargs,
):
json_post_body = {
"model": os.environ.get("MODEL_NAME") or model or self.engine,
"model": model or self.engine,
"messages": self.conversation[convo_id] if pass_history else [{"role": "system","content": self.system_prompt},{"role": role, "content": prompt}],
"max_tokens": 5000,
"stream": True,
@@ -1110,7 +1111,7 @@ class groqbot:
def __init__(
self,
api_key: str,
- engine: str = os.environ.get("GPT_ENGINE") or "mixtral-8x7b-32768",
+ engine: str = os.environ.get("GPT_ENGINE") or "llama3-70b-8192",
temperature: float = 0.5,
top_p: float = 1,
chat_url: str = "https://api.groq.com/openai/v1/chat/completions",
@@ -1221,13 +1222,15 @@ def ask_stream(
"role": "user",
"content": prompt
}],
"model": os.environ.get("GPT_ENGINE") or model or self.engine,
"model": model or self.engine,
"temperature": kwargs.get("temperature", self.temperature),
"max_tokens": model_max_tokens,
"top_p": kwargs.get("top_p", self.top_p),
"stop": None,
"stream": True,
}
# print("json_post", json_post)
# print(os.environ.get("GPT_ENGINE"), model, self.engine)

try:
response = self.session.post(
@@ -1288,7 +1291,7 @@ class gemini_bot:
def __init__(
self,
api_key: str,
- engine: str = os.environ.get("GPT_ENGINE") or "claude-3-opus-20240229",
+ engine: str = os.environ.get("GPT_ENGINE") or "gemini-1.5-pro-latest",
temperature: float = 0.5,
top_p: float = 0.7,
chat_url: str = "https://generativelanguage.googleapis.com/v1beta/models/{model}:{stream}?key={api_key}",
@@ -1417,7 +1420,7 @@ def ask_stream(
}
print(json.dumps(json_post, indent=4, ensure_ascii=False))

- url = self.chat_url.format(model=os.environ.get("MODEL_NAME") or model or self.engine, stream="streamGenerateContent", api_key=self.api_key)
+ url = self.chat_url.format(model=model or self.engine, stream="streamGenerateContent", api_key=self.api_key)

try:
response = self.session.post(
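
With the default engine bumped and the environment override removed, a groq conversation can start on llama3-70b-8192 without extra configuration. A rough usage sketch, assuming groqbot.ask_stream takes the prompt first and yields text chunks the way bot.py iterates robot.ask_stream above; the API key string is a placeholder:

from utils.chatgpt2api import groqbot

bot = groqbot(api_key="YOUR_GROQ_API_KEY")  # engine now defaults to llama3-70b-8192
reply = ""
# A model keyword, if given, now takes precedence over GPT_ENGINE in the environment.
for chunk in bot.ask_stream("Say hello in one sentence.", model="llama3-70b-8192"):
    reply += chunk
print(reply)
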
