forked from autoGLM/funAGI
-
Notifications
You must be signed in to change notification settings - Fork 0
/
chatter.py
71 lines (64 loc) · 2.66 KB
/
chatter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
# chatter.py
import openai
from groq import Groq
import logging
class GPT4o:
    """Chat wrapper around the OpenAI API (default model: gpt-4o).

    Fix vs. original: the original set the legacy module-global
    ``openai.api_key`` while calling the v1-style
    ``openai.chat.completions.create`` endpoint. We now build an explicit
    ``openai.OpenAI`` client (the v1 idiom), matching the client-instance
    pattern used by GroqModel and OllamaModel in this file. The public
    interface (constructor argument, ``openai_api_key`` attribute,
    ``generate_response`` signature) is unchanged.
    """

    def __init__(self, openai_api_key):
        # Kept for backward compatibility with any caller reading it.
        self.openai_api_key = openai_api_key
        # Explicit per-instance client instead of mutating module state.
        self.client = openai.OpenAI(api_key=openai_api_key)

    def generate_response(self, knowledge, model="gpt-4o"):
        """Send *knowledge* as the user prompt and return the reply lowercased.

        On an OpenAI API failure, logs the error and returns a human-readable
        error string instead of raising (best-effort contract, matching the
        file's other model wrappers).
        """
        prompt = f"{knowledge}"
        try:
            response = self.client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": "you are openmind the easy action event agi solution creator."},
                    {"role": "user", "content": prompt}
                ]
            )
            decision = response.choices[0].message.content
            return decision.lower()
        except openai.APIError as e:
            logging.error(f"openai api error: {e}")
            return "error: unable to generate a response due to an issue with the openai api."
class GroqModel:
    """Chat wrapper around the Groq API (default model: mixtral-8x7b-32768)."""

    def __init__(self, groq_api_key):
        # One client per wrapper instance; the key is not retained separately.
        self.client = Groq(api_key=groq_api_key)

    def generate_response(self, knowledge, model="mixtral-8x7b-32768"):
        """Send *knowledge* as the user prompt and return the reply lowercased.

        Any failure is logged and converted into a human-readable error
        string rather than raised (best-effort contract, matching the
        file's other model wrappers).
        """
        conversation = [
            {"role": "system", "content": "you are openmind the easy action event agi solution creator."},
            {"role": "user", "content": f"{knowledge}"},
        ]
        try:
            completion = self.client.chat.completions.create(
                messages=conversation,
                model=model,
            )
        except Exception as e:
            logging.error(f"groq api error: {e}")
            return "error: unable to generate a response due to an issue with the groq api."
        reply = completion.choices[0].message.content
        return reply.lower()
class OllamaModel:
    """Chat wrapper for a local Ollama server via its OpenAI-compatible API.

    Bug fix vs. original: the constructor called a bare ``OpenAI(...)``, but
    this file only does ``import openai`` — that name was never in scope and
    instantiation raised ``NameError``. The client is now created as
    ``openai.OpenAI(...)``.
    """

    def __init__(self):
        # Ollama exposes an OpenAI-compatible endpoint; the api_key value
        # is required by the client but ignored by the server.
        self.client = openai.OpenAI(
            base_url='http://localhost:11434/v1',
            api_key='ollama',  # required, but unused
        )

    def generate_response(self, knowledge, model="llama2"):
        """Send *knowledge* in a fixed multi-role prompt; return reply lowercased.

        NOTE(review): the "tool" role message carries no tool_call_id, which
        the strict OpenAI API would reject — presumably Ollama tolerates it;
        confirm against the local server before relying on it.
        """
        prompt = [
            {"role": "system", "content": "openmindx"},
            {"role": "assistant", "content": "agi"},
            {"role": "tool", "content": "autonomous general learning model"},
            {"role": "user", "content": f"autonomous general intelligence return solution: {knowledge}."}
        ]
        try:
            response = self.client.chat.completions.create(
                model=model,
                messages=prompt
            )
            decision = response.choices[0].message.content
            return decision.lower()
        except Exception as e:
            logging.error(f"ollama api error: {e}")
            return "error: unable to generate a response due to an issue with the ollama api."