feat: tools refactor, started work on subagent #79

Merged 17 commits on Aug 6, 2024
Makefile (11 additions, 0 deletions)

```diff
@@ -50,3 +50,14 @@ clean-test:
 	echo $$HOME/.local/share/gptme/logs/*test-*-test_*
 	rm -I $$HOME/.local/share/gptme/logs/*test-*-test_*/*.jsonl || true
 	rm --dir $$HOME/.local/share/gptme/logs/*test-*-test_*/ || true
+
+cloc: cloc-core cloc-tools
+
+cloc-core:
+	cloc gptme/*.py
+
+cloc-tools:
+	cloc gptme/tools
+
+cloc-tests:
+	cloc tests/*.py
```
eval/filestore.py (1 addition, 1 deletion)

```diff
@@ -28,7 +28,7 @@ def download(self) -> Files:
             if path.is_file():
                 key = str(path.relative_to(self.working_dir))
                 try:
-                    with open(path, "r") as f:
+                    with open(path) as f:
                         files[key] = f.read()
                 except UnicodeDecodeError:
                     # file is binary
```
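Dropping the explicit `"r"` is a pure cleanup, since it is `open()`'s default mode. For context, a minimal sketch of the pattern this method uses, with a made-up helper name, showing how the `UnicodeDecodeError` fallback separates text files from binary ones:

```python
from pathlib import Path

def read_text_files(working_dir: Path) -> dict[str, str]:
    """Collect text files under working_dir, skipping binary ones."""
    files: dict[str, str] = {}
    for path in working_dir.rglob("*"):
        if path.is_file():
            key = str(path.relative_to(working_dir))
            try:
                with open(path) as f:  # "r" is already the default mode
                    files[key] = f.read()
            except UnicodeDecodeError:
                pass  # binary file, skip (the real FileStore also handles bytes)
    return files

print(read_text_files(Path(".")))
```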
eval/types.py (3 additions, 2 deletions)

```diff
@@ -1,7 +1,8 @@
 from dataclasses import dataclass
-from typing import Callable, Dict, TypedDict
+from typing import TypedDict
+from collections.abc import Callable
 
-Files = Dict[str, str | bytes]
+Files = dict[str, str | bytes]
 
 
 @dataclass
```
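The import changes track Python's typing modernization: `typing.Dict` and `typing.Callable` are deprecated aliases, replaced by the builtin `dict` (PEP 585, Python 3.9+) and `collections.abc.Callable`, while `str | bytes` uses PEP 604 union syntax. A small sketch of the resulting style (`ExecuteFn` and `total_size` are illustrative, not from the codebase):

```python
from collections.abc import Callable

# Builtin generics (PEP 585) and union syntax (PEP 604) replace the
# deprecated typing.Dict / typing.Callable / typing.Union aliases.
Files = dict[str, str | bytes]
ExecuteFn = Callable[[Files], bool]  # hypothetical alias, for illustration only

def total_size(files: Files) -> int:
    """Total size in bytes of a Files mapping; str entries are UTF-8 encoded."""
    return sum(
        len(c) if isinstance(c, bytes) else len(c.encode())
        for c in files.values()
    )

assert total_size({"a.txt": "hi", "b.bin": b"\x00\x01"}) == 4
```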
gptme/cli.py (4 additions, 1 deletion)

```diff
@@ -192,6 +192,9 @@ def chat(
     """
     Run the chat loop.
 
+    prompt_msgs: list of messages to execute in sequence.
+    initial_msgs: list of history messages.
+
     Callable from other modules.
     """
     # init
@@ -233,7 +236,7 @@ def chat(
             codeblock = log.get_last_code_block("assistant", history=1, content=False)
             if not (codeblock and is_supported_codeblock(codeblock)):
                 logger.info("Non-interactive and exhausted prompts, exiting")
-                exit(0)
+                break
 
         # ask for input if no prompt, generate reply, and run tools
         for msg in step(log, no_confirm, model, stream=stream):  # pragma: no cover
```
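Swapping `exit(0)` for `break` matters because `chat()` is documented as "callable from other modules": `exit()` raises `SystemExit` and tears down the embedding process, while `break` just leaves the prompt loop and lets the function return. A simplified sketch of the difference (not the real gptme control flow):

```python
def chat(prompt_msgs: list[str]) -> None:
    """Toy stand-in for a library entry point."""
    for prompt in prompt_msgs:
        if not prompt:
            # break returns control to the caller;
            # exit(0) here would raise SystemExit and kill the host process
            break
        print(f"handling {prompt!r}")
    print("post-loop cleanup still runs after break")

chat(["hi", "", "never reached"])
```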
gptme/message.py (5 additions, 1 deletion)

```diff
@@ -9,6 +9,7 @@
 from rich import print
 from rich.console import Console
 from rich.syntax import Syntax
+from tomlkit._utils import escape_string
 from typing_extensions import Self
 
 from .constants import ROLE_COLOR
@@ -80,7 +81,10 @@ def to_toml(self) -> str:
         flags_toml = "\n".join(f"{flag} = true" for flag in flags)
 
         # doublequotes need to be escaped
-        content = self.content.replace('"', '\\"')
+        # content = self.content.replace('"', '\\"')
+        content = escape_string(self.content)
+        content = content.replace("\\n", "\n")
+
         return f'''[message]
 role = "{self.role}"
 content = """
```
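The old single `replace` only escaped double quotes, which breaks on content containing backslashes or control characters; `escape_string` (a private tomlkit helper, so subject to change) escapes all TOML-special sequences, and the follow-up `replace("\\n", "\n")` re-expands newlines so the multiline `"""` block stays human-readable. A hedged round-trip sketch of the new approach, assuming tomlkit's current behavior:

```python
import tomlkit
from tomlkit._utils import escape_string  # private helper; may change between releases

content = 'She said "hi"\na back\\slash and """triple quotes"""'

escaped = escape_string(content)        # escapes \, " and control characters
escaped = escaped.replace("\\n", "\n")  # re-expand newlines so the TOML stays readable

doc = f'''[message]
role = "user"
content = """
{escaped}"""
'''

assert tomlkit.parse(doc)["message"]["content"] == content
```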
gptme/models.py (14 additions, 9 deletions)

```diff
@@ -34,13 +34,7 @@ class _ModelDictMeta(TypedDict):
 # TODO: can we get this from the API?
 MODELS: dict[str, dict[str, _ModelDictMeta]] = {
     "openai": {
-        "gpt-4": {
-            "context": 8193,
-            # 0.03 USD per 1k input tokens
-            # 0.06 USD per 1k output tokens
-            "price_input": 0.03,
-            "price_output": 0.06,
-        },
+        # gpt-3.5
         "gpt-3.5-turbo": {
             "context": 4097,
             # 0.001 USD per 1k input tokens
@@ -51,6 +45,17 @@ class _ModelDictMeta(TypedDict):
         "gpt-3.5-turbo-16k": {
             "context": 16385,
         },
+        "gpt-3.5-turbo-1106": {
+            "context": 16385,
+        },
+        # gpt-4 (non-turbo, small context)
+        "gpt-4": {
+            "context": 8193,
+            # 0.03 USD per 1k input tokens
+            # 0.06 USD per 1k output tokens
+            "price_input": 0.03,
+            "price_output": 0.06,
+        },
         # gpt-4-turbo
         # https://openai.com/blog/new-models-and-developer-products-announced-at-devday
         "gpt-4-1106-preview": {
@@ -59,8 +64,8 @@
         "gpt-4-vision-preview": {
             "context": 128_000,
         },
-        "gpt-3.5-turbo-1106": {
-            "context": 16385,
+        "gpt-4-turbo": {
+            "context": 128_000,
         },
     }
 }
```
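The reshuffle groups entries by model family (gpt-3.5, then gpt-4, then gpt-4-turbo) and adds the missing `gpt-3.5-turbo-1106` and `gpt-4-turbo` entries. For illustration, a sketch of how a registry like this is typically consumed; `cost_usd` is a made-up helper rather than gptme's actual API, and the prices are the per-1k-token USD figures from the comments above:

```python
# MODELS mirrors the structure above (trimmed to two entries).
MODELS = {
    "openai": {
        "gpt-4": {"context": 8193, "price_input": 0.03, "price_output": 0.06},
        "gpt-4-turbo": {"context": 128_000},
    }
}

def cost_usd(provider: str, model: str, tokens_in: int, tokens_out: int) -> float:
    meta = MODELS[provider][model]
    # prices are USD per 1k tokens; unknown prices default to 0
    price_in = meta.get("price_input", 0)
    price_out = meta.get("price_output", 0)
    return (price_in * tokens_in + price_out * tokens_out) / 1000

assert cost_usd("openai", "gpt-4", 1000, 500) == 0.06  # 0.03 + 0.03
```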
gptme/prompts.py (38 additions, 24 deletions)

````diff
@@ -9,7 +9,15 @@
 
 from .config import get_config
 from .message import Message
-from .tools import browser, patch
+from .tools import (
+    browser,
+    init_tools,
+    loaded_tools,
+    patch,
+    python,
+    save,
+    shell,
+)
 
 PromptType = Literal["full", "short"]
 
@@ -41,6 +49,8 @@ def prompt_full() -> Generator[Message, None, None]:
     yield from prompt_gptme()
 
     yield from prompt_tools()
+    # Useful in debugging
+    #yield from prompt_tools_from_spec()
     yield from prompt_examples()
     yield from prompt_gh()
 
@@ -133,6 +143,24 @@ def prompt_code_interpreter() -> Generator[Message, None, None]:  # pragma: no cover
     )
 
 
+def prompt_tools_from_spec() -> Generator[Message, None, None]:
+    # TODO: this should be moved to tools.py
+    # tools must have been initialized by now
+    init_tools()
+    prompt = ""
+    assert loaded_tools, "No tools loaded"
+    for tool in loaded_tools:
+        prompt += (
+            f"""## {tool.name}
+
+{tool.desc.strip()}
+
+{tool.instructions.strip()}""".strip()
+            + "\n\n"
+        )
+    yield Message("system", prompt.strip())
+
+
 def prompt_tools() -> Generator[Message, None, None]:
     python_libraries = get_installed_python_libraries()
     python_libraries_str = "\n".join(f"- {lib}" for lib in python_libraries)
@@ -147,24 +175,24 @@ def prompt_tools() -> Generator[Message, None, None]:
 
 ## python
 
-When you send a message containing Python code (and is not a file block), it will be executed in a stateful environment.
-Python will respond with the output of the execution.
+{python.instructions}
 
 The following libraries are available:
 {python_libraries_str}
 
+The following functions are available in the REPL:
+{python.get_functions_prompt()}
+
 ## bash
 
-When you send a message containing bash code, it will be executed in a stateful bash shell.
-The shell will respond with the output of the execution.
+{shell.instructions}
 
 These programs are available, among others:
 {shell_programs_str}
 
 ## saving files
 
-To save a file, output a code block with a filename on the first line, like "```src/example.py" (a "file block").
-It is very important that such blocks begin with a filename, otherwise the code will be executed instead of saved.
+{save.instructions}
 
 ## patching files
 
@@ -191,29 +219,15 @@ def prompt_examples() -> Generator[Message, None, None]:
 
 ## bash
 
-> User: learn about the project
-```bash
-git ls-files
-```
-> stdout: `README.md`
-```bash
-cat README.md
-```
+{shell.examples}
 
 ## Python
 
-> User: print hello world
-```python
-print("Hello world")
-```
+{python.examples}
 
 ## Save files
 
-> User: write a Hello world script to hello.py
-```hello.py
-print("Hello world")
-```
-Saved to `hello.py`.
+{save.examples}
 
 ## Read files
 
````
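The new `prompt_tools_from_spec()` generates the tools section from each tool's own spec instead of hand-written prose, the same direction the `{python.instructions}` and `{shell.examples}` substitutions in `prompt_tools()` and `prompt_examples()` take. A minimal sketch of the spec shape it relies on (field names inferred from this diff; the real `ToolSpec` in `gptme/tools` may differ):

```python
from dataclasses import dataclass

@dataclass
class ToolSpec:  # minimal stand-in; the real spec lives in gptme/tools
    name: str
    desc: str
    instructions: str = ""

def tools_prompt(tools: list[ToolSpec]) -> str:
    """Render a '## <tool>' section per tool, like prompt_tools_from_spec()."""
    return "\n\n".join(
        f"## {t.name}\n\n{t.desc.strip()}\n\n{t.instructions.strip()}".strip()
        for t in tools
    )

shell = ToolSpec(
    name="shell",
    desc="Executes shell commands in a stateful bash session.",
    instructions="Send a bash code block and the shell replies with its output.",
)
print(tools_prompt([shell]))
```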