feat: Add basic support for integration tools to ToolStep #519

Merged · 7 commits · Sep 25, 2024
55 changes: 55 additions & 0 deletions agents-api/agents_api/activities/execute_integration.py
@@ -0,0 +1,55 @@
from typing import Any

from beartype import beartype
from temporalio import activity

from ..autogen.openapi_model import IntegrationDef
from ..common.protocol.tasks import StepContext
from ..env import testing
from ..models.tools import get_tool_args_from_metadata


@beartype
async def execute_integration(
context: StepContext,
tool_name: str,
integration: IntegrationDef,
arguments: dict[str, Any],
) -> Any:
developer_id = context.execution_input.developer_id
agent_id = context.execution_input.agent.id
task_id = context.execution_input.task.id

merged_tool_args = get_tool_args_from_metadata(
developer_id=developer_id, agent_id=agent_id, task_id=task_id
)

[Review comment — Contributor Author] need to also merge in integration.arguments

arguments = merged_tool_args.get(tool_name, {}) | arguments

try:
if integration.provider == "dummy":
return arguments

else:
raise NotImplementedError(
f"Unknown integration provider: {integration.provider}"
)
except BaseException as e:
if activity.in_activity():
activity.logger.error(f"Error in execute_integration: {e}")

raise


async def mock_execute_integration(
context: StepContext,
tool_name: str,
integration: IntegrationDef,
arguments: dict[str, Any],
) -> Any:
return arguments


execute_integration = activity.defn(name="execute_integration")(
execute_integration if not testing else mock_execute_integration
)
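
Note: the dict-union on the arguments line gives call-time arguments precedence over the defaults pulled from metadata; per the review comment above, integration.arguments would still need to be merged in as a third layer. A small sketch of the precedence, with made-up values:

# Hypothetical values; in the activity they come from get_tool_args_from_metadata
# (per-agent / per-task defaults) and from the arguments of the tool call itself.
metadata_defaults = {"wikipedia_search": {"lang": "en", "limit": 5}}
call_arguments = {"limit": 1, "query": "Temporal workflows"}

tool_name = "wikipedia_search"

# Same merge as above: dict-union keeps the right-hand side on key conflicts,
# so explicit call arguments override metadata defaults (Python 3.9+ `|`).
merged = metadata_defaults.get(tool_name, {}) | call_arguments
assert merged == {"lang": "en", "limit": 1, "query": "Temporal workflows"}
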
16 changes: 14 additions & 2 deletions agents-api/agents_api/activities/task_steps/prompt_step.py
@@ -1,5 +1,6 @@
from beartype import beartype
from temporalio import activity
from temporalio.exceptions import ApplicationError

from ...clients import (
    litellm,  # We don't directly import `acompletion` so we can mock it
@@ -63,18 +64,29 @@ async def prompt_step(context: StepContext) -> StepOutcome:
else:
passed_settings: dict = {}

# Wrap the prompt in a list if it is not already
if isinstance(prompt, str):
prompt = [{"role": "user", "content": prompt}]

completion_data: dict = {
"model": agent_model,
"tools": formatted_agent_tools or None,
("messages" if isinstance(prompt, list) else "prompt"): prompt,
"messages": prompt,
**agent_default_settings,
**passed_settings,
}

response = await litellm.acompletion(
**completion_data,
)

if context.current_step.unwrap:
if response.choices[0].finish_reason == "tool_calls":
raise ApplicationError("Tool calls cannot be unwrapped")

response = response.choices[0].message.content

return StepOutcome(
-        output=response.model_dump(),
+        output=response.model_dump() if hasattr(response, "model_dump") else response,
next=None,
)
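
A rough sketch of how the new unwrap branch changes the step output, using a stubbed response object in place of litellm's (the helper below is illustrative, not part of the PR):

# Illustrative only: mirrors the unwrap handling above with a stubbed response.
from types import SimpleNamespace

def to_step_output(response, unwrap: bool):
    if unwrap:
        # In the activity this raises ApplicationError; a plain RuntimeError
        # keeps the sketch free of any Temporal dependency.
        if response.choices[0].finish_reason == "tool_calls":
            raise RuntimeError("Tool calls cannot be unwrapped")
        response = response.choices[0].message.content  # plain str from here on

    # Strings have no model_dump(), hence the hasattr() guard in the activity.
    return response.model_dump() if hasattr(response, "model_dump") else response

stub = SimpleNamespace(
    choices=[
        SimpleNamespace(
            finish_reason="stop",
            message=SimpleNamespace(content="Hello!"),
        )
    ]
)
assert to_step_output(stub, unwrap=True) == "Hello!"
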
22 changes: 13 additions & 9 deletions agents-api/agents_api/activities/task_steps/tool_call_step.py
@@ -3,9 +3,10 @@

from beartype import beartype
from temporalio import activity
from temporalio.exceptions import ApplicationError

-from ...activities.task_steps import base_evaluate
-from ...autogen.openapi_model import ToolCallStep
+from ...activities.task_steps.base_evaluate import base_evaluate
+from ...autogen.openapi_model import Tool, ToolCallStep
from ...common.protocol.tasks import (
StepContext,
StepOutcome,
@@ -26,24 +27,27 @@ def generate_call_id():
async def tool_call_step(context: StepContext) -> StepOutcome:
assert isinstance(context.current_step, ToolCallStep)

-    tool_type, tool_name = context.current_step.tool.split(".")
+    tools: list[Tool] = context.tools
+    tool_name = context.current_step.tool

tool = next((t for t in tools if t.name == tool_name), None)

if tool is None:
raise ApplicationError(f"Tool {tool_name} not found in the toolset")

arguments = await base_evaluate(
context.current_step.arguments, context.model_dump()
)

-    tools = context.execution_input.tools
-
-    assert tool_name in [tool.name for tool in tools], f"Tool {tool_name} not found"

call_id = generate_call_id()

tool_call = {
-        tool_type: {
+        tool.type: {
"arguments": arguments,
"name": tool_name,
},
"id": call_id,
"type": tool_type,
"type": tool.type,
}

return StepOutcome(output=tool_call)
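
For instance, assuming a tool whose type is "integration" and whose name is web_search (both hypothetical), the StepOutcome output assembled above would look roughly like:

# Hypothetical output shape; tool name, type, and arguments are illustrative,
# and the id placeholder stands in for whatever generate_call_id() returns.
example_tool_call = {
    "integration": {
        "arguments": {"query": "temporal python sdk"},
        "name": "web_search",
    },
    "id": "call_XXXXXXXXXXXXXXXXXXXXXXXX",
    "type": "integration",
}
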
10 changes: 10 additions & 0 deletions agents-api/agents_api/autogen/Common.py
Expand Up @@ -9,6 +9,16 @@
from pydantic import AwareDatetime, BaseModel, ConfigDict, Field, RootModel


class JinjaTemplate(RootModel[str]):
model_config = ConfigDict(
populate_by_name=True,
)
root: str
"""
A valid jinja template.
"""


class Limit(RootModel[int]):
model_config = ConfigDict(
populate_by_name=True,
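
Aside: the new JinjaTemplate model is a thin pydantic v2 RootModel over str. A minimal standalone sketch of how it behaves (not part of the PR):

# Standalone sketch of the generated RootModel; the real class also sets
# populate_by_name in its model_config.
from pydantic import RootModel

class JinjaTemplate(RootModel[str]):
    root: str  # a valid jinja template string

tpl = JinjaTemplate(root="Hello, {{ user.name }}!")
assert tpl.root == "Hello, {{ user.name }}!"
assert tpl.model_dump() == "Hello, {{ user.name }}!"
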
50 changes: 50 additions & 0 deletions agents-api/agents_api/autogen/Sessions.py
Expand Up @@ -43,6 +43,16 @@ class CreateSessionRequest(BaseModel):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)

If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
metadata: dict[str, Any] | None = None


@@ -70,6 +80,16 @@ class PatchSessionRequest(BaseModel):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)

If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
metadata: dict[str, Any] | None = None


@@ -97,6 +117,16 @@ class Session(BaseModel):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)

If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})]
metadata: dict[str, Any] | None = None
created_at: Annotated[AwareDatetime, Field(json_schema_extra={"readOnly": True})]
@@ -160,6 +190,16 @@ class UpdateSessionRequest(BaseModel):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)

If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
metadata: dict[str, Any] | None = None


@@ -194,6 +234,16 @@ class CreateOrUpdateSessionRequest(CreateSessionRequest):
"""
Action to start on context window overflow
"""
forward_tool_results: StrictBool | None = None
"""
Whether to forward the tool results to the model when available.
"true" => always forward
"false" => never forward
null => forward if applicable (default)

If a tool call is made, the tool's output will be sent back to the model as the model's input.
If a tool call is not made, the model's output will be returned as is.
"""
metadata: dict[str, Any] | None = None


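
Finally, forward_tool_results on each session model is tri-state (True / False / None). A sketch of how a consumer might apply the semantics described in the docstrings above; should_forward_tool_results is a hypothetical helper, not something the PR adds:

# Illustrative only: applies the tri-state semantics from the docstrings.
def should_forward_tool_results(flag: bool | None, tool_call_made: bool) -> bool:
    if flag is False:         # never forward
        return False
    if flag is True:          # always forward (when there is a tool result to forward)
        return tool_call_made
    return tool_call_made     # None (default): forward if applicable

assert should_forward_tool_results(None, tool_call_made=True) is True
assert should_forward_tool_results(False, tool_call_made=True) is False
assert should_forward_tool_results(True, tool_call_made=False) is False
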