Skip to content

Commit

Permalink
Merge pull request #420 from Pingdred/tool_agent_fix
Browse files Browse the repository at this point in the history
Fix use of the fake tool `none_of_the_others`
  • Loading branch information
pieroit authored Aug 21, 2023
2 parents 0fd0734 + 2055709 commit e2d94d2
Show file tree
Hide file tree
Showing 3 changed files with 49 additions and 39 deletions.
76 changes: 43 additions & 33 deletions core/cat/looking_glass/agent_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,47 +124,57 @@ def execute_agent(self, agent_input):
# " ".join([prompt_prefix, prompt_format_instructions, prompt_suffix]))


# Try to get information from tools if there is some allowed
allowed_tools = mad_hatter.execute_hook("agent_allowed_tools")
tools_result = None

# Try to get information from tools if there is some allowed
if len(allowed_tools) > 0:

log(f"{len(allowed_tools)} allowed tools retrived.", "DEBUG")

try:
tools_result = self.execute_tool_agent(agent_input, allowed_tools)

# If tools_result["output"] is None the LLM has used the fake tool none_of_the_others
# so no relevant information has been obtained from the tools.
if tools_result["output"] != None:

# Extract of intermediate steps in the format ((tool_name, tool_input), output)
used_tools = list(map(lambda x:((x[0].tool, x[0].tool_input), x[1]), tools_result["intermediate_steps"]))

# Get the name of the tools that have return_direct
return_direct_tools = []
for t in allowed_tools:
if t.return_direct:
return_direct_tools.append(t.name)

# execute_tool_agent returns immediately when a tool with return_direct is called,
# so if one is used it is definitely the last one used
if used_tools[-1][0][0] in return_direct_tools:
# intermediate_steps still contains the information of all the tools used even if their output is not returned
tools_result["intermediate_steps"] = used_tools
return tools_result

#Adding the tools_output key in agent input, needed by the memory chain
agent_input["tools_output"] = "## Tools output: \n" + tools_result["output"] if tools_result["output"] else ""

# Execute the memory chain
out = self.execute_memory_chain(agent_input, prompt_prefix, prompt_suffix)

# If some tools are used the intermediate step are added to the agent output
out["intermediate_steps"] = used_tools

#Early return
return out

except Exception as e:
error_description = str(e)
log(error_description, "ERROR")

#If an exception occurs in execute_tool_agent, or there are no allowed tools, execute only the memory chain

#Adding the tools_output key in agent input, needed by the memory chain
if tools_result != None:

# Extract of intermediate steps in the format ((tool_name, tool_input), output)
used_tools = list(map(lambda x:((x[0].tool, x[0].tool_input), x[1]), tools_result["intermediate_steps"]))

# Get the name of the tools that have return_direct
return_direct_tools = []
for t in allowed_tools:
if t.return_direct:
return_direct_tools.append(t.name)

# execute_tool_agent returns immediately when a tool with return_direct is called,
# so if one is used it is definitely the last one used
if used_tools[-1][0][0] in return_direct_tools:
# intermediate_steps still contains the information of all the tools used even if their output is not returned
tools_result["intermediate_steps"] = used_tools
return tools_result

# If tools_result["output"] is None the LLM has used the fake tool none_of_the_others
# so no relevant information has been obtained from the tools.
agent_input["tools_output"] = "## Tools output: \n" + tools_result["output"] if tools_result["output"] else ""

# Execute the memory chain
out = self.execute_memory_chain(agent_input, prompt_prefix, prompt_suffix)

# If some tools are used the intermediate step are added to the agent output
out["intermediate_steps"] = used_tools
else:
agent_input["tools_output"] = ""
# Execute the memory chain
out = self.execute_memory_chain(agent_input, prompt_prefix, prompt_suffix)
agent_input["tools_output"] = ""
# Execute the memory chain
out = self.execute_memory_chain(agent_input, prompt_prefix, prompt_suffix)

return out
2 changes: 1 addition & 1 deletion core/cat/looking_glass/cheshire_cat.py
Original file line number Diff line number Diff line change
Expand Up @@ -376,7 +376,7 @@ def __call__(self, user_message_json):
# We grab the LLM output here anyway, so small and
# non instruction-fine-tuned models can still be used.
error_description = str(e)
log("LLM does not respect prompt instructions", "ERROR")

log(error_description, "ERROR")
if not "Could not parse LLM output: `" in error_description:
raise e
Expand Down
10 changes: 5 additions & 5 deletions core/cat/looking_glass/output_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,17 +23,17 @@ def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
if not match:
raise OutputParserException(f"Could not parse LLM output: `{llm_output}`")

# Check whether the agent decided that no tool is useful
if "none_of_the_others" in llm_output:
# Extract action
action = match.group(1).strip()
action_input = match.group(2)

if action == "none_of_the_others":
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": None},
log=llm_output,
)

# Extract action
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)

0 comments on commit e2d94d2

Please sign in to comment.