From c2b8dc87f6b012e307e1a6a9228c37198f00403f Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Fri, 18 Oct 2024 02:37:24 +1100 Subject: [PATCH 01/13] refactor: update FunctionTool to generate OpenAI tool schema This commit refactors the `FunctionTool` class in the `function_tool.py` file. It adds a new method `generate_openai_tool_schema` that uses an optional assistant model to generate the OpenAI tool schema for the specified function. If no assistant model is provided, it defaults to creating a GPT_4O_MINI model. The function's source code is used to generate a docstring and schema, which are validated before returning the final schema. If schema generation or validation fails, the process retries up to two times. This refactor improves the functionality and flexibility of the `FunctionTool` class. --- camel/toolkits/function_tool.py | 156 +++++++++++++++++++++++++++++++- 1 file changed, 155 insertions(+), 1 deletion(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index 804bc894e..ce9321296 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -12,7 +12,7 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== import warnings -from inspect import Parameter, signature +from inspect import Parameter, signature, getsource from typing import Any, Callable, Dict, Mapping, Optional, Tuple from docstring_parser import parse @@ -22,6 +22,12 @@ from pydantic.fields import FieldInfo from camel.utils import get_pydantic_object_schema, to_pascal +from camel.models.base_model import BaseModelBackend +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType +from camel.configs import ChatGPTConfig +from camel.messages import BaseMessage +from camel.agents import ChatAgent def _remove_a_key(d: Dict, remove_key: Any) -> None: @@ -142,6 +148,68 @@ def _create_mol(name, field): } return openai_tool_schema +def docstring_generation( + code: str, + model: BaseModelBackend, +) -> str: + """Generates a docstring for a given function code using the provided model. + + Args: + code (str): The source code of the function. + model (BaseModelBackend): The model used for generating the docstring. + + Returns: + str: The generated docstring. + """ + # Create the docstring prompt + docstring_prompt = ''' + **Role**: Generate professional Python docstrings conforming to PEP 8/PEP 257. + + **Requirements**: + - Use appropriate format: reST, Google, or NumPy, as needed. + - Include parameters, return values, and exceptions. + - Reference any existing docstring in the function and retain useful information. + + **Input**: Python function. + + **Output**: Docstring content (plain text, no code markers). + + **Example:** + + Input: + ```python + def add(a: int, b: int) -> int: + return a + b + ``` + + Output: + Adds two numbers. + Args: + a (int): The first number. + b (int): The second number. + + Returns: + int: The sum of the two numbers. + + **Task**: Generate a docstring for the function below. 
+ + ''' + # Initialize assistant with system message and model + assistant_sys_msg = BaseMessage.make_assistant_message( + role_name="Assistant", + content="You are a helpful assistant.", + ) + docstring_assistant = ChatAgent(assistant_sys_msg, model=model, token_limit=4096) + + # Create user message to prompt the assistant + user_msg = BaseMessage.make_user_message( + role_name="User", + content=docstring_prompt + code, + ) + + # Get the response containing the generated docstring + response = docstring_assistant.step(user_msg) + return response.msg.content class FunctionTool: r"""An abstraction of a function that OpenAI chat models can call. See @@ -162,11 +230,23 @@ def __init__( self, func: Callable, openai_tool_schema: Optional[Dict[str, Any]] = None, + schema_assistant: Optional[BaseModelBackend] = None, ) -> None: self.func = func self.openai_tool_schema = openai_tool_schema or get_openai_tool_schema( func ) + try: + self.validate_openai_tool_schema(self.openai_tool_schema) + except Exception as e: + print( + f"Warning: No model provided. Use GPT_4O_MINI to generate the schema \ + for {self.func.__name__}. Attempting to generate one using LLM." + ) + schema = self.generate_openai_tool_schema(schema_assistant) + if schema: + self.openai_tool_schema = schema + @staticmethod def validate_openai_tool_schema( @@ -361,6 +441,80 @@ def set_parameter(self, param_name: str, value: Dict[str, Any]): self.openai_tool_schema["function"]["parameters"]["properties"][ param_name ] = value + + def generate_openai_tool_schema( + self, + schema_assistant: Optional[BaseModelBackend]=None, + ) -> Dict[str, Any]: + r"""Generates an OpenAI tool schema for the specified function. + + This method generates the OpenAI tool schema using the provided + LLM assistant. If no assistant is provided, it defaults + to creating a GPT_4O_MINI model. The function's source code is used + to generate a docstring and schema, which are validated before + returning the final schema. 
If schema generation or validation fails, + the process retries up to two times. + + Args: + schema_assistant (Optional[BaseModelBackend]): An optional + assistant model to use for schema generation. If not provided, a + GPT_4O_MINI model will be created. + + Returns: + Dict[str, Any]: The generated OpenAI tool schema for the function. + + Raises: + ValueError: If schema generation or validation fails after the + maximum number of retries, a ValueError is raised, prompting manual + schema setting. + """ + if not schema_assistant: + print(f"Warning: No model provided. Use GPT_4O_MINI to generate the schema.") + try: + schema_assistant = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + model_config_dict=ChatGPTConfig(temperature=1.0).as_dict(), + ) + except Exception as e: + raise ValueError( + f"Failed to generate the OpenAI tool schema for function " + f"{self.func.__name__}. Please set the OpenAI tool schema manually." + ) from e + + function_string = getsource(self.func) + + max_retries = 2 + retries = 0 + + # Retry loop to handle schema generation and validation + while retries < max_retries: + try: + # Generate the docstring and the schema + docstring = docstring_generation(function_string, schema_assistant) + self.func.__doc__ = docstring + schema = get_openai_tool_schema(self.func) + + # Validate the schema + self.validate_openai_tool_schema(schema) + + print( + f"Successfully generated the OpenAI tool schema for the function " + f"{self.func.__name__}." + ) + return schema + + except Exception as e: + # If maximum retries reached, raise a ValueError + if retries == max_retries: + raise ValueError( + f"Failed to generate the OpenAI tool Schema. Please set " + f"the OpenAI tool schema for function {self.func.__name__} " + f"manually." + ) from e + + print(f"Schema validation failed. 
Retrying...") + retries += 1 @property def parameters(self) -> Dict[str, Any]: From bde54d884bea51e235f343a58f0dec87c25eb222 Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Fri, 18 Oct 2024 04:11:29 +1100 Subject: [PATCH 02/13] Add an unit test and an example --- camel/toolkits/__init__.py | 2 + camel/toolkits/function_tool.py | 13 +- .../generate_openai_tool_schema_example.py | 82 +++++++++++ .../test_generate_openai_tool_schema.py | 132 ++++++++++++++++++ 4 files changed, 222 insertions(+), 7 deletions(-) create mode 100644 examples/tool_call/generate_openai_tool_schema_example.py create mode 100644 test/toolkits/test_generate_openai_tool_schema.py diff --git a/camel/toolkits/__init__.py b/camel/toolkits/__init__.py index d1658959c..0a55c0a82 100644 --- a/camel/toolkits/__init__.py +++ b/camel/toolkits/__init__.py @@ -17,6 +17,7 @@ OpenAIFunction, get_openai_function_schema, get_openai_tool_schema, + generate_docstring, ) from .open_api_specs.security_config import openapi_security_config @@ -46,6 +47,7 @@ 'OpenAIFunction', 'get_openai_function_schema', 'get_openai_tool_schema', + 'generate_docstring' 'openapi_security_config', 'GithubToolkit', 'MathToolkit', diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index ce9321296..e8a0eb541 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -148,7 +148,7 @@ def _create_mol(name, field): } return openai_tool_schema -def docstring_generation( +def generate_docstring( code: str, model: BaseModelBackend, ) -> str: @@ -240,8 +240,8 @@ def __init__( self.validate_openai_tool_schema(self.openai_tool_schema) except Exception as e: print( - f"Warning: No model provided. Use GPT_4O_MINI to generate the schema \ - for {self.func.__name__}. Attempting to generate one using LLM." + f"Warning: No model provided. Use GPT_4O_MINI to generate the schema " + f"for {self.func.__name__}. Attempting to generate one using LLM." 
) schema = self.generate_openai_tool_schema(schema_assistant) if schema: @@ -491,7 +491,7 @@ def generate_openai_tool_schema( while retries < max_retries: try: # Generate the docstring and the schema - docstring = docstring_generation(function_string, schema_assistant) + docstring = generate_docstring(function_string, schema_assistant) self.func.__doc__ = docstring schema = get_openai_tool_schema(self.func) @@ -505,16 +505,15 @@ def generate_openai_tool_schema( return schema except Exception as e: - # If maximum retries reached, raise a ValueError + retries += 1 if retries == max_retries: raise ValueError( f"Failed to generate the OpenAI tool Schema. Please set " f"the OpenAI tool schema for function {self.func.__name__} " f"manually." ) from e - print(f"Schema validation failed. Retrying...") - retries += 1 + @property def parameters(self) -> Dict[str, Any]: diff --git a/examples/tool_call/generate_openai_tool_schema_example.py b/examples/tool_call/generate_openai_tool_schema_example.py new file mode 100644 index 000000000..f87fbbcbc --- /dev/null +++ b/examples/tool_call/generate_openai_tool_schema_example.py @@ -0,0 +1,82 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== + +from camel.agents import ChatAgent +from camel.configs.openai_config import ChatGPTConfig +from camel.messages import BaseMessage +from camel.models import ModelFactory +from camel.toolkits import FunctionTool +from camel.types import ModelPlatformType, ModelType +import os + +# Set OpenAI API key +api_key = os.getenv("OPENAI_API_KEY") +if not api_key: + raise ValueError("API key not found in environment variables.") + +# Define a function which does't have a docstring +def get_perfect_square(n: int) -> int: + return n ** 2 + +# Create a model instance +model_config_dict = ChatGPTConfig(temperature=1.0).as_dict() +agent_model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + model_config_dict=model_config_dict, +) + +# Create a FunctionTool with the function +function_tool = FunctionTool(get_perfect_square, schema_assistant=agent_model) +print("\nGenerated OpenAI Tool Schema:") +print(function_tool.get_openai_tool_schema()) + +# Set system message for the assistant +assistant_sys_msg = BaseMessage.make_assistant_message( + role_name="Assistant", + content="You are a helpful assistant." +) + +# Create a ChatAgent with the tool +camel_agent = ChatAgent( + system_message=assistant_sys_msg, + model=agent_model, + tools=[function_tool] +) +camel_agent.reset() + +# Define a user message +user_prompt = "What is the perfect square of 2024?" +user_msg = BaseMessage.make_user_message( + role_name="User", + content=user_prompt +) + +# Get response from the assistant +response = camel_agent.step(user_msg) +print("\nAssistant Response:") +print(response.msg.content) + +print(""" +=============================================================================== +Warning: No model provided. Use GPT_4O_MINI to generate the schema for get_perfect_square. Attempting to generate one using LLM. +Successfully generated the OpenAI tool schema for the function get_perfect_square. 
+ +Generated OpenAI Tool Schema: +{'type': 'function', 'function': {'name': 'get_perfect_square', 'description': 'Calculates the perfect square of a given integer.', 'parameters': {'properties': {'n': {'type': 'integer', 'description': 'The integer to be squared.'}}, 'required': ['n'], 'type': 'object'}}} + +[FunctionCallingRecord(func_name='get_perfect_square', args={'n': 2024}, +result={'result': 4096576})] +=============================================================================== +""") diff --git a/test/toolkits/test_generate_openai_tool_schema.py b/test/toolkits/test_generate_openai_tool_schema.py new file mode 100644 index 000000000..d7a7cb3dc --- /dev/null +++ b/test/toolkits/test_generate_openai_tool_schema.py @@ -0,0 +1,132 @@ +from unittest.mock import MagicMock, patch +import pytest +from camel.models.base_model import BaseModelBackend +from camel.messages import BaseMessage +from camel.agents import ChatAgent +from camel.toolkits import FunctionTool, generate_docstring + + +def sample_function(a: int, b: str = "default") -> bool: + """ + This function checks if the integer is positive and if the string is non-empty. + + Args: + a (int): The integer value to check. + b (str): The string to verify. Default is "default". + + Returns: + bool: True if both conditions are met, otherwise False. + """ + return a > 0 and len(b) > 0 + +@patch.object(ChatAgent, 'step') +@patch('camel.models.ModelFactory.create') +def test_generate_openai_tool_schema(mock_create_model, mock_chat_agent_step): + # Mock model instance + mock_model_instance = MagicMock() + mock_model_instance.model_config_dict = {} + mock_create_model.return_value = mock_model_instance + + # Mock ChatAgent's step method return value + mock_message = MagicMock() + mock_message.content = ( + "This function checks if the integer is positive and if the string is non-empty.\n" + "Args:\n a (int): The integer value to check.\n b (str): The string to verify. 
Default is 'default'.\n" + "Returns:\n bool: True if both conditions are met, otherwise False." + ) + mock_response = MagicMock() + mock_response.msgs = [mock_message] + mock_response.msg = mock_message + mock_response.terminated = True + mock_chat_agent_step.return_value = mock_response + + # Create FunctionTool instance + function_tool = FunctionTool(func=sample_function) + + # Generate schema + try: + schema = function_tool.generate_openai_tool_schema(schema_assistant=mock_model_instance) + except Exception as e: + pytest.fail(f"generate_openai_tool_schema() raised an exception unexpectedly: {e}") + + if schema is None: + pytest.fail("generate_openai_tool_schema() returned None unexpectedly.") + + # Adjusted expected schema with 'default' key in 'b' parameter + expected_schema = { + 'type': 'function', + 'function': { + 'name': 'sample_function', + 'description': 'This function checks if the integer is positive and if the string is non-empty.', + 'parameters': { + 'type': 'object', + 'properties': { + 'a': { + 'type': 'integer', + 'description': 'The integer value to check.' + }, + 'b': { + 'type': 'string', + 'description': "The string to verify. 
Default is 'default'.", + 'default': 'default' # Include this line + } + }, + 'required': ['a'] + } + } + } + + assert schema == expected_schema + + +@pytest.fixture +def mock_model(): + # Create a mock model to simulate BaseModelBackend behavior + mock_model = MagicMock(spec=BaseModelBackend) + mock_model.model_type = MagicMock() + mock_model.model_type.value_for_tiktoken = "mock_value_for_tiktoken" + mock_model.model_config_dict = {} + mock_model.value_for_tiktoken = MagicMock(return_value=1000) + return mock_model + +@patch.object(ChatAgent, 'step') +def test_generate_docstring(mock_chat_agent_step, mock_model): + code = """ + def sample_function(a: int, b: str = "default") -> bool: + return a > 0 and len(b) > 0 + """ + + # Ensure mock_model has required attributes + mock_model.model_type = MagicMock() + mock_model.model_type.value_for_tiktoken = "mock_value_for_tiktoken" + mock_model.model_config_dict = {} + mock_model.value_for_tiktoken = MagicMock(return_value=1000) + + # Mock ChatAgent's step method return value + mock_message = MagicMock() + mock_message.content = ( + "This function checks if the integer is positive and if the string is non-empty.\n" + "Args:\n a (int): The integer value to check.\n b (str): The string to verify. Default is 'default'.\n" + "Returns:\n bool: True if both conditions are met, otherwise False." 
+ ) + mock_response = MagicMock() + mock_response.msgs = [mock_message] + mock_response.msg = mock_message + mock_response.terminated = True + mock_chat_agent_step.return_value = mock_response + + # Generate docstring + try: + docstring = generate_docstring(code, mock_model) + except AttributeError as e: + pytest.fail(f"generate_docstring() raised AttributeError unexpectedly: {e}") + except RuntimeError as e: + pytest.fail(f"generate_docstring() raised RuntimeError unexpectedly: {e}") + + expected_docstring = ( + "This function checks if the integer is positive and if the string is non-empty.\n" + "Args:\n    a (int): The integer value to check.\n    b (str): The string to verify. Default is 'default'.\n" + "Returns:\n    bool: True if both conditions are met, otherwise False." + ) + + assert docstring == expected_docstring \ No newline at end of file From 6d087718728ee72ed66d755366cabed1252ad885 Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Fri, 18 Oct 2024 16:00:18 +1100 Subject: [PATCH 03/13] Adjusted the code to comply with PEP 8 line length standards. Added the `use_schema_assistant` parameter in `FunctionTool` to control whether to use LLM to generate the schema, with a default value of `False`. 
--- camel/toolkits/__init__.py | 1 - camel/toolkits/function_tool.py | 91 +++++++++++++------ .../generate_openai_tool_schema_example.py | 18 +++- .../test_generate_openai_tool_schema.py | 61 +++++++++---- 4 files changed, 119 insertions(+), 52 deletions(-) diff --git a/camel/toolkits/__init__.py b/camel/toolkits/__init__.py index 0a55c0a82..f55dd84a9 100644 --- a/camel/toolkits/__init__.py +++ b/camel/toolkits/__init__.py @@ -47,7 +47,6 @@ 'OpenAIFunction', 'get_openai_function_schema', 'get_openai_tool_schema', - 'generate_docstring' 'openapi_security_config', 'GithubToolkit', 'MathToolkit', diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index e8a0eb541..53daedf6d 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -152,7 +152,7 @@ def generate_docstring( code: str, model: BaseModelBackend, ) -> str: - """Generates a docstring for a given function code using the provided model. + """Generates a docstring for a given function code using LLM. Args: code (str): The source code of the function. @@ -163,12 +163,14 @@ def generate_docstring( """ # Create the docstring prompt docstring_prompt = ''' - **Role**: Generate professional Python docstrings conforming to PEP 8/PEP 257. + **Role**: Generate professional Python docstrings conforming to + PEP 8/PEP 257. **Requirements**: - Use appropriate format: reST, Google, or NumPy, as needed. - Include parameters, return values, and exceptions. - - Reference any existing docstring in the function and retain useful information. + - Reference any existing docstring in the function and + retain useful information. **Input**: Python function. 
@@ -199,7 +201,11 @@ def add(a: int, b: int) -> int: role_name="Assistant", content="You are a helpful assistant.", ) - docstring_assistant = ChatAgent(assistant_sys_msg, model=model, token_limit=4096) + docstring_assistant = ChatAgent( + assistant_sys_msg, + model=model, + token_limit=4096 + ) # Create user message to prompt the assistant user_msg = BaseMessage.make_user_message( @@ -224,6 +230,14 @@ class FunctionTool: openai_tool_schema (Optional[Dict[str, Any]], optional): A user-defined openai tool schema to override the default result. (default: :obj:`None`) + schema_assistant (Optional[BaseModelBackend], optional): An assistant + (e.g., an LLM model) used to generate the schema if no valid + schema is provided and use_schema_assistant is enabled. + (default: :obj:`None`) + use_schema_assistant (bool, optional): Whether to enable the use of + the schema_assistant to automatically generate the schema if + validation fails or no valid schema is provided. + (default: :obj:`False`) """ def __init__( @@ -231,21 +245,33 @@ def __init__( func: Callable, openai_tool_schema: Optional[Dict[str, Any]] = None, schema_assistant: Optional[BaseModelBackend] = None, + use_schema_assistant: bool = False, ) -> None: self.func = func - self.openai_tool_schema = openai_tool_schema or get_openai_tool_schema( - func - ) - try: - self.validate_openai_tool_schema(self.openai_tool_schema) - except Exception as e: - print( - f"Warning: No model provided. Use GPT_4O_MINI to generate the schema " - f"for {self.func.__name__}. Attempting to generate one using LLM." 
- ) - schema = self.generate_openai_tool_schema(schema_assistant) - if schema: - self.openai_tool_schema = schema + self.openai_tool_schema = openai_tool_schema + + if not self.openai_tool_schema: + self.openai_tool_schema = get_openai_tool_schema(func) + + if use_schema_assistant: + try: + self.validate_openai_tool_schema(self.openai_tool_schema) + except Exception as e: + print( + f"Warning: No valid schema found " + f"for {self.func.__name__}. " + f"Attempting to generate one using LLM." + ) + schema = self.generate_openai_tool_schema( + schema_assistant + ) + if schema: + self.openai_tool_schema = schema + else: + raise ValueError( + f"Failed to generate valid schema for " + f"{self.func.__name__}" + ) @staticmethod @@ -465,21 +491,25 @@ def generate_openai_tool_schema( Raises: ValueError: If schema generation or validation fails after the - maximum number of retries, a ValueError is raised, prompting manual - schema setting. + maximum number of retries, a ValueError is raised, + prompting manual schema setting. """ if not schema_assistant: - print(f"Warning: No model provided. Use GPT_4O_MINI to generate the schema.") + print( + f"Warning: No model provided. " + f"Use GPT_4O_MINI to generate the schema." + ) try: schema_assistant = ModelFactory.create( model_platform=ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O_MINI, - model_config_dict=ChatGPTConfig(temperature=1.0).as_dict(), + model_config_dict=ChatGPTConfig(temperature=1.0).as_dict() ) except Exception as e: raise ValueError( - f"Failed to generate the OpenAI tool schema for function " - f"{self.func.__name__}. Please set the OpenAI tool schema manually." + f"Failed to generate the OpenAI tool schema for " + f"the function {self.func.__name__}. " + f"Please set the OpenAI tool schema manually." 
) from e function_string = getsource(self.func) @@ -491,7 +521,10 @@ def generate_openai_tool_schema( while retries < max_retries: try: # Generate the docstring and the schema - docstring = generate_docstring(function_string, schema_assistant) + docstring = generate_docstring( + function_string, + schema_assistant + ) self.func.__doc__ = docstring schema = get_openai_tool_schema(self.func) @@ -499,8 +532,8 @@ def generate_openai_tool_schema( self.validate_openai_tool_schema(schema) print( - f"Successfully generated the OpenAI tool schema for the function " - f"{self.func.__name__}." + f"Successfully generated the OpenAI tool schema for " + f"the function {self.func.__name__}." ) return schema @@ -508,9 +541,9 @@ def generate_openai_tool_schema( retries += 1 if retries == max_retries: raise ValueError( - f"Failed to generate the OpenAI tool Schema. Please set " - f"the OpenAI tool schema for function {self.func.__name__} " - f"manually." + f"Failed to generate the OpenAI tool Schema. " + f"Please set the OpenAI tool schema for " + f"function {self.func.__name__} manually." ) from e print(f"Schema validation failed. 
Retrying...") diff --git a/examples/tool_call/generate_openai_tool_schema_example.py b/examples/tool_call/generate_openai_tool_schema_example.py index f87fbbcbc..0c5a63c49 100644 --- a/examples/tool_call/generate_openai_tool_schema_example.py +++ b/examples/tool_call/generate_openai_tool_schema_example.py @@ -38,7 +38,11 @@ def get_perfect_square(n: int) -> int: ) # Create a FunctionTool with the function -function_tool = FunctionTool(get_perfect_square, schema_assistant=agent_model) +function_tool = FunctionTool( + get_perfect_square, + schema_assistant=agent_model, + use_schema_assistant=True +) print("\nGenerated OpenAI Tool Schema:") print(function_tool.get_openai_tool_schema()) @@ -70,11 +74,17 @@ def get_perfect_square(n: int) -> int: print(""" =============================================================================== -Warning: No model provided. Use GPT_4O_MINI to generate the schema for get_perfect_square. Attempting to generate one using LLM. -Successfully generated the OpenAI tool schema for the function get_perfect_square. +Warning: No model provided. Use GPT_4O_MINI to generate the schema for +the function get_perfect_square. Attempting to generate one using LLM. +Successfully generated the OpenAI tool schema for +the function get_perfect_square. 
Generated OpenAI Tool Schema: -{'type': 'function', 'function': {'name': 'get_perfect_square', 'description': 'Calculates the perfect square of a given integer.', 'parameters': {'properties': {'n': {'type': 'integer', 'description': 'The integer to be squared.'}}, 'required': ['n'], 'type': 'object'}}} +{'type': 'function', 'function': {'name': 'get_perfect_square', +'description': 'Calculates the perfect square of a given integer.', +'parameters': {'properties': {'n': {'type': 'integer', +'description': 'The integer to be squared.'}}, 'required': ['n'], +'type': 'object'}}} [FunctionCallingRecord(func_name='get_perfect_square', args={'n': 2024}, result={'result': 4096576})] diff --git a/test/toolkits/test_generate_openai_tool_schema.py b/test/toolkits/test_generate_openai_tool_schema.py index d7a7cb3dc..3b557c801 100644 --- a/test/toolkits/test_generate_openai_tool_schema.py +++ b/test/toolkits/test_generate_openai_tool_schema.py @@ -8,7 +8,8 @@ def sample_function(a: int, b: str = "default") -> bool: """ - This function checks if the integer is positive and if the string is non-empty. + This function checks if the integer is positive and + if the string is non-empty. Args: a (int): The integer value to check. @@ -30,9 +31,13 @@ def test_generate_openai_tool_schema(mock_create_model, mock_chat_agent_step): # Mock ChatAgent's step method return value mock_message = MagicMock() mock_message.content = ( - "This function checks if the integer is positive and if the string is non-empty.\n" - "Args:\n a (int): The integer value to check.\n b (str): The string to verify. Default is 'default'.\n" - "Returns:\n bool: True if both conditions are met, otherwise False." + "This function checks if the integer is positive and if the string " + "is non-empty.\n" + "Args:\n" + " a (int): The integer value to check.\n" + " b (str): The string to verify. Default is 'default'.\n" + "Returns:\n" + " bool: True if both conditions are met, otherwise False." 
) mock_response = MagicMock() mock_response.msgs = [mock_message] @@ -45,19 +50,26 @@ def test_generate_openai_tool_schema(mock_create_model, mock_chat_agent_step): # Generate schema try: - schema = function_tool.generate_openai_tool_schema(schema_assistant=mock_model_instance) + schema = function_tool.generate_openai_tool_schema( + schema_assistant=mock_model_instance + ) except Exception as e: - pytest.fail(f"generate_openai_tool_schema() raised an exception unexpectedly: {e}") + pytest.fail(f"generate_openai_tool_schema() raised an exception: {e}") if schema is None: - pytest.fail("generate_openai_tool_schema() returned None unexpectedly.") + pytest.fail( + "generate_openai_tool_schema() returned None unexpectedly." + ) # Adjusted expected schema with 'default' key in 'b' parameter expected_schema = { 'type': 'function', 'function': { 'name': 'sample_function', - 'description': 'This function checks if the integer is positive and if the string is non-empty.', + 'description': ( + 'This function checks if the integer is positive ' + 'and if the string is non-empty.' + ), 'parameters': { 'type': 'object', 'properties': { @@ -67,8 +79,11 @@ def test_generate_openai_tool_schema(mock_create_model, mock_chat_agent_step): }, 'b': { 'type': 'string', - 'description': "The string to verify. Default is 'default'.", - 'default': 'default' # Include this line + 'description': ( + "The string to verify. " + "Default is 'default'." + ), + 'default': 'default' } }, 'required': ['a'] @@ -105,9 +120,12 @@ def sample_function(a: int, b: str = "default") -> bool: # Mock ChatAgent's step method return value mock_message = MagicMock() mock_message.content = ( - "This function checks if the integer is positive and if the string is non-empty.\n" - "Args:\n a (int): The integer value to check.\n b (str): The string to verify. Default is 'default'.\n" - "Returns:\n bool: True if both conditions are met, otherwise False." 
+ f"This function checks if the integer is positive and " + f"if the string is non-empty.\n" + f"Args:\n a (int): The integer value to check.\n " + f" b (str): The string to verify. Default is 'default'.\n" + f"Returns:\n bool: True if both conditions are met, " + f"otherwise False." ) mock_response = MagicMock() mock_response.msgs = [mock_message] @@ -119,14 +137,21 @@ def sample_function(a: int, b: str = "default") -> bool: try: docstring = generate_docstring(code, mock_model) except AttributeError as e: - pytest.fail(f"generate_docstring() raised AttributeError unexpectedly: {e}") + pytest.fail( + f"generate_docstring() raised AttributeError unexpectedly: {e}" + ) except RuntimeError as e: - pytest.fail(f"generate_docstring() raised RuntimeError unexpectedly: {e}") + pytest.fail( + f"generate_docstring() raised RuntimeError unexpectedly: {e}" + ) expected_docstring = ( - "This function checks if the integer is positive and if the string is non-empty.\n" - "Args:\n a (int): The integer value to check.\n b (str): The string to verify. Default is 'default'.\n" - "Returns:\n bool: True if both conditions are met, otherwise False." + f"This function checks if the integer is positive and " + f"if the string is non-empty.\n" + f"Args:\n a (int): The integer value to check.\n " + f" b (str): The string to verify. Default is 'default'.\n" + f"Returns:\n bool: True if both conditions are met, " + f"otherwise False." 
) assert docstring == expected_docstring \ No newline at end of file From 942b6e47e75d4f2e8cb1c73fc49989b6fa2e2ceb Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Fri, 18 Oct 2024 16:09:26 +1100 Subject: [PATCH 04/13] Updated .camel/toolkits/__init__.py according to the result of Pre Commit Check --- camel/toolkits/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/camel/toolkits/__init__.py b/camel/toolkits/__init__.py index f55dd84a9..b7454890f 100644 --- a/camel/toolkits/__init__.py +++ b/camel/toolkits/__init__.py @@ -47,6 +47,7 @@ 'OpenAIFunction', 'get_openai_function_schema', 'get_openai_tool_schema', + "generate_docstring", 'openapi_security_config', 'GithubToolkit', 'MathToolkit', From 21df8fa9ed03150cb9c6163d720252eedbcbc502 Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Fri, 18 Oct 2024 17:12:06 +1100 Subject: [PATCH 05/13] Modify the usage of openai_tool_schema in FunctionTool.__init__ --- camel/toolkits/function_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index 53daedf6d..7fb330634 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -250,7 +250,7 @@ def __init__( self.func = func self.openai_tool_schema = openai_tool_schema - if not self.openai_tool_schema: + if self.openai_tool_schema is not None: self.openai_tool_schema = get_openai_tool_schema(func) if use_schema_assistant: From fd071d6ad8038db2ff793c97b64ae9698ea43720 Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Fri, 18 Oct 2024 17:16:09 +1100 Subject: [PATCH 06/13] Fix typo --- camel/toolkits/function_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index 7fb330634..52b7515eb 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -250,7 +250,7 @@ def __init__( self.func = func 
self.openai_tool_schema = openai_tool_schema - if self.openai_tool_schema is not None: + if self.openai_tool_schema is None: self.openai_tool_schema = get_openai_tool_schema(func) if use_schema_assistant: From c8a3c53f19a6ff83beac244937981859d043f5dd Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Fri, 18 Oct 2024 17:34:01 +1100 Subject: [PATCH 07/13] Change the calling logic of generate_openai_tool_schema --- camel/toolkits/function_tool.py | 44 ++++++++++++++++----------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index 52b7515eb..a56e809e1 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -248,30 +248,28 @@ def __init__( use_schema_assistant: bool = False, ) -> None: self.func = func - self.openai_tool_schema = openai_tool_schema - - if self.openai_tool_schema is None: - self.openai_tool_schema = get_openai_tool_schema(func) - - if use_schema_assistant: - try: - self.validate_openai_tool_schema(self.openai_tool_schema) - except Exception as e: - print( - f"Warning: No valid schema found " - f"for {self.func.__name__}. " - f"Attempting to generate one using LLM." - ) - schema = self.generate_openai_tool_schema( - schema_assistant + self.openai_tool_schema = openai_tool_schema or get_openai_tool_schema( + func + ) + + if use_schema_assistant: + try: + # 验证 schema + self.validate_openai_tool_schema(self.openai_tool_schema) + except Exception as e: + print( + f"Warning: No valid schema found for " + f"{self.func.__name__}. " + f"Attempting to generate one using LLM." 
+ ) + schema = self.generate_openai_tool_schema(schema_assistant) + if schema: + self.openai_tool_schema = schema + else: + raise ValueError( + f"Failed to generate valid schema for " + f"{self.func.__name__}" ) - if schema: - self.openai_tool_schema = schema - else: - raise ValueError( - f"Failed to generate valid schema for " - f"{self.func.__name__}" - ) @staticmethod From 802d074b0f87f0831d420bd5dbfad48de43a505a Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Fri, 18 Oct 2024 17:41:33 +1100 Subject: [PATCH 08/13] Fixed: camel/toolkits/function_tool.py:468: error: Missing return statement --- camel/toolkits/function_tool.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index a56e809e1..38ec5226b 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -254,7 +254,6 @@ def __init__( if use_schema_assistant: try: - # 验证 schema self.validate_openai_tool_schema(self.openai_tool_schema) except Exception as e: print( @@ -544,6 +543,8 @@ def generate_openai_tool_schema( f"function {self.func.__name__} manually." ) from e print(f"Schema validation failed. Retrying...") + + return {} @property From a343510cedc2fc2bf4646dba460fac3c2b6e1731 Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Fri, 18 Oct 2024 22:23:49 +1100 Subject: [PATCH 09/13] Apply pre-commit fixes --- camel/toolkits/function_tool.py | 71 +++++++++---------- .../generate_openai_tool_schema_example.py | 23 +++--- .../test_generate_openai_tool_schema.py | 63 +++++++++------- 3 files changed, 82 insertions(+), 75 deletions(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index 38ec5226b..7c5728f72 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -12,7 +12,7 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== import warnings -from inspect import Parameter, signature, getsource +from inspect import Parameter, getsource, signature from typing import Any, Callable, Dict, Mapping, Optional, Tuple from docstring_parser import parse @@ -21,13 +21,13 @@ from pydantic import create_model from pydantic.fields import FieldInfo -from camel.utils import get_pydantic_object_schema, to_pascal -from camel.models.base_model import BaseModelBackend -from camel.models import ModelFactory -from camel.types import ModelPlatformType, ModelType +from camel.agents import ChatAgent from camel.configs import ChatGPTConfig from camel.messages import BaseMessage -from camel.agents import ChatAgent +from camel.models import ModelFactory +from camel.models.base_model import BaseModelBackend +from camel.types import ModelPlatformType, ModelType +from camel.utils import get_pydantic_object_schema, to_pascal def _remove_a_key(d: Dict, remove_key: Any) -> None: @@ -148,8 +148,9 @@ def _create_mol(name, field): } return openai_tool_schema + def generate_docstring( - code: str, + code: str, model: BaseModelBackend, ) -> str: """Generates a docstring for a given function code using LLM. @@ -202,9 +203,7 @@ def add(a: int, b: int) -> int: content="You are a helpful assistant.", ) docstring_assistant = ChatAgent( - assistant_sys_msg, - model=model, - token_limit=4096 + assistant_sys_msg, model=model, token_limit=4096 ) # Create user message to prompt the assistant @@ -212,11 +211,12 @@ def add(a: int, b: int) -> int: role_name="User", content=docstring_prompt + code, ) - + # Get the response containing the generated docstring response = docstring_assistant.step(user_msg) return response.msg.content + class FunctionTool: r"""An abstraction of a function that OpenAI chat models can call. See https://platform.openai.com/docs/api-reference/chat/create. 
@@ -230,12 +230,12 @@ class FunctionTool: openai_tool_schema (Optional[Dict[str, Any]], optional): A user-defined openai tool schema to override the default result. (default: :obj:`None`) - schema_assistant (Optional[BaseModelBackend], optional): An assistant - (e.g., an LLM model) used to generate the schema if no valid + schema_assistant (Optional[BaseModelBackend], optional): An assistant + (e.g., an LLM model) used to generate the schema if no valid schema is provided and use_schema_assistant is enabled. (default: :obj:`None`) - use_schema_assistant (bool, optional): Whether to enable the use of - the schema_assistant to automatically generate the schema if + use_schema_assistant (bool, optional): Whether to enable the use of + the schema_assistant to automatically generate the schema if validation fails or no valid schema is provided. (default: :obj:`False`) """ @@ -245,7 +245,7 @@ def __init__( func: Callable, openai_tool_schema: Optional[Dict[str, Any]] = None, schema_assistant: Optional[BaseModelBackend] = None, - use_schema_assistant: bool = False, + use_schema_assistant: Optional[bool] = False, ) -> None: self.func = func self.openai_tool_schema = openai_tool_schema or get_openai_tool_schema( @@ -255,7 +255,7 @@ def __init__( if use_schema_assistant: try: self.validate_openai_tool_schema(self.openai_tool_schema) - except Exception as e: + except Exception: print( f"Warning: No valid schema found for " f"{self.func.__name__}. 
" @@ -270,7 +270,6 @@ def __init__( f"{self.func.__name__}" ) - @staticmethod def validate_openai_tool_schema( openai_tool_schema: Dict[str, Any], @@ -464,43 +463,43 @@ def set_parameter(self, param_name: str, value: Dict[str, Any]): self.openai_tool_schema["function"]["parameters"]["properties"][ param_name ] = value - + def generate_openai_tool_schema( self, - schema_assistant: Optional[BaseModelBackend]=None, + schema_assistant: Optional[BaseModelBackend] = None, ) -> Dict[str, Any]: r"""Generates an OpenAI tool schema for the specified function. - This method generates the OpenAI tool schema using the provided - LLM assistant. If no assistant is provided, it defaults + This method generates the OpenAI tool schema using the provided + LLM assistant. If no assistant is provided, it defaults to creating a GPT_4O_MINI model. The function's source code is used - to generate a docstring and schema, which are validated before - returning the final schema. If schema generation or validation fails, + to generate a docstring and schema, which are validated before + returning the final schema. If schema generation or validation fails, the process retries up to two times. Args: - schema_assistant (Optional[BaseModelBackend]): An optional - assistant model to use for schema generation. If not provided, a + schema_assistant (Optional[BaseModelBackend]): An optional + assistant model to use for schema generation. If not provided, a GPT_4O_MINI model will be created. Returns: Dict[str, Any]: The generated OpenAI tool schema for the function. Raises: - ValueError: If schema generation or validation fails after the - maximum number of retries, a ValueError is raised, + ValueError: If schema generation or validation fails after the + maximum number of retries, a ValueError is raised, prompting manual schema setting. """ if not schema_assistant: print( - f"Warning: No model provided. " - f"Use GPT_4O_MINI to generate the schema." + "Warning: No model provided. 
" + "Use GPT_4O_MINI to generate the schema." ) try: schema_assistant = ModelFactory.create( model_platform=ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O_MINI, - model_config_dict=ChatGPTConfig(temperature=1.0).as_dict() + model_config_dict=ChatGPTConfig(temperature=1.0).as_dict(), ) except Exception as e: raise ValueError( @@ -510,7 +509,7 @@ def generate_openai_tool_schema( ) from e function_string = getsource(self.func) - + max_retries = 2 retries = 0 @@ -519,8 +518,7 @@ def generate_openai_tool_schema( try: # Generate the docstring and the schema docstring = generate_docstring( - function_string, - schema_assistant + function_string, schema_assistant ) self.func.__doc__ = docstring schema = get_openai_tool_schema(self.func) @@ -542,10 +540,9 @@ def generate_openai_tool_schema( f"Please set the OpenAI tool schema for " f"function {self.func.__name__} manually." ) from e - print(f"Schema validation failed. Retrying...") - - return {} + print("Schema validation failed. Retrying...") + return {} @property def parameters(self) -> Dict[str, Any]: diff --git a/examples/tool_call/generate_openai_tool_schema_example.py b/examples/tool_call/generate_openai_tool_schema_example.py index 0c5a63c49..59617a23b 100644 --- a/examples/tool_call/generate_openai_tool_schema_example.py +++ b/examples/tool_call/generate_openai_tool_schema_example.py @@ -12,22 +12,25 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +import os + from camel.agents import ChatAgent from camel.configs.openai_config import ChatGPTConfig from camel.messages import BaseMessage from camel.models import ModelFactory from camel.toolkits import FunctionTool from camel.types import ModelPlatformType, ModelType -import os # Set OpenAI API key api_key = os.getenv("OPENAI_API_KEY") if not api_key: raise ValueError("API key not found in environment variables.") + # Define a function which does't have a docstring def get_perfect_square(n: int) -> int: - return n ** 2 + return n**2 + # Create a model instance model_config_dict = ChatGPTConfig(temperature=1.0).as_dict() @@ -39,33 +42,25 @@ def get_perfect_square(n: int) -> int: # Create a FunctionTool with the function function_tool = FunctionTool( - get_perfect_square, - schema_assistant=agent_model, - use_schema_assistant=True + get_perfect_square, schema_assistant=agent_model, use_schema_assistant=True ) print("\nGenerated OpenAI Tool Schema:") print(function_tool.get_openai_tool_schema()) # Set system message for the assistant assistant_sys_msg = BaseMessage.make_assistant_message( - role_name="Assistant", - content="You are a helpful assistant." + role_name="Assistant", content="You are a helpful assistant." ) # Create a ChatAgent with the tool camel_agent = ChatAgent( - system_message=assistant_sys_msg, - model=agent_model, - tools=[function_tool] + system_message=assistant_sys_msg, model=agent_model, tools=[function_tool] ) camel_agent.reset() # Define a user message user_prompt = "What is the perfect square of 2024?" 
-user_msg = BaseMessage.make_user_message( - role_name="User", - content=user_prompt -) +user_msg = BaseMessage.make_user_message(role_name="User", content=user_prompt) # Get response from the assistant response = camel_agent.step(user_msg) diff --git a/test/toolkits/test_generate_openai_tool_schema.py b/test/toolkits/test_generate_openai_tool_schema.py index 3b557c801..16ae0ff1b 100644 --- a/test/toolkits/test_generate_openai_tool_schema.py +++ b/test/toolkits/test_generate_openai_tool_schema.py @@ -1,14 +1,28 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== from unittest.mock import MagicMock, patch + import pytest -from camel.models.base_model import BaseModelBackend -from camel.messages import BaseMessage + from camel.agents import ChatAgent +from camel.models.base_model import BaseModelBackend from camel.toolkits import FunctionTool, generate_docstring def sample_function(a: int, b: str = "default") -> bool: """ - This function checks if the integer is positive and + This function checks if the integer is positive and if the string is non-empty. 
Args: @@ -20,6 +34,7 @@ def sample_function(a: int, b: str = "default") -> bool: """ return a > 0 and len(b) > 0 + @patch.object(ChatAgent, 'step') @patch('camel.models.ModelFactory.create') def test_generate_openai_tool_schema(mock_create_model, mock_chat_agent_step): @@ -75,20 +90,19 @@ def test_generate_openai_tool_schema(mock_create_model, mock_chat_agent_step): 'properties': { 'a': { 'type': 'integer', - 'description': 'The integer value to check.' + 'description': 'The integer value to check.', }, 'b': { 'type': 'string', 'description': ( - "The string to verify. " - "Default is 'default'." + "The string to verify. " "Default is 'default'." ), - 'default': 'default' - } + 'default': 'default', + }, }, - 'required': ['a'] - } - } + 'required': ['a'], + }, + }, } assert schema == expected_schema @@ -104,6 +118,7 @@ def mock_model(): mock_model.value_for_tiktoken = MagicMock(return_value=1000) return mock_model + @patch.object(ChatAgent, 'step') def test_generate_docstring(mock_chat_agent_step, mock_model): code = """ @@ -120,12 +135,12 @@ def sample_function(a: int, b: str = "default") -> bool: # Mock ChatAgent's step method return value mock_message = MagicMock() mock_message.content = ( - f"This function checks if the integer is positive and " - f"if the string is non-empty.\n" - f"Args:\n a (int): The integer value to check.\n " - f" b (str): The string to verify. Default is 'default'.\n" - f"Returns:\n bool: True if both conditions are met, " - f"otherwise False." + "This function checks if the integer is positive and " + "if the string is non-empty.\n" + "Args:\n a (int): The integer value to check.\n " + " b (str): The string to verify. Default is 'default'.\n" + "Returns:\n bool: True if both conditions are met, " + "otherwise False." 
) mock_response = MagicMock() mock_response.msgs = [mock_message] @@ -146,12 +161,12 @@ def sample_function(a: int, b: str = "default") -> bool: ) expected_docstring = ( - f"This function checks if the integer is positive and " - f"if the string is non-empty.\n" - f"Args:\n a (int): The integer value to check.\n " - f" b (str): The string to verify. Default is 'default'.\n" - f"Returns:\n bool: True if both conditions are met, " - f"otherwise False." + "This function checks if the integer is positive and " + "if the string is non-empty.\n" + "Args:\n a (int): The integer value to check.\n " + " b (str): The string to verify. Default is 'default'.\n" + "Returns:\n bool: True if both conditions are met, " + "otherwise False." ) - assert docstring == expected_docstring \ No newline at end of file + assert docstring == expected_docstring From 387a6481fe5b0527fc15d365b556566078b0ba61 Mon Sep 17 00:00:00 2001 From: Zeyu Zhang <503423253@qq.com> Date: Sat, 19 Oct 2024 12:53:58 +1100 Subject: [PATCH 10/13] Apply suggestions from code review Co-authored-by: Guohao Li --- camel/toolkits/function_tool.py | 17 ++++++----------- .../generate_openai_tool_schema_example.py | 2 +- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index 7c5728f72..f0e119626 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -24,8 +24,7 @@ from camel.agents import ChatAgent from camel.configs import ChatGPTConfig from camel.messages import BaseMessage -from camel.models import ModelFactory -from camel.models.base_model import BaseModelBackend +from camel.models import ModelFactory, BaseModelBackend from camel.types import ModelPlatformType, ModelType from camel.utils import get_pydantic_object_schema, to_pascal @@ -203,7 +202,7 @@ def add(a: int, b: int) -> int: content="You are a helpful assistant.", ) docstring_assistant = ChatAgent( - assistant_sys_msg, model=model, token_limit=4096 + 
assistant_sys_msg, model=model ) # Create user message to prompt the assistant @@ -230,12 +229,12 @@ class FunctionTool: openai_tool_schema (Optional[Dict[str, Any]], optional): A user-defined openai tool schema to override the default result. (default: :obj:`None`) - schema_assistant (Optional[BaseModelBackend], optional): An assistant + schema_assistant_model (Optional[BaseModelBackend], optional): An assistant (e.g., an LLM model) used to generate the schema if no valid schema is provided and use_schema_assistant is enabled. (default: :obj:`None`) use_schema_assistant (bool, optional): Whether to enable the use of - the schema_assistant to automatically generate the schema if + the `schema_assistant` to automatically generate the schema if validation fails or no valid schema is provided. (default: :obj:`False`) """ @@ -508,7 +507,7 @@ def generate_openai_tool_schema( f"Please set the OpenAI tool schema manually." ) from e - function_string = getsource(self.func) + code = getsource(self.func) max_retries = 2 retries = 0 @@ -526,17 +525,13 @@ def generate_openai_tool_schema( # Validate the schema self.validate_openai_tool_schema(schema) - print( - f"Successfully generated the OpenAI tool schema for " - f"the function {self.func.__name__}." - ) return schema except Exception as e: retries += 1 if retries == max_retries: raise ValueError( - f"Failed to generate the OpenAI tool Schema. " + f"Failed to generate the OpenAI tool Schema after {max_retries} retries. " f"Please set the OpenAI tool schema for " f"function {self.func.__name__} manually." 
) from e diff --git a/examples/tool_call/generate_openai_tool_schema_example.py b/examples/tool_call/generate_openai_tool_schema_example.py index 59617a23b..c01cdad40 100644 --- a/examples/tool_call/generate_openai_tool_schema_example.py +++ b/examples/tool_call/generate_openai_tool_schema_example.py @@ -15,7 +15,7 @@ import os from camel.agents import ChatAgent -from camel.configs.openai_config import ChatGPTConfig +from camel.configs import ChatGPTConfig from camel.messages import BaseMessage from camel.models import ModelFactory from camel.toolkits import FunctionTool From e8d45a4071b5489823bf7ced9b7b83874e2639c0 Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Sat, 19 Oct 2024 14:49:18 +1100 Subject: [PATCH 11/13] Apply suggestions from code review --- camel/toolkits/function_tool.py | 120 +++++++------ .../generate_openai_tool_schema_example.py | 4 +- .../test_generate_openai_tool_schema.py | 158 ++++++++++++------ 3 files changed, 174 insertions(+), 108 deletions(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index f0e119626..88c99378e 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -11,6 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +import logging import warnings from inspect import Parameter, getsource, signature from typing import Any, Callable, Dict, Mapping, Optional, Tuple @@ -24,10 +25,12 @@ from camel.agents import ChatAgent from camel.configs import ChatGPTConfig from camel.messages import BaseMessage -from camel.models import ModelFactory, BaseModelBackend +from camel.models import BaseModelBackend, ModelFactory from camel.types import ModelPlatformType, ModelType from camel.utils import get_pydantic_object_schema, to_pascal +logger = logging.getLogger(__name__) + def _remove_a_key(d: Dict, remove_key: Any) -> None: r"""Remove a key from a dictionary recursively.""" @@ -150,13 +153,18 @@ def _create_mol(name, field): def generate_docstring( code: str, - model: BaseModelBackend, + model: Optional[BaseModelBackend] = None, ) -> str: """Generates a docstring for a given function code using LLM. + This function leverages a language model to generate a + PEP 8/PEP 257-compliant docstring for a provided Python function. + If no model is supplied, a default GPT_4O_MINI is used. + Args: code (str): The source code of the function. - model (BaseModelBackend): The model used for generating the docstring. + model (Optional[BaseModelBackend]): An optional language model backend + instance. If not provided, a default GPT_4O_MINI is used. Returns: str: The generated docstring. @@ -196,14 +204,19 @@ def add(a: int, b: int) -> int: **Task**: Generate a docstring for the function below. 
''' + # Create the assistant model if not provided + if not model: + model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + model_config_dict=ChatGPTConfig(temperature=1.0).as_dict(), + ) # Initialize assistant with system message and model assistant_sys_msg = BaseMessage.make_assistant_message( role_name="Assistant", content="You are a helpful assistant.", ) - docstring_assistant = ChatAgent( - assistant_sys_msg, model=model - ) + docstring_assistant = ChatAgent(assistant_sys_msg, model=model) # Create user message to prompt the assistant user_msg = BaseMessage.make_user_message( @@ -224,27 +237,33 @@ class FunctionTool: provide a user-defined tool schema to override. Args: - func (Callable): The function to call.The tool schema is parsed from - the signature and docstring by default. - openai_tool_schema (Optional[Dict[str, Any]], optional): A user-defined - openai tool schema to override the default result. - (default: :obj:`None`) - schema_assistant_model (Optional[BaseModelBackend], optional): An assistant - (e.g., an LLM model) used to generate the schema if no valid - schema is provided and use_schema_assistant is enabled. + func (Callable): The function to call. The tool schema is parsed from + the function signature and docstring by default. + openai_tool_schema (Optional[Dict[str, Any]], optional): A + user-defined OpenAI tool schema to override the default result. (default: :obj:`None`) - use_schema_assistant (bool, optional): Whether to enable the use of - the `schema_assistant` to automatically generate the schema if - validation fails or no valid schema is provided. + use_schema_assistant (Optional[bool], optional): Whether to enable the + use of a schema assistant model to automatically generate the + schema if validation fails or no valid schema is provided. 
(default: :obj:`False`) + schema_assistant_model (Optional[BaseModelBackend], optional): An + assistant model (e.g., an LLM model) used to generate the schema + if `use_schema_assistant` is enabled and no valid schema is + provided. + (default: :obj:`None`) + schema_generation_max_retries (Optional[int], optional): The maximum + number of attempts to retry schema generation using the schema + assistant model if the previous attempts fail. + (default: :obj:`2`) """ def __init__( self, func: Callable, openai_tool_schema: Optional[Dict[str, Any]] = None, - schema_assistant: Optional[BaseModelBackend] = None, use_schema_assistant: Optional[bool] = False, + schema_assistant_model: Optional[BaseModelBackend] = None, + schema_generation_max_retries: Optional[int] = 2, ) -> None: self.func = func self.openai_tool_schema = openai_tool_schema or get_openai_tool_schema( @@ -253,6 +272,7 @@ def __init__( if use_schema_assistant: try: + print(self.openai_tool_schema) self.validate_openai_tool_schema(self.openai_tool_schema) except Exception: print( @@ -260,7 +280,9 @@ def __init__( f"{self.func.__name__}. " f"Attempting to generate one using LLM." ) - schema = self.generate_openai_tool_schema(schema_assistant) + schema = self.generate_openai_tool_schema( + schema_generation_max_retries, schema_assistant_model + ) if schema: self.openai_tool_schema = schema else: @@ -361,8 +383,8 @@ def set_openai_function_schema( r"""Sets the schema of the function within the OpenAI tool schema. Args: - openai_function_schema (Dict[str, Any]): The function schema to set - within the OpenAI tool schema. + openai_function_schema (Dict[str, Any]): The function schema to + set within the OpenAI tool schema. 
""" self.openai_tool_schema["function"] = openai_function_schema @@ -465,21 +487,25 @@ def set_parameter(self, param_name: str, value: Dict[str, Any]): def generate_openai_tool_schema( self, + max_retries: int, schema_assistant: Optional[BaseModelBackend] = None, ) -> Dict[str, Any]: r"""Generates an OpenAI tool schema for the specified function. - This method generates the OpenAI tool schema using the provided - LLM assistant. If no assistant is provided, it defaults - to creating a GPT_4O_MINI model. The function's source code is used - to generate a docstring and schema, which are validated before - returning the final schema. If schema generation or validation fails, - the process retries up to two times. + This method uses a language model (LLM) to generate the OpenAI tool + schema for the specified function by first generating a docstring and + then creating a schema based on the function's source code. If no LLM + is provided, it defaults to initializing a GPT_4O_MINI model. The + schema generation and validation process is retried up to + `max_retries` times in case of failure. + Args: - schema_assistant (Optional[BaseModelBackend]): An optional - assistant model to use for schema generation. If not provided, a - GPT_4O_MINI model will be created. + max_retries (int): The maximum number of retries for schema + generation and validation if the process fails. + schema_assistant (Optional[BaseModelBackend]): An optional LLM + backend model used for generating the docstring and schema. If + not provided, a GPT_4O_MINI model will be created. Returns: Dict[str, Any]: The generated OpenAI tool schema for the function. @@ -490,52 +516,38 @@ def generate_openai_tool_schema( prompting manual schema setting. """ if not schema_assistant: - print( + logger.warning( "Warning: No model provided. " "Use GPT_4O_MINI to generate the schema." 
) - try: - schema_assistant = ModelFactory.create( - model_platform=ModelPlatformType.OPENAI, - model_type=ModelType.GPT_4O_MINI, - model_config_dict=ChatGPTConfig(temperature=1.0).as_dict(), - ) - except Exception as e: - raise ValueError( - f"Failed to generate the OpenAI tool schema for " - f"the function {self.func.__name__}. " - f"Please set the OpenAI tool schema manually." - ) from e - + schema_assistant = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + model_config_dict=ChatGPTConfig(temperature=1.0).as_dict(), + ) code = getsource(self.func) - - max_retries = 2 retries = 0 - # Retry loop to handle schema generation and validation while retries < max_retries: try: # Generate the docstring and the schema - docstring = generate_docstring( - function_string, schema_assistant - ) + docstring = generate_docstring(code, schema_assistant) self.func.__doc__ = docstring schema = get_openai_tool_schema(self.func) - # Validate the schema self.validate_openai_tool_schema(schema) - return schema except Exception as e: retries += 1 if retries == max_retries: raise ValueError( - f"Failed to generate the OpenAI tool Schema after {max_retries} retries. " + f"Failed to generate the OpenAI tool Schema after " + f"{max_retries} retries. " f"Please set the OpenAI tool schema for " f"function {self.func.__name__} manually." ) from e - print("Schema validation failed. Retrying...") + logger.warning("Schema validation failed. 
Retrying...") return {} diff --git a/examples/tool_call/generate_openai_tool_schema_example.py b/examples/tool_call/generate_openai_tool_schema_example.py index c01cdad40..6229640e0 100644 --- a/examples/tool_call/generate_openai_tool_schema_example.py +++ b/examples/tool_call/generate_openai_tool_schema_example.py @@ -42,7 +42,9 @@ def get_perfect_square(n: int) -> int: # Create a FunctionTool with the function function_tool = FunctionTool( - get_perfect_square, schema_assistant=agent_model, use_schema_assistant=True + get_perfect_square, + schema_assistant_model=agent_model, + use_schema_assistant=True, ) print("\nGenerated OpenAI Tool Schema:") print(function_tool.get_openai_tool_schema()) diff --git a/test/toolkits/test_generate_openai_tool_schema.py b/test/toolkits/test_generate_openai_tool_schema.py index 16ae0ff1b..275332561 100644 --- a/test/toolkits/test_generate_openai_tool_schema.py +++ b/test/toolkits/test_generate_openai_tool_schema.py @@ -16,7 +16,7 @@ import pytest from camel.agents import ChatAgent -from camel.models.base_model import BaseModelBackend +from camel.models import BaseModelBackend from camel.toolkits import FunctionTool, generate_docstring @@ -27,7 +27,7 @@ def sample_function(a: int, b: str = "default") -> bool: Args: a (int): The integer value to check. - b (str): The string to verify. Default is "default". + b (str): The string to verify. Default is 'default'. Returns: bool: True if both conditions are met, otherwise False. 
@@ -35,55 +35,22 @@ def sample_function(a: int, b: str = "default") -> bool: return a > 0 and len(b) > 0 -@patch.object(ChatAgent, 'step') -@patch('camel.models.ModelFactory.create') -def test_generate_openai_tool_schema(mock_create_model, mock_chat_agent_step): - # Mock model instance - mock_model_instance = MagicMock() - mock_model_instance.model_config_dict = {} - mock_create_model.return_value = mock_model_instance - - # Mock ChatAgent's step method return value - mock_message = MagicMock() - mock_message.content = ( - "This function checks if the integer is positive and if the string " - "is non-empty.\n" - "Args:\n" - " a (int): The integer value to check.\n" - " b (str): The string to verify. Default is 'default'.\n" - "Returns:\n" - " bool: True if both conditions are met, otherwise False." - ) - mock_response = MagicMock() - mock_response.msgs = [mock_message] - mock_response.msg = mock_message - mock_response.terminated = True - mock_chat_agent_step.return_value = mock_response - - # Create FunctionTool instance - function_tool = FunctionTool(func=sample_function) +@patch.object(FunctionTool, 'validate_openai_tool_schema') +@patch.object(FunctionTool, 'generate_openai_tool_schema') +def test_generate_openai_tool_schema( + mock_generate_schema, mock_validate_schema +): + # Mock the validate_openai_tool_schema to raise an exception + mock_validate_schema.side_effect = Exception("Invalid schema") - # Generate schema - try: - schema = function_tool.generate_openai_tool_schema( - schema_assistant=mock_model_instance - ) - except Exception as e: - pytest.fail(f"generate_openai_tool_schema() raised an exception: {e}") - - if schema is None: - pytest.fail( - "generate_openai_tool_schema() returned None unexpectedly." 
- ) - - # Adjusted expected schema with 'default' key in 'b' parameter - expected_schema = { + # Mock the generate_openai_tool_schema to return a specific schema + mock_schema = { 'type': 'function', 'function': { 'name': 'sample_function', 'description': ( - 'This function checks if the integer is positive ' - 'and if the string is non-empty.' + 'This function checks if the integer is positive and\n' + 'if the string is non-empty.' ), 'parameters': { 'type': 'object', @@ -95,7 +62,7 @@ def test_generate_openai_tool_schema(mock_create_model, mock_chat_agent_step): 'b': { 'type': 'string', 'description': ( - "The string to verify. " "Default is 'default'." + "The string to verify. Default is 'default'." ), 'default': 'default', }, @@ -104,8 +71,18 @@ def test_generate_openai_tool_schema(mock_create_model, mock_chat_agent_step): }, }, } + mock_generate_schema.return_value = mock_schema + + # Create FunctionTool instance with use_schema_assistant=True + function_tool = FunctionTool( + func=sample_function, + use_schema_assistant=True, + schema_assistant_model=None, + ) - assert schema == expected_schema + # Assert that the generated schema matches the expected schema + assert function_tool.openai_tool_schema == mock_schema + mock_generate_schema.assert_called_once() @pytest.fixture @@ -119,13 +96,19 @@ def mock_model(): return mock_model +@patch('camel.models.ModelFactory.create') @patch.object(ChatAgent, 'step') -def test_generate_docstring(mock_chat_agent_step, mock_model): +def test_generate_docstring( + mock_chat_agent_step, mock_model_factory, mock_model +): code = """ def sample_function(a: int, b: str = "default") -> bool: return a > 0 and len(b) > 0 """ + # Mock the model factory to return the mock model + mock_model_factory.return_value = mock_model + # Ensure mock_model has required attributes mock_model.model_type = MagicMock() mock_model.model_type.value_for_tiktoken = "mock_value_for_tiktoken" @@ -137,8 +120,8 @@ def sample_function(a: int, b: str = 
"default") -> bool: mock_message.content = ( "This function checks if the integer is positive and " "if the string is non-empty.\n" - "Args:\n a (int): The integer value to check.\n " - " b (str): The string to verify. Default is 'default'.\n" + "Args:\n a (int): The integer value to check.\n" + " b (str): The string to verify. Default is 'default'.\n" "Returns:\n bool: True if both conditions are met, " "otherwise False." ) @@ -163,10 +146,79 @@ def sample_function(a: int, b: str = "default") -> bool: expected_docstring = ( "This function checks if the integer is positive and " "if the string is non-empty.\n" - "Args:\n a (int): The integer value to check.\n " - " b (str): The string to verify. Default is 'default'.\n" + "Args:\n a (int): The integer value to check.\n" + " b (str): The string to verify. Default is 'default'.\n" "Returns:\n bool: True if both conditions are met, " "otherwise False." ) assert docstring == expected_docstring + + +@patch('camel.models.ModelFactory.create') +@patch.object(ChatAgent, 'step') +def test_function_tool_generate_schema_with_retries( + mock_chat_agent_step, mock_model_factory +): + # Mock the model factory to return a mock model + mock_model = MagicMock(spec=BaseModelBackend) + mock_model_factory.return_value = mock_model + + # Mock ChatAgent's step method to simulate retries + mock_message = MagicMock() + mock_message.content = ( + "This function checks if the integer is positive and\n" + "if the string is non-empty.\n" + "Args:\n a (int): The integer value to check.\n" + " b (str): The string to verify. Default is 'default'.\n" + "Returns:\n bool: True if both conditions are met, otherwise False." 
+ ) + mock_response = MagicMock() + mock_response.msgs = [mock_message] + mock_response.msg = mock_message + mock_response.terminated = True + + # Configure the step method to fail the first time + # and succeed the second time + mock_chat_agent_step.side_effect = [ + Exception("Validation failed"), + mock_response, + ] + + # Create FunctionTool instance with use_schema_assistant=True + function_tool = FunctionTool( + func=sample_function, + use_schema_assistant=True, + schema_assistant_model=mock_model, + schema_generation_max_retries=2, + ) + + expected_schema = { + 'type': 'function', + 'function': { + 'name': 'sample_function', + 'description': ( + 'This function checks if the integer is positive and\n' + 'if the string is non-empty.' + ), + 'parameters': { + 'type': 'object', + 'properties': { + 'a': { + 'type': 'integer', + 'description': 'The integer value to check.', + }, + 'b': { + 'type': 'string', + 'description': ( + "The string to verify. Default is 'default'." + ), + 'default': 'default', + }, + }, + 'required': ['a'], + }, + }, + } + + assert function_tool.openai_tool_schema == expected_schema From 9413ec6c400089e240ee0a2df97c62146ec807db Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Sat, 19 Oct 2024 14:56:29 +1100 Subject: [PATCH 12/13] Pre-commit fix --- camel/toolkits/function_tool.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index 88c99378e..b707729a4 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -251,10 +251,10 @@ class FunctionTool: if `use_schema_assistant` is enabled and no valid schema is provided. (default: :obj:`None`) - schema_generation_max_retries (Optional[int], optional): The maximum + schema_generation_max_retries (int, optional): The maximum number of attempts to retry schema generation using the schema assistant model if the previous attempts fail. 
- (default: :obj:`2`) + (default: 2) """ def __init__( @@ -263,7 +263,7 @@ def __init__( openai_tool_schema: Optional[Dict[str, Any]] = None, use_schema_assistant: Optional[bool] = False, schema_assistant_model: Optional[BaseModelBackend] = None, - schema_generation_max_retries: Optional[int] = 2, + schema_generation_max_retries: int = 2, ) -> None: self.func = func self.openai_tool_schema = openai_tool_schema or get_openai_tool_schema( From d2616f420d8470a0e42ec10517b5e0d5ff8b7789 Mon Sep 17 00:00:00 2001 From: Zhangzeyu97 <503423253@qq.com> Date: Sat, 19 Oct 2024 15:06:47 +1100 Subject: [PATCH 13/13] Remove unnecessary print --- camel/toolkits/function_tool.py | 1 - 1 file changed, 1 deletion(-) diff --git a/camel/toolkits/function_tool.py b/camel/toolkits/function_tool.py index b707729a4..f1e74402a 100644 --- a/camel/toolkits/function_tool.py +++ b/camel/toolkits/function_tool.py @@ -272,7 +272,6 @@ def __init__( if use_schema_assistant: try: - print(self.openai_tool_schema) self.validate_openai_tool_schema(self.openai_tool_schema) except Exception: print(