Commit 5eaa483

feat: add ministral 3b and 8b and some small enhancements (#1076)
Wendong-Fan authored Oct 18, 2024
1 parent 9f9a7f9 commit 5eaa483
Showing 32 changed files with 197 additions and 93 deletions.
24 changes: 12 additions & 12 deletions camel/agents/chat_agent.py
@@ -415,7 +415,7 @@ def record_message(self, message: BaseMessage) -> None:
def step(
self,
input_message: BaseMessage,
-output_schema: Optional[Type[BaseModel]] = None,
+response_format: Optional[Type[BaseModel]] = None,
) -> ChatAgentResponse:
r"""Performs a single step in the chat session by generating a response
to the input message.
@@ -426,7 +426,7 @@ def step(
either `user` or `assistant` but it will be set to `user`
anyway since for the self agent any incoming message is
external.
-output_schema (Optional[Type[BaseModel]], optional): A pydantic
+response_format (Optional[Type[BaseModel]], optional): A pydantic
model class that includes value types and field descriptions
used to generate a structured response by LLM. This schema
helps in defining the expected output format. (default:
@@ -518,15 +518,15 @@ def step(
self._step_tool_call_and_update(response)
)

-if output_schema is not None:
+if response_format is not None:
(
output_messages,
finish_reasons,
usage_dict,
response_id,
tool_call,
num_tokens,
-) = self._structure_output_with_function(output_schema)
+) = self._structure_output_with_function(response_format)
tool_call_records.append(tool_call)

info = self._step_get_info(
@@ -608,7 +608,7 @@ def step(
)

if (
-output_schema is not None
+response_format is not None
and self.model_type.support_native_tool_calling
):
(
@@ -618,7 +618,7 @@ def step(
response_id,
tool_call,
num_tokens,
-) = self._structure_output_with_function(output_schema)
+) = self._structure_output_with_function(response_format)
tool_call_records.append(tool_call)

info = self._step_get_info(
@@ -647,7 +647,7 @@ def step(
async def step_async(
self,
input_message: BaseMessage,
-output_schema: Optional[Type[BaseModel]] = None,
+response_format: Optional[Type[BaseModel]] = None,
) -> ChatAgentResponse:
r"""Performs a single step in the chat session by generating a response
to the input message. This agent step can call async function calls.
@@ -658,7 +658,7 @@ async def step_async(
either `user` or `assistant` but it will be set to `user`
anyway since for the self agent any incoming message is
external.
-output_schema (Optional[Type[BaseModel]], optional): A pydantic
+response_format (Optional[Type[BaseModel]], optional): A pydantic
model class that includes value types and field descriptions
used to generate a structured response by LLM. This schema
helps in defining the expected output format. (default:
@@ -718,7 +718,7 @@ async def step_async(
)

if (
-output_schema is not None
+response_format is not None
and self.model_type.support_native_tool_calling
):
(
Expand All @@ -728,7 +728,7 @@ async def step_async(
response_id,
tool_call_record,
num_tokens,
-) = self._structure_output_with_function(output_schema)
+) = self._structure_output_with_function(response_format)
tool_call_records.append(tool_call_record)

info = self._step_get_info(
Expand Down Expand Up @@ -795,7 +795,7 @@ async def _step_tool_call_and_update_async(
return func_record

def _structure_output_with_function(
-self, output_schema: Type[BaseModel]
+self, response_format: Type[BaseModel]
) -> Tuple[
List[BaseMessage],
List[str],
@@ -809,7 +809,7 @@ def _structure_output_with_function(
"""
from camel.toolkits import FunctionTool

-schema_json = get_pydantic_object_schema(output_schema)
+schema_json = get_pydantic_object_schema(response_format)
func_str = json_to_function_code(schema_json)
func_callable = func_string_to_callable(func_str)
func = FunctionTool(func_callable)
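Callers only need to swap the keyword when upgrading; the structured-output behavior itself is unchanged. A minimal usage sketch of the renamed parameter (the JokeResponse model and the agent setup are illustrative assumptions, not part of this commit):

from pydantic import BaseModel, Field

from camel.agents import ChatAgent
from camel.messages import BaseMessage


class JokeResponse(BaseModel):
    # Illustrative schema; any pydantic model works here.
    joke: str = Field(description="The joke text")
    funny_level: int = Field(description="Funniness from 1 to 10")


agent = ChatAgent(
    BaseMessage.make_assistant_message(
        role_name="Comedian", content="You are a comedian."
    )
)
user_msg = BaseMessage.make_user_message(
    role_name="User", content="Tell me a joke."
)
# Pre-#1076 code passed output_schema=JokeResponse here.
response = agent.step(user_msg, response_format=JokeResponse)
print(response.msgs[0].content)  # JSON string matching JokeResponse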
7 changes: 2 additions & 5 deletions camel/configs/anthropic_config.py
@@ -15,9 +15,8 @@

from typing import List, Union

-from openai._types import NOT_GIVEN, NotGiven
-
from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven


class AnthropicConfig(BaseConfig):
@@ -55,9 +54,7 @@ class AnthropicConfig(BaseConfig):
(default: :obj:`5`)
metadata: An object describing metadata about the request.
stream (bool, optional): Whether to incrementally stream the response
-using server-sent events.
-(default: :obj:`False`)
+using server-sent events. (default: :obj:`False`)
"""

max_tokens: int = 256
3 changes: 1 addition & 2 deletions camel/configs/groq_config.py
@@ -15,9 +15,8 @@

from typing import Optional, Sequence, Union

-from openai._types import NOT_GIVEN, NotGiven
-
from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven


class GroqConfig(BaseConfig):
3 changes: 1 addition & 2 deletions camel/configs/ollama_config.py
@@ -15,9 +15,8 @@

from typing import Sequence, Union

-from openai._types import NOT_GIVEN, NotGiven
-
from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven


class OllamaConfig(BaseConfig):
2 changes: 1 addition & 1 deletion camel/configs/openai_config.py
@@ -15,10 +15,10 @@

from typing import Optional, Sequence, Union

-from openai._types import NOT_GIVEN, NotGiven
from pydantic import Field

from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven


class ChatGPTConfig(BaseConfig):
2 changes: 1 addition & 1 deletion camel/configs/samba_config.py
@@ -15,10 +15,10 @@

from typing import Any, Dict, Optional, Sequence, Union

-from openai._types import NOT_GIVEN, NotGiven
from pydantic import Field

from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven


class SambaFastAPIConfig(BaseConfig):
2 changes: 1 addition & 1 deletion camel/configs/togetherai_config.py
@@ -15,10 +15,10 @@

from typing import Any, Sequence, Union

-from openai._types import NOT_GIVEN, NotGiven
from pydantic import Field

from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven


class TogetherAIConfig(BaseConfig):
2 changes: 1 addition & 1 deletion camel/configs/vllm_config.py
@@ -15,10 +15,10 @@

from typing import Sequence, Union

-from openai._types import NOT_GIVEN, NotGiven
from pydantic import Field

from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven


# flake8: noqa: E501
3 changes: 1 addition & 2 deletions camel/configs/zhipuai_config.py
@@ -15,9 +15,8 @@

from typing import Optional, Sequence, Union

-from openai._types import NOT_GIVEN, NotGiven
-
from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven


class ZhipuAIConfig(BaseConfig):
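All of these config modules now import the NOT_GIVEN sentinel from camel.types instead of reaching into the private openai._types module, removing a dependency on an OpenAI internal that could move between releases. The sentinel distinguishes "parameter not supplied" from an explicit None; a small sketch of the pattern using only the re-exported names (build_request is a made-up helper, not CAMEL API):

from typing import Union

from camel.types import NOT_GIVEN, NotGiven


def build_request(max_tokens: Union[int, NotGiven] = NOT_GIVEN) -> dict:
    # Unset parameters are dropped instead of being sent as null.
    params = {"model": "gpt-4o-mini", "max_tokens": max_tokens}
    return {k: v for k, v in params.items() if v is not NOT_GIVEN}


print(build_request())               # {'model': 'gpt-4o-mini'}
print(build_request(max_tokens=64))  # includes max_tokens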
4 changes: 2 additions & 2 deletions camel/embeddings/openai_embedding.py
@@ -16,10 +16,10 @@
import os
from typing import Any

-from openai import NOT_GIVEN, NotGiven, OpenAI
+from openai import OpenAI

from camel.embeddings.base import BaseEmbedding
-from camel.types import EmbeddingModelType
+from camel.types import NOT_GIVEN, EmbeddingModelType, NotGiven
from camel.utils import api_keys_required


6 changes: 3 additions & 3 deletions camel/loaders/firecrawl_reader.py
@@ -155,12 +155,12 @@ def scrape(
except Exception as e:
raise RuntimeError(f"Failed to scrape the URL: {e}")

-def structured_scrape(self, url: str, output_schema: BaseModel) -> Dict:
+def structured_scrape(self, url: str, response_format: BaseModel) -> Dict:
r"""Use LLM to extract structured data from given URL.
Args:
url (str): The URL to read.
-output_schema (BaseModel): A pydantic model
+response_format (BaseModel): A pydantic model
that includes value types and field descriptions used to
generate a structured response by LLM. This schema helps
in defining the expected output format.
@@ -176,7 +176,7 @@ def structured_scrape(self, url: str, output_schema: BaseModel) -> Dict:
url,
{
'formats': ['extract'],
-'extract': {'schema': output_schema.model_json_schema()},
+'extract': {'schema': response_format.model_json_schema()},
},
)
return data.get("extract", {})
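The same rename is applied on the Firecrawl side for consistency with ChatAgent.step. A hedged usage sketch (the Article model and URL are invented; the loader reads FIRECRAWL_API_KEY from the environment):

from pydantic import BaseModel

from camel.loaders import Firecrawl


class Article(BaseModel):
    title: str
    summary: str


firecrawl = Firecrawl()
data = firecrawl.structured_scrape(
    url="https://example.com/blog/post", response_format=Article
)
print(data)  # dict shaped by Article's JSON schema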
5 changes: 3 additions & 2 deletions camel/models/anthropic_model.py
@@ -14,8 +14,6 @@
import os
from typing import Any, Dict, List, Optional, Union

-from openai import NOT_GIVEN
-
from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
from camel.messages import OpenAIMessage
from camel.models.base_model import BaseModelBackend
@@ -24,6 +22,7 @@
AnthropicTokenCounter,
BaseTokenCounter,
api_keys_required,
+dependencies_required,
)


@@ -46,6 +45,7 @@ class AnthropicModel(BaseModelBackend):
will be used. (default: :obj:`None`)
"""

+@dependencies_required('anthropic')
def __init__(
self,
model_type: Union[ModelType, str],
@@ -122,6 +122,7 @@ def run(
Returns:
ChatCompletion: Response in the OpenAI API format.
"""
+from anthropic import NOT_GIVEN

if messages[0]["role"] == "system":
sys_msg = str(messages.pop(0)["content"])
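The new @dependencies_required decorator (also applied to the Gemini, LiteLLM, Mistral, and Reka backends below) fails fast with an actionable message when an optional provider SDK is absent, and the anthropic import moves inside run() so the module can be imported without the SDK installed. Conceptually the decorator behaves like this simplified sketch; the real implementation lives in camel.utils:

import importlib.util
from functools import wraps


def dependencies_required(*packages: str):
    r"""Raise a clear ImportError if optional packages are missing."""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            missing = [
                pkg for pkg in packages
                if importlib.util.find_spec(pkg) is None
            ]
            if missing:
                raise ImportError(
                    f"Missing required dependencies: {', '.join(missing)}. "
                    f"Try `pip install {' '.join(missing)}`."
                )
            return func(*args, **kwargs)

        return wrapper

    return decorator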
2 changes: 2 additions & 0 deletions camel/models/gemini_model.py
@@ -27,6 +27,7 @@
BaseTokenCounter,
GeminiTokenCounter,
api_keys_required,
+dependencies_required,
)

if TYPE_CHECKING:
@@ -56,6 +57,7 @@ class GeminiModel(BaseModelBackend):
limitation of the current camel design.
"""

+@dependencies_required('google')
def __init__(
self,
model_type: Union[ModelType, str],
7 changes: 6 additions & 1 deletion camel/models/litellm_model.py
@@ -17,7 +17,11 @@
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.types import ChatCompletion, ModelType
-from camel.utils import BaseTokenCounter, LiteLLMTokenCounter
+from camel.utils import (
+    BaseTokenCounter,
+    LiteLLMTokenCounter,
+    dependencies_required,
+)


class LiteLLMModel(BaseModelBackend):
@@ -41,6 +45,7 @@ class LiteLLMModel(BaseModelBackend):

# NOTE: Currently stream mode is not supported.

+@dependencies_required('litellm')
def __init__(
self,
model_type: Union[ModelType, str],
2 changes: 2 additions & 0 deletions camel/models/mistral_model.py
@@ -28,6 +28,7 @@
BaseTokenCounter,
OpenAITokenCounter,
api_keys_required,
+dependencies_required,
)

try:
@@ -58,6 +59,7 @@ class MistralModel(BaseModelBackend):
be used. (default: :obj:`None`)
"""

+@dependencies_required('mistralai')
def __init__(
self,
model_type: Union[ModelType, str],
2 changes: 2 additions & 0 deletions camel/models/reka_model.py
@@ -21,6 +21,7 @@
BaseTokenCounter,
OpenAITokenCounter,
api_keys_required,
+dependencies_required,
)

if TYPE_CHECKING:
@@ -55,6 +56,7 @@ class RekaModel(BaseModelBackend):
be used. (default: :obj:`None`)
"""

+@dependencies_required('reka')
def __init__(
self,
model_type: Union[ModelType, str],
4 changes: 4 additions & 0 deletions camel/types/__init__.py
@@ -28,6 +28,7 @@
VoiceType,
)
from .openai_types import (
+NOT_GIVEN,
ChatCompletion,
ChatCompletionAssistantMessageParam,
ChatCompletionChunk,
@@ -38,6 +39,7 @@
ChatCompletionUserMessageParam,
Choice,
CompletionUsage,
+NotGiven,
)
from .unified_model_type import UnifiedModelType

@@ -67,4 +69,6 @@
'AudioModelType',
'VoiceType',
'UnifiedModelType',
+'NOT_GIVEN',
+'NotGiven',
]
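Net effect for downstream imports: the sentinel pair is now part of CAMEL's public type surface, so this one-line change is all most call sites need:

# Before (reached into a private OpenAI module):
# from openai._types import NOT_GIVEN, NotGiven

# After this commit:
from camel.types import NOT_GIVEN, NotGiven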