langchain_google_genai.chat_models.ChatGoogleGenerativeAIError: Invalid argument provided to Gemini: 400 * GenerateContentRequest.tools[0].function_declarations[0].parameters.properties[action].type: must be specified #535

Open
Arunprakaash opened this issue Oct 5, 2024 · 0 comments

Arunprakaash commented Oct 5, 2024

Pydantic models (`Plan` and `AgentState` are defined elsewhere in my setup):

from typing import Union

from langchain_core.runnables import RunnableConfig
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic import BaseModel, Field


class Response(BaseModel):
    """Response to user."""

    response: str


class Act(BaseModel):
    """Action to perform."""

    action: Union[Response, Plan] = Field(
        description="Action to perform. If you want to respond to user, use Response. "
        "If you need to further use tools to get the answer, use Plan."
    )


llm = ChatGoogleGenerativeAI(temperature=0, model="gemini-1.5-flash")


def call_replan(state: AgentState, config: RunnableConfig):
    llm.with_structured_output(convert_to_openai_function(Act))
    # throws the error with or without converting the Pydantic model
    # to a dict schema via convert_to_openai_function
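For what it's worth, this first 400 appears to come from the `Union[Response, Plan]` field: it serializes to an `anyOf` schema with no top-level `type`, and Gemini's function-declaration format requires an explicit `type` on every property. A sketch of a workaround, assuming the union can be flattened into two optional fields (the field names here are illustrative, not part of the original models):

from typing import Optional


class Act(BaseModel):
    """Action to perform: populate exactly one of `response` or `plan`."""

    response: Optional[Response] = Field(
        default=None,
        description="Use this to respond directly to the user.",
    )
    plan: Optional[Plan] = Field(
        default=None,
        description="Use this when further tool calls are needed to get the answer.",
    )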

The same error also occurs on a tool-calling args schema:

from typing import Any, Optional, cast

from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field

# BaseRequestsTool, REQUESTS_GET_TOOL_DESCRIPTION, MAX_RESPONSE_LENGTH,
# PARSING_GET_PROMPT, and _get_default_llm_chain_factory come from my
# OpenAPI planner setup; their imports are elided here.


class RequestsGetToolInput(BaseModel):
    url: str = Field(description="The URL to send the GET request to")
    params: Optional[dict[str, str]] = Field(
        # default_factory=dict,
        default={},
        description="Query parameters for the GET request",
    )
    output_instructions: str = Field(
        description="Instructions on what information to extract from the response"
    )


class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):
    """Requests GET tool with LLM-instructed extraction of truncated responses."""

    name: str = "requests_get"
    description: str = REQUESTS_GET_TOOL_DESCRIPTION
    args_schema: type[BaseModel] = RequestsGetToolInput
    response_length: int = MAX_RESPONSE_LENGTH
    llm_chain: Any = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_GET_PROMPT)
    )

    def _run(
        self,
        url: str,
        output_instructions: str,
        params: dict[str, str] = {},
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Send the GET request, truncate the response, and extract with the LLM."""
        response: str = cast(str, self.requests_wrapper.get(url, params=params))
        response = response[: self.response_length]
        return self.llm_chain.invoke(
            {"response": response, "instructions": output_instructions}
        ).strip()

    async def _arun(
        self,
        url: str,
        output_instructions: str,
        params: dict[str, str] = {},
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        return self._run(
            url,
            output_instructions,
            params,
            run_manager=run_manager.get_sync() if run_manager else None,
        )

langchain_google_genai.chat_models.ChatGoogleGenerativeAIError: Invalid argument provided to Gemini: 400 * GenerateContentRequest.tools[0].function_declarations[0].parameters.properties[params].properties: should be non-empty for OBJECT type
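This second 400 looks like the same schema restriction hitting the `params` field: `Optional[dict[str, str]]` serializes to an OBJECT schema with empty `properties`, which Gemini rejects. A sketch of a workaround, assuming the query parameters can be passed as a JSON-encoded string and decoded inside `_run` (the `params_json` field and `parse_params` helper are hypothetical, not part of the original tool):

import json
from typing import Optional

from pydantic import BaseModel, Field


class RequestsGetToolInput(BaseModel):
    url: str = Field(description="The URL to send the GET request to")
    # A plain string sidesteps the empty-properties OBJECT schema Gemini rejects.
    params_json: Optional[str] = Field(
        default=None,
        description='Query parameters as a JSON object string, e.g. {"q": "test"}',
    )
    output_instructions: str = Field(
        description="Instructions on what information to extract from the response"
    )


def parse_params(params_json: Optional[str]) -> dict[str, str]:
    # Hypothetical helper: decode the JSON string back into the dict that
    # requests_wrapper.get() expects inside _run.
    return json.loads(params_json) if params_json else {}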
