From 426ca300d4f292320f590f676ae5258ad72ba3c9 Mon Sep 17 00:00:00 2001
From: Piyush Jain
Date: Tue, 2 Apr 2024 11:07:55 -0700
Subject: [PATCH] Remove extraneous files.

---
 libs/aws/langchain_aws/chat_models.py  |  71 ----------
 libs/aws/langchain_aws/embeddings.py   |  34 -----
 libs/aws/langchain_aws/llms.py         |  74 ----------
 libs/aws/langchain_aws/vectorstores.py | 180 -------------------------
 4 files changed, 359 deletions(-)
 delete mode 100644 libs/aws/langchain_aws/chat_models.py
 delete mode 100644 libs/aws/langchain_aws/embeddings.py
 delete mode 100644 libs/aws/langchain_aws/llms.py
 delete mode 100644 libs/aws/langchain_aws/vectorstores.py

diff --git a/libs/aws/langchain_aws/chat_models.py b/libs/aws/langchain_aws/chat_models.py
deleted file mode 100644
index 1b28c239..00000000
--- a/libs/aws/langchain_aws/chat_models.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""Bedrock chat models."""
-from typing import Any, AsyncIterator, Iterator, List, Optional
-
-from langchain_core.callbacks import (
-    AsyncCallbackManagerForLLMRun,
-    CallbackManagerForLLMRun,
-)
-from langchain_core.language_models.chat_models import BaseChatModel
-from langchain_core.messages import BaseMessage
-from langchain_core.outputs import ChatGenerationChunk, ChatResult
-
-
-class ChatBedrock(BaseChatModel):
-    """ChatBedrock chat model.
-
-    Example:
-        .. code-block:: python
-
-            from langchain_core.messages import HumanMessage
-
-            from langchain_aws import ChatBedrock
-
-            model = ChatBedrock()
-            model.invoke([HumanMessage(content="Come up with 10 names for a song about parrots.")])
-    """  # noqa: E501
-
-    @property
-    def _llm_type(self) -> str:
-        """Return type of chat model."""
-        return "chat-aws"
-
-    def _generate(
-        self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> ChatResult:
-        raise NotImplementedError
-
-    # TODO: Implement if __model_name__ supports streaming. Otherwise delete method.
-    def _stream(
-        self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> Iterator[ChatGenerationChunk]:
-        raise NotImplementedError
-
-    # TODO: Implement if __model_name__ supports async streaming. Otherwise delete
-    # method.
-    async def _astream(
-        self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> AsyncIterator[ChatGenerationChunk]:
-        raise NotImplementedError
-
-    # TODO: Implement if __model_name__ supports async generation. Otherwise delete
-    # method.
-    async def _agenerate(
-        self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> ChatResult:
-        raise NotImplementedError
diff --git a/libs/aws/langchain_aws/embeddings.py b/libs/aws/langchain_aws/embeddings.py
deleted file mode 100644
index 50e3d7f1..00000000
--- a/libs/aws/langchain_aws/embeddings.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from typing import List
-
-from langchain_core.embeddings import Embeddings
-
-
-class BedrockEmbeddings(Embeddings):
-    """BedrockEmbeddings embedding model.
-
-    Example:
-        .. code-block:: python
-
-            from langchain_aws import BedrockEmbeddings
-
-            model = BedrockEmbeddings()
-    """
-
-    def embed_documents(self, texts: List[str]) -> List[List[float]]:
-        """Embed search docs."""
-        raise NotImplementedError
-
-    def embed_query(self, text: str) -> List[float]:
-        """Embed query text."""
-        raise NotImplementedError
-
-    # only keep aembed_documents and aembed_query if they're implemented!
-    # delete them otherwise to use the base class' default
-    # implementation, which calls the sync version in an executor
-    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
-        """Asynchronous Embed search docs."""
-        raise NotImplementedError
-
-    async def aembed_query(self, text: str) -> List[float]:
-        """Asynchronous Embed query text."""
-        raise NotImplementedError
diff --git a/libs/aws/langchain_aws/llms.py b/libs/aws/langchain_aws/llms.py
deleted file mode 100644
index fa03d040..00000000
--- a/libs/aws/langchain_aws/llms.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Bedrock large language models."""
-from typing import (
-    Any,
-    AsyncIterator,
-    Iterator,
-    List,
-    Optional,
-)
-
-from langchain_core.callbacks import (
-    AsyncCallbackManagerForLLMRun,
-    CallbackManagerForLLMRun,
-)
-from langchain_core.language_models import BaseLLM
-from langchain_core.outputs import GenerationChunk, LLMResult
-
-
-class BedrockLLM(BaseLLM):
-    """BedrockLLM large language models.
-
-    Example:
-        .. code-block:: python
-
-            from langchain_aws import BedrockLLM
-
-            model = BedrockLLM()
-            model.invoke("Come up with 10 names for a song about parrots")
-    """
-
-    @property
-    def _llm_type(self) -> str:
-        """Return type of LLM."""
-        return "aws-llm"
-
-    def _generate(
-        self,
-        prompts: List[str],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> LLMResult:
-        raise NotImplementedError
-
-    # TODO: Implement if __model_name__ supports async generation. Otherwise
-    # delete method.
-    async def _agenerate(
-        self,
-        prompts: List[str],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> LLMResult:
-        raise NotImplementedError
-
-    # TODO: Implement if __model_name__ supports streaming. Otherwise delete method.
-    def _stream(
-        self,
-        prompt: str,
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> Iterator[GenerationChunk]:
-        raise NotImplementedError
-
-    # TODO: Implement if __model_name__ supports async streaming. Otherwise delete
-    # method.
- async def _astream( - self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> AsyncIterator[GenerationChunk]: - raise NotImplementedError diff --git a/libs/aws/langchain_aws/vectorstores.py b/libs/aws/langchain_aws/vectorstores.py deleted file mode 100644 index ff72ab83..00000000 --- a/libs/aws/langchain_aws/vectorstores.py +++ /dev/null @@ -1,180 +0,0 @@ -"""Bedrock vector stores.""" -from __future__ import annotations - -import asyncio -from functools import partial -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Iterable, - List, - Optional, - Tuple, - Type, - TypeVar, -) - -from langchain_core.embeddings import Embeddings -from langchain_core.vectorstores import VectorStore - -if TYPE_CHECKING: - from langchain_core.documents import Document - -VST = TypeVar("VST", bound=VectorStore) - - -class BedrockVectorStore(VectorStore): - """Bedrock vector store. - - Example: - .. code-block:: python - - from langchain_aws.vectorstores import BedrockVectorStore - - vectorstore = BedrockVectorStore() - """ - - def add_texts( - self, - texts: Iterable[str], - metadatas: Optional[List[dict]] = None, - **kwargs: Any, - ) -> List[str]: - raise NotImplementedError - - async def aadd_texts( - self, - texts: Iterable[str], - metadatas: Optional[List[dict]] = None, - **kwargs: Any, - ) -> List[str]: - return await asyncio.get_running_loop().run_in_executor( - None, partial(self.add_texts, **kwargs), texts, metadatas - ) - - def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: - raise NotImplementedError - - async def adelete( - self, ids: Optional[List[str]] = None, **kwargs: Any - ) -> Optional[bool]: - raise NotImplementedError - - def similarity_search( - self, query: str, k: int = 4, **kwargs: Any - ) -> List[Document]: - raise NotImplementedError - - async def asimilarity_search( - self, query: str, k: int = 4, **kwargs: Any - ) -> List[Document]: - # This is a temporary workaround to make the similarity search - # asynchronous. The proper solution is to make the similarity search - # asynchronous in the vector store implementations. - func = partial(self.similarity_search, query, k=k, **kwargs) - return await asyncio.get_event_loop().run_in_executor(None, func) - - def similarity_search_with_score( - self, *args: Any, **kwargs: Any - ) -> List[Tuple[Document, float]]: - raise NotImplementedError - - async def asimilarity_search_with_score( - self, *args: Any, **kwargs: Any - ) -> List[Tuple[Document, float]]: - # This is a temporary workaround to make the similarity search - # asynchronous. The proper solution is to make the similarity search - # asynchronous in the vector store implementations. - func = partial(self.similarity_search_with_score, *args, **kwargs) - return await asyncio.get_event_loop().run_in_executor(None, func) - - def similarity_search_by_vector( - self, embedding: List[float], k: int = 4, **kwargs: Any - ) -> List[Document]: - raise NotImplementedError - - async def asimilarity_search_by_vector( - self, embedding: List[float], k: int = 4, **kwargs: Any - ) -> List[Document]: - # This is a temporary workaround to make the similarity search - # asynchronous. The proper solution is to make the similarity search - # asynchronous in the vector store implementations. 
- func = partial(self.similarity_search_by_vector, embedding, k=k, **kwargs) - return await asyncio.get_event_loop().run_in_executor(None, func) - - def max_marginal_relevance_search( - self, - query: str, - k: int = 4, - fetch_k: int = 20, - lambda_mult: float = 0.5, - **kwargs: Any, - ) -> List[Document]: - raise NotImplementedError - - async def amax_marginal_relevance_search( - self, - query: str, - k: int = 4, - fetch_k: int = 20, - lambda_mult: float = 0.5, - **kwargs: Any, - ) -> List[Document]: - # This is a temporary workaround to make the similarity search - # asynchronous. The proper solution is to make the similarity search - # asynchronous in the vector store implementations. - func = partial( - self.max_marginal_relevance_search, - query, - k=k, - fetch_k=fetch_k, - lambda_mult=lambda_mult, - **kwargs, - ) - return await asyncio.get_event_loop().run_in_executor(None, func) - - def max_marginal_relevance_search_by_vector( - self, - embedding: List[float], - k: int = 4, - fetch_k: int = 20, - lambda_mult: float = 0.5, - **kwargs: Any, - ) -> List[Document]: - raise NotImplementedError - - async def amax_marginal_relevance_search_by_vector( - self, - embedding: List[float], - k: int = 4, - fetch_k: int = 20, - lambda_mult: float = 0.5, - **kwargs: Any, - ) -> List[Document]: - raise NotImplementedError - - @classmethod - def from_texts( - cls: Type[VST], - texts: List[str], - embedding: Embeddings, - metadatas: Optional[List[dict]] = None, - **kwargs: Any, - ) -> VST: - raise NotImplementedError - - @classmethod - async def afrom_texts( - cls: Type[VST], - texts: List[str], - embedding: Embeddings, - metadatas: Optional[List[dict]] = None, - **kwargs: Any, - ) -> VST: - return await asyncio.get_running_loop().run_in_executor( - None, partial(cls.from_texts, **kwargs), texts, embedding, metadatas - ) - - def _select_relevance_score_fn(self) -> Callable[[float], float]: - raise NotImplementedError
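
For reference, every async method that the removed vectorstores.py scaffolding did implement (aadd_texts, the asimilarity_search* workarounds, amax_marginal_relevance_search, afrom_texts) follows one pattern: bind the sync counterpart's arguments with functools.partial and hand it to run_in_executor, so the blocking call runs on a thread pool instead of stalling the event loop. A minimal, self-contained sketch of that pattern follows; the SimpleStore class and its add_texts body are hypothetical stand-ins for illustration, not langchain-aws API.

import asyncio
from functools import partial
from typing import Any, Iterable, List, Optional


class SimpleStore:
    """Hypothetical stand-in for a store with a blocking sync API."""

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        # Stands in for a blocking network call to the backing store.
        return [f"id-{i}" for i, _ in enumerate(texts)]

    async def aadd_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        # Run the sync method on the default thread-pool executor so the
        # event loop is never blocked. Keyword arguments are bound with
        # functools.partial because run_in_executor forwards positional
        # arguments only -- the same reason the removed template used it.
        return await asyncio.get_running_loop().run_in_executor(
            None, partial(self.add_texts, **kwargs), texts, metadatas
        )


async def main() -> None:
    store = SimpleStore()
    print(await store.aadd_texts(["hello", "world"]))  # ['id-0', 'id-1']


if __name__ == "__main__":
    asyncio.run(main())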
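
One wrinkle worth noting if this scaffolding is resurrected elsewhere: the removed file mixed asyncio.get_running_loop() (in aadd_texts and afrom_texts) with the older asyncio.get_event_loop() (in the asimilarity_search* workarounds). Inside a coroutine both resolve to the running loop, but get_running_loop() is the recommended call on modern Python, since get_event_loop() is deprecated when no loop is running; the sketch above uses get_running_loop() throughout. The removed embeddings.py template made the related point in its comments: the base Embeddings class already provides exactly this executor-based fallback for aembed_documents and aembed_query, so hand-written async wrappers are only worth keeping when a genuinely async client call exists.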