Dataset columns:
  date_collected: string (1 distinct value)
  repo_name: string (length 6 to 116)
  file_name: string (length 2 to 220)
  file_contents: string (length 13 to 357k)
  prompts: sequence
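The snippet below is a minimal sketch of how records with this schema could be iterated, assuming the dump is published as a Hugging Face `datasets` dataset; the dataset identifier used here is a placeholder and is not taken from this dump.

from datasets import load_dataset  # assumes the `datasets` package is installed

# Placeholder identifier: the real dataset path is not given in this dump.
ds = load_dataset("org/code-prompts-dump", split="train")

for row in ds:
    # Column names follow the schema listed above.
    print(row["date_collected"], row["repo_name"], row["file_name"])
    print(len(row["file_contents"]), "characters of source")
    print(row["prompts"])  # a (possibly empty) sequence of prompt strings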
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~dashvector.py
from __future__ import annotations import logging import uuid from typing import ( Any, Iterable, List, Optional, Tuple, ) import numpy as np from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.utils import get_from_env from langchain.vectorstores.utils import maximal_marginal_relevance logger = logging.getLogger(__name__) class DashVector(VectorStore): """`DashVector` vector store. To use, you should have the ``dashvector`` python package installed. Example: .. code-block:: python from langchain.vectorstores import DashVector from langchain.embeddings.openai import OpenAIEmbeddings import dashvector client = dashvector.Client(api_key="***") client.create("langchain", dimension=1024) collection = client.get("langchain") embeddings = OpenAIEmbeddings() vectorstore = DashVector(collection, embeddings.embed_query, "text") """ def __init__( self, collection: Any, embedding: Embeddings, text_field: str, ): """Initialize with DashVector collection.""" try: import dashvector except ImportError: raise ValueError( "Could not import dashvector python package. " "Please install it with `pip install dashvector`." ) if not isinstance(collection, dashvector.Collection): raise ValueError( f"collection should be an instance of dashvector.Collection, " f"bug got {type(collection)}" ) self._collection = collection self._embedding = embedding self._text_field = text_field def _similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[str] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query vector, along with scores""" # query by vector ret = self._collection.query(embedding, topk=k, filter=filter) if not ret: raise ValueError( f"Fail to query docs by vector, error {self._collection.message}" ) docs = [] for doc in ret: metadata = doc.fields text = metadata.pop(self._text_field) score = doc.score docs.append((Document(page_content=text, metadata=metadata), score)) return docs def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 25, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids associated with the texts. batch_size: Optional batch size to upsert docs. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ ids = ids or [str(uuid.uuid4().hex) for _ in texts] text_list = list(texts) for i in range(0, len(text_list), batch_size): # batch end end = min(i + batch_size, len(text_list)) batch_texts = text_list[i:end] batch_ids = ids[i:end] batch_embeddings = self._embedding.embed_documents(list(batch_texts)) # batch metadatas if metadatas: batch_metadatas = metadatas[i:end] else: batch_metadatas = [{} for _ in range(i, end)] for metadata, text in zip(batch_metadatas, batch_texts): metadata[self._text_field] = text # batch upsert to collection docs = list(zip(batch_ids, batch_embeddings, batch_metadatas)) ret = self._collection.upsert(docs) if not ret: raise ValueError( f"Fail to upsert docs to dashvector vector database," f"Error: {ret.message}" ) return ids def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool: """Delete by vector ID. Args: ids: List of ids to delete. 
Returns: True if deletion is successful, False otherwise. """ return bool(self._collection.delete(ids)) def similarity_search( self, query: str, k: int = 4, filter: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to search documents similar to. k: Number of documents to return. Default to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents most similar to the query text. """ docs_and_scores = self.similarity_search_with_relevance_scores(query, k, filter) return [doc for doc, _ in docs_and_scores] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, filter: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query text , alone with relevance scores. Less is more similar, more is more dissimilar. Args: query: input text k: Number of Documents to return. Defaults to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Tuples of (doc, similarity_score) """ embedding = self._embedding.embed_query(query) return self._similarity_search_with_score_by_vector( embedding, k=k, filter=filter ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self._similarity_search_with_score_by_vector( embedding, k, filter ) return [doc for doc, _ in docs_and_scores] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, filter ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. 
filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents selected by maximal marginal relevance. """ # query by vector ret = self._collection.query( embedding, topk=fetch_k, filter=filter, include_vector=True ) if not ret: raise ValueError( f"Fail to query docs by vector, error {self._collection.message}" ) candidate_embeddings = [doc.vector for doc in ret] mmr_selected = maximal_marginal_relevance( np.array(embedding), candidate_embeddings, lambda_mult, k ) metadatas = [ret.output[i].fields for i in mmr_selected] return [ Document(page_content=metadata.pop(self._text_field), metadata=metadata) for metadata in metadatas ] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, dashvector_api_key: Optional[str] = None, collection_name: str = "langchain", text_field: str = "text", batch_size: int = 25, ids: Optional[List[str]] = None, **kwargs: Any, ) -> DashVector: """Return DashVector VectorStore initialized from texts and embeddings. This is the quick way to get started with dashvector vector store. Example: .. code-block:: python from langchain.vectorstores import DashVector from langchain.embeddings import OpenAIEmbeddings import dashvector embeddings = OpenAIEmbeddings() dashvector = DashVector.from_documents( docs, embeddings, dashvector_api_key="{DASHVECTOR_API_KEY}" ) """ try: import dashvector except ImportError: raise ValueError( "Could not import dashvector python package. " "Please install it with `pip install dashvector`." ) dashvector_api_key = dashvector_api_key or get_from_env( "dashvector_api_key", "DASHVECTOR_API_KEY" ) dashvector_client = dashvector.Client(api_key=dashvector_api_key) dashvector_client.delete(collection_name) collection = dashvector_client.get(collection_name) if not collection: dim = len(embedding.embed_query(texts[0])) # create collection if not existed resp = dashvector_client.create(collection_name, dimension=dim) if resp: collection = dashvector_client.get(collection_name) else: raise ValueError( "Fail to create collection. " f"Error: {resp.message}." ) dashvector_vector_db = cls(collection, embedding, text_field) dashvector_vector_db.add_texts(texts, metadatas, ids, batch_size) return dashvector_vector_db
[]
2024-01-10
axgpt/langchain
libs~core~langchain_core~schema~messages.py
from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, List, Sequence, Union from typing_extensions import Literal from langchain_core.load.serializable import Serializable from langchain_core.pydantic_v1 import Extra, Field if TYPE_CHECKING: from langchain_core.prompts.chat import ChatPromptTemplate def get_buffer_string( messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI" ) -> str: """Convert sequence of Messages to strings and concatenate them into one string. Args: messages: Messages to be converted to strings. human_prefix: The prefix to prepend to contents of HumanMessages. ai_prefix: THe prefix to prepend to contents of AIMessages. Returns: A single string concatenation of all input messages. Example: .. code-block:: python from langchain_core.schema import AIMessage, HumanMessage messages = [ HumanMessage(content="Hi, how are you?"), AIMessage(content="Good, how are you?"), ] get_buffer_string(messages) # -> "Human: Hi, how are you?\nAI: Good, how are you?" """ string_messages = [] for m in messages: if isinstance(m, HumanMessage): role = human_prefix elif isinstance(m, AIMessage): role = ai_prefix elif isinstance(m, SystemMessage): role = "System" elif isinstance(m, FunctionMessage): role = "Function" elif isinstance(m, ChatMessage): role = m.role else: raise ValueError(f"Got unsupported message type: {m}") message = f"{role}: {m.content}" if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs: message += f"{m.additional_kwargs['function_call']}" string_messages.append(message) return "\n".join(string_messages) class BaseMessage(Serializable): """The base abstract Message class. Messages are the inputs and outputs of ChatModels. """ content: Union[str, List[Union[str, Dict]]] """The string contents of the message.""" additional_kwargs: dict = Field(default_factory=dict) """Any additional information.""" type: str class Config: extra = Extra.allow @classmethod def is_lc_serializable(cls) -> bool: """Return whether this class is serializable.""" return True def __add__(self, other: Any) -> ChatPromptTemplate: from langchain_core.prompts.chat import ChatPromptTemplate prompt = ChatPromptTemplate(messages=[self]) return prompt + other def merge_content( first_content: Union[str, List[Union[str, Dict]]], second_content: Union[str, List[Union[str, Dict]]], ) -> Union[str, List[Union[str, Dict]]]: # If first chunk is a string if isinstance(first_content, str): # If the second chunk is also a string, then merge them naively if isinstance(second_content, str): return first_content + second_content # If the second chunk is a list, add the first chunk to the start of the list else: return_list: List[Union[str, Dict]] = [first_content] return return_list + second_content # If both are lists, merge them naively elif isinstance(second_content, List): return first_content + second_content # If the first content is a list, and the second content is a string else: # If the last element of the first content is a string # Add the second content to the last element if isinstance(first_content[-1], str): return first_content[:-1] + [first_content[-1] + second_content] else: # Otherwise, add the second content as a new element of the list return first_content + [second_content] class BaseMessageChunk(BaseMessage): """A Message chunk, which can be concatenated with other Message chunks.""" def _merge_kwargs_dict( self, left: Dict[str, Any], right: Dict[str, Any] ) -> Dict[str, Any]: """Merge additional_kwargs from another 
BaseMessageChunk into this one.""" merged = left.copy() for k, v in right.items(): if k not in merged: merged[k] = v elif type(merged[k]) != type(v): raise ValueError( f'additional_kwargs["{k}"] already exists in this message,' " but with a different type." ) elif isinstance(merged[k], str): merged[k] += v elif isinstance(merged[k], dict): merged[k] = self._merge_kwargs_dict(merged[k], v) else: raise ValueError( f"Additional kwargs key {k} already exists in this message." ) return merged def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore if isinstance(other, BaseMessageChunk): # If both are (subclasses of) BaseMessageChunk, # concat into a single BaseMessageChunk if isinstance(self, ChatMessageChunk): return self.__class__( role=self.role, content=merge_content(self.content, other.content), additional_kwargs=self._merge_kwargs_dict( self.additional_kwargs, other.additional_kwargs ), ) return self.__class__( content=merge_content(self.content, other.content), additional_kwargs=self._merge_kwargs_dict( self.additional_kwargs, other.additional_kwargs ), ) else: raise TypeError( 'unsupported operand type(s) for +: "' f"{self.__class__.__name__}" f'" and "{other.__class__.__name__}"' ) class HumanMessage(BaseMessage): """A Message from a human.""" example: bool = False """Whether this Message is being passed in to the model as part of an example conversation. """ type: Literal["human"] = "human" HumanMessage.update_forward_refs() class HumanMessageChunk(HumanMessage, BaseMessageChunk): """A Human Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment] # noqa: E501 class AIMessage(BaseMessage): """A Message from an AI.""" example: bool = False """Whether this Message is being passed in to the model as part of an example conversation. """ type: Literal["ai"] = "ai" AIMessage.update_forward_refs() class AIMessageChunk(AIMessage, BaseMessageChunk): """A Message chunk from an AI.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment] # noqa: E501 def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore if isinstance(other, AIMessageChunk): if self.example != other.example: raise ValueError( "Cannot concatenate AIMessageChunks with different example values." ) return self.__class__( example=self.example, content=merge_content(self.content, other.content), additional_kwargs=self._merge_kwargs_dict( self.additional_kwargs, other.additional_kwargs ), ) return super().__add__(other) class SystemMessage(BaseMessage): """A Message for priming AI behavior, usually passed in as the first of a sequence of input messages. """ type: Literal["system"] = "system" SystemMessage.update_forward_refs() class SystemMessageChunk(SystemMessage, BaseMessageChunk): """A System Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. 
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment] # noqa: E501 class FunctionMessage(BaseMessage): """A Message for passing the result of executing a function back to a model.""" name: str """The name of the function that was executed.""" type: Literal["function"] = "function" FunctionMessage.update_forward_refs() class FunctionMessageChunk(FunctionMessage, BaseMessageChunk): """A Function Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment] def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore if isinstance(other, FunctionMessageChunk): if self.name != other.name: raise ValueError( "Cannot concatenate FunctionMessageChunks with different names." ) return self.__class__( name=self.name, content=merge_content(self.content, other.content), additional_kwargs=self._merge_kwargs_dict( self.additional_kwargs, other.additional_kwargs ), ) return super().__add__(other) class ToolMessage(BaseMessage): """A Message for passing the result of executing a tool back to a model.""" tool_call_id: str """Tool call that this message is responding to.""" type: Literal["tool"] = "tool" ToolMessage.update_forward_refs() class ToolMessageChunk(ToolMessage, BaseMessageChunk): """A Tool Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. type: Literal["ToolMessageChunk"] = "ToolMessageChunk" # type: ignore[assignment] def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore if isinstance(other, ToolMessageChunk): if self.tool_call_id != other.tool_call_id: raise ValueError( "Cannot concatenate ToolMessageChunks with different names." ) return self.__class__( tool_call_id=self.tool_call_id, content=merge_content(self.content, other.content), additional_kwargs=self._merge_kwargs_dict( self.additional_kwargs, other.additional_kwargs ), ) return super().__add__(other) class ChatMessage(BaseMessage): """A Message that can be assigned an arbitrary speaker (i.e. role).""" role: str """The speaker / role of the Message.""" type: Literal["chat"] = "chat" ChatMessage.update_forward_refs() class ChatMessageChunk(ChatMessage, BaseMessageChunk): """A Chat Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore if isinstance(other, ChatMessageChunk): if self.role != other.role: raise ValueError( "Cannot concatenate ChatMessageChunks with different roles." ) return self.__class__( role=self.role, content=merge_content(self.content, other.content), additional_kwargs=self._merge_kwargs_dict( self.additional_kwargs, other.additional_kwargs ), ) return super().__add__(other) AnyMessage = Union[ AIMessage, HumanMessage, ChatMessage, SystemMessage, FunctionMessage, ToolMessage ] def _message_to_dict(message: BaseMessage) -> dict: return {"type": message.type, "data": message.dict()} def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]: """Convert a sequence of Messages to a list of dictionaries. Args: messages: Sequence of messages (as BaseMessages) to convert. 
Returns: List of messages as dicts. """ return [_message_to_dict(m) for m in messages] def _message_from_dict(message: dict) -> BaseMessage: _type = message["type"] if _type == "human": return HumanMessage(**message["data"]) elif _type == "ai": return AIMessage(**message["data"]) elif _type == "system": return SystemMessage(**message["data"]) elif _type == "chat": return ChatMessage(**message["data"]) elif _type == "function": return FunctionMessage(**message["data"]) elif _type == "tool": return ToolMessage(**message["data"]) else: raise ValueError(f"Got unexpected message type: {_type}") def messages_from_dict(messages: List[dict]) -> List[BaseMessage]: """Convert a sequence of messages from dicts to Message objects. Args: messages: Sequence of messages (as dicts) to convert. Returns: List of messages (BaseMessages). """ return [_message_from_dict(m) for m in messages]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~bm25.py
from __future__ import annotations

from typing import Any, Callable, Dict, Iterable, List, Optional

from langchain_core.schema import BaseRetriever, Document

from langchain.callbacks.manager import CallbackManagerForRetrieverRun


def default_preprocessing_func(text: str) -> List[str]:
    return text.split()


class BM25Retriever(BaseRetriever):
    """`BM25` retriever without Elasticsearch."""

    vectorizer: Any
    """BM25 vectorizer."""
    docs: List[Document]
    """List of documents."""
    k: int = 4
    """Number of documents to return."""
    preprocess_func: Callable[[str], List[str]] = default_preprocessing_func
    """Preprocessing function to use on the text before BM25 vectorization."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @classmethod
    def from_texts(
        cls,
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict]] = None,
        bm25_params: Optional[Dict[str, Any]] = None,
        preprocess_func: Callable[[str], List[str]] = default_preprocessing_func,
        **kwargs: Any,
    ) -> BM25Retriever:
        """
        Create a BM25Retriever from a list of texts.

        Args:
            texts: A list of texts to vectorize.
            metadatas: A list of metadata dicts to associate with each text.
            bm25_params: Parameters to pass to the BM25 vectorizer.
            preprocess_func: A function to preprocess each text before vectorization.
            **kwargs: Any other arguments to pass to the retriever.

        Returns:
            A BM25Retriever instance.
        """
        try:
            from rank_bm25 import BM25Okapi
        except ImportError:
            raise ImportError(
                "Could not import rank_bm25, please install with `pip install "
                "rank_bm25`."
            )

        texts_processed = [preprocess_func(t) for t in texts]
        bm25_params = bm25_params or {}
        vectorizer = BM25Okapi(texts_processed, **bm25_params)
        metadatas = metadatas or ({} for _ in texts)
        docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
        return cls(
            vectorizer=vectorizer, docs=docs, preprocess_func=preprocess_func, **kwargs
        )

    @classmethod
    def from_documents(
        cls,
        documents: Iterable[Document],
        *,
        bm25_params: Optional[Dict[str, Any]] = None,
        preprocess_func: Callable[[str], List[str]] = default_preprocessing_func,
        **kwargs: Any,
    ) -> BM25Retriever:
        """
        Create a BM25Retriever from a list of Documents.

        Args:
            documents: A list of Documents to vectorize.
            bm25_params: Parameters to pass to the BM25 vectorizer.
            preprocess_func: A function to preprocess each text before vectorization.
            **kwargs: Any other arguments to pass to the retriever.

        Returns:
            A BM25Retriever instance.
        """
        texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
        return cls.from_texts(
            texts=texts,
            bm25_params=bm25_params,
            metadatas=metadatas,
            preprocess_func=preprocess_func,
            **kwargs,
        )

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        processed_query = self.preprocess_func(query)
        return_docs = self.vectorizer.get_top_n(processed_query, self.docs, n=self.k)
        return return_docs
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~test_dependencies.py
"""A unit test meant to catch accidental introduction of non-optional dependencies.""" from pathlib import Path from typing import Any, Dict, Mapping import pytest import toml HERE = Path(__file__).parent PYPROJECT_TOML = HERE / "../../pyproject.toml" @pytest.fixture() def poetry_conf() -> Dict[str, Any]: """Load the pyproject.toml file.""" with open(PYPROJECT_TOML) as f: return toml.load(f)["tool"]["poetry"] def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None: """A test that checks if a new non-optional dependency is being introduced. If this test is triggered, it means that a contributor is trying to introduce a new required dependency. This should be avoided in most situations. """ # Get the dependencies from the [tool.poetry.dependencies] section dependencies = poetry_conf["dependencies"] is_required = { package_name: isinstance(requirements, str) or not requirements.get("optional", False) for package_name, requirements in dependencies.items() } required_dependencies = [ package_name for package_name, required in is_required.items() if required ] assert sorted(required_dependencies) == [ "PyYAML", "SQLAlchemy", "aiohttp", "anyio", "async-timeout", "dataclasses-json", "jsonpatch", "langsmith", "numpy", "pydantic", "python", "requests", "tenacity", ] unrequired_dependencies = [ package_name for package_name, required in is_required.items() if not required ] in_extras = [dep for group in poetry_conf["extras"].values() for dep in group] assert set(unrequired_dependencies) == set(in_extras) def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None: """Check if someone is attempting to add additional test dependencies. Only dependencies associated with test running infrastructure should be added to the test group; e.g., pytest, pytest-cov etc. Examples of dependencies that should NOT be included: boto3, azure, postgres, etc. """ test_group_deps = sorted(poetry_conf["group"]["test"]["dependencies"]) assert test_group_deps == sorted( [ "duckdb-engine", "freezegun", "lark", "pandas", "pytest", "pytest-asyncio", "pytest-cov", "pytest-dotenv", "pytest-mock", "pytest-socket", "pytest-watcher", "responses", "syrupy", "requests-mock", ] ) def test_imports() -> None: """Test that you can import all top level things okay.""" from langchain_core.schema import BasePromptTemplate # noqa: F401 from langchain.agents import OpenAIFunctionsAgent # noqa: F401 from langchain.callbacks import OpenAICallbackHandler # noqa: F401 from langchain.chains import LLMChain # noqa: F401 from langchain.chat_models import ChatOpenAI # noqa: F401 from langchain.document_loaders import BSHTMLLoader # noqa: F401 from langchain.embeddings import OpenAIEmbeddings # noqa: F401 from langchain.llms import OpenAI # noqa: F401 from langchain.retrievers import VespaRetriever # noqa: F401 from langchain.tools import DuckDuckGoSearchResults # noqa: F401 from langchain.utilities import SerpAPIWrapper # noqa: F401 from langchain.vectorstores import FAISS # noqa: F401
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~metal.py
from typing import Any, List, Optional

from langchain_core.pydantic_v1 import root_validator
from langchain_core.schema import BaseRetriever, Document

from langchain.callbacks.manager import CallbackManagerForRetrieverRun


class MetalRetriever(BaseRetriever):
    """`Metal API` retriever."""

    client: Any
    """The Metal client to use."""
    params: Optional[dict] = None
    """The parameters to pass to the Metal client."""

    @root_validator(pre=True)
    def validate_client(cls, values: dict) -> dict:
        """Validate that the client is of the correct type."""
        from metal_sdk.metal import Metal

        if "client" in values:
            client = values["client"]
            if not isinstance(client, Metal):
                raise ValueError(
                    "Got unexpected client, should be of type metal_sdk.metal.Metal. "
                    f"Instead, got {type(client)}"
                )
        values["params"] = values.get("params", {})
        return values

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        results = self.client.search({"text": query}, **self.params)
        final_results = []
        for r in results["data"]:
            metadata = {k: v for k, v in r.items() if k != "text"}
            final_results.append(Document(page_content=r["text"], metadata=metadata))
        return final_results
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~llms~titan_takeoff.py
from typing import Any, Iterator, List, Mapping, Optional import requests from langchain_core.schema.output import GenerationChunk from requests.exceptions import ConnectionError from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens class TitanTakeoff(LLM): """Wrapper around Titan Takeoff APIs.""" base_url: str = "http://localhost:8000" """Specifies the baseURL to use for the Titan Takeoff API. Default = http://localhost:8000. """ generate_max_length: int = 128 """Maximum generation length. Default = 128.""" sampling_topk: int = 1 """Sample predictions from the top K most probable candidates. Default = 1.""" sampling_topp: float = 1.0 """Sample from predictions whose cumulative probability exceeds this value. Default = 1.0. """ sampling_temperature: float = 1.0 """Sample with randomness. Bigger temperatures are associated with more randomness and 'creativity'. Default = 1.0. """ repetition_penalty: float = 1.0 """Penalise the generation of tokens that have been generated before. Set to > 1 to penalize. Default = 1 (no penalty). """ no_repeat_ngram_size: int = 0 """Prevent repetitions of ngrams of this size. Default = 0 (turned off).""" streaming: bool = False """Whether to stream the output. Default = False.""" @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling Titan Takeoff Server.""" params = { "generate_max_length": self.generate_max_length, "sampling_topk": self.sampling_topk, "sampling_topp": self.sampling_topp, "sampling_temperature": self.sampling_temperature, "repetition_penalty": self.repetition_penalty, "no_repeat_ngram_size": self.no_repeat_ngram_size, } return params @property def _llm_type(self) -> str: """Return type of llm.""" return "titan_takeoff" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Titan Takeoff generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python prompt = "What is the capital of the United Kingdom?" response = model(prompt) """ try: if self.streaming: text_output = "" for chunk in self._stream( prompt=prompt, stop=stop, run_manager=run_manager, ): text_output += chunk.text return text_output url = f"{self.base_url}/generate" params = {"text": prompt, **self._default_params} response = requests.post(url, json=params) response.raise_for_status() response.encoding = "utf-8" text = "" if "message" in response.json(): text = response.json()["message"] else: raise ValueError("Something went wrong.") if stop is not None: text = enforce_stop_tokens(text, stop) return text except ConnectionError: raise ConnectionError( "Could not connect to Titan Takeoff server. \ Please make sure that the server is running." ) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Call out to Titan Takeoff stream endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Yields: A dictionary like object containing a string token. Example: .. code-block:: python prompt = "What is the capital of the United Kingdom?" 
response = model(prompt) """ url = f"{self.base_url}/generate_stream" params = {"text": prompt, **self._default_params} response = requests.post(url, json=params, stream=True) response.encoding = "utf-8" for text in response.iter_content(chunk_size=1, decode_unicode=True): if text: chunk = GenerationChunk(text=text) yield chunk if run_manager: run_manager.on_llm_new_token(token=chunk.text) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"base_url": self.base_url, **{}, **self._default_params}
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~llms~vertexai.py
from __future__ import annotations from concurrent.futures import Executor, ThreadPoolExecutor from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Dict, Iterator, List, Optional, Union, ) from langchain_core.pydantic_v1 import BaseModel, Field, root_validator from langchain_core.schema import ( Generation, LLMResult, ) from langchain_core.schema.output import GenerationChunk from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import BaseLLM, create_base_retry_decorator from langchain.utilities.vertexai import ( get_client_info, init_vertexai, raise_vertex_import_error, ) if TYPE_CHECKING: from google.cloud.aiplatform.gapic import ( PredictionServiceAsyncClient, PredictionServiceClient, ) from vertexai.language_models._language_models import ( TextGenerationResponse, _LanguageModel, ) def _response_to_generation( response: TextGenerationResponse, ) -> GenerationChunk: """Convert a stream response to a generation chunk.""" try: generation_info = { "is_blocked": response.is_blocked, "safety_attributes": response.safety_attributes, } except Exception: generation_info = None return GenerationChunk(text=response.text, generation_info=generation_info) def is_codey_model(model_name: str) -> bool: """Returns True if the model name is a Codey model. Args: model_name: The model name to check. Returns: True if the model name is a Codey model. """ return "code" in model_name def _create_retry_decorator( llm: VertexAI, *, run_manager: Optional[ Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] ] = None, ) -> Callable[[Any], Any]: import google.api_core errors = [ google.api_core.exceptions.ResourceExhausted, google.api_core.exceptions.ServiceUnavailable, google.api_core.exceptions.Aborted, google.api_core.exceptions.DeadlineExceeded, ] decorator = create_base_retry_decorator( error_types=errors, max_retries=llm.max_retries, run_manager=run_manager ) return decorator def completion_with_retry( llm: VertexAI, *args: Any, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator def _completion_with_retry(*args: Any, **kwargs: Any) -> Any: return llm.client.predict(*args, **kwargs) return _completion_with_retry(*args, **kwargs) def stream_completion_with_retry( llm: VertexAI, *args: Any, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator def _completion_with_retry(*args: Any, **kwargs: Any) -> Any: return llm.client.predict_streaming(*args, **kwargs) return _completion_with_retry(*args, **kwargs) async def acompletion_with_retry( llm: VertexAI, *args: Any, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator async def _acompletion_with_retry(*args: Any, **kwargs: Any) -> Any: return await llm.client.predict_async(*args, **kwargs) return await _acompletion_with_retry(*args, **kwargs) class _VertexAIBase(BaseModel): project: Optional[str] = None "The default GCP project to use when making Vertex API calls." location: str = "us-central1" "The default location to use when making API calls." 
request_parallelism: int = 5 "The amount of parallelism allowed for requests issued to VertexAI models. " "Default is 5." max_retries: int = 6 """The maximum number of retries to make when generating.""" task_executor: ClassVar[Optional[Executor]] = Field(default=None, exclude=True) stop: Optional[List[str]] = None "Optional list of stop words to use when generating." model_name: Optional[str] = None "Underlying model name." @classmethod def _get_task_executor(cls, request_parallelism: int = 5) -> Executor: if cls.task_executor is None: cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism) return cls.task_executor class _VertexAICommon(_VertexAIBase): client: "_LanguageModel" = None #: :meta private: model_name: str "Underlying model name." temperature: float = 0.0 "Sampling temperature, it controls the degree of randomness in token selection." max_output_tokens: int = 128 "Token limit determines the maximum amount of text output from one prompt." top_p: float = 0.95 "Tokens are selected from most probable to least until the sum of their " "probabilities equals the top-p value. Top-p is ignored for Codey models." top_k: int = 40 "How the model selects tokens for output, the next token is selected from " "among the top-k most probable tokens. Top-k is ignored for Codey models." credentials: Any = Field(default=None, exclude=True) "The default custom credentials (google.auth.credentials.Credentials) to use " "when making API calls. If not provided, credentials will be ascertained from " "the environment." n: int = 1 """How many completions to generate for each prompt.""" streaming: bool = False """Whether to stream the results or not.""" @property def _llm_type(self) -> str: return "vertexai" @property def is_codey_model(self) -> bool: return is_codey_model(self.model_name) @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _default_params(self) -> Dict[str, Any]: if self.is_codey_model: return { "temperature": self.temperature, "max_output_tokens": self.max_output_tokens, } else: return { "temperature": self.temperature, "max_output_tokens": self.max_output_tokens, "top_k": self.top_k, "top_p": self.top_p, "candidate_count": self.n, } @classmethod def _try_init_vertexai(cls, values: Dict) -> None: allowed_params = ["project", "location", "credentials"] params = {k: v for k, v in values.items() if k in allowed_params} init_vertexai(**params) return None def _prepare_params( self, stop: Optional[List[str]] = None, stream: bool = False, **kwargs: Any, ) -> dict: stop_sequences = stop or self.stop params_mapping = {"n": "candidate_count"} params = {params_mapping.get(k, k): v for k, v in kwargs.items()} params = {**self._default_params, "stop_sequences": stop_sequences, **params} if stream or self.streaming: params.pop("candidate_count") return params class VertexAI(_VertexAICommon, BaseLLM): """Google Vertex AI large language models.""" model_name: str = "text-bison" "The name of the Vertex AI large language model." tuned_model_name: Optional[str] = None "The name of a tuned model. If provided, model_name is ignored." 
@classmethod def is_lc_serializable(self) -> bool: return True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" cls._try_init_vertexai(values) tuned_model_name = values.get("tuned_model_name") model_name = values["model_name"] try: if not is_codey_model(model_name): from vertexai.preview.language_models import TextGenerationModel if tuned_model_name: values["client"] = TextGenerationModel.get_tuned_model( tuned_model_name ) else: values["client"] = TextGenerationModel.from_pretrained(model_name) else: from vertexai.preview.language_models import CodeGenerationModel if tuned_model_name: values["client"] = CodeGenerationModel.get_tuned_model( tuned_model_name ) else: values["client"] = CodeGenerationModel.from_pretrained(model_name) except ImportError: raise_vertex_import_error() if values["streaming"] and values["n"] > 1: raise ValueError("Only one candidate can be generated with streaming!") return values def get_num_tokens(self, text: str) -> int: """Get the number of tokens present in the text. Useful for checking if an input will fit in a model's context window. Args: text: The string input to tokenize. Returns: The integer number of tokens in the text. """ try: result = self.client.count_tokens([text]) except AttributeError: raise NotImplementedError( "Your google-cloud-aiplatform version didn't implement count_tokens." "Please, install it with pip install google-cloud-aiplatform>=1.35.0" ) return result.total_tokens def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> LLMResult: should_stream = stream if stream is not None else self.streaming params = self._prepare_params(stop=stop, stream=should_stream, **kwargs) generations = [] for prompt in prompts: if should_stream: generation = GenerationChunk(text="") for chunk in self._stream( prompt, stop=stop, run_manager=run_manager, **kwargs ): generation += chunk generations.append([generation]) else: res = completion_with_retry( self, prompt, run_manager=run_manager, **params ) if self.is_codey_model: generations.append([_response_to_generation(res)]) else: generations.append( [_response_to_generation(r) for r in res.candidates] ) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: params = self._prepare_params(stop=stop, **kwargs) generations = [] for prompt in prompts: res = await acompletion_with_retry( self, prompt, run_manager=run_manager, **params ) generations.append([_response_to_generation(r) for r in res.candidates]) return LLMResult(generations=generations) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: params = self._prepare_params(stop=stop, stream=True, **kwargs) for stream_resp in stream_completion_with_retry( self, prompt, run_manager=run_manager, **params ): chunk = _response_to_generation(stream_resp) yield chunk if run_manager: run_manager.on_llm_new_token( chunk.text, chunk=chunk, verbose=self.verbose, ) class VertexAIModelGarden(_VertexAIBase, BaseLLM): """Large language models served from Vertex AI Model Garden.""" client: "PredictionServiceClient" = None #: :meta private: async_client: "PredictionServiceAsyncClient" = None #: 
:meta private: endpoint_id: str "A name of an endpoint where the model has been deployed." allowed_model_args: Optional[List[str]] = None """Allowed optional args to be passed to the model.""" prompt_arg: str = "prompt" result_arg: str = "generated_text" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: from google.api_core.client_options import ClientOptions from google.cloud.aiplatform.gapic import ( PredictionServiceAsyncClient, PredictionServiceClient, ) except ImportError: raise_vertex_import_error() if values["project"] is None: raise ValueError( "A GCP project should be provided to run inference on Model Garden!" ) client_options = ClientOptions( api_endpoint=f"{values['location']}-aiplatform.googleapis.com" ) client_info = get_client_info(module="vertex-ai-model-garden") values["client"] = PredictionServiceClient( client_options=client_options, client_info=client_info ) values["async_client"] = PredictionServiceAsyncClient( client_options=client_options, client_info=client_info ) return values @property def _llm_type(self) -> str: return "vertexai_model_garden" def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" try: from google.protobuf import json_format from google.protobuf.struct_pb2 import Value except ImportError: raise ImportError( "protobuf package not found, please install it with" " `pip install protobuf`" ) instances = [] for prompt in prompts: if self.allowed_model_args: instance = { k: v for k, v in kwargs.items() if k in self.allowed_model_args } else: instance = {} instance[self.prompt_arg] = prompt instances.append(instance) predict_instances = [ json_format.ParseDict(instance_dict, Value()) for instance_dict in instances ] endpoint = self.client.endpoint_path( project=self.project, location=self.location, endpoint=self.endpoint_id ) response = self.client.predict(endpoint=endpoint, instances=predict_instances) generations: List[List[Generation]] = [] for result in response.predictions: generations.append( [Generation(text=prediction[self.result_arg]) for prediction in result] ) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" try: from google.protobuf import json_format from google.protobuf.struct_pb2 import Value except ImportError: raise ImportError( "protobuf package not found, please install it with" " `pip install protobuf`" ) instances = [] for prompt in prompts: if self.allowed_model_args: instance = { k: v for k, v in kwargs.items() if k in self.allowed_model_args } else: instance = {} instance[self.prompt_arg] = prompt instances.append(instance) predict_instances = [ json_format.ParseDict(instance_dict, Value()) for instance_dict in instances ] endpoint = self.async_client.endpoint_path( project=self.project, location=self.location, endpoint=self.endpoint_id ) response = await self.async_client.predict( endpoint=endpoint, instances=predict_instances ) generations: List[List[Generation]] = [] for result in response.predictions: generations.append( [Generation(text=prediction[self.result_arg]) for prediction in result] ) return LLMResult(generations=generations)
[ "prompt" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~llms~opaqueprompts.py
import logging from typing import Any, Dict, List, Optional from langchain_core.pydantic_v1 import Extra, root_validator from langchain_core.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class OpaquePrompts(LLM): """An LLM wrapper that uses OpaquePrompts to sanitize prompts. Wraps another LLM and sanitizes prompts before passing it to the LLM, then de-sanitizes the response. To use, you should have the ``opaqueprompts`` python package installed, and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import OpaquePrompts from langchain.chat_models import ChatOpenAI op_llm = OpaquePrompts(base_llm=ChatOpenAI()) """ base_llm: BaseLanguageModel """The base LLM to use.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validates that the OpaquePrompts API key and the Python package exist.""" try: import opaqueprompts as op except ImportError: raise ImportError( "Could not import the `opaqueprompts` Python package, " "please install it with `pip install opaqueprompts`." ) if op.__package__ is None: raise ValueError( "Could not properly import `opaqueprompts`, " "opaqueprompts.__package__ is None." ) api_key = get_from_dict_or_env( values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default="" ) if not api_key: raise ValueError( "Could not find OPAQUEPROMPTS_API_KEY in the environment. " "Please set it to your OpaquePrompts API key." "You can get it by creating an account on the OpaquePrompts website: " "https://opaqueprompts.opaque.co/ ." ) return values def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call base LLM with sanitization before and de-sanitization after. Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example: .. code-block:: python response = op_llm("Tell me a joke.") """ import opaqueprompts as op _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager() # sanitize the prompt by replacing the sensitive information with a placeholder sanitize_response: op.SanitizeResponse = op.sanitize([prompt]) sanitized_prompt_value_str = sanitize_response.sanitized_texts[0] # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith. # call the LLM with the sanitized prompt and get the response llm_response = self.base_llm.predict( sanitized_prompt_value_str, stop=stop, ) # desanitize the response by restoring the original sensitive information desanitize_response: op.DesanitizeResponse = op.desanitize( llm_response, secure_context=sanitize_response.secure_context, ) return desanitize_response.desanitized_text @property def _llm_type(self) -> str: """Return type of LLM. This is an override of the base class method. """ return "opaqueprompts"
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~marqo.py
from __future__ import annotations import json import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union, ) from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document if TYPE_CHECKING: import marqo class Marqo(VectorStore): """`Marqo` vector store. Marqo indexes have their own models associated with them to generate your embeddings. This means that you can selected from a range of different models and also use CLIP models to create multimodal indexes with images and text together. Marqo also supports more advanced queries with multiple weighted terms, see See https://docs.marqo.ai/latest/#searching-using-weights-in-queries. This class can flexibly take strings or dictionaries for weighted queries in its similarity search methods. To use, you should have the `marqo` python package installed, you can do this with `pip install marqo`. Example: .. code-block:: python import marqo from langchain.vectorstores import Marqo client = marqo.Client(url=os.environ["MARQO_URL"], ...) vectorstore = Marqo(client, index_name) """ def __init__( self, client: marqo.Client, index_name: str, add_documents_settings: Optional[Dict[str, Any]] = None, searchable_attributes: Optional[List[str]] = None, page_content_builder: Optional[Callable[[Dict[str, Any]], str]] = None, ): """Initialize with Marqo client.""" try: import marqo except ImportError: raise ImportError( "Could not import marqo python package. " "Please install it with `pip install marqo`." ) if not isinstance(client, marqo.Client): raise ValueError( f"client should be an instance of marqo.Client, got {type(client)}" ) self._client = client self._index_name = index_name self._add_documents_settings = ( {} if add_documents_settings is None else add_documents_settings ) self._searchable_attributes = searchable_attributes self.page_content_builder = page_content_builder self.tensor_fields = ["text"] self._document_batch_size = 1024 @property def embeddings(self) -> Optional[Embeddings]: return None def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Upload texts with metadata (properties) to Marqo. You can either have marqo generate ids for each document or you can provide your own by including a "_id" field in the metadata objects. Args: texts (Iterable[str]): am iterator of texts - assumed to preserve an order that matches the metadatas. metadatas (Optional[List[dict]], optional): a list of metadatas. Raises: ValueError: if metadatas is provided and the number of metadatas differs from the number of texts. Returns: List[str]: The list of ids that were added. """ if self._client.index(self._index_name).get_settings()["index_defaults"][ "treat_urls_and_pointers_as_images" ]: raise ValueError( "Marqo.add_texts is disabled for multimodal indexes. To add documents " "with a multimodal index use the Python client for Marqo directly." 
) documents: List[Dict[str, str]] = [] num_docs = 0 for i, text in enumerate(texts): doc = { "text": text, "metadata": json.dumps(metadatas[i]) if metadatas else json.dumps({}), } documents.append(doc) num_docs += 1 ids = [] for i in range(0, num_docs, self._document_batch_size): response = self._client.index(self._index_name).add_documents( documents[i : i + self._document_batch_size], tensor_fields=self.tensor_fields, **self._add_documents_settings, ) if response["errors"]: err_msg = ( f"Error in upload for documents in index range [{i}," f"{i + self._document_batch_size}], " f"check Marqo logs." ) raise RuntimeError(err_msg) ids += [item["_id"] for item in response["items"]] return ids def similarity_search( self, query: Union[str, Dict[str, float]], k: int = 4, **kwargs: Any, ) -> List[Document]: """Search the marqo index for the most similar documents. Args: query (Union[str, Dict[str, float]]): The query for the search, either as a string or a weighted query. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Document]: k documents ordered from best to worst match. """ results = self.marqo_similarity_search(query=query, k=k) documents = self._construct_documents_from_results_without_score(results) return documents def similarity_search_with_score( self, query: Union[str, Dict[str, float]], k: int = 4, ) -> List[Tuple[Document, float]]: """Return documents from Marqo that are similar to the query as well as their scores. Args: query (str): The query to search with, either as a string or a weighted query. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Tuple[Document, float]]: The matching documents and their scores, ordered by descending score. """ results = self.marqo_similarity_search(query=query, k=k) scored_documents = self._construct_documents_from_results_with_score(results) return scored_documents def bulk_similarity_search( self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4, **kwargs: Any, ) -> List[List[Document]]: """Search the marqo index for the most similar documents in bulk with multiple queries. Args: queries (Iterable[Union[str, Dict[str, float]]]): An iterable of queries to execute in bulk, queries in the list can be strings or dictionaries of weighted queries. k (int, optional): The number of documents to return for each query. Defaults to 4. Returns: List[List[Document]]: A list of results for each query. """ bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k) bulk_documents: List[List[Document]] = [] for results in bulk_results["result"]: documents = self._construct_documents_from_results_without_score(results) bulk_documents.append(documents) return bulk_documents def bulk_similarity_search_with_score( self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4, **kwargs: Any, ) -> List[List[Tuple[Document, float]]]: """Return documents from Marqo that are similar to the query as well as their scores using a batch of queries. Args: query (Iterable[Union[str, Dict[str, float]]]): An iterable of queries to execute in bulk, queries in the list can be strings or dictionaries of weighted queries. k (int, optional): The number of documents to return. Defaults to 4. 
Returns: List[Tuple[Document, float]]: A list of lists of the matching documents and their scores for each query """ bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k) bulk_documents: List[List[Tuple[Document, float]]] = [] for results in bulk_results["result"]: documents = self._construct_documents_from_results_with_score(results) bulk_documents.append(documents) return bulk_documents def _construct_documents_from_results_with_score( self, results: Dict[str, List[Dict[str, str]]] ) -> List[Tuple[Document, Any]]: """Helper to convert Marqo results into documents. Args: results (List[dict]): A marqo results object with the 'hits'. include_scores (bool, optional): Include scores alongside documents. Defaults to False. Returns: Union[List[Document], List[Tuple[Document, float]]]: The documents or document score pairs if `include_scores` is true. """ documents: List[Tuple[Document, Any]] = [] for res in results["hits"]: if self.page_content_builder is None: text = res["text"] else: text = self.page_content_builder(res) metadata = json.loads(res.get("metadata", "{}")) documents.append( (Document(page_content=text, metadata=metadata), res["_score"]) ) return documents def _construct_documents_from_results_without_score( self, results: Dict[str, List[Dict[str, str]]] ) -> List[Document]: """Helper to convert Marqo results into documents. Args: results (List[dict]): A marqo results object with the 'hits'. include_scores (bool, optional): Include scores alongside documents. Defaults to False. Returns: Union[List[Document], List[Tuple[Document, float]]]: The documents or document score pairs if `include_scores` is true. """ documents: List[Document] = [] for res in results["hits"]: if self.page_content_builder is None: text = res["text"] else: text = self.page_content_builder(res) metadata = json.loads(res.get("metadata", "{}")) documents.append(Document(page_content=text, metadata=metadata)) return documents def marqo_similarity_search( self, query: Union[str, Dict[str, float]], k: int = 4, ) -> Dict[str, List[Dict[str, str]]]: """Return documents from Marqo exposing Marqo's output directly Args: query (str): The query to search with. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Dict[str, Any]]: This hits from marqo. """ results = self._client.index(self._index_name).search( q=query, searchable_attributes=self._searchable_attributes, limit=k ) return results def marqo_bulk_similarity_search( self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4 ) -> Dict[str, List[Dict[str, List[Dict[str, str]]]]]: """Return documents from Marqo using a bulk search, exposes Marqo's output directly Args: queries (Iterable[Union[str, Dict[str, float]]]): A list of queries. k (int, optional): The number of documents to return for each query. Defaults to 4. Returns: Dict[str, Dict[List[Dict[str, Dict[str, Any]]]]]: A bulk search results object """ bulk_results = { "result": [ self._client.index(self._index_name).search( q=query, searchable_attributes=self._searchable_attributes, limit=k ) for query in queries ] } return bulk_results @classmethod def from_documents( cls: Type[Marqo], documents: List[Document], embedding: Union[Embeddings, None] = None, **kwargs: Any, ) -> Marqo: """Return VectorStore initialized from documents. Note that Marqo does not need embeddings, we retain the parameter to adhere to the Liskov substitution principle. Args: documents (List[Document]): Input documents embedding (Any, optional): Embeddings (not required). 
Defaults to None. Returns: VectorStore: A Marqo vectorstore """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return cls.from_texts(texts, metadatas=metadatas, **kwargs) @classmethod def from_texts( cls, texts: List[str], embedding: Any = None, metadatas: Optional[List[dict]] = None, index_name: str = "", url: str = "http://localhost:8882", api_key: str = "", add_documents_settings: Optional[Dict[str, Any]] = None, searchable_attributes: Optional[List[str]] = None, page_content_builder: Optional[Callable[[Dict[str, str]], str]] = None, index_settings: Optional[Dict[str, Any]] = None, verbose: bool = True, **kwargs: Any, ) -> Marqo: """Return Marqo initialized from texts. Note that Marqo does not need embeddings, we retain the parameter to adhere to the Liskov substitution principle. This is a quick way to get started with marqo - simply provide your texts and metadatas and this will create an instance of the data store and index the provided data. To know the ids of your documents with this approach you will need to include them in under the key "_id" in your metadatas for each text Example: .. code-block:: python from langchain.vectorstores import Marqo datastore = Marqo(texts=['text'], index_name='my-first-index', url='http://localhost:8882') Args: texts (List[str]): A list of texts to index into marqo upon creation. embedding (Any, optional): Embeddings (not required). Defaults to None. index_name (str, optional): The name of the index to use, if none is provided then one will be created with a UUID. Defaults to None. url (str, optional): The URL for Marqo. Defaults to "http://localhost:8882". api_key (str, optional): The API key for Marqo. Defaults to "". metadatas (Optional[List[dict]], optional): A list of metadatas, to accompany the texts. Defaults to None. this is only used when a new index is being created. Defaults to "cpu". Can be "cpu" or "cuda". add_documents_settings (Optional[Dict[str, Any]], optional): Settings for adding documents, see https://docs.marqo.ai/0.0.16/API-Reference/documents/#query-parameters. Defaults to {}. index_settings (Optional[Dict[str, Any]], optional): Index settings if the index doesn't exist, see https://docs.marqo.ai/0.0.16/API-Reference/indexes/#index-defaults-object. Defaults to {}. Returns: Marqo: An instance of the Marqo vector store """ try: import marqo except ImportError: raise ImportError( "Could not import marqo python package. " "Please install it with `pip install marqo`." ) if not index_name: index_name = str(uuid.uuid4()) client = marqo.Client(url=url, api_key=api_key) try: client.create_index(index_name, settings_dict=index_settings or {}) if verbose: print(f"Created {index_name} successfully.") except Exception: if verbose: print(f"Index {index_name} exists.") instance: Marqo = cls( client, index_name, searchable_attributes=searchable_attributes, add_documents_settings=add_documents_settings or {}, page_content_builder=page_content_builder, ) instance.add_texts(texts, metadatas) return instance def get_indexes(self) -> List[Dict[str, str]]: """Helper to see your available indexes in marqo, useful if the from_texts method was used without an index name specified Returns: List[Dict[str, str]]: The list of indexes """ return self._client.get_indexes()["results"] def get_number_of_documents(self) -> int: """Helper to see the number of documents in the index Returns: int: The number of documents """ return self._client.index(self._index_name).get_stats()["numberOfDocuments"]
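# --- Hypothetical usage sketch (not part of the module above) ---
# Indexing a few texts and querying them, including Marqo's weighted-query form
# accepted by similarity_search. The index name, URL, and example texts are
# placeholder assumptions for illustration.
from langchain.vectorstores import Marqo

docsearch = Marqo.from_texts(
    texts=["Deep learning uses neural networks.", "Green tea is brewed from leaves."],
    index_name="langchain-demo",  # assumed index name
    url="http://localhost:8882",  # assumes a locally running Marqo server
)

# Plain string query.
docs = docsearch.similarity_search("What is deep learning?", k=1)

# Weighted query: a mapping of query terms to weights.
weighted = docsearch.similarity_search({"neural networks": 1.0, "tea": 0.1}, k=1)
print(docs[0].page_content, weighted[0].page_content)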
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_models~litellm.py
"""Wrapper around LiteLLM's model I/O library.""" from __future__ import annotations import logging from typing import ( Any, AsyncIterator, Callable, Dict, Iterator, List, Mapping, Optional, Tuple, Type, Union, ) from langchain_core.pydantic_v1 import Field, root_validator from langchain_core.schema import ( ChatGeneration, ChatResult, ) from langchain_core.schema.messages import ( AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ChatMessage, ChatMessageChunk, FunctionMessage, FunctionMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ) from langchain_core.schema.output import ChatGenerationChunk from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import ( BaseChatModel, _agenerate_from_stream, _generate_from_stream, ) from langchain.llms.base import create_base_retry_decorator from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class ChatLiteLLMException(Exception): """Error with the `LiteLLM I/O` library""" def _create_retry_decorator( llm: ChatLiteLLM, run_manager: Optional[ Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] ] = None, ) -> Callable[[Any], Any]: """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions""" import litellm errors = [ litellm.Timeout, litellm.APIError, litellm.APIConnectionError, litellm.RateLimitError, ] return create_base_retry_decorator( error_types=errors, max_retries=llm.max_retries, run_manager=run_manager ) def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: role = _dict["role"] if role == "user": return HumanMessage(content=_dict["content"]) elif role == "assistant": # Fix for azure # Also OpenAI returns None for tool invocations content = _dict.get("content", "") or "" if _dict.get("function_call"): additional_kwargs = {"function_call": dict(_dict["function_call"])} else: additional_kwargs = {} return AIMessage(content=content, additional_kwargs=additional_kwargs) elif role == "system": return SystemMessage(content=_dict["content"]) elif role == "function": return FunctionMessage(content=_dict["content"], name=_dict["name"]) else: return ChatMessage(content=_dict["content"], role=role) async def acompletion_with_retry( llm: ChatLiteLLM, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) def _convert_delta_to_message_chunk( _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk] ) -> BaseMessageChunk: role = _dict.get("role") content = _dict.get("content") or "" if _dict.get("function_call"): additional_kwargs = {"function_call": dict(_dict["function_call"])} else: additional_kwargs = {} if role == "user" or default_class == HumanMessageChunk: return HumanMessageChunk(content=content) elif role == "assistant" or default_class == AIMessageChunk: return AIMessageChunk(content=content, additional_kwargs=additional_kwargs) elif role == "system" or default_class == SystemMessageChunk: return SystemMessageChunk(content=content) elif role == "function" or default_class == FunctionMessageChunk: return FunctionMessageChunk(content=content, 
name=_dict["name"]) elif role or default_class == ChatMessageChunk: return ChatMessageChunk(content=content, role=role) else: return default_class(content=content) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} if "function_call" in message.additional_kwargs: message_dict["function_call"] = message.additional_kwargs["function_call"] elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} elif isinstance(message, FunctionMessage): message_dict = { "role": "function", "content": message.content, "name": message.name, } else: raise ValueError(f"Got unknown type {message}") if "name" in message.additional_kwargs: message_dict["name"] = message.additional_kwargs["name"] return message_dict class ChatLiteLLM(BaseChatModel): """A chat model that uses the LiteLLM API.""" client: Any #: :meta private: model: str = "gpt-3.5-turbo" model_name: Optional[str] = None """Model name to use.""" openai_api_key: Optional[str] = None azure_api_key: Optional[str] = None anthropic_api_key: Optional[str] = None replicate_api_key: Optional[str] = None cohere_api_key: Optional[str] = None openrouter_api_key: Optional[str] = None streaming: bool = False api_base: Optional[str] = None organization: Optional[str] = None custom_llm_provider: Optional[str] = None request_timeout: Optional[Union[float, Tuple[float, float]]] = None temperature: Optional[float] = 1 model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Run inference with this temperature. Must by in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" top_k: Optional[int] = None """Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.""" n: int = 1 """Number of chat completions to generate for each prompt. 
Note that the API may not return the full n completions if duplicates are generated.""" max_tokens: int = 256 max_retries: int = 6 @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" set_model_value = self.model if self.model_name is not None: set_model_value = self.model_name return { "model": set_model_value, "force_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "n": self.n, "temperature": self.temperature, "custom_llm_provider": self.custom_llm_provider, **self.model_kwargs, } @property def _client_params(self) -> Dict[str, Any]: """Get the parameters used for the openai client.""" set_model_value = self.model if self.model_name is not None: set_model_value = self.model_name self.client.api_base = self.api_base self.client.organization = self.organization creds: Dict[str, Any] = { "model": set_model_value, "force_timeout": self.request_timeout, } return {**self._default_params, **creds} def completion_with_retry( self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any ) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(self, run_manager=run_manager) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return self.client.completion(**kwargs) return _completion_with_retry(**kwargs) @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists, temperature, top_p, and top_k.""" try: import litellm except ImportError: raise ChatLiteLLMException( "Could not import google.generativeai python package. " "Please install it with `pip install google-generativeai`" ) values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY", default="" ) values["azure_api_key"] = get_from_dict_or_env( values, "azure_api_key", "AZURE_API_KEY", default="" ) values["anthropic_api_key"] = get_from_dict_or_env( values, "anthropic_api_key", "ANTHROPIC_API_KEY", default="" ) values["replicate_api_key"] = get_from_dict_or_env( values, "replicate_api_key", "REPLICATE_API_KEY", default="" ) values["openrouter_api_key"] = get_from_dict_or_env( values, "openrouter_api_key", "OPENROUTER_API_KEY", default="" ) values["cohere_api_key"] = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY", default="" ) values["huggingface_api_key"] = get_from_dict_or_env( values, "huggingface_api_key", "HUGGINGFACE_API_KEY", default="" ) values["together_ai_api_key"] = get_from_dict_or_env( values, "together_ai_api_key", "TOGETHERAI_API_KEY", default="" ) values["client"] = litellm if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: raise ValueError("temperature must be in the range [0.0, 1.0]") if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: raise ValueError("top_p must be in the range [0.0, 1.0]") if values["top_k"] is not None and values["top_k"] <= 0: raise ValueError("top_k must be positive") return values def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: should_stream = stream if stream is not None else self.streaming if should_stream: stream_iter = self._stream( messages, stop=stop, run_manager=run_manager, **kwargs ) return _generate_from_stream(stream_iter) message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs} response = 
self.completion_with_retry( messages=message_dicts, run_manager=run_manager, **params ) return self._create_chat_result(response) def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: generations = [] for res in response["choices"]: message = _convert_dict_to_message(res["message"]) gen = ChatGeneration( message=message, generation_info=dict(finish_reason=res.get("finish_reason")), ) generations.append(gen) token_usage = response.get("usage", {}) set_model_value = self.model if self.model_name is not None: set_model_value = self.model_name llm_output = {"token_usage": token_usage, "model": set_model_value} return ChatResult(generations=generations, llm_output=llm_output) def _create_message_dicts( self, messages: List[BaseMessage], stop: Optional[List[str]] ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: params = self._client_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs, "stream": True} default_chunk_class = AIMessageChunk for chunk in self.completion_with_retry( messages=message_dicts, run_manager=run_manager, **params ): if len(chunk["choices"]) == 0: continue delta = chunk["choices"][0]["delta"] chunk = _convert_delta_to_message_chunk(delta, default_chunk_class) default_chunk_class = chunk.__class__ yield ChatGenerationChunk(message=chunk) if run_manager: run_manager.on_llm_new_token(chunk.content) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs, "stream": True} default_chunk_class = AIMessageChunk async for chunk in await acompletion_with_retry( self, messages=message_dicts, run_manager=run_manager, **params ): if len(chunk["choices"]) == 0: continue delta = chunk["choices"][0]["delta"] chunk = _convert_delta_to_message_chunk(delta, default_chunk_class) default_chunk_class = chunk.__class__ yield ChatGenerationChunk(message=chunk) if run_manager: await run_manager.on_llm_new_token(chunk.content) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: should_stream = stream if stream is not None else self.streaming if should_stream: stream_iter = self._astream( messages=messages, stop=stop, run_manager=run_manager, **kwargs ) return await _agenerate_from_stream(stream_iter) message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs} response = await acompletion_with_retry( self, messages=message_dicts, run_manager=run_manager, **params ) return self._create_chat_result(response) @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" set_model_value = self.model if self.model_name is not None: set_model_value = self.model_name return { "model": set_model_value, "temperature": self.temperature, "top_p": self.top_p, 
"top_k": self.top_k, "n": self.n, } @property def _llm_type(self) -> str: return "litellm-chat"
[ "content" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~callbacks~arthur_callback.py
"""ArthurAI's Callback Handler.""" from __future__ import annotations import os import uuid from collections import defaultdict from datetime import datetime from time import time from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Optional import numpy as np from langchain_core.schema import AgentAction, AgentFinish, LLMResult from langchain.callbacks.base import BaseCallbackHandler if TYPE_CHECKING: import arthurai from arthurai.core.models import ArthurModel PROMPT_TOKENS = "prompt_tokens" COMPLETION_TOKENS = "completion_tokens" TOKEN_USAGE = "token_usage" FINISH_REASON = "finish_reason" DURATION = "duration" def _lazy_load_arthur() -> arthurai: """Lazy load Arthur.""" try: import arthurai except ImportError as e: raise ImportError( "To use the ArthurCallbackHandler you need the" " `arthurai` package. Please install it with" " `pip install arthurai`.", e, ) return arthurai class ArthurCallbackHandler(BaseCallbackHandler): """Callback Handler that logs to Arthur platform. Arthur helps enterprise teams optimize model operations and performance at scale. The Arthur API tracks model performance, explainability, and fairness across tabular, NLP, and CV models. Our API is model- and platform-agnostic, and continuously scales with complex and dynamic enterprise needs. To learn more about Arthur, visit our website at https://www.arthur.ai/ or read the Arthur docs at https://docs.arthur.ai/ """ def __init__( self, arthur_model: ArthurModel, ) -> None: """Initialize callback handler.""" super().__init__() arthurai = _lazy_load_arthur() Stage = arthurai.common.constants.Stage ValueType = arthurai.common.constants.ValueType self.arthur_model = arthur_model # save the attributes of this model to be used when preparing # inferences to log to Arthur in on_llm_end() self.attr_names = set([a.name for a in self.arthur_model.get_attributes()]) self.input_attr = [ x for x in self.arthur_model.get_attributes() if x.stage == Stage.ModelPipelineInput and x.value_type == ValueType.Unstructured_Text ][0].name self.output_attr = [ x for x in self.arthur_model.get_attributes() if x.stage == Stage.PredictedValue and x.value_type == ValueType.Unstructured_Text ][0].name self.token_likelihood_attr = None if ( len( [ x for x in self.arthur_model.get_attributes() if x.value_type == ValueType.TokenLikelihoods ] ) > 0 ): self.token_likelihood_attr = [ x for x in self.arthur_model.get_attributes() if x.value_type == ValueType.TokenLikelihoods ][0].name self.run_map: DefaultDict[str, Any] = defaultdict(dict) @classmethod def from_credentials( cls, model_id: str, arthur_url: Optional[str] = "https://app.arthur.ai", arthur_login: Optional[str] = None, arthur_password: Optional[str] = None, ) -> ArthurCallbackHandler: """Initialize callback handler from Arthur credentials. Args: model_id (str): The ID of the arthur model to log to. arthur_url (str, optional): The URL of the Arthur instance to log to. Defaults to "https://app.arthur.ai". arthur_login (str, optional): The login to use to connect to Arthur. Defaults to None. arthur_password (str, optional): The password to use to connect to Arthur. Defaults to None. Returns: ArthurCallbackHandler: The initialized callback handler. """ arthurai = _lazy_load_arthur() ArthurAI = arthurai.ArthurAI ResponseClientError = arthurai.common.exceptions.ResponseClientError # connect to Arthur if arthur_login is None: try: arthur_api_key = os.environ["ARTHUR_API_KEY"] except KeyError: raise ValueError( "No Arthur authentication provided. 
Either give" " a login to the ArthurCallbackHandler" " or set an ARTHUR_API_KEY as an environment variable." ) arthur = ArthurAI(url=arthur_url, access_key=arthur_api_key) else: if arthur_password is None: arthur = ArthurAI(url=arthur_url, login=arthur_login) else: arthur = ArthurAI( url=arthur_url, login=arthur_login, password=arthur_password ) # get model from Arthur by the provided model ID try: arthur_model = arthur.get_model(model_id) except ResponseClientError: raise ValueError( f"Was unable to retrieve model with id {model_id} from Arthur." " Make sure the ID corresponds to a model that is currently" " registered with your Arthur account." ) return cls(arthur_model) def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """On LLM start, save the input prompts""" run_id = kwargs["run_id"] self.run_map[run_id]["input_texts"] = prompts self.run_map[run_id]["start_time"] = time() def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """On LLM end, send data to Arthur.""" try: import pytz # type: ignore[import] except ImportError as e: raise ImportError( "Could not import pytz. Please install it with 'pip install pytz'." ) from e run_id = kwargs["run_id"] # get the run params from this run ID, # or raise an error if this run ID has no corresponding metadata in self.run_map try: run_map_data = self.run_map[run_id] except KeyError as e: raise KeyError( "This function has been called with a run_id" " that was never registered in on_llm_start()." " Restart and try running the LLM again" ) from e # mark the duration time between on_llm_start() and on_llm_end() time_from_start_to_end = time() - run_map_data["start_time"] # create inferences to log to Arthur inferences = [] for i, generations in enumerate(response.generations): for generation in generations: inference = { "partner_inference_id": str(uuid.uuid4()), "inference_timestamp": datetime.now(tz=pytz.UTC), self.input_attr: run_map_data["input_texts"][i], self.output_attr: generation.text, } if generation.generation_info is not None: # add finish reason to the inference # if generation info contains a finish reason and # if the ArthurModel was registered to monitor finish_reason if ( FINISH_REASON in generation.generation_info and FINISH_REASON in self.attr_names ): inference[FINISH_REASON] = generation.generation_info[ FINISH_REASON ] # add token likelihoods data to the inference if the ArthurModel # was registered to monitor token likelihoods logprobs_data = generation.generation_info["logprobs"] if ( logprobs_data is not None and self.token_likelihood_attr is not None ): logprobs = logprobs_data["top_logprobs"] likelihoods = [ {k: np.exp(v) for k, v in logprobs[i].items()} for i in range(len(logprobs)) ] inference[self.token_likelihood_attr] = likelihoods # add token usage counts to the inference if the # ArthurModel was registered to monitor token usage if ( isinstance(response.llm_output, dict) and TOKEN_USAGE in response.llm_output ): token_usage = response.llm_output[TOKEN_USAGE] if ( PROMPT_TOKENS in token_usage and PROMPT_TOKENS in self.attr_names ): inference[PROMPT_TOKENS] = token_usage[PROMPT_TOKENS] if ( COMPLETION_TOKENS in token_usage and COMPLETION_TOKENS in self.attr_names ): inference[COMPLETION_TOKENS] = token_usage[COMPLETION_TOKENS] # add inference duration to the inference if the ArthurModel # was registered to monitor inference duration if DURATION in self.attr_names: inference[DURATION] = time_from_start_to_end inferences.append(inference) # send inferences to 
arthur self.arthur_model.send_inferences(inferences) def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """On chain start, do nothing.""" def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """On chain end, do nothing.""" def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM outputs an error.""" def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """On new token, pass.""" def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM chain outputs an error.""" def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: """Do nothing when tool starts.""" def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Do nothing when agent takes a specific action.""" def on_tool_end( self, output: str, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """Do nothing when tool ends.""" def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when tool outputs an error.""" def on_text(self, text: str, **kwargs: Any) -> None: """Do nothing""" def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Do nothing"""
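# --- Hypothetical usage sketch (not part of the module above) ---
# Attaching the Arthur callback to a chat model so each completion is sent to
# Arthur as an inference. The Arthur model ID, login, password, and prompt are
# placeholders; ChatLiteLLM (shown elsewhere in this collection) stands in for
# any LangChain model that accepts `callbacks`.
from langchain.callbacks.arthur_callback import ArthurCallbackHandler
from langchain.chat_models.litellm import ChatLiteLLM
from langchain_core.schema.messages import HumanMessage

arthur = ArthurCallbackHandler.from_credentials(
    model_id="00000000-0000-0000-0000-000000000000",  # placeholder Arthur model ID
    arthur_url="https://app.arthur.ai",
    arthur_login="[email protected]",          # placeholder login
    arthur_password="placeholder-password",           # placeholder password
)

chat = ChatLiteLLM(model="gpt-3.5-turbo", callbacks=[arthur])
chat.invoke([HumanMessage(content="Summarize what MLOps is in one sentence.")])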
[ "prompt_tokens" ]
2024-01-10
axgpt/langchain
libs~core~tests~unit_tests~runnable~test_history.py
from typing import Any, Callable, Sequence, Union from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableConfig, RunnableLambda from langchain_core.runnables.history import RunnableWithMessageHistory from langchain_core.schema import AIMessage, BaseMessage, HumanMessage from tests.unit_tests.fake.memory import ChatMessageHistory def _get_get_session_history() -> Callable[..., ChatMessageHistory]: chat_history_store = {} def get_session_history(session_id: str, **kwargs: Any) -> ChatMessageHistory: if session_id not in chat_history_store: chat_history_store[session_id] = ChatMessageHistory() return chat_history_store[session_id] return get_session_history def test_input_messages() -> None: runnable = RunnableLambda( lambda messages: "you said: " + "\n".join(str(m.content) for m in messages if isinstance(m, HumanMessage)) ) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory(runnable, get_session_history) config: RunnableConfig = {"configurable": {"session_id": "1"}} output = with_history.invoke([HumanMessage(content="hello")], config) assert output == "you said: hello" output = with_history.invoke([HumanMessage(content="good bye")], config) assert output == "you said: hello\ngood bye" def test_input_dict() -> None: runnable = RunnableLambda( lambda input: "you said: " + "\n".join( str(m.content) for m in input["messages"] if isinstance(m, HumanMessage) ) ) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="messages" ) config: RunnableConfig = {"configurable": {"session_id": "2"}} output = with_history.invoke({"messages": [HumanMessage(content="hello")]}, config) assert output == "you said: hello" output = with_history.invoke( {"messages": [HumanMessage(content="good bye")]}, config ) assert output == "you said: hello\ngood bye" def test_input_dict_with_history_key() -> None: runnable = RunnableLambda( lambda input: "you said: " + "\n".join( [str(m.content) for m in input["history"] if isinstance(m, HumanMessage)] + [input["input"]] ) ) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", ) config: RunnableConfig = {"configurable": {"session_id": "3"}} output = with_history.invoke({"input": "hello"}, config) assert output == "you said: hello" output = with_history.invoke({"input": "good bye"}, config) assert output == "you said: hello\ngood bye" def test_output_message() -> None: runnable = RunnableLambda( lambda input: AIMessage( content="you said: " + "\n".join( [ str(m.content) for m in input["history"] if isinstance(m, HumanMessage) ] + [input["input"]] ) ) ) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", ) config: RunnableConfig = {"configurable": {"session_id": "4"}} output = with_history.invoke({"input": "hello"}, config) assert output == AIMessage(content="you said: hello") output = with_history.invoke({"input": "good bye"}, config) assert output == AIMessage(content="you said: hello\ngood bye") def test_output_messages() -> None: runnable = RunnableLambda( lambda input: [ AIMessage( content="you said: " + "\n".join( [ str(m.content) for m in input["history"] if isinstance(m, HumanMessage) ] + [input["input"]] ) ) ] ) get_session_history = 
_get_get_session_history() with_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", ) config: RunnableConfig = {"configurable": {"session_id": "5"}} output = with_history.invoke({"input": "hello"}, config) assert output == [AIMessage(content="you said: hello")] output = with_history.invoke({"input": "good bye"}, config) assert output == [AIMessage(content="you said: hello\ngood bye")] def test_output_dict() -> None: runnable = RunnableLambda( lambda input: { "output": [ AIMessage( content="you said: " + "\n".join( [ str(m.content) for m in input["history"] if isinstance(m, HumanMessage) ] + [input["input"]] ) ) ] } ) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", output_messages_key="output", ) config: RunnableConfig = {"configurable": {"session_id": "6"}} output = with_history.invoke({"input": "hello"}, config) assert output == {"output": [AIMessage(content="you said: hello")]} output = with_history.invoke({"input": "good bye"}, config) assert output == {"output": [AIMessage(content="you said: hello\ngood bye")]} def test_get_input_schema_input_dict() -> None: class RunnableWithChatHistoryInput(BaseModel): input: Union[str, BaseMessage, Sequence[BaseMessage]] history: Sequence[BaseMessage] runnable = RunnableLambda( lambda input: { "output": [ AIMessage( content="you said: " + "\n".join( [ str(m.content) for m in input["history"] if isinstance(m, HumanMessage) ] + [input["input"]] ) ) ] } ) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", output_messages_key="output", ) assert ( with_history.get_input_schema().schema() == RunnableWithChatHistoryInput.schema() ) def test_get_input_schema_input_messages() -> None: class RunnableWithChatHistoryInput(BaseModel): __root__: Sequence[BaseMessage] runnable = RunnableLambda( lambda messages: { "output": [ AIMessage( content="you said: " + "\n".join( [ str(m.content) for m in messages if isinstance(m, HumanMessage) ] ) ) ] } ) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory( runnable, get_session_history, output_messages_key="output" ) assert ( with_history.get_input_schema().schema() == RunnableWithChatHistoryInput.schema() )
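# --- Hypothetical usage sketch (not part of the tests above) ---
# The tests above drive RunnableWithMessageHistory with plain lambdas; this
# sketch wraps a prompt | chat-model chain instead. The session store, the
# ChatMessageHistory import location, the chat model, and the prompt text are
# all assumptions for illustration.
from langchain.chat_models.litellm import ChatLiteLLM
from langchain.memory import ChatMessageHistory
from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a terse assistant."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)

store = {}  # maps session_id -> ChatMessageHistory

def get_session_history(session_id: str) -> ChatMessageHistory:
    return store.setdefault(session_id, ChatMessageHistory())

chain = RunnableWithMessageHistory(
    prompt | ChatLiteLLM(model="gpt-3.5-turbo"),
    get_session_history,
    input_messages_key="input",
    history_messages_key="history",
)
chain.invoke({"input": "Hi there"}, config={"configurable": {"session_id": "demo"}})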
[ "good bye", "\n", "you said: hello\ngood bye", "you said: hello", "input", "you said: ", "hello" ]
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~callbacks~fake_callback_handler.py
"""A fake callback handler for testing purposes.""" from itertools import chain from typing import Any, Dict, List, Optional, Union from uuid import UUID from langchain_core.pydantic_v1 import BaseModel from langchain_core.schema.messages import BaseMessage from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler class BaseFakeCallbackHandler(BaseModel): """Base fake callback handler for testing.""" starts: int = 0 ends: int = 0 errors: int = 0 text: int = 0 ignore_llm_: bool = False ignore_chain_: bool = False ignore_agent_: bool = False ignore_retriever_: bool = False ignore_chat_model_: bool = False # to allow for similar callback handlers that are not technicall equal fake_id: Union[str, None] = None # add finer-grained counters for easier debugging of failing tests chain_starts: int = 0 chain_ends: int = 0 llm_starts: int = 0 llm_ends: int = 0 llm_streams: int = 0 tool_starts: int = 0 tool_ends: int = 0 agent_actions: int = 0 agent_ends: int = 0 chat_model_starts: int = 0 retriever_starts: int = 0 retriever_ends: int = 0 retriever_errors: int = 0 retries: int = 0 class BaseFakeCallbackHandlerMixin(BaseFakeCallbackHandler): """Base fake callback handler mixin for testing.""" def on_llm_start_common(self) -> None: self.llm_starts += 1 self.starts += 1 def on_llm_end_common(self) -> None: self.llm_ends += 1 self.ends += 1 def on_llm_error_common(self) -> None: self.errors += 1 def on_llm_new_token_common(self) -> None: self.llm_streams += 1 def on_retry_common(self) -> None: self.retries += 1 def on_chain_start_common(self) -> None: self.chain_starts += 1 self.starts += 1 def on_chain_end_common(self) -> None: self.chain_ends += 1 self.ends += 1 def on_chain_error_common(self) -> None: self.errors += 1 def on_tool_start_common(self) -> None: self.tool_starts += 1 self.starts += 1 def on_tool_end_common(self) -> None: self.tool_ends += 1 self.ends += 1 def on_tool_error_common(self) -> None: self.errors += 1 def on_agent_action_common(self) -> None: self.agent_actions += 1 self.starts += 1 def on_agent_finish_common(self) -> None: self.agent_ends += 1 self.ends += 1 def on_chat_model_start_common(self) -> None: self.chat_model_starts += 1 self.starts += 1 def on_text_common(self) -> None: self.text += 1 def on_retriever_start_common(self) -> None: self.starts += 1 self.retriever_starts += 1 def on_retriever_end_common(self) -> None: self.ends += 1 self.retriever_ends += 1 def on_retriever_error_common(self) -> None: self.errors += 1 self.retriever_errors += 1 class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin): """Fake callback handler for testing.""" @property def ignore_llm(self) -> bool: """Whether to ignore LLM callbacks.""" return self.ignore_llm_ @property def ignore_chain(self) -> bool: """Whether to ignore chain callbacks.""" return self.ignore_chain_ @property def ignore_agent(self) -> bool: """Whether to ignore agent callbacks.""" return self.ignore_agent_ @property def ignore_retriever(self) -> bool: """Whether to ignore retriever callbacks.""" return self.ignore_retriever_ def on_llm_start( self, *args: Any, **kwargs: Any, ) -> Any: self.on_llm_start_common() def on_llm_new_token( self, *args: Any, **kwargs: Any, ) -> Any: self.on_llm_new_token_common() def on_llm_end( self, *args: Any, **kwargs: Any, ) -> Any: self.on_llm_end_common() def on_llm_error( self, *args: Any, **kwargs: Any, ) -> Any: self.on_llm_error_common() def on_retry( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retry_common() def on_chain_start( self, 
*args: Any, **kwargs: Any, ) -> Any: self.on_chain_start_common() def on_chain_end( self, *args: Any, **kwargs: Any, ) -> Any: self.on_chain_end_common() def on_chain_error( self, *args: Any, **kwargs: Any, ) -> Any: self.on_chain_error_common() def on_tool_start( self, *args: Any, **kwargs: Any, ) -> Any: self.on_tool_start_common() def on_tool_end( self, *args: Any, **kwargs: Any, ) -> Any: self.on_tool_end_common() def on_tool_error( self, *args: Any, **kwargs: Any, ) -> Any: self.on_tool_error_common() def on_agent_action( self, *args: Any, **kwargs: Any, ) -> Any: self.on_agent_action_common() def on_agent_finish( self, *args: Any, **kwargs: Any, ) -> Any: self.on_agent_finish_common() def on_text( self, *args: Any, **kwargs: Any, ) -> Any: self.on_text_common() def on_retriever_start( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retriever_start_common() def on_retriever_end( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retriever_end_common() def on_retriever_error( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retriever_error_common() def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler": return self class FakeCallbackHandlerWithChatStart(FakeCallbackHandler): def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: assert all(isinstance(m, BaseMessage) for m in chain(*messages)) self.on_chat_model_start_common() class FakeAsyncCallbackHandler(AsyncCallbackHandler, BaseFakeCallbackHandlerMixin): """Fake async callback handler for testing.""" @property def ignore_llm(self) -> bool: """Whether to ignore LLM callbacks.""" return self.ignore_llm_ @property def ignore_chain(self) -> bool: """Whether to ignore chain callbacks.""" return self.ignore_chain_ @property def ignore_agent(self) -> bool: """Whether to ignore agent callbacks.""" return self.ignore_agent_ async def on_retry( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retry_common() async def on_llm_start( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_start_common() async def on_llm_new_token( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_new_token_common() async def on_llm_end( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_end_common() async def on_llm_error( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_error_common() async def on_chain_start( self, *args: Any, **kwargs: Any, ) -> None: self.on_chain_start_common() async def on_chain_end( self, *args: Any, **kwargs: Any, ) -> None: self.on_chain_end_common() async def on_chain_error( self, *args: Any, **kwargs: Any, ) -> None: self.on_chain_error_common() async def on_tool_start( self, *args: Any, **kwargs: Any, ) -> None: self.on_tool_start_common() async def on_tool_end( self, *args: Any, **kwargs: Any, ) -> None: self.on_tool_end_common() async def on_tool_error( self, *args: Any, **kwargs: Any, ) -> None: self.on_tool_error_common() async def on_agent_action( self, *args: Any, **kwargs: Any, ) -> None: self.on_agent_action_common() async def on_agent_finish( self, *args: Any, **kwargs: Any, ) -> None: self.on_agent_finish_common() async def on_text( self, *args: Any, **kwargs: Any, ) -> None: self.on_text_common() def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler": return self
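# --- Hypothetical usage sketch (not part of the module above) ---
# The per-event counters make test assertions straightforward: drive the
# handler's hooks (directly here, or by passing it to a model via
# `callbacks=[handler]`) and check the counts afterwards.
handler = FakeCallbackHandler()
handler.on_llm_start({}, ["a prompt"])
handler.on_llm_new_token("tok")
handler.on_llm_end(None)

assert handler.llm_starts == 1 and handler.starts == 1
assert handler.llm_streams == 1
assert handler.llm_ends == 1 and handler.ends == 1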
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~output_parsers~enum.py
from enum import Enum from typing import Any, Dict, List, Type from langchain_core.pydantic_v1 import root_validator from langchain_core.schema import BaseOutputParser, OutputParserException class EnumOutputParser(BaseOutputParser): """Parse an output that is one of a set of values.""" enum: Type[Enum] """The enum to parse. Its values must be strings.""" @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: enum = values["enum"] if not all(isinstance(e.value, str) for e in enum): raise ValueError("Enum values must be strings") return values @property def _valid_values(self) -> List[str]: return [e.value for e in self.enum] def parse(self, response: str) -> Any: try: return self.enum(response.strip()) except ValueError: raise OutputParserException( f"Response '{response}' is not one of the " f"expected values: {self._valid_values}" ) def get_format_instructions(self) -> str: return f"Select one of the following options: {', '.join(self._valid_values)}"
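# --- Hypothetical usage sketch (not part of the module above) ---
# Parsing a raw LLM reply into a Python Enum member. The Color enum and the
# example response string are assumptions for illustration.
from enum import Enum

from langchain.output_parsers.enum import EnumOutputParser

class Color(Enum):
    RED = "red"
    GREEN = "green"
    BLUE = "blue"

parser = EnumOutputParser(enum=Color)
print(parser.get_format_instructions())  # Select one of the following options: red, green, blue
print(parser.parse(" green\n"))  # Color.GREEN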
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~output_parsers~structured.py
from __future__ import annotations from typing import Any, List from langchain_core.pydantic_v1 import BaseModel from langchain_core.schema import BaseOutputParser from langchain.output_parsers.format_instructions import ( STRUCTURED_FORMAT_INSTRUCTIONS, STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS, ) from langchain.output_parsers.json import parse_and_check_json_markdown line_template = '\t"{name}": {type} // {description}' class ResponseSchema(BaseModel): """A schema for a response from a structured output parser.""" name: str """The name of the schema.""" description: str """The description of the schema.""" type: str = "string" """The type of the response.""" def _get_sub_string(schema: ResponseSchema) -> str: return line_template.format( name=schema.name, description=schema.description, type=schema.type ) class StructuredOutputParser(BaseOutputParser): """Parse the output of an LLM call to a structured output.""" response_schemas: List[ResponseSchema] """The schemas for the response.""" @classmethod def from_response_schemas( cls, response_schemas: List[ResponseSchema] ) -> StructuredOutputParser: return cls(response_schemas=response_schemas) def get_format_instructions(self, only_json: bool = False) -> str: """Get format instructions for the output parser. example: ```python from langchain.output_parsers.structured import ( StructuredOutputParser, ResponseSchema ) response_schemas = [ ResponseSchema( name="foo", description="a list of strings", type="List[string]" ), ResponseSchema( name="bar", description="a string", type="string" ), ] parser = StructuredOutputParser.from_response_schemas(response_schemas) print(parser.get_format_instructions()) output: # The output should be a Markdown code snippet formatted in the following # schema, including the leading and trailing "```json" and "```": # # ```json # { # "foo": List[string] // a list of strings # "bar": string // a string # } # ``` Args: only_json (bool): If True, only the json in the Markdown code snippet will be returned, without the introducing text. Defaults to False. """ schema_str = "\n".join( [_get_sub_string(schema) for schema in self.response_schemas] ) if only_json: return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str) else: return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str) def parse(self, text: str) -> Any: expected_keys = [rs.name for rs in self.response_schemas] return parse_and_check_json_markdown(text, expected_keys) @property def _type(self) -> str: return "structured"
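# --- Hypothetical usage sketch (not part of the module above) ---
# Parsing a model reply that follows the generated format instructions. The
# response schemas and the example JSON reply are assumptions for illustration.
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser

schemas = [
    ResponseSchema(name="answer", description="answer to the user's question"),
    ResponseSchema(name="source", description="source used to answer the question"),
]
parser = StructuredOutputParser.from_response_schemas(schemas)

llm_reply = """```json
{
    "answer": "Paris",
    "source": "https://en.wikipedia.org/wiki/Paris"
}
```"""
print(parser.parse(llm_reply))
# {'answer': 'Paris', 'source': 'https://en.wikipedia.org/wiki/Paris'}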
[ "\t\"{name}\": {type} // {description}" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~evaluation~agents~trajectory_eval_prompt.py
"""Prompt for trajectory evaluation chain.""" # flake8: noqa from langchain_core.schema.messages import HumanMessage, AIMessage, SystemMessage from langchain_core.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, ) EVAL_TEMPLATE = """An AI language model has been given access to the following set of tools to help answer a user's question. The tools given to the AI model are: [TOOL_DESCRIPTIONS] {tool_descriptions} [END_TOOL_DESCRIPTIONS] The question the human asked the AI model was: [QUESTION] {question} [END_QUESTION]{reference} The AI language model decided to use the following set of tools to answer the question: [AGENT_TRAJECTORY] {agent_trajectory} [END_AGENT_TRAJECTORY] The AI language model's final answer to the question was: [RESPONSE] {answer} [END_RESPONSE] Let's to do a detailed evaluation of the AI language model's answer step by step. We consider the following criteria before giving a score from 1 to 5: i. Is the final answer helpful? ii. Does the AI language use a logical sequence of tools to answer the question? iii. Does the AI language model use the tools in a helpful way? iv. Does the AI language model use too many steps to answer the question? v. Are the appropriate tools used to answer the question?""" EXAMPLE_INPUT = """An AI language model has been given access to the following set of tools to help answer a user's question. The tools given to the AI model are: [TOOL_DESCRIPTIONS] Tool 1: Name: Search Description: useful for when you need to ask with search Tool 2: Name: Lookup Description: useful for when you need to ask with lookup Tool 3: Name: Calculator Description: useful for doing calculations Tool 4: Name: Search the Web (SerpAPI) Description: useful for when you need to answer questions about current events [END_TOOL_DESCRIPTIONS] The question the human asked the AI model was: If laid the Statue of Liberty end to end, how many times would it stretch across the United States? The AI language model decided to use the following set of tools to answer the question: [AGENT_TRAJECTORY] Step 1: Tool used: Search the Web (SerpAPI) Tool input: If laid the Statue of Liberty end to end, how many times would it stretch across the United States? Tool output: The Statue of Liberty was given to the United States by France, as a symbol of the two countries' friendship. It was erected atop an American-designed ... [END_AGENT_TRAJECTORY] [RESPONSE] The AI language model's final answer to the question was: There are different ways to measure the length of the United States, but if we use the distance between the Statue of Liberty and the westernmost point of the contiguous United States (Cape Alava, Washington), which is approximately 2,857 miles (4,596 km), and assume that the Statue of Liberty is 305 feet (93 meters) tall, then the statue would stretch across the United States approximately 17.5 times if laid end to end. [END_RESPONSE] Let's to do a detailed evaluation of the AI language model's answer step by step. We consider the following criteria before giving a score from 1 to 5: i. Is the final answer helpful? ii. Does the AI language use a logical sequence of tools to answer the question? iii. Does the AI language model use the tools in a helpful way? iv. Does the AI language model use too many steps to answer the question? v. Are the appropriate tools used to answer the question?""" EXAMPLE_OUTPUT = """First, let's evaluate the final answer. The final uses good reasoning but is wrong. 
2,857 divided by 305 is not 17.5.\ The model should have used the calculator to figure this out. Second does the model use a logical sequence of tools to answer the question?\ The way model uses the search is not helpful. The model should have used the search tool to figure the width of the US or the height of the statue.\ The model didn't use the calculator tool and gave an incorrect answer. The search API should be used for current events or specific questions.\ The tools were not used in a helpful way. The model did not use too many steps to answer the question.\ The model did not use the appropriate tools to answer the question.\ Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. Score: 2""" EVAL_CHAT_PROMPT = ChatPromptTemplate.from_messages( messages=[ SystemMessage( content="You are a helpful assistant that evaluates language models." ), HumanMessage(content=EXAMPLE_INPUT), AIMessage(content=EXAMPLE_OUTPUT), HumanMessagePromptTemplate.from_template(EVAL_TEMPLATE), ] ) TOOL_FREE_EVAL_TEMPLATE = """An AI language model has been given access to a set of tools to help answer a user's question. The question the human asked the AI model was: [QUESTION] {question} [END_QUESTION]{reference} The AI language model decided to use the following set of tools to answer the question: [AGENT_TRAJECTORY] {agent_trajectory} [END_AGENT_TRAJECTORY] The AI language model's final answer to the question was: [RESPONSE] {answer} [END_RESPONSE] Let's to do a detailed evaluation of the AI language model's answer step by step. We consider the following criteria before giving a score from 1 to 5: i. Is the final answer helpful? ii. Does the AI language use a logical sequence of tools to answer the question? iii. Does the AI language model use the tools in a helpful way? iv. Does the AI language model use too many steps to answer the question? v. Are the appropriate tools used to answer the question?""" TOOL_FREE_EVAL_CHAT_PROMPT = ChatPromptTemplate.from_messages( messages=[ SystemMessage( content="You are a helpful assistant that evaluates language models." ), HumanMessage(content=EXAMPLE_INPUT), AIMessage(content=EXAMPLE_OUTPUT), HumanMessagePromptTemplate.from_template(TOOL_FREE_EVAL_TEMPLATE), ] )
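# --- Hypothetical usage sketch (not part of the module above) ---
# Rendering the evaluation prompt for a single agent run. The tool description,
# question, trajectory, and answer values are placeholders; `reference` is left
# empty when no reference answer is available.
messages = EVAL_CHAT_PROMPT.format_messages(
    tool_descriptions="Tool 1:\nName: Calculator\nDescription: useful for doing calculations",
    question="What is 2,857 divided by 305?",
    reference="",
    agent_trajectory="Step 1:\nTool used: Calculator\nTool input: 2857 / 305\nTool output: 9.3672...",
    answer="2,857 divided by 305 is roughly 9.37.",
)
print(messages[-1].content)  # the filled-in EVAL_TEMPLATE, sent as the final human message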
[ "s evaluate the final answer. The final uses good reasonAn AI language model has been given access to a set of tools to help answer a user", "An AI language model has been given access to a set of tools to help answer a user's question.\n\nThe question the human asked the AI model was:\n[QUESTION]\n{question}\n[END_QUESTION]{reference}\n\nThe AI language model decided to use the following set of tools to answer the question:\n[AGENT_TRAJECTORY]\n{agent_trajectory}\n[END_AGENT_TRAJECTORY]\n\nThe AI language model's final answer to the question was:\n[RESPONSE]\n{answer}\n[END_RESPONSE]\n\nLet's to do a detailed evaluation of the AI language model's answer step by step.\n\nWe consider the following criteria before giving a score from 1 to 5:\n\ni. Is the final answer helpful?\nii. Does the AI language use a logical sequence of tools to answer the question?\niii. Does the AI language model use the tools in a helpful way?\niv. Does the AI language model use too many steps to answer the question?\nv. Are the appropriate tools used to answer the question?", "First, let's evaluate the final answer. The final uses good reasoning but is wrong. 2,857 divided by 305 is not 17.5.The model should have used the calculator to figure this out. Second does the model use a logical sequence of tools to answer the question?The way model uses the search is not helpful. The model should have used the search tool to figure the width of the US or the height of the statue.The model didn't use the calculator tool and gave an incorrect answer. The search API should be used for current events or specific questions.The tools were not used in a helpful way. The model did not use too many steps to answer the question.The model did not use the appropriate tools to answer the question. \nJudgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.\n\nScore: 2", "An AI language model has been given access to the following set of tools to help answer a user's question.\n\nThe tools given to the AI model are:\n[TOOL_DESCRIPTIONS]\nTool 1:\nName: Search\nDescription: useful for when you need to ask with search\n\nTool 2:\nName: Lookup\nDescription: useful for when you need to ask with lookup\n\nTool 3:\nName: Calculator\nDescription: useful for doing calculations\n\nTool 4:\nName: Search the Web (SerpAPI)\nDescription: useful for when you need to answer questions about current events\n[END_TOOL_DESCRIPTIONS]\n\nThe question the human asked the AI model was: If laid the Statue of Liberty end to end, how many times would it stretch across the United States?\n\nThe AI language model decided to use the following set of tools to answer the question:\n[AGENT_TRAJECTORY]\nStep 1:\nTool used: Search the Web (SerpAPI)\nTool input: If laid the Statue of Liberty end to end, how many times would it stretch across the United States?\nTool output: The Statue of Liberty was given to the United States by France, as a symbol of the two countries' friendship. 
It was erected atop an American-designed ...\n[END_AGENT_TRAJECTORY]\n\n[RESPONSE]\nThe AI language model's final answer to the question was: There are different ways to measure the length of the United States, but if we use the distance between the Statue of Liberty and the westernmost point of the contiguous United States (Cape Alava, Washington), which is approximately 2,857 miles (4,596 km), and assume that the Statue of Liberty is 305 feet (93 meters) tall, then the statue would stretch across the United States approximately 17.5 times if laid end to end.\n[END_RESPONSE]\n\nLet's to do a detailed evaluation of the AI language model's answer step by step.\n\nWe consider the following criteria before giving a score from 1 to 5:\n\ni. Is the final answer helpful?\nii. Does the AI language use a logical sequence of tools to answer the question?\niii. Does the AI language model use the tools in a helpful way?\niv. Does the AI language model use too many steps to answer the question?\nv. Are the appropriate tools used to answer the question?", "s to do a detailed evaluation of the AI language model", "You are a helpful assistant that evaluates language models.", "An AI language model has been given access to the following set of tools to help answer a user's question.\n\nThe tools given to the AI model are:\n[TOOL_DESCRIPTIONS]\n{tool_descriptions}\n[END_TOOL_DESCRIPTIONS]\n\nThe question the human asked the AI model was:\n[QUESTION]\n{question}\n[END_QUESTION]{reference}\n\nThe AI language model decided to use the following set of tools to answer the question:\n[AGENT_TRAJECTORY]\n{agent_trajectory}\n[END_AGENT_TRAJECTORY]\n\nThe AI language model's final answer to the question was:\n[RESPONSE]\n{answer}\n[END_RESPONSE]\n\nLet's to do a detailed evaluation of the AI language model's answer step by step.\n\nWe consider the following criteria before giving a score from 1 to 5:\n\ni. Is the final answer helpful?\nii. Does the AI language use a logical sequence of tools to answer the question?\niii. Does the AI language model use the tools in a helpful way?\niv. Does the AI language model use too many steps to answer the question?\nv. Are the appropriate tools used to answer the question?", "s evaluate the final answer. The final uses good reasonAn AI language model has been given access to the following set of tools to help answer a user" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~voyageai.py
from __future__ import annotations import json import logging from typing import ( Any, Callable, Dict, List, Optional, Tuple, Union, cast, ) import requests from langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr, root_validator from langchain_core.schema.embeddings import Embeddings from langchain_core.utils import convert_to_secret_str from tenacity import ( before_sleep_log, retry, stop_after_attempt, wait_exponential, ) from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: VoyageEmbeddings) -> Callable[[Any], Any]: min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), before_sleep=before_sleep_log(logger, logging.WARNING), ) def _check_response(response: dict) -> dict: if "data" not in response: raise RuntimeError(f"Voyage API Error. Message: {json.dumps(response)}") return response def embed_with_retry(embeddings: VoyageEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: response = requests.post(**kwargs) return _check_response(response.json()) return _embed_with_retry(**kwargs) class VoyageEmbeddings(BaseModel, Embeddings): """Voyage embedding models. To use, you should have the environment variable ``VOYAGE_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import VoyageEmbeddings voyage = VoyageEmbeddings(voyage_api_key="your-api-key") text = "This is a test query." query_result = voyage.embed_query(text) """ model: str = "voyage-01" voyage_api_base: str = "https://api.voyageai.com/v1/embeddings" voyage_api_key: Optional[SecretStr] = None batch_size: int = 8 """Maximum number of texts to embed in each API request.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout in seconds for the API request.""" show_progress_bar: bool = False """Whether to show a progress bar when embedding. Must have tqdm installed if set to True.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["voyage_api_key"] = convert_to_secret_str( get_from_dict_or_env(values, "voyage_api_key", "VOYAGE_API_KEY") ) return values def _invocation_params( self, input: List[str], input_type: Optional[str] = None ) -> Dict: api_key = cast(SecretStr, self.voyage_api_key).get_secret_value() params = { "url": self.voyage_api_base, "headers": {"Authorization": f"Bearer {api_key}"}, "json": {"model": self.model, "input": input, "input_type": input_type}, "timeout": self.request_timeout, } return params def _get_embeddings( self, texts: List[str], batch_size: int, input_type: Optional[str] = None ) -> List[List[float]]: embeddings: List[List[float]] = [] if self.show_progress_bar: try: from tqdm.auto import tqdm except ImportError as e: raise ImportError( "Must have tqdm installed if `show_progress_bar` is set to True. " "Please install with `pip install tqdm`." 
) from e _iter = tqdm(range(0, len(texts), batch_size)) else: _iter = range(0, len(texts), batch_size) if input_type and input_type not in ["query", "document"]: raise ValueError( f"input_type {input_type} is invalid. Options: None, 'query', " "'document'." ) for i in _iter: response = embed_with_retry( self, **self._invocation_params( input=texts[i : i + batch_size], input_type=input_type ), ) embeddings.extend(r["embedding"] for r in response["data"]) return embeddings def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to Voyage Embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ return self._get_embeddings( texts, batch_size=self.batch_size, input_type="document" ) def embed_query(self, text: str) -> List[float]: """Call out to Voyage Embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ return self._get_embeddings( [text], batch_size=self.batch_size, input_type="query" )[0]
[]
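A minimal usage sketch for the `VoyageEmbeddings` class defined in the file above; the API key and the example texts are placeholders, and the calls require network access to the Voyage API:

.. code-block:: python

    from langchain.embeddings import VoyageEmbeddings

    # "your-api-key" is a placeholder; alternatively set the VOYAGE_API_KEY env var.
    embeddings = VoyageEmbeddings(voyage_api_key="your-api-key", batch_size=8)

    doc_vectors = embeddings.embed_documents(
        ["LangChain integrates many embedding providers.", "Voyage serves embedding models."]
    )
    query_vector = embeddings.embed_query("Which providers are integrated?")
    print(len(doc_vectors), len(query_vector))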
2024-01-10
axgpt/langchain
libs~langchain~langchain~schema~messages.py
from langchain_core.schema.messages import ( AIMessage, AIMessageChunk, BaseMessage, BaseMessageChunk, ChatMessage, ChatMessageChunk, FunctionMessage, FunctionMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, get_buffer_string, merge_content, messages_from_dict, messages_to_dict, ) __all__ = [ "get_buffer_string", "BaseMessage", "merge_content", "BaseMessageChunk", "HumanMessage", "HumanMessageChunk", "AIMessage", "AIMessageChunk", "SystemMessage", "SystemMessageChunk", "FunctionMessage", "FunctionMessageChunk", "ToolMessage", "ToolMessageChunk", "ChatMessage", "ChatMessageChunk", "messages_to_dict", "messages_from_dict", ]
[]
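Since the file above only re-exports message types from `langchain_core`, a small round-trip example may help show what those names are for; the message contents are arbitrary placeholders:

.. code-block:: python

    from langchain.schema.messages import (
        AIMessage,
        HumanMessage,
        get_buffer_string,
        messages_from_dict,
        messages_to_dict,
    )

    history = [HumanMessage(content="Hi there"), AIMessage(content="Hello!")]

    # Round-trip the messages through their dict representation.
    as_dicts = messages_to_dict(history)
    restored = messages_from_dict(as_dicts)

    print(get_buffer_string(restored))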
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~memory~chat_message_histories~test_streamlit.py
"""Unit tests for StreamlitChatMessageHistory functionality.""" import pytest test_script = """ import json import streamlit as st from langchain.memory import ConversationBufferMemory from langchain.memory.chat_message_histories import StreamlitChatMessageHistory from langchain_core.schema.messages import _message_to_dict message_history = StreamlitChatMessageHistory() memory = ConversationBufferMemory(chat_memory=message_history, return_messages=True) # Add some messages if st.checkbox("add initial messages", value=True): memory.chat_memory.add_ai_message("This is me, the AI") memory.chat_memory.add_user_message("This is me, the human") else: st.markdown("Skipped add") # Clear messages if checked if st.checkbox("clear messages"): st.markdown("Cleared!") memory.chat_memory.clear() # Write the output to st.code as a json blob for inspection messages = memory.chat_memory.messages messages_json = json.dumps([_message_to_dict(msg) for msg in messages]) st.text(messages_json) """ @pytest.mark.requires("streamlit") def test_memory_with_message_store() -> None: try: from streamlit.testing.script_interactions import InteractiveScriptTests except ModuleNotFoundError: pytest.skip("Incorrect version of Streamlit installed") test_handler = InteractiveScriptTests() test_handler.setUp() try: sr = test_handler.script_from_string(test_script).run() except TypeError: # Earlier version expected 2 arguments sr = test_handler.script_from_string("memory_test.py", test_script).run() # Initial run should write two messages messages_json = sr.get("text")[-1].value assert "This is me, the AI" in messages_json assert "This is me, the human" in messages_json # Uncheck the initial write, they should persist in session_state sr = sr.get("checkbox")[0].uncheck().run() assert sr.get("markdown")[0].value == "Skipped add" messages_json = sr.get("text")[-1].value assert "This is me, the AI" in messages_json assert "This is me, the human" in messages_json # Clear the message history sr = sr.get("checkbox")[1].check().run() assert sr.get("markdown")[1].value == "Cleared!" messages_json = sr.get("text")[-1].value assert messages_json == "[]"
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~weaviate.py
from __future__ import annotations import datetime import os from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, ) from uuid import uuid4 import numpy as np from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import weaviate def _default_schema(index_name: str) -> Dict: return { "class": index_name, "properties": [ { "name": "text", "dataType": ["text"], } ], } def _create_weaviate_client( url: Optional[str] = None, api_key: Optional[str] = None, **kwargs: Any, ) -> weaviate.Client: try: import weaviate except ImportError: raise ImportError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`" ) url = url or os.environ.get("WEAVIATE_URL") api_key = api_key or os.environ.get("WEAVIATE_API_KEY") auth = weaviate.auth.AuthApiKey(api_key=api_key) if api_key else None return weaviate.Client(url=url, auth_client_secret=auth, **kwargs) def _default_score_normalizer(val: float) -> float: return 1 - 1 / (1 + np.exp(val)) def _json_serializable(value: Any) -> Any: if isinstance(value, datetime.datetime): return value.isoformat() return value class Weaviate(VectorStore): """`Weaviate` vector store. To use, you should have the ``weaviate-client`` python package installed. Example: .. code-block:: python import weaviate from langchain.vectorstores import Weaviate client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...) weaviate = Weaviate(client, index_name, text_key) """ def __init__( self, client: Any, index_name: str, text_key: str, embedding: Optional[Embeddings] = None, attributes: Optional[List[str]] = None, relevance_score_fn: Optional[ Callable[[float], float] ] = _default_score_normalizer, by_text: bool = True, ): """Initialize with Weaviate client.""" try: import weaviate except ImportError: raise ImportError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`." 
) if not isinstance(client, weaviate.Client): raise ValueError( f"client should be an instance of weaviate.Client, got {type(client)}" ) self._client = client self._index_name = index_name self._embedding = embedding self._text_key = text_key self._query_attrs = [self._text_key] self.relevance_score_fn = relevance_score_fn self._by_text = by_text if attributes is not None: self._query_attrs.extend(attributes) @property def embeddings(self) -> Optional[Embeddings]: return self._embedding def _select_relevance_score_fn(self) -> Callable[[float], float]: return ( self.relevance_score_fn if self.relevance_score_fn else _default_score_normalizer ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Upload texts with metadata (properties) to Weaviate.""" from weaviate.util import get_valid_uuid ids = [] embeddings: Optional[List[List[float]]] = None if self._embedding: if not isinstance(texts, list): texts = list(texts) embeddings = self._embedding.embed_documents(texts) with self._client.batch as batch: for i, text in enumerate(texts): data_properties = {self._text_key: text} if metadatas is not None: for key, val in metadatas[i].items(): data_properties[key] = _json_serializable(val) # Allow for ids (consistent w/ other methods) # # Or uuids (backwards compatible w/ existing arg) # If the UUID of one of the objects already exists # then the existing object will be replaced by the new object. _id = get_valid_uuid(uuid4()) if "uuids" in kwargs: _id = kwargs["uuids"][i] elif "ids" in kwargs: _id = kwargs["ids"][i] batch.add_data_object( data_object=data_properties, class_name=self._index_name, uuid=_id, vector=embeddings[i] if embeddings else None, ) ids.append(_id) return ids def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ if self._by_text: return self.similarity_search_by_text(query, k, **kwargs) else: if self._embedding is None: raise ValueError( "_embedding cannot be None for similarity_search when " "_by_text=False" ) embedding = self._embedding.embed_query(query) return self.similarity_search_by_vector(embedding, k, **kwargs) def similarity_search_by_text( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. 
""" content: Dict[str, Any] = {"concepts": [query]} if kwargs.get("search_distance"): content["certainty"] = kwargs.get("search_distance") query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("tenant"): query_obj = query_obj.with_tenant(kwargs.get("tenant")) if kwargs.get("additional"): query_obj = query_obj.with_additional(kwargs.get("additional")) result = query_obj.with_near_text(content).with_limit(k).do() if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) docs.append(Document(page_content=text, metadata=res)) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Look up similar documents by embedding vector in Weaviate.""" vector = {"vector": embedding} query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("tenant"): query_obj = query_obj.with_tenant(kwargs.get("tenant")) if kwargs.get("additional"): query_obj = query_obj.with_additional(kwargs.get("additional")) result = query_obj.with_near_vector(vector).with_limit(k).do() if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) docs.append(Document(page_content=text, metadata=res)) return docs def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding is not None: embedding = self._embedding.embed_query(query) else: raise ValueError( "max_marginal_relevance_search requires a suitable Embeddings object" ) return self.max_marginal_relevance_search_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. 
""" vector = {"vector": embedding} query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("tenant"): query_obj = query_obj.with_tenant(kwargs.get("tenant")) results = ( query_obj.with_additional("vector") .with_near_vector(vector) .with_limit(fetch_k) .do() ) payload = results["data"]["Get"][self._index_name] embeddings = [result["_additional"]["vector"] for result in payload] mmr_selected = maximal_marginal_relevance( np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult ) docs = [] for idx in mmr_selected: text = payload[idx].pop(self._text_key) payload[idx].pop("_additional") meta = payload[idx] docs.append(Document(page_content=text, metadata=meta)) return docs def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """ Return list of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ if self._embedding is None: raise ValueError( "_embedding cannot be None for similarity_search_with_score" ) content: Dict[str, Any] = {"concepts": [query]} if kwargs.get("search_distance"): content["certainty"] = kwargs.get("search_distance") query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("tenant"): query_obj = query_obj.with_tenant(kwargs.get("tenant")) embedded_query = self._embedding.embed_query(query) if not self._by_text: vector = {"vector": embedded_query} result = ( query_obj.with_near_vector(vector) .with_limit(k) .with_additional("vector") .do() ) else: result = ( query_obj.with_near_text(content) .with_limit(k) .with_additional("vector") .do() ) if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs_and_scores = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) score = np.dot(res["_additional"]["vector"], embedded_query) docs_and_scores.append((Document(page_content=text, metadata=res), score)) return docs_and_scores @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, *, client: Optional[weaviate.Client] = None, weaviate_url: Optional[str] = None, weaviate_api_key: Optional[str] = None, batch_size: Optional[int] = None, index_name: Optional[str] = None, text_key: str = "text", by_text: bool = False, relevance_score_fn: Optional[ Callable[[float], float] ] = _default_score_normalizer, **kwargs: Any, ) -> Weaviate: """Construct Weaviate wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Weaviate instance. 3. Adds the documents to the newly created Weaviate index. This is intended to be a quick way to get started. Args: texts: Texts to add to vector store. embedding: Text embedding model to use. metadatas: Metadata associated with each text. client: weaviate.Client to use. weaviate_url: The Weaviate URL. If using Weaviate Cloud Services get it from the ``Details`` tab. Can be passed in as a named param or by setting the environment variable ``WEAVIATE_URL``. Should not be specified if client is provided. weaviate_api_key: The Weaviate API key. If enabled and using Weaviate Cloud Services, get it from ``Details`` tab. 
Can be passed in as a named param or by setting the environment variable ``WEAVIATE_API_KEY``. Should not be specified if client is provided. batch_size: Size of batch operations. index_name: Index name. text_key: Key to use for uploading/retrieving text to/from vectorstore. by_text: Whether to search by text or by embedding. relevance_score_fn: Function for converting whatever distance function the vector store uses to a relevance score, which is a normalized similarity score (0 means dissimilar, 1 means similar). **kwargs: Additional named parameters to pass to ``Weaviate.__init__()``. Example: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import Weaviate embeddings = OpenAIEmbeddings() weaviate = Weaviate.from_texts( texts, embeddings, weaviate_url="http://localhost:8080" ) """ try: from weaviate.util import get_valid_uuid except ImportError as e: raise ImportError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`" ) from e client = client or _create_weaviate_client( url=weaviate_url, api_key=weaviate_api_key, ) if batch_size: client.batch.configure(batch_size=batch_size) index_name = index_name or f"LangChain_{uuid4().hex}" schema = _default_schema(index_name) # check whether the index already exists if not client.schema.exists(index_name): client.schema.create_class(schema) embeddings = embedding.embed_documents(texts) if embedding else None attributes = list(metadatas[0].keys()) if metadatas else None # If the UUID of one of the objects already exists # then the existing object will be replaced by the new object. if "uuids" in kwargs: uuids = kwargs.pop("uuids") else: uuids = [get_valid_uuid(uuid4()) for _ in range(len(texts))] with client.batch as batch: for i, text in enumerate(texts): data_properties = { text_key: text, } if metadatas is not None: for key in metadatas[i].keys(): data_properties[key] = metadatas[i][key] _id = uuids[i] # if an embedding strategy is not provided, we let # weaviate create the embedding. Note that this will only # work if weaviate has been installed with a vectorizer module # like text2vec-contextionary for example params = { "uuid": _id, "data_object": data_properties, "class_name": index_name, } if embeddings is not None: params["vector"] = embeddings[i] batch.add_data_object(**params) batch.flush() return cls( client, index_name, text_key, embedding=embedding, attributes=attributes, relevance_score_fn=relevance_score_fn, by_text=by_text, **kwargs, ) def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError("No ids provided to delete.") # TODO: Check if this can be done in bulk for id in ids: self._client.data_object.delete(uuid=id)
[]
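A minimal end-to-end sketch for the `Weaviate` wrapper above, based on its `from_texts`, `similarity_search` and `max_marginal_relevance_search` methods; the Weaviate URL is a placeholder, and `OpenAIEmbeddings` assumes an OpenAI API key is configured:

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Weaviate

    texts = ["foo", "bar", "baz"]
    metadatas = [{"source": f"doc-{i}"} for i in range(len(texts))]

    # Assumes a Weaviate instance is reachable at this placeholder URL.
    db = Weaviate.from_texts(
        texts,
        OpenAIEmbeddings(),
        metadatas=metadatas,
        weaviate_url="http://localhost:8080",
        by_text=False,  # search by embedding rather than by raw text
    )

    docs = db.similarity_search("foo", k=2)
    diverse_docs = db.max_marginal_relevance_search("foo", k=2, fetch_k=3)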
2024-01-10
axgpt/langchain
libs~core~tests~unit_tests~schema~test_imports.py
from langchain_core.schema import __all__ EXPECTED_ALL = [ "BaseCache", "BaseMemory", "BaseStore", "AgentFinish", "AgentAction", "Document", "BaseChatMessageHistory", "BaseDocumentTransformer", "BaseMessage", "ChatMessage", "FunctionMessage", "HumanMessage", "AIMessage", "SystemMessage", "messages_from_dict", "messages_to_dict", "_message_to_dict", "_message_from_dict", "get_buffer_string", "RunInfo", "LLMResult", "ChatResult", "ChatGeneration", "Generation", "PromptValue", "LangChainException", "BaseRetriever", "RUN_KEY", "Memory", "OutputParserException", "StrOutputParser", "BaseOutputParser", "BaseLLMOutputParser", "BasePromptTemplate", "format_document", ] def test_all_imports() -> None: assert set(__all__) == set(EXPECTED_ALL)
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~llms~fake_chat_model.py
"""Fake Chat Model wrapper for testing purposes.""" from typing import Any, Dict, List, Optional from langchain_core.schema import ChatGeneration, ChatResult from langchain_core.schema.messages import AIMessage, BaseMessage from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import SimpleChatModel class FakeChatModel(SimpleChatModel): """Fake Chat Model wrapper for testing purposes.""" def _call( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: return "fake response" async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: output_str = "fake response" message = AIMessage(content=output_str) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @property def _llm_type(self) -> str: return "fake-chat-model" @property def _identifying_params(self) -> Dict[str, Any]: return {"key": "fake"}
[ "fake response" ]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~memory~test_mongodb.py
import json import os from langchain_core.schema.messages import _message_to_dict from langchain.memory import ConversationBufferMemory from langchain.memory.chat_message_histories import MongoDBChatMessageHistory # Replace these with your mongodb connection string connection_string = os.environ.get("MONGODB_CONNECTION_STRING", "") def test_memory_with_message_store() -> None: """Test the memory with a message store.""" # setup MongoDB as a message store message_history = MongoDBChatMessageHistory( connection_string=connection_string, session_id="test-session" ) memory = ConversationBufferMemory( memory_key="baz", chat_memory=message_history, return_messages=True ) # add some messages memory.chat_memory.add_ai_message("This is me, the AI") memory.chat_memory.add_user_message("This is me, the human") # get the message history from the memory store and turn it into a json messages = memory.chat_memory.messages messages_json = json.dumps([_message_to_dict(msg) for msg in messages]) assert "This is me, the AI" in messages_json assert "This is me, the human" in messages_json # remove the record from Azure Cosmos DB, so the next test run won't pick it up memory.chat_memory.clear() assert memory.chat_memory.messages == []
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~starrocks.py
from __future__ import annotations import json import logging from hashlib import sha1 from threading import Thread from typing import Any, Dict, Iterable, List, Optional, Tuple from langchain_core.pydantic_v1 import BaseSettings from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document logger = logging.getLogger() DEBUG = False def has_mul_sub_str(s: str, *args: Any) -> bool: """ Check if a string has multiple substrings. Args: s: The string to check *args: The substrings to check for in the string Returns: bool: True if all substrings are present in the string, False otherwise """ for a in args: if a not in s: return False return True def debug_output(s: Any) -> None: """ Print a debug message if DEBUG is True. Args: s: The message to print """ if DEBUG: print(s) def get_named_result(connection: Any, query: str) -> List[dict[str, Any]]: """ Get a named result from a query. Args: connection: The connection to the database query: The query to execute Returns: List[dict[str, Any]]: The result of the query """ cursor = connection.cursor() cursor.execute(query) columns = cursor.description result = [] for value in cursor.fetchall(): r = {} for idx, datum in enumerate(value): k = columns[idx][0] r[k] = datum result.append(r) debug_output(result) cursor.close() return result class StarRocksSettings(BaseSettings): """StarRocks client configuration. Attribute: StarRocks_host (str) : An URL to connect to MyScale backend. Defaults to 'localhost'. StarRocks_port (int) : URL port to connect with HTTP. Defaults to 8443. username (str) : Username to login. Defaults to None. password (str) : Password to login. Defaults to None. database (str) : Database name to find the table. Defaults to 'default'. table (str) : Table name to operate on. Defaults to 'vector_table'. column_map (Dict) : Column type map to project column name onto langchain semantics. Must have keys: `text`, `id`, `vector`, must be same size to number of columns. For example: .. code-block:: python { 'id': 'text_id', 'embedding': 'text_embedding', 'document': 'text_plain', 'metadata': 'metadata_dictionary_in_json', } Defaults to identity map. """ host: str = "localhost" port: int = 9030 username: str = "root" password: str = "" column_map: Dict[str, str] = { "id": "id", "document": "document", "embedding": "embedding", "metadata": "metadata", } database: str = "default" table: str = "langchain" def __getitem__(self, item: str) -> Any: return getattr(self, item) class Config: env_file = ".env" env_prefix = "starrocks_" env_file_encoding = "utf-8" class StarRocks(VectorStore): """`StarRocks` vector store. You need a `pymysql` python package, and a valid account to connect to StarRocks. Right now StarRocks has only implemented `cosine_similarity` function to compute distance between two vectors. And there is no vector inside right now, so we have to iterate all vectors and compute spatial distance. For more information, please visit [StarRocks official site](https://www.starrocks.io/) [StarRocks github](https://github.com/StarRocks/starrocks) """ def __init__( self, embedding: Embeddings, config: Optional[StarRocksSettings] = None, **kwargs: Any, ) -> None: """StarRocks Wrapper to LangChain embedding_function (Embeddings): config (StarRocksSettings): Configuration to StarRocks Client """ try: import pymysql # type: ignore[import] except ImportError: raise ImportError( "Could not import pymysql python package. 
" "Please install it with `pip install pymysql`." ) try: from tqdm import tqdm self.pgbar = tqdm except ImportError: # Just in case if tqdm is not installed self.pgbar = lambda x, **kwargs: x super().__init__() if config is not None: self.config = config else: self.config = StarRocksSettings() assert self.config assert self.config.host and self.config.port assert self.config.column_map and self.config.database and self.config.table for k in ["id", "embedding", "document", "metadata"]: assert k in self.config.column_map # initialize the schema dim = len(embedding.embed_query("test")) self.schema = f"""\ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( {self.config.column_map['id']} string, {self.config.column_map['document']} string, {self.config.column_map['embedding']} array<float>, {self.config.column_map['metadata']} string ) ENGINE = OLAP PRIMARY KEY(id) DISTRIBUTED BY HASH(id) \ PROPERTIES ("replication_num" = "1")\ """ self.dim = dim self.BS = "\\" self.must_escape = ("\\", "'") self.embedding_function = embedding self.dist_order = "DESC" debug_output(self.config) # Create a connection to StarRocks self.connection = pymysql.connect( host=self.config.host, port=self.config.port, user=self.config.username, password=self.config.password, database=self.config.database, **kwargs, ) debug_output(self.schema) get_named_result(self.connection, self.schema) def escape_str(self, value: str) -> str: return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value) @property def embeddings(self) -> Embeddings: return self.embedding_function def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str: ks = ",".join(column_names) embed_tuple_index = tuple(column_names).index( self.config.column_map["embedding"] ) _data = [] for n in transac: n = ",".join( [ f"'{self.escape_str(str(_n))}'" if idx != embed_tuple_index else f"array<float>{str(_n)}" for (idx, _n) in enumerate(n) ] ) _data.append(f"({n})") i_str = f""" INSERT INTO {self.config.database}.{self.config.table}({ks}) VALUES {','.join(_data)} """ return i_str def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None: _insert_query = self._build_insert_sql(transac, column_names) debug_output(_insert_query) get_named_result(self.connection, _insert_query) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 32, ids: Optional[Iterable[str]] = None, **kwargs: Any, ) -> List[str]: """Insert more texts through the embeddings and add to the VectorStore. Args: texts: Iterable of strings to add to the VectorStore. ids: Optional list of ids to associate with the texts. batch_size: Batch size of insertion metadata: Optional column data to be inserted Returns: List of ids from adding the texts into the VectorStore. 
""" # Embed and create the documents ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts] colmap_ = self.config.column_map transac = [] column_names = { colmap_["id"]: ids, colmap_["document"]: texts, colmap_["embedding"]: self.embedding_function.embed_documents(list(texts)), } metadatas = metadatas or [{} for _ in texts] column_names[colmap_["metadata"]] = map(json.dumps, metadatas) assert len(set(colmap_) - set(column_names)) >= 0 keys, values = zip(*column_names.items()) try: t = None for v in self.pgbar( zip(*values), desc="Inserting data...", total=len(metadatas) ): assert ( len(v[keys.index(self.config.column_map["embedding"])]) == self.dim ) transac.append(v) if len(transac) == batch_size: if t: t.join() t = Thread(target=self._insert, args=[transac, keys]) t.start() transac = [] if len(transac) > 0: if t: t.join() self._insert(transac, keys) return [i for i in ids] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, config: Optional[StarRocksSettings] = None, text_ids: Optional[Iterable[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> StarRocks: """Create StarRocks wrapper with existing texts Args: embedding_function (Embeddings): Function to extract text embedding texts (Iterable[str]): List or tuple of strings to be added config (StarRocksSettings, Optional): StarRocks configuration text_ids (Optional[Iterable], optional): IDs for the texts. Defaults to None. batch_size (int, optional): Batchsize when transmitting data to StarRocks. Defaults to 32. metadata (List[dict], optional): metadata to texts. Defaults to None. Returns: StarRocks Index """ ctx = cls(embedding, config, **kwargs) ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas) return ctx def __repr__(self) -> str: """Text representation for StarRocks Vector Store, prints backends, username and schemas. 
Easy to use with `str(StarRocks())` Returns: repr: string to show connection info and data schema """ _repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ " _repr += f"{self.config.host}:{self.config.port}\033[0m\n\n" _repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n" width = 25 fields = 3 _repr += "-" * (width * fields + 1) + "\n" columns = ["name", "type", "key"] _repr += f"|\033[94m{columns[0]:24s}\033[0m|\033[96m{columns[1]:24s}" _repr += f"\033[0m|\033[96m{columns[2]:24s}\033[0m|\n" _repr += "-" * (width * fields + 1) + "\n" q_str = f"DESC {self.config.database}.{self.config.table}" debug_output(q_str) rs = get_named_result(self.connection, q_str) for r in rs: _repr += f"|\033[94m{r['Field']:24s}\033[0m|\033[96m{r['Type']:24s}" _repr += f"\033[0m|\033[96m{r['Key']:24s}\033[0m|\n" _repr += "-" * (width * fields + 1) + "\n" return _repr def _build_query_sql( self, q_emb: List[float], topk: int, where_str: Optional[str] = None ) -> str: q_emb_str = ",".join(map(str, q_emb)) if where_str: where_str = f"WHERE {where_str}" else: where_str = "" q_str = f""" SELECT {self.config.column_map['document']}, {self.config.column_map['metadata']}, cosine_similarity_norm(array<float>[{q_emb_str}], {self.config.column_map['embedding']}) as dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY dist {self.dist_order} LIMIT {topk} """ debug_output(q_str) return q_str def similarity_search( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Document]: """Perform a similarity search with StarRocks Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of Documents """ return self.similarity_search_by_vector( self.embedding_function.embed_query(query), k, where_str, **kwargs ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search with StarRocks by vectors Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of (Document, similarity) """ q_str = self._build_query_sql(embedding, k, where_str) try: return [ Document( page_content=r[self.config.column_map["document"]], metadata=json.loads(r[self.config.column_map["metadata"]]), ) for r in get_named_result(self.connection, q_str) ] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Perform a similarity search with StarRocks Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. 
Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of documents """ q_str = self._build_query_sql( self.embedding_function.embed_query(query), k, where_str ) try: return [ ( Document( page_content=r[self.config.column_map["document"]], metadata=json.loads(r[self.config.column_map["metadata"]]), ), r["dist"], ) for r in get_named_result(self.connection, q_str) ] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] def drop(self) -> None: """ Helper function: Drop data """ get_named_result( self.connection, f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}", ) @property def metadata_column(self) -> str: return self.config.column_map["metadata"]
[]
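A minimal usage sketch for the `StarRocks` vector store above; the connection settings are placeholders for a running StarRocks deployment with `pymysql` installed, and `OpenAIEmbeddings` assumes an OpenAI API key is configured:

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores.starrocks import StarRocks, StarRocksSettings

    # Placeholder connection values; adjust to your StarRocks deployment.
    settings = StarRocksSettings(
        host="localhost",
        port=9030,
        username="root",
        password="",
        database="default",
        table="langchain",
    )

    docsearch = StarRocks.from_texts(
        ["foo", "bar", "baz"],
        OpenAIEmbeddings(),
        config=settings,
    )

    docs = docsearch.similarity_search("foo", k=2)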
2024-01-10
axgpt/langchain
libs~langchain~langchain~callbacks~tracers~run_collector.py
from langchain_core.callbacks.tracers.run_collector import RunCollectorCallbackHandler __all__ = ["RunCollectorCallbackHandler"]
[]
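The file above is only a re-export shim; a short sketch of how the handler is typically wired in, with the chain invocation left as a comment because it is outside this file:

.. code-block:: python

    from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler

    collector = RunCollectorCallbackHandler()
    # Pass the handler via `callbacks` when invoking a chain or LLM, e.g.
    #   chain.invoke({"input": "hi"}, config={"callbacks": [collector]})
    # The collected runs are then expected on `collector.traced_runs`.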
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~load~test_dump.py
"""Test for Serializable base class""" from typing import Any, Dict import pytest from langchain_core.load.dump import dumps from langchain_core.load.serializable import Serializable from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate from langchain_core.prompts.prompt import PromptTemplate from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.chains.llm import LLMChain from langchain.chat_models.openai import ChatOpenAI from langchain.llms.openai import OpenAI class Person(Serializable): secret: str you_can_see_me: str = "hello" @classmethod def is_lc_serializable(cls) -> bool: return True @property def lc_secrets(self) -> Dict[str, str]: return {"secret": "SECRET"} @property def lc_attributes(self) -> Dict[str, str]: return {"you_can_see_me": self.you_can_see_me} class SpecialPerson(Person): another_secret: str another_visible: str = "bye" # Gets merged with parent class's secrets @property def lc_secrets(self) -> Dict[str, str]: return {"another_secret": "ANOTHER_SECRET"} # Gets merged with parent class's attributes @property def lc_attributes(self) -> Dict[str, str]: return {"another_visible": self.another_visible} class NotSerializable: pass def test_person(snapshot: Any) -> None: p = Person(secret="hello") assert dumps(p, pretty=True) == snapshot sp = SpecialPerson(another_secret="Wooo", secret="Hmm") assert dumps(sp, pretty=True) == snapshot assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"] @pytest.mark.requires("openai") def test_serialize_openai_llm(snapshot: Any) -> None: llm = OpenAI( model="davinci", temperature=0.5, openai_api_key="hello", # This is excluded from serialization callbacks=[LangChainTracer()], ) llm.temperature = 0.7 # this is reflected in serialization assert dumps(llm, pretty=True) == snapshot @pytest.mark.requires("openai") def test_serialize_llmchain(snapshot: Any) -> None: llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) assert dumps(chain, pretty=True) == snapshot @pytest.mark.requires("openai") def test_serialize_llmchain_env() -> None: llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) import os has_env = "OPENAI_API_KEY" in os.environ if not has_env: os.environ["OPENAI_API_KEY"] = "env_variable" llm_2 = OpenAI(model="davinci", temperature=0.5) prompt_2 = PromptTemplate.from_template("hello {name}!") chain_2 = LLMChain(llm=llm_2, prompt=prompt_2) assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True) if not has_env: del os.environ["OPENAI_API_KEY"] @pytest.mark.requires("openai") def test_serialize_llmchain_chat(snapshot: Any) -> None: llm = ChatOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") prompt = ChatPromptTemplate.from_messages( [HumanMessagePromptTemplate.from_template("hello {name}!")] ) chain = LLMChain(llm=llm, prompt=prompt) assert dumps(chain, pretty=True) == snapshot import os has_env = "OPENAI_API_KEY" in os.environ if not has_env: os.environ["OPENAI_API_KEY"] = "env_variable" llm_2 = ChatOpenAI(model="davinci", temperature=0.5) prompt_2 = ChatPromptTemplate.from_messages( [HumanMessagePromptTemplate.from_template("hello {name}!")] ) chain_2 = LLMChain(llm=llm_2, prompt=prompt_2) assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True) if not has_env: del os.environ["OPENAI_API_KEY"] 
@pytest.mark.requires("openai") def test_serialize_llmchain_with_non_serializable_arg(snapshot: Any) -> None: llm = OpenAI( model="davinci", temperature=0.5, openai_api_key="hello", client=NotSerializable, ) prompt = PromptTemplate.from_template("hello {name}!") chain = LLMChain(llm=llm, prompt=prompt) assert dumps(chain, pretty=True) == snapshot
[ "hello {name}!" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~tavily_search_api.py
import os from enum import Enum from typing import Any, Dict, List, Optional from langchain_core.schema import Document from langchain_core.schema.retriever import BaseRetriever from langchain.callbacks.manager import CallbackManagerForRetrieverRun class SearchDepth(Enum): """Search depth as enumerator.""" BASIC = "basic" ADVANCED = "advanced" class TavilySearchAPIRetriever(BaseRetriever): """Tavily Search API retriever.""" k: int = 10 include_generated_answer: bool = False include_raw_content: bool = False include_images: bool = False search_depth: SearchDepth = SearchDepth.BASIC include_domains: Optional[List[str]] = None exclude_domains: Optional[List[str]] = None kwargs: Optional[Dict[str, Any]] = {} api_key: Optional[str] = None def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: try: from tavily import Client except ImportError: raise ImportError( "Tavily python package not found. " "Please install it with `pip install tavily-python`." ) tavily = Client(api_key=self.api_key or os.environ["TAVILY_API_KEY"]) max_results = self.k if not self.include_generated_answer else self.k - 1 response = tavily.search( query=query, max_results=max_results, search_depth=self.search_depth.value, include_answer=self.include_generated_answer, include_domains=self.include_domains, exclude_domains=self.exclude_domains, include_raw_content=self.include_raw_content, include_images=self.include_images, **self.kwargs, ) docs = [ Document( page_content=result.get("content", "") if not self.include_raw_content else result.get("raw_content", ""), metadata={ "title": result.get("title", ""), "source": result.get("url", ""), **{ k: v for k, v in result.items() if k not in ("content", "title", "url", "raw_content") }, "images": response.get("images"), }, ) for result in response.get("results") ] if self.include_generated_answer: docs = [ Document( page_content=response.get("answer", ""), metadata={ "title": "Suggested Answer", "source": "https://tavily.com/", }, ), *docs, ] return docs
[]
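A minimal usage sketch for the `TavilySearchAPIRetriever` above; it assumes the `tavily-python` package is installed and that `TAVILY_API_KEY` is set in the environment (or passed as `api_key`):

.. code-block:: python

    from langchain.retrievers.tavily_search_api import SearchDepth, TavilySearchAPIRetriever

    retriever = TavilySearchAPIRetriever(
        k=3,
        search_depth=SearchDepth.ADVANCED,
        include_generated_answer=True,
    )

    docs = retriever.get_relevant_documents("What is LangChain?")
    for doc in docs:
        print(doc.metadata.get("source"), "-", doc.page_content[:80])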
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~vald.py
"""Wrapper around Vald vector database.""" from __future__ import annotations from typing import Any, Iterable, List, Optional, Tuple, Type import numpy as np from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.vectorstores.utils import maximal_marginal_relevance class Vald(VectorStore): """Wrapper around Vald vector database. To use, you should have the ``vald-client-python`` python package installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceEmbeddings from langchain.vectorstores import Vald texts = ['foo', 'bar', 'baz'] vald = Vald.from_texts( texts=texts, embedding=HuggingFaceEmbeddings(), host="localhost", port=8080, skip_strict_exist_check=False, ) """ def __init__( self, embedding: Embeddings, host: str = "localhost", port: int = 8080, grpc_options: Tuple = ( ("grpc.keepalive_time_ms", 1000 * 10), ("grpc.keepalive_timeout_ms", 1000 * 10), ), ): self._embedding = embedding self.target = host + ":" + str(port) self.grpc_options = grpc_options @property def embeddings(self) -> Optional[Embeddings]: return self._embedding def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, skip_strict_exist_check: bool = False, **kwargs: Any, ) -> List[str]: """ Args: skip_strict_exist_check: Deprecated. This is not used basically. """ try: import grpc from vald.v1.payload import payload_pb2 from vald.v1.vald import upsert_pb2_grpc except ImportError: raise ValueError( "Could not import vald-client-python python package. " "Please install it with `pip install vald-client-python`." ) channel = grpc.insecure_channel(self.target, options=self.grpc_options) # Depending on the network quality, # it is necessary to wait for ChannelConnectivity.READY. # _ = grpc.channel_ready_future(channel).result(timeout=10) stub = upsert_pb2_grpc.UpsertStub(channel) cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=skip_strict_exist_check) ids = [] embs = self._embedding.embed_documents(list(texts)) for text, emb in zip(texts, embs): vec = payload_pb2.Object.Vector(id=text, vector=emb) res = stub.Upsert(payload_pb2.Upsert.Request(vector=vec, config=cfg)) ids.append(res.uuid) channel.close() return ids def delete( self, ids: Optional[List[str]] = None, skip_strict_exist_check: bool = False, **kwargs: Any, ) -> Optional[bool]: """ Args: skip_strict_exist_check: Deprecated. This is not used basically. """ try: import grpc from vald.v1.payload import payload_pb2 from vald.v1.vald import remove_pb2_grpc except ImportError: raise ValueError( "Could not import vald-client-python python package. " "Please install it with `pip install vald-client-python`." ) if ids is None: raise ValueError("No ids provided to delete") channel = grpc.insecure_channel(self.target, options=self.grpc_options) # Depending on the network quality, # it is necessary to wait for ChannelConnectivity.READY. 
# _ = grpc.channel_ready_future(channel).result(timeout=10) stub = remove_pb2_grpc.RemoveStub(channel) cfg = payload_pb2.Remove.Config(skip_strict_exist_check=skip_strict_exist_check) for _id in ids: oid = payload_pb2.Object.ID(id=_id) _ = stub.Remove(payload_pb2.Remove.Request(id=oid, config=cfg)) channel.close() return True def similarity_search( self, query: str, k: int = 4, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, **kwargs: Any, ) -> List[Document]: docs_and_scores = self.similarity_search_with_score( query, k, radius, epsilon, timeout ) docs = [] for doc, _ in docs_and_scores: docs.append(doc) return docs def similarity_search_with_score( self, query: str, k: int = 4, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, **kwargs: Any, ) -> List[Tuple[Document, float]]: emb = self._embedding.embed_query(query) docs_and_scores = self.similarity_search_with_score_by_vector( emb, k, radius, epsilon, timeout ) return docs_and_scores def similarity_search_by_vector( self, embedding: List[float], k: int = 4, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, **kwargs: Any, ) -> List[Document]: docs_and_scores = self.similarity_search_with_score_by_vector( embedding, k, radius, epsilon, timeout ) docs = [] for doc, _ in docs_and_scores: docs.append(doc) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, **kwargs: Any, ) -> List[Tuple[Document, float]]: try: import grpc from vald.v1.payload import payload_pb2 from vald.v1.vald import search_pb2_grpc except ImportError: raise ValueError( "Could not import vald-client-python python package. " "Please install it with `pip install vald-client-python`." ) channel = grpc.insecure_channel(self.target, options=self.grpc_options) # Depending on the network quality, # it is necessary to wait for ChannelConnectivity.READY. # _ = grpc.channel_ready_future(channel).result(timeout=10) stub = search_pb2_grpc.SearchStub(channel) cfg = payload_pb2.Search.Config( num=k, radius=radius, epsilon=epsilon, timeout=timeout ) res = stub.Search(payload_pb2.Search.Request(vector=embedding, config=cfg)) docs_and_scores = [] for result in res.results: docs_and_scores.append((Document(page_content=result.id), result.distance)) channel.close() return docs_and_scores def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, **kwargs: Any, ) -> List[Document]: emb = self._embedding.embed_query(query) docs = self.max_marginal_relevance_search_by_vector( emb, k=k, fetch_k=fetch_k, radius=radius, epsilon=epsilon, timeout=timeout, lambda_mult=lambda_mult, ) return docs def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, radius: float = -1.0, epsilon: float = 0.01, timeout: int = 3000000000, **kwargs: Any, ) -> List[Document]: try: import grpc from vald.v1.payload import payload_pb2 from vald.v1.vald import object_pb2_grpc except ImportError: raise ValueError( "Could not import vald-client-python python package. " "Please install it with `pip install vald-client-python`." ) channel = grpc.insecure_channel(self.target, options=self.grpc_options) # Depending on the network quality, # it is necessary to wait for ChannelConnectivity.READY. 
# _ = grpc.channel_ready_future(channel).result(timeout=10) stub = object_pb2_grpc.ObjectStub(channel) docs_and_scores = self.similarity_search_with_score_by_vector( embedding, fetch_k=fetch_k, radius=radius, epsilon=epsilon, timeout=timeout ) docs = [] embs = [] for doc, _ in docs_and_scores: vec = stub.GetObject( payload_pb2.Object.VectorRequest( id=payload_pb2.Object.ID(id=doc.page_content) ) ) embs.append(vec.vector) docs.append(doc) mmr = maximal_marginal_relevance( np.array(embedding), embs, lambda_mult=lambda_mult, k=k, ) channel.close() return [docs[i] for i in mmr] @classmethod def from_texts( cls: Type[Vald], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, host: str = "localhost", port: int = 8080, grpc_options: Tuple = ( ("grpc.keepalive_time_ms", 1000 * 10), ("grpc.keepalive_timeout_ms", 1000 * 10), ), skip_strict_exist_check: bool = False, **kwargs: Any, ) -> Vald: """ Args: skip_strict_exist_check: Deprecated. This is not used basically. """ vald = cls( embedding=embedding, host=host, port=port, grpc_options=grpc_options, **kwargs, ) vald.add_texts( texts=texts, metadatas=metadatas, skip_strict_exist_check=skip_strict_exist_check, ) return vald """We will support if there are any requests.""" # async def aadd_texts( # self, # texts: Iterable[str], # metadatas: Optional[List[dict]] = None, # **kwargs: Any, # ) -> List[str]: # pass # # def _select_relevance_score_fn(self) -> Callable[[float], float]: # pass # # def _similarity_search_with_relevance_scores( # self, # query: str, # k: int = 4, # **kwargs: Any, # ) -> List[Tuple[Document, float]]: # pass # # def similarity_search_with_relevance_scores( # self, # query: str, # k: int = 4, # **kwargs: Any, # ) -> List[Tuple[Document, float]]: # pass # # async def amax_marginal_relevance_search_by_vector( # self, # embedding: List[float], # k: int = 4, # fetch_k: int = 20, # lambda_mult: float = 0.5, # **kwargs: Any, # ) -> List[Document]: # pass # # @classmethod # async def afrom_texts( # cls: Type[VST], # texts: List[str], # embedding: Embeddings, # metadatas: Optional[List[dict]] = None, # **kwargs: Any, # ) -> VST: # pass
[]
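A minimal usage sketch for the `Vald` wrapper above, extending the docstring example with a scored search; the host and port are placeholders for a running Vald cluster with `vald-client-python` installed:

.. code-block:: python

    from langchain.embeddings import HuggingFaceEmbeddings
    from langchain.vectorstores import Vald

    # Placeholder host/port for a reachable Vald agent or gateway.
    vald = Vald.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=HuggingFaceEmbeddings(),
        host="localhost",
        port=8080,
    )

    docs_and_scores = vald.similarity_search_with_score("foo", k=2)
    for doc, distance in docs_and_scores:
        print(doc.page_content, distance)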
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~xinference.py
"""Wrapper around Xinference embedding models.""" from typing import Any, List, Optional from langchain_core.schema.embeddings import Embeddings class XinferenceEmbeddings(Embeddings): """Xinference embedding models. To use, you should have the xinference library installed: .. code-block:: bash pip install xinference Check out: https://github.com/xorbitsai/inference To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers. Example: To start a local instance of Xinference, run .. code-block:: bash $ xinference You can also deploy Xinference in a distributed cluster. Here are the steps: Starting the supervisor: .. code-block:: bash $ xinference-supervisor Starting the worker: .. code-block:: bash $ xinference-worker Then, launch a model using command line interface (CLI). Example: .. code-block:: bash $ xinference launch -n orca -s 3 -q q4_0 It will return a model UID. Then you can use Xinference Embedding with LangChain. Example: .. code-block:: python from langchain.embeddings import XinferenceEmbeddings xinference = XinferenceEmbeddings( server_url="http://0.0.0.0:9997", model_uid = {model_uid} # replace model_uid with the model UID return from launching the model ) """ # noqa: E501 client: Any server_url: Optional[str] """URL of the xinference server""" model_uid: Optional[str] """UID of the launched model""" def __init__( self, server_url: Optional[str] = None, model_uid: Optional[str] = None ): try: from xinference.client import RESTfulClient except ImportError as e: raise ImportError( "Could not import RESTfulClient from xinference. Please install it" " with `pip install xinference`." ) from e super().__init__() if server_url is None: raise ValueError("Please provide server URL") if model_uid is None: raise ValueError("Please provide the model UID") self.server_url = server_url self.model_uid = model_uid self.client = RESTfulClient(server_url) def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed a list of documents using Xinference. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ model = self.client.get_model(self.model_uid) embeddings = [ model.create_embedding(text)["data"][0]["embedding"] for text in texts ] return [list(map(float, e)) for e in embeddings] def embed_query(self, text: str) -> List[float]: """Embed a query of documents using Xinference. Args: text: The text to embed. Returns: Embeddings for the text. """ model = self.client.get_model(self.model_uid) embedding_res = model.create_embedding(text) embedding = embedding_res["data"][0]["embedding"] return list(map(float, embedding))
[]
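A minimal embedding sketch for the `XinferenceEmbeddings` class above; the server URL and model UID are placeholders for a running Xinference deployment and a launched model:

.. code-block:: python

    from langchain.embeddings import XinferenceEmbeddings

    xinference = XinferenceEmbeddings(
        server_url="http://0.0.0.0:9997",   # placeholder server URL
        model_uid="my-model-uid",           # placeholder UID returned by `xinference launch`
    )

    doc_vectors = xinference.embed_documents(["alpha", "beta"])
    query_vector = xinference.embed_query("alpha")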
2024-01-10
axgpt/langchain
libs~langchain~langchain~base_language.py
"""Deprecated module for BaseLanguageModel class, kept for backwards compatibility.""" from __future__ import annotations from langchain_core.schema.language_model import BaseLanguageModel __all__ = ["BaseLanguageModel"]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~document_loaders~apify_dataset.py
from typing import Any, Callable, Dict, List from langchain_core.pydantic_v1 import BaseModel, root_validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader class ApifyDatasetLoader(BaseLoader, BaseModel): """Load datasets from `Apify` web scraping, crawling, and data extraction platform. For details, see https://docs.apify.com/platform/integrations/langchain Example: .. code-block:: python from langchain.document_loaders import ApifyDatasetLoader from langchain_core.schema import Document loader = ApifyDatasetLoader( dataset_id="YOUR-DATASET-ID", dataset_mapping_function=lambda dataset_item: Document( page_content=dataset_item["text"], metadata={"source": dataset_item["url"]} ), ) documents = loader.load() """ # noqa: E501 apify_client: Any """An instance of the ApifyClient class from the apify-client Python package.""" dataset_id: str """The ID of the dataset on the Apify platform.""" dataset_mapping_function: Callable[[Dict], Document] """A custom function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class.""" def __init__( self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document] ): """Initialize the loader with an Apify dataset ID and a mapping function. Args: dataset_id (str): The ID of the dataset on the Apify platform. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. """ super().__init__( dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function ) @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate environment. Args: values: The values to validate. """ try: from apify_client import ApifyClient values["apify_client"] = ApifyClient() except ImportError: raise ImportError( "Could not import apify-client Python package. " "Please install it with `pip install apify-client`." ) return values def load(self) -> List[Document]: """Load documents.""" dataset_items = ( self.apify_client.dataset(self.dataset_id).list_items(clean=True).items ) return list(map(self.dataset_mapping_function, dataset_items))
[]
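A small sketch for the `ApifyDatasetLoader` above with a mapping function that tolerates missing fields; the dataset ID is a placeholder, and `ApifyClient()` assumes an Apify API token is available in the environment:

.. code-block:: python

    from langchain.document_loaders import ApifyDatasetLoader
    from langchain_core.schema import Document

    loader = ApifyDatasetLoader(
        dataset_id="YOUR-DATASET-ID",  # placeholder dataset on the Apify platform
        dataset_mapping_function=lambda item: Document(
            page_content=item.get("text", ""),
            metadata={"source": item.get("url", "unknown")},
        ),
    )

    documents = loader.load()
    print(len(documents))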
2024-01-10
axgpt/langchain
libs~langchain~langchain~output_parsers~regex.py
from __future__ import annotations import re from typing import Dict, List, Optional from langchain_core.schema import BaseOutputParser class RegexParser(BaseOutputParser): """Parse the output of an LLM call using a regex.""" @classmethod def is_lc_serializable(cls) -> bool: return True regex: str """The regex to use to parse the output.""" output_keys: List[str] """The keys to use for the output.""" default_output_key: Optional[str] = None """The default key to use for the output.""" @property def _type(self) -> str: """Return the type key.""" return "regex_parser" def parse(self, text: str) -> Dict[str, str]: """Parse the output of an LLM call.""" match = re.search(self.regex, text) if match: return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)} else: if self.default_output_key is None: raise ValueError(f"Could not parse output: {text}") else: return { key: text if key == self.default_output_key else "" for key in self.output_keys }
[]
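A minimal sketch showing both branches of `RegexParser.parse` above: a successful match and the fallback to `default_output_key`; the regex and sample texts are arbitrary placeholders:

.. code-block:: python

    from langchain.output_parsers.regex import RegexParser

    parser = RegexParser(
        regex=r"Score: (\d+)\nExplanation: (.*)",
        output_keys=["score", "explanation"],
        default_output_key="explanation",
    )

    parsed = parser.parse("Score: 8\nExplanation: concise and accurate")
    # -> {"score": "8", "explanation": "concise and accurate"}

    # With no match, everything falls back to the default output key.
    fallback = parser.parse("free-form text with no score")
    # -> {"score": "", "explanation": "free-form text with no score"}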
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~elasticsearch.py
from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from langchain.utils import get_from_env if TYPE_CHECKING: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient from langchain_core.schema.embeddings import Embeddings class ElasticsearchEmbeddings(Embeddings): """Elasticsearch embedding models. This class provides an interface to generate embeddings using a model deployed in an Elasticsearch cluster. It requires an Elasticsearch connection object and the model_id of the model deployed in the cluster. In Elasticsearch you need to have an embedding model loaded and deployed. - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html """ # noqa: E501 def __init__( self, client: MlClient, model_id: str, *, input_field: str = "text_field", ): """ Initialize the ElasticsearchEmbeddings instance. Args: client (MlClient): An Elasticsearch ML client object. model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. """ self.client = client self.model_id = model_id self.input_field = input_field @classmethod def from_credentials( cls, model_id: str, *, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """Instantiate embeddings from Elasticsearch credentials. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to. es_user: (str, optional): Elasticsearch username. es_password: (str, optional): Elasticsearch password. Example: .. code-block:: python from langchain.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Credentials can be passed in two ways. Either set the env vars # ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically # pulled in, or pass them in directly as kwargs. embeddings = ElasticsearchEmbeddings.from_credentials( model_id, input_field=input_field, # es_cloud_id="foo", # es_user="bar", # es_password="baz", ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings_generator.embed_documents(documents) """ try: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient except ImportError: raise ImportError( "elasticsearch package not found, please install with 'pip install " "elasticsearch'" ) es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID") es_user = es_user or get_from_env("es_user", "ES_USER") es_password = es_password or get_from_env("es_password", "ES_PASSWORD") # Connect to Elasticsearch es_connection = Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) client = MlClient(es_connection) return cls(client, model_id, input_field=input_field) @classmethod def from_es_connection( cls, model_id: str, es_connection: Elasticsearch, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """ Instantiate embeddings from an existing Elasticsearch connection. 
This method provides a way to create an instance of the ElasticsearchEmbeddings class using an existing Elasticsearch connection. The connection object is used to create an MlClient, which is then used to initialize the ElasticsearchEmbeddings instance. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch connection object. input_field (str, optional): The name of the key for the input text field in the document. Defaults to 'text_field'. Returns: ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class. Example: .. code-block:: python from elasticsearch import Elasticsearch from langchain.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Create Elasticsearch connection es_connection = Elasticsearch( hosts=["localhost:9200"], http_auth=("user", "password") ) # Instantiate ElasticsearchEmbeddings using the existing connection embeddings = ElasticsearchEmbeddings.from_es_connection( model_id, es_connection, input_field=input_field, ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings.embed_documents(documents) """ # Importing MlClient from elasticsearch.client within the method to # avoid unnecessary import if the method is not used from elasticsearch.client import MlClient # Create an MlClient from the given Elasticsearch connection client = MlClient(es_connection) # Return a new instance of the ElasticsearchEmbeddings class with # the MlClient, model_id, and input_field return cls(client, model_id, input_field=input_field) def _embedding_func(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for the given texts using the Elasticsearch model. Args: texts (List[str]): A list of text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each text in the input list. """ response = self.client.infer_trained_model( model_id=self.model_id, docs=[{self.input_field: text} for text in texts] ) embeddings = [doc["predicted_value"] for doc in response["inference_results"]] return embeddings def embed_documents(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for a list of documents. Args: texts (List[str]): A list of document text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each document in the input list. """ return self._embedding_func(texts) def embed_query(self, text: str) -> List[float]: """ Generate an embedding for a single query text. Args: text (str): The query text to generate an embedding for. Returns: List[float]: The embedding for the input query text. """ return self._embedding_func([text])[0]
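# A minimal usage sketch for the class above, assuming an embedding model is
# already loaded and deployed in the target Elasticsearch cluster. The model id
# and credentials below are placeholders, not real values.
def _example_usage() -> None:
    # Credentials may also be supplied via the ES_CLOUD_ID, ES_USER and
    # ES_PASSWORD environment variables instead of keyword arguments.
    embeddings = ElasticsearchEmbeddings.from_credentials(
        "your_model_id",              # placeholder model id
        es_cloud_id="your_cloud_id",  # placeholder
        es_user="your_user",          # placeholder
        es_password="your_password",  # placeholder
    )
    doc_vectors = embeddings.embed_documents(
        ["This is an example document.", "Another example document."]
    )
    query_vector = embeddings.embed_query("What is the first document about?")
    print(len(doc_vectors), len(query_vector))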
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_models~pai_eas_endpoint.py
import asyncio import json import logging from functools import partial from typing import Any, AsyncIterator, Dict, List, Optional, cast import requests from langchain_core.pydantic_v1 import root_validator from langchain_core.schema import ChatGeneration, ChatResult from langchain_core.schema.messages import ( AIMessage, AIMessageChunk, BaseMessage, ChatMessage, HumanMessage, SystemMessage, ) from langchain_core.schema.output import ChatGenerationChunk from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class PaiEasChatEndpoint(BaseChatModel): """PAI-EAS chat LLM service API. To use, you must have a chat LLM service deployed on AliCloud PAI-EAS. Set the environment variables ``EAS_SERVICE_URL`` and ``EAS_SERVICE_TOKEN`` to your EAS service URL and service token, or pass ``eas_service_url`` and ``eas_service_token`` directly. Example: .. code-block:: python from langchain.chat_models import PaiEasChatEndpoint eas_chat_endpoint = PaiEasChatEndpoint( eas_service_url="your_service_url", eas_service_token="your_service_token" ) """ """PAI-EAS Service URL""" eas_service_url: str """PAI-EAS Service TOKEN""" eas_service_token: str """PAI-EAS Service Infer Params""" max_new_tokens: Optional[int] = 512 temperature: Optional[float] = 0.8 top_p: Optional[float] = 0.1 top_k: Optional[int] = 10 do_sample: Optional[bool] = False use_cache: Optional[bool] = True stop_sequences: Optional[List[str]] = None """Enable stream chat mode.""" streaming: bool = False """Key/value arguments to pass to the model. Reserved for future use""" model_kwargs: Optional[dict] = None version: Optional[str] = "2.0" timeout: Optional[int] = 5000 @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the EAS service url and token exist in the environment or constructor arguments.""" values["eas_service_url"] = get_from_dict_or_env( values, "eas_service_url", "EAS_SERVICE_URL" ) values["eas_service_token"] = get_from_dict_or_env( values, "eas_service_token", "EAS_SERVICE_TOKEN" ) return values @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { "eas_service_url": self.eas_service_url, "eas_service_token": self.eas_service_token, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "pai_eas_chat_endpoint" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the PAI-EAS service.""" return { "max_new_tokens": self.max_new_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "stop_sequences": [], "do_sample": self.do_sample, "use_cache": self.use_cache, } def _invocation_params( self, stop_sequences: Optional[List[str]], **kwargs: Any ) -> dict: params = self._default_params if self.model_kwargs: params.update(self.model_kwargs) if self.stop_sequences is not None and stop_sequences is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop_sequences is not None: params["stop"] = self.stop_sequences else: params["stop"] = stop_sequences return {**params, **kwargs} def format_request_payload( self, messages: List[BaseMessage], **model_kwargs: Any ) -> dict: prompt: Dict[str, Any] = {} user_content: List[str] = [] assistant_content: List[str] = [] for message in messages: """Converts message to a dict 
according to role""" content = cast(str, message.content) if isinstance(message, HumanMessage): user_content = user_content + [content] elif isinstance(message, AIMessage): assistant_content = assistant_content + [content] elif isinstance(message, SystemMessage): prompt["system_prompt"] = content elif isinstance(message, ChatMessage) and message.role in [ "user", "assistant", "system", ]: if message.role == "system": prompt["system_prompt"] = content elif message.role == "user": user_content = user_content + [content] elif message.role == "assistant": assistant_content = assistant_content + [content] else: supported = ",".join([role for role in ["user", "assistant", "system"]]) raise ValueError( f"""Received unsupported role. Supported roles for the LLaMa Foundation Model: {supported}""" ) prompt["prompt"] = user_content[len(user_content) - 1] history = [ history_item for _, history_item in enumerate(zip(user_content[:-1], assistant_content)) ] prompt["history"] = history return {**prompt, **model_kwargs} def _format_response_payload( self, output: bytes, stop_sequences: Optional[List[str]] ) -> str: """Formats response""" try: text = json.loads(output)["response"] if stop_sequences: text = enforce_stop_tokens(text, stop_sequences) return text except Exception as e: if isinstance(e, json.decoder.JSONDecodeError): return output.decode("utf-8") raise e def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs) message = AIMessage(content=output_str) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) def _call( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: params = self._invocation_params(stop, **kwargs) request_payload = self.format_request_payload(messages, **params) response_payload = self._call_eas(request_payload) generated_text = self._format_response_payload(response_payload, params["stop"]) if run_manager: run_manager.on_llm_new_token(generated_text) return generated_text def _call_eas(self, query_body: dict) -> Any: """Generate text from the eas service.""" headers = { "Content-Type": "application/json", "Accept": "application/json", "Authorization": f"{self.eas_service_token}", } # make request response = requests.post( self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout ) if response.status_code != 200: raise Exception( f"Request failed with status code {response.status_code}" f" and message {response.text}" ) return response.text def _call_eas_stream(self, query_body: dict) -> Any: """Generate text from the eas service.""" headers = { "Content-Type": "application/json", "Accept": "application/json", "Authorization": f"{self.eas_service_token}", } # make request response = requests.post( self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout ) if response.status_code != 200: raise Exception( f"Request failed with status code {response.status_code}" f" and message {response.text}" ) return response def _convert_chunk_to_message_message( self, chunk: str, ) -> AIMessageChunk: data = json.loads(chunk.encode("utf-8")) return AIMessageChunk(content=data.get("response", "")) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: 
Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: params = self._invocation_params(stop, **kwargs) request_payload = self.format_request_payload(messages, **params) request_payload["use_stream_chat"] = True response = self._call_eas_stream(request_payload) for chunk in response.iter_lines( chunk_size=8192, decode_unicode=False, delimiter=b"\0" ): if chunk: content = self._convert_chunk_to_message_message(chunk) # identify stop sequence in generated text, if any stop_seq_found: Optional[str] = None for stop_seq in params["stop"]: if stop_seq in content.content: stop_seq_found = stop_seq # identify text to yield, truncating at the stop sequence if one was found text: Optional[str] = None if stop_seq_found: text = content.content[ : content.content.index(stop_seq_found) ] content.content = text else: text = content.content # yield text, if any if text: if run_manager: await run_manager.on_llm_new_token(cast(str, content.content)) yield ChatGenerationChunk(message=content) # break if stop sequence found if stop_seq_found: break async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: if stream if stream is not None else self.streaming: generation: Optional[ChatGenerationChunk] = None async for chunk in self._astream( messages=messages, stop=stop, run_manager=run_manager, **kwargs ): generation = chunk assert generation is not None return ChatResult(generations=[generation]) func = partial( self._generate, messages, stop=stop, run_manager=run_manager, **kwargs ) return await asyncio.get_event_loop().run_in_executor(None, func)
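# A minimal usage sketch for the chat model above, assuming a chat LLM service
# is already deployed on PAI-EAS. The service URL and token are placeholders;
# they can also be provided via the EAS_SERVICE_URL and EAS_SERVICE_TOKEN
# environment variables.
def _example_usage() -> None:
    chat = PaiEasChatEndpoint(
        eas_service_url="your_service_url",      # placeholder
        eas_service_token="your_service_token",  # placeholder
        max_new_tokens=256,
        temperature=0.8,
    )
    messages = [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="Introduce yourself in one sentence."),
    ]
    # Chat models are callable with a list of messages and return a single
    # AIMessage containing the model reply.
    reply = chat(messages)
    print(reply.content)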
[ "{}" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~baidu_qianfan_endpoint.py
from __future__ import annotations import logging from typing import Any, Dict, List, Optional from langchain_core.pydantic_v1 import BaseModel, root_validator from langchain_core.schema.embeddings import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class QianfanEmbeddingsEndpoint(BaseModel, Embeddings): """`Baidu Qianfan Embeddings` embedding models.""" qianfan_ak: Optional[str] = None """Qianfan application apikey""" qianfan_sk: Optional[str] = None """Qianfan application secretkey""" chunk_size: int = 16 """Chunk size when multiple texts are input""" model: str = "Embedding-V1" """Model name; see https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu for the full list. Currently supported preset models: - Embedding-V1 (default model) - bge-large-en - bge-large-zh Each preset model maps to an endpoint. `model` will be ignored if `endpoint` is set """ endpoint: str = "" """Endpoint of the Qianfan Embedding, required if custom model used.""" client: Any """Qianfan client""" max_retries: int = 5 """Maximum number of retries""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """ Validate whether qianfan_ak and qianfan_sk are available in the environment variables or configuration file, and initialize the qianfan embedding client with `ak`, `sk`, `model` and `endpoint`. Args: values: a dictionary containing configuration information, must include the fields of qianfan_ak and qianfan_sk Returns: a dictionary containing configuration information. If qianfan_ak and qianfan_sk are not provided in the environment variables or configuration file, the original values will be returned; otherwise, values containing qianfan_ak and qianfan_sk will be returned. Raises: ValueError: qianfan package not found, please install it with `pip install qianfan` """ values["qianfan_ak"] = get_from_dict_or_env( values, "qianfan_ak", "QIANFAN_AK", ) values["qianfan_sk"] = get_from_dict_or_env( values, "qianfan_sk", "QIANFAN_SK", ) try: import qianfan params = { "ak": values["qianfan_ak"], "sk": values["qianfan_sk"], "model": values["model"], } if values["endpoint"] is not None and values["endpoint"] != "": params["endpoint"] = values["endpoint"] values["client"] = qianfan.Embedding(**params) except ImportError: raise ImportError( "qianfan package not found, please install it with " "`pip install qianfan`" ) return values def embed_query(self, text: str) -> List[float]: resp = self.embed_documents([text]) return resp[0] def embed_documents(self, texts: List[str]) -> List[List[float]]: """ Embeds a list of text documents using the Qianfan Embedding endpoint. Args: texts (List[str]): A list of text documents to embed. Returns: List[List[float]]: A list of embeddings for each document in the input list. Each embedding is represented as a list of float values. """ text_in_chunks = [ texts[i : i + self.chunk_size] for i in range(0, len(texts), self.chunk_size) ] lst = [] for chunk in text_in_chunks: resp = self.client.do(texts=chunk) lst.extend([res["embedding"] for res in resp["data"]]) return lst async def aembed_query(self, text: str) -> List[float]: embeddings = await self.aembed_documents([text]) return embeddings[0] async def aembed_documents(self, texts: List[str]) -> List[List[float]]: text_in_chunks = [ texts[i : i + self.chunk_size] for i in range(0, len(texts), self.chunk_size) ] lst = [] for chunk in text_in_chunks: resp = await self.client.ado(texts=chunk) for res in resp["data"]: lst.extend([res["embedding"]]) return lst
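# A minimal usage sketch for the class above. The ak/sk values are
# placeholders; they can also be supplied via the QIANFAN_AK and QIANFAN_SK
# environment variables, in which case no constructor arguments are needed.
def _example_usage() -> None:
    embedder = QianfanEmbeddingsEndpoint(
        qianfan_ak="your_ak",  # placeholder
        qianfan_sk="your_sk",  # placeholder
        chunk_size=16,         # texts are sent to the API in batches of this size
    )
    vectors = embedder.embed_documents(
        ["This is an example document.", "Another example document."]
    )
    query_vector = embedder.embed_query("An example query.")
    print(len(vectors), len(query_vector))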
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_loaders~slack.py
import json import logging import re import zipfile from pathlib import Path from typing import Dict, Iterator, List, Union from langchain_core.schema import AIMessage, HumanMessage from langchain_core.schema.chat import ChatSession from langchain.chat_loaders.base import BaseChatLoader logger = logging.getLogger(__name__) class SlackChatLoader(BaseChatLoader): """Load `Slack` conversations from a dump zip file.""" def __init__( self, path: Union[str, Path], ): """ Initialize the chat loader with the path to the exported Slack dump zip file. :param path: Path to the exported Slack dump zip file. """ self.zip_path = path if isinstance(path, Path) else Path(path) if not self.zip_path.exists(): raise FileNotFoundError(f"File {self.zip_path} not found") def _load_single_chat_session(self, messages: List[Dict]) -> ChatSession: results: List[Union[AIMessage, HumanMessage]] = [] previous_sender = None for message in messages: if not isinstance(message, dict): continue text = message.get("text", "") timestamp = message.get("ts", "") sender = message.get("user", "") if not sender: continue skip_pattern = re.compile( r"<@U\d+> has joined the channel", flags=re.IGNORECASE ) if skip_pattern.match(text): continue if sender == previous_sender: results[-1].content += "\n\n" + text results[-1].additional_kwargs["events"].append( {"message_time": timestamp} ) else: results.append( HumanMessage( role=sender, content=text, additional_kwargs={ "sender": sender, "events": [{"message_time": timestamp}], }, ) ) previous_sender = sender return ChatSession(messages=results) def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]: """Read JSON data from a zip subfile.""" with zip_file.open(file_path, "r") as f: data = json.load(f) if not isinstance(data, list): raise ValueError(f"Expected list of dictionaries, got {type(data)}") return data def lazy_load(self) -> Iterator[ChatSession]: """ Lazy load the chat sessions from the Slack dump file and yield them in the required format. :return: Iterator of chat sessions containing messages. """ with zipfile.ZipFile(str(self.zip_path), "r") as zip_file: for file_path in zip_file.namelist(): if file_path.endswith(".json"): messages = self._read_json(zip_file, file_path) yield self._load_single_chat_session(messages)
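# A minimal usage sketch for the loader above. The zip path is a placeholder;
# point it at a real Slack export archive before running.
def _example_usage() -> None:
    loader = SlackChatLoader(path="slack_export.zip")  # placeholder path
    for session in loader.lazy_load():
        for message in session["messages"]:
            sender = message.additional_kwargs.get("sender", "unknown")
            print(f"{sender}: {message.content}")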
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_models~gigachat.py
import logging from typing import Any, AsyncIterator, Iterator, List, Optional from langchain_core.schema import ChatResult from langchain_core.schema.messages import ( AIMessage, AIMessageChunk, BaseMessage, ChatMessage, HumanMessage, SystemMessage, ) from langchain_core.schema.output import ChatGeneration, ChatGenerationChunk from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import ( BaseChatModel, _agenerate_from_stream, _generate_from_stream, ) from langchain.llms.gigachat import _BaseGigaChat logger = logging.getLogger(__name__) def _convert_dict_to_message(message: Any) -> BaseMessage: from gigachat.models import MessagesRole if message.role == MessagesRole.SYSTEM: return SystemMessage(content=message.content) elif message.role == MessagesRole.USER: return HumanMessage(content=message.content) elif message.role == MessagesRole.ASSISTANT: return AIMessage(content=message.content) else: raise TypeError(f"Got unknown role {message.role} {message}") def _convert_message_to_dict(message: BaseMessage) -> Any: from gigachat.models import Messages, MessagesRole if isinstance(message, SystemMessage): return Messages(role=MessagesRole.SYSTEM, content=message.content) elif isinstance(message, HumanMessage): return Messages(role=MessagesRole.USER, content=message.content) elif isinstance(message, AIMessage): return Messages(role=MessagesRole.ASSISTANT, content=message.content) elif isinstance(message, ChatMessage): return Messages(role=MessagesRole(message.role), content=message.content) else: raise TypeError(f"Got unknown type {message}") class GigaChat(_BaseGigaChat, BaseChatModel): """`GigaChat` large language models API. To use, you should pass login and password to access GigaChat API or use token. Example: .. 
code-block:: python from langchain.chat_models import GigaChat giga = GigaChat(credentials=..., verify_ssl_certs=False) """ def _build_payload(self, messages: List[BaseMessage]) -> Any: from gigachat.models import Chat payload = Chat( messages=[_convert_message_to_dict(m) for m in messages], profanity_check=self.profanity, ) if self.temperature is not None: payload.temperature = self.temperature if self.max_tokens is not None: payload.max_tokens = self.max_tokens if self.verbose: logger.info("Giga request: %s", payload.dict()) return payload def _create_chat_result(self, response: Any) -> ChatResult: generations = [] for res in response.choices: message = _convert_dict_to_message(res.message) finish_reason = res.finish_reason gen = ChatGeneration( message=message, generation_info={"finish_reason": finish_reason}, ) generations.append(gen) if finish_reason != "stop": logger.warning( "Giga generation stopped with reason: %s", finish_reason, ) if self.verbose: logger.info("Giga response: %s", message.content) llm_output = {"token_usage": response.usage, "model_name": response.model} return ChatResult(generations=generations, llm_output=llm_output) def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: should_stream = stream if stream is not None else self.streaming if should_stream: stream_iter = self._stream( messages, stop=stop, run_manager=run_manager, **kwargs ) return _generate_from_stream(stream_iter) payload = self._build_payload(messages) response = self._client.chat(payload) return self._create_chat_result(response) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> ChatResult: should_stream = stream if stream is not None else self.streaming if should_stream: stream_iter = self._astream( messages, stop=stop, run_manager=run_manager, **kwargs ) return await _agenerate_from_stream(stream_iter) payload = self._build_payload(messages) response = await self._client.achat(payload) return self._create_chat_result(response) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: payload = self._build_payload(messages) for chunk in self._client.stream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield ChatGenerationChunk(message=AIMessageChunk(content=content)) if run_manager: run_manager.on_llm_new_token(content) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: payload = self._build_payload(messages) async for chunk in self._client.astream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield ChatGenerationChunk(message=AIMessageChunk(content=content)) if run_manager: await run_manager.on_llm_new_token(content) def get_num_tokens(self, text: str) -> int: """Count approximate number of tokens""" return round(len(text) / 4.6)
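# A minimal usage sketch for the chat model above. The credentials value is a
# placeholder for a real GigaChat authorization token, and SSL verification is
# disabled here only for illustration.
def _example_usage() -> None:
    giga = GigaChat(credentials="your_credentials", verify_ssl_certs=False)  # placeholder
    # Chat models are callable with a list of messages and return an AIMessage.
    reply = giga([HumanMessage(content="Hello! Who are you?")])
    print(reply.content)
    # Rough token estimate used by get_num_tokens above (~4.6 characters per token).
    print(giga.get_num_tokens(reply.content))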
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~qdrant.py
from __future__ import annotations import asyncio import functools import uuid import warnings from itertools import islice from operator import itemgetter from typing import ( TYPE_CHECKING, Any, AsyncGenerator, Callable, Dict, Generator, Iterable, List, Optional, Sequence, Tuple, Type, Union, ) import numpy as np from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: from qdrant_client import grpc # noqa from qdrant_client.conversions import common_types from qdrant_client.http import models as rest DictFilter = Dict[str, Union[str, int, bool, dict, list]] MetadataFilter = Union[DictFilter, common_types.Filter] class QdrantException(Exception): """`Qdrant` related exceptions.""" def sync_call_fallback(method: Callable) -> Callable: """ Decorator to call the synchronous method of the class if the async method is not implemented. This decorator should only be used for methods that are defined as async in the class. """ @functools.wraps(method) async def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: try: return await method(self, *args, **kwargs) except NotImplementedError: # If the async method is not implemented, call the synchronous method # by removing the first letter from the method name. For example, # if the async method is called ``aadd_texts``, the synchronous method # will be called ``add_texts``. sync_method = functools.partial( getattr(self, method.__name__[1:]), *args, **kwargs ) return await asyncio.get_event_loop().run_in_executor(None, sync_method) return wrapper class Qdrant(VectorStore): """`Qdrant` vector store. To use, you should have the ``qdrant-client`` package installed. Example: .. code-block:: python from qdrant_client import QdrantClient from langchain.vectorstores import Qdrant client = QdrantClient() collection_name = "MyCollection" qdrant = Qdrant(client, collection_name, embedding_function) """ CONTENT_KEY = "page_content" METADATA_KEY = "metadata" VECTOR_NAME = None def __init__( self, client: Any, collection_name: str, embeddings: Optional[Embeddings] = None, content_payload_key: str = CONTENT_KEY, metadata_payload_key: str = METADATA_KEY, distance_strategy: str = "COSINE", vector_name: Optional[str] = VECTOR_NAME, embedding_function: Optional[Callable] = None, # deprecated ): """Initialize with necessary components.""" try: import qdrant_client except ImportError: raise ImportError( "Could not import qdrant-client python package. " "Please install it with `pip install qdrant-client`." ) if not isinstance(client, qdrant_client.QdrantClient): raise ValueError( f"client should be an instance of qdrant_client.QdrantClient, " f"got {type(client)}" ) if embeddings is None and embedding_function is None: raise ValueError( "`embeddings` value can't be None. Pass `Embeddings` instance." ) if embeddings is not None and embedding_function is not None: raise ValueError( "Both `embeddings` and `embedding_function` are passed. " "Use `embeddings` only." 
) self._embeddings = embeddings self._embeddings_function = embedding_function self.client: qdrant_client.QdrantClient = client self.collection_name = collection_name self.content_payload_key = content_payload_key or self.CONTENT_KEY self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY self.vector_name = vector_name or self.VECTOR_NAME if embedding_function is not None: warnings.warn( "Using `embedding_function` is deprecated. " "Pass `Embeddings` instance to `embeddings` instead." ) if not isinstance(embeddings, Embeddings): warnings.warn( "`embeddings` should be an instance of `Embeddings`." "Using `embeddings` as `embedding_function` which is deprecated" ) self._embeddings_function = embeddings self._embeddings = None self.distance_strategy = distance_strategy.upper() @property def embeddings(self) -> Optional[Embeddings]: return self._embeddings def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[Sequence[str]] = None, batch_size: int = 64, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Ids have to be uuid-like strings. batch_size: How many vectors upload per-request. Default: 64 Returns: List of ids from adding the texts into the vectorstore. """ added_ids = [] for batch_ids, points in self._generate_rest_batches( texts, metadatas, ids, batch_size ): self.client.upsert( collection_name=self.collection_name, points=points, **kwargs ) added_ids.extend(batch_ids) return added_ids @sync_call_fallback async def aadd_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[Sequence[str]] = None, batch_size: int = 64, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Ids have to be uuid-like strings. batch_size: How many vectors upload per-request. Default: 64 Returns: List of ids from adding the texts into the vectorstore. """ from qdrant_client import grpc # noqa from qdrant_client.conversions.conversion import RestToGrpc added_ids = [] async for batch_ids, points in self._agenerate_rest_batches( texts, metadatas, ids, batch_size ): await self.client.async_grpc_points.Upsert( grpc.UpsertPoints( collection_name=self.collection_name, points=[RestToGrpc.convert_point_struct(point) for point in points], ) ) added_ids.extend(batch_ids) return added_ids def similarity_search( self, query: str, k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. 
Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.search() Returns: List of Documents most similar to the query. """ results = self.similarity_search_with_score( query, k, filter=filter, search_params=search_params, offset=offset, score_threshold=score_threshold, consistency=consistency, **kwargs, ) return list(map(itemgetter(0), results)) @sync_call_fallback async def asimilarity_search( self, query: str, k: int = 4, filter: Optional[MetadataFilter] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ results = await self.asimilarity_search_with_score(query, k, filter, **kwargs) return list(map(itemgetter(0), results)) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.search() Returns: List of documents most similar to the query text and distance for each. 
""" return self.similarity_search_with_score_by_vector( self._embed_query(query), k, filter=filter, search_params=search_params, offset=offset, score_threshold=score_threshold, consistency=consistency, **kwargs, ) @sync_call_fallback async def asimilarity_search_with_score( self, query: str, k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.async_grpc_points.Search(). Returns: List of documents most similar to the query text and distance for each. """ return await self.asimilarity_search_with_score_by_vector( self._embed_query(query), k, filter=filter, search_params=search_params, offset=offset, score_threshold=score_threshold, consistency=consistency, **kwargs, ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding vector to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. 
Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.search() Returns: List of Documents most similar to the query. """ results = self.similarity_search_with_score_by_vector( embedding, k, filter=filter, search_params=search_params, offset=offset, score_threshold=score_threshold, consistency=consistency, **kwargs, ) return list(map(itemgetter(0), results)) @sync_call_fallback async def asimilarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding vector to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.async_grpc_points.Search(). Returns: List of Documents most similar to the query. """ results = await self.asimilarity_search_with_score_by_vector( embedding, k, filter=filter, search_params=search_params, offset=offset, score_threshold=score_threshold, consistency=consistency, **kwargs, ) return list(map(itemgetter(0), results)) def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to embedding vector. Args: embedding: Embedding vector to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. 
Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.search() Returns: List of documents most similar to the query text and distance for each. """ if filter is not None and isinstance(filter, dict): warnings.warn( "Using dict as a `filter` is deprecated. Please use qdrant-client " "filters directly: " "https://qdrant.tech/documentation/concepts/filtering/", DeprecationWarning, ) qdrant_filter = self._qdrant_filter_from_dict(filter) else: qdrant_filter = filter query_vector = embedding if self.vector_name is not None: query_vector = (self.vector_name, embedding) # type: ignore[assignment] results = self.client.search( collection_name=self.collection_name, query_vector=query_vector, query_filter=qdrant_filter, search_params=search_params, limit=k, offset=offset, with_payload=True, with_vectors=False, # Langchain does not expect vectors to be returned score_threshold=score_threshold, consistency=consistency, **kwargs, ) return [ ( self._document_from_scored_point( result, self.content_payload_key, self.metadata_payload_key ), result.score, ) for result in results ] async def _asearch_with_score_by_vector( self, embedding: List[float], *, k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, with_vectors: bool = False, **kwargs: Any, ) -> Any: """Return results most similar to embedding vector.""" from qdrant_client import grpc # noqa from qdrant_client.conversions.conversion import RestToGrpc from qdrant_client.http import models as rest if filter is not None and isinstance(filter, dict): warnings.warn( "Using dict as a `filter` is deprecated. 
Please use qdrant-client " "filters directly: " "https://qdrant.tech/documentation/concepts/filtering/", DeprecationWarning, ) qdrant_filter = self._qdrant_filter_from_dict(filter) else: qdrant_filter = filter if qdrant_filter is not None and isinstance(qdrant_filter, rest.Filter): qdrant_filter = RestToGrpc.convert_filter(qdrant_filter) response = await self.client.async_grpc_points.Search( grpc.SearchPoints( collection_name=self.collection_name, vector_name=self.vector_name, vector=embedding, filter=qdrant_filter, params=search_params, limit=k, offset=offset, with_payload=grpc.WithPayloadSelector(enable=True), with_vectors=grpc.WithVectorsSelector(enable=with_vectors), score_threshold=score_threshold, read_consistency=consistency, **kwargs, ) ) return response @sync_call_fallback async def asimilarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, offset: int = 0, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to embedding vector. Args: embedding: Embedding vector to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.async_grpc_points.Search(). Returns: List of documents most similar to the query text and distance for each. """ response = await self._asearch_with_score_by_vector( embedding, k=k, filter=filter, search_params=search_params, offset=offset, score_threshold=score_threshold, consistency=consistency, **kwargs, ) return [ ( self._document_from_scored_point_grpc( result, self.content_payload_key, self.metadata_payload_key ), result.score, ) for result in response.result ] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. 
lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter by metadata. Defaults to None. search_params: Additional search params score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.search() Returns: List of Documents selected by maximal marginal relevance. """ query_embedding = self._embed_query(query) return self.max_marginal_relevance_search_by_vector( query_embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, search_params=search_params, score_threshold=score_threshold, consistency=consistency, **kwargs, ) @sync_call_fallback async def amax_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter by metadata. Defaults to None. search_params: Additional search params score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.async_grpc_points.Search(). Returns: List of Documents selected by maximal marginal relevance. 
""" query_embedding = self._embed_query(query) return await self.amax_marginal_relevance_search_by_vector( query_embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, search_params=search_params, score_threshold=score_threshold, consistency=consistency, **kwargs, ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter by metadata. Defaults to None. search_params: Additional search params score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.search() Returns: List of Documents selected by maximal marginal relevance. """ results = self.max_marginal_relevance_search_with_score_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, search_params=search_params, score_threshold=score_threshold, consistency=consistency, **kwargs, ) return list(map(itemgetter(0), results)) @sync_call_fallback async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter by metadata. Defaults to None. search_params: Additional search params score_threshold: Define a minimal score threshold for the result. 
If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.async_grpc_points.Search(). Returns: List of Documents selected by maximal marginal relevance and distance for each. """ results = await self.amax_marginal_relevance_search_with_score_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, search_params=search_params, score_threshold=score_threshold, consistency=consistency, **kwargs, ) return list(map(itemgetter(0), results)) def max_marginal_relevance_search_with_score_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter by metadata. Defaults to None. search_params: Additional search params score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.search() Returns: List of Documents selected by maximal marginal relevance and distance for each. 
""" query_vector = embedding if self.vector_name is not None: query_vector = (self.vector_name, query_vector) # type: ignore[assignment] results = self.client.search( collection_name=self.collection_name, query_vector=query_vector, query_filter=filter, search_params=search_params, limit=fetch_k, with_payload=True, with_vectors=True, score_threshold=score_threshold, consistency=consistency, **kwargs, ) embeddings = [ result.vector.get(self.vector_name) # type: ignore[index, union-attr] if self.vector_name is not None else result.vector for result in results ] mmr_selected = maximal_marginal_relevance( np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult ) return [ ( self._document_from_scored_point( results[i], self.content_payload_key, self.metadata_payload_key ), results[i].score, ) for i in mmr_selected ] @sync_call_fallback async def amax_marginal_relevance_search_with_score_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[MetadataFilter] = None, search_params: Optional[common_types.SearchParams] = None, score_threshold: Optional[float] = None, consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance and distance for each. """ from qdrant_client.conversions.conversion import GrpcToRest response = await self._asearch_with_score_by_vector( embedding, k=fetch_k, filter=filter, search_params=search_params, score_threshold=score_threshold, consistency=consistency, with_vectors=True, **kwargs, ) results = [ GrpcToRest.convert_vectors(result.vectors) for result in response.result ] embeddings: List[List[float]] = [ result.get(self.vector_name) # type: ignore if isinstance(result, dict) else result for result in results ] mmr_selected: List[int] = maximal_marginal_relevance( np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult, ) return [ ( self._document_from_scored_point_grpc( response.result[i], self.content_payload_key, self.metadata_payload_key, ), response.result[i].score, ) for i in mmr_selected ] def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. 
""" from qdrant_client.http import models as rest result = self.client.delete( collection_name=self.collection_name, points_selector=ids, ) return result.status == rest.UpdateStatus.COMPLETED @classmethod def from_texts( cls: Type[Qdrant], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[Sequence[str]] = None, location: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = 6333, grpc_port: int = 6334, prefer_grpc: bool = False, https: Optional[bool] = None, api_key: Optional[str] = None, prefix: Optional[str] = None, timeout: Optional[float] = None, host: Optional[str] = None, path: Optional[str] = None, collection_name: Optional[str] = None, distance_func: str = "Cosine", content_payload_key: str = CONTENT_KEY, metadata_payload_key: str = METADATA_KEY, vector_name: Optional[str] = VECTOR_NAME, batch_size: int = 64, shard_number: Optional[int] = None, replication_factor: Optional[int] = None, write_consistency_factor: Optional[int] = None, on_disk_payload: Optional[bool] = None, hnsw_config: Optional[common_types.HnswConfigDiff] = None, optimizers_config: Optional[common_types.OptimizersConfigDiff] = None, wal_config: Optional[common_types.WalConfigDiff] = None, quantization_config: Optional[common_types.QuantizationConfig] = None, init_from: Optional[common_types.InitFrom] = None, on_disk: Optional[bool] = None, force_recreate: bool = False, **kwargs: Any, ) -> Qdrant: """Construct Qdrant wrapper from a list of texts. Args: texts: A list of texts to be indexed in Qdrant. embedding: A subclass of `Embeddings`, responsible for text vectorization. metadatas: An optional list of metadata. If provided it has to be of the same length as a list of texts. ids: Optional list of ids to associate with the texts. Ids have to be uuid-like strings. location: If `:memory:` - use in-memory Qdrant instance. If `str` - use it as a `url` parameter. If `None` - fallback to relying on `host` and `port` parameters. url: either host or str of "Optional[scheme], host, Optional[port], Optional[prefix]". Default: `None` port: Port of the REST API interface. Default: 6333 grpc_port: Port of the gRPC interface. Default: 6334 prefer_grpc: If true - use gPRC interface whenever possible in custom methods. Default: False https: If true - use HTTPS(SSL) protocol. Default: None api_key: API key for authentication in Qdrant Cloud. Default: None prefix: If not None - add prefix to the REST URL path. Example: service/v1 will result in http://localhost:6333/service/v1/{qdrant-endpoint} for REST API. Default: None timeout: Timeout for REST and gRPC API requests. Default: 5.0 seconds for REST and unlimited for gRPC host: Host name of Qdrant service. If url and host are None, set to 'localhost'. Default: None path: Path in which the vectors will be stored while using local mode. Default: None collection_name: Name of the Qdrant collection to be used. If not provided, it will be created randomly. Default: None distance_func: Distance function. One of: "Cosine" / "Euclid" / "Dot". Default: "Cosine" content_payload_key: A payload key used to store the content of the document. Default: "page_content" metadata_payload_key: A payload key used to store the metadata of the document. Default: "metadata" vector_name: Name of the vector to be used internally in Qdrant. Default: None batch_size: How many vectors upload per-request. Default: 64 shard_number: Number of shards in collection. Default is 1, minimum is 1. replication_factor: Replication factor for collection. 
Default is 1, minimum is 1. Defines how many copies of each shard will be created. Have effect only in distributed mode. write_consistency_factor: Write consistency factor for collection. Default is 1, minimum is 1. Defines how many replicas should apply the operation for us to consider it successful. Increasing this number will make the collection more resilient to inconsistencies, but will also make it fail if not enough replicas are available. Does not have any performance impact. Have effect only in distributed mode. on_disk_payload: If true - point`s payload will not be stored in memory. It will be read from the disk every time it is requested. This setting saves RAM by (slightly) increasing the response time. Note: those payload values that are involved in filtering and are indexed - remain in RAM. hnsw_config: Params for HNSW index optimizers_config: Params for optimizer wal_config: Params for Write-Ahead-Log quantization_config: Params for quantization, if None - quantization will be disabled init_from: Use data stored in another collection to initialize this collection force_recreate: Force recreating the collection **kwargs: Additional arguments passed directly into REST client initialization This is a user-friendly interface that: 1. Creates embeddings, one for each text 2. Initializes the Qdrant database as an in-memory docstore by default (and overridable to a remote docstore) 3. Adds the text embeddings to the Qdrant database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Qdrant from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() qdrant = Qdrant.from_texts(texts, embeddings, "localhost") """ qdrant = cls.construct_instance( texts, embedding, location, url, port, grpc_port, prefer_grpc, https, api_key, prefix, timeout, host, path, collection_name, distance_func, content_payload_key, metadata_payload_key, vector_name, shard_number, replication_factor, write_consistency_factor, on_disk_payload, hnsw_config, optimizers_config, wal_config, quantization_config, init_from, on_disk, force_recreate, **kwargs, ) qdrant.add_texts(texts, metadatas, ids, batch_size) return qdrant @classmethod @sync_call_fallback async def afrom_texts( cls: Type[Qdrant], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[Sequence[str]] = None, location: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = 6333, grpc_port: int = 6334, prefer_grpc: bool = False, https: Optional[bool] = None, api_key: Optional[str] = None, prefix: Optional[str] = None, timeout: Optional[float] = None, host: Optional[str] = None, path: Optional[str] = None, collection_name: Optional[str] = None, distance_func: str = "Cosine", content_payload_key: str = CONTENT_KEY, metadata_payload_key: str = METADATA_KEY, vector_name: Optional[str] = VECTOR_NAME, batch_size: int = 64, shard_number: Optional[int] = None, replication_factor: Optional[int] = None, write_consistency_factor: Optional[int] = None, on_disk_payload: Optional[bool] = None, hnsw_config: Optional[common_types.HnswConfigDiff] = None, optimizers_config: Optional[common_types.OptimizersConfigDiff] = None, wal_config: Optional[common_types.WalConfigDiff] = None, quantization_config: Optional[common_types.QuantizationConfig] = None, init_from: Optional[common_types.InitFrom] = None, on_disk: Optional[bool] = None, force_recreate: bool = False, **kwargs: Any, ) -> Qdrant: """Construct Qdrant wrapper from a list of 
texts. Args: texts: A list of texts to be indexed in Qdrant. embedding: A subclass of `Embeddings`, responsible for text vectorization. metadatas: An optional list of metadata. If provided it has to be of the same length as a list of texts. ids: Optional list of ids to associate with the texts. Ids have to be uuid-like strings. location: If `:memory:` - use in-memory Qdrant instance. If `str` - use it as a `url` parameter. If `None` - fallback to relying on `host` and `port` parameters. url: either host or str of "Optional[scheme], host, Optional[port], Optional[prefix]". Default: `None` port: Port of the REST API interface. Default: 6333 grpc_port: Port of the gRPC interface. Default: 6334 prefer_grpc: If true - use gPRC interface whenever possible in custom methods. Default: False https: If true - use HTTPS(SSL) protocol. Default: None api_key: API key for authentication in Qdrant Cloud. Default: None prefix: If not None - add prefix to the REST URL path. Example: service/v1 will result in http://localhost:6333/service/v1/{qdrant-endpoint} for REST API. Default: None timeout: Timeout for REST and gRPC API requests. Default: 5.0 seconds for REST and unlimited for gRPC host: Host name of Qdrant service. If url and host are None, set to 'localhost'. Default: None path: Path in which the vectors will be stored while using local mode. Default: None collection_name: Name of the Qdrant collection to be used. If not provided, it will be created randomly. Default: None distance_func: Distance function. One of: "Cosine" / "Euclid" / "Dot". Default: "Cosine" content_payload_key: A payload key used to store the content of the document. Default: "page_content" metadata_payload_key: A payload key used to store the metadata of the document. Default: "metadata" vector_name: Name of the vector to be used internally in Qdrant. Default: None batch_size: How many vectors upload per-request. Default: 64 shard_number: Number of shards in collection. Default is 1, minimum is 1. replication_factor: Replication factor for collection. Default is 1, minimum is 1. Defines how many copies of each shard will be created. Have effect only in distributed mode. write_consistency_factor: Write consistency factor for collection. Default is 1, minimum is 1. Defines how many replicas should apply the operation for us to consider it successful. Increasing this number will make the collection more resilient to inconsistencies, but will also make it fail if not enough replicas are available. Does not have any performance impact. Have effect only in distributed mode. on_disk_payload: If true - point`s payload will not be stored in memory. It will be read from the disk every time it is requested. This setting saves RAM by (slightly) increasing the response time. Note: those payload values that are involved in filtering and are indexed - remain in RAM. hnsw_config: Params for HNSW index optimizers_config: Params for optimizer wal_config: Params for Write-Ahead-Log quantization_config: Params for quantization, if None - quantization will be disabled init_from: Use data stored in another collection to initialize this collection force_recreate: Force recreating the collection **kwargs: Additional arguments passed directly into REST client initialization This is a user-friendly interface that: 1. Creates embeddings, one for each text 2. Initializes the Qdrant database as an in-memory docstore by default (and overridable to a remote docstore) 3. 
Adds the text embeddings to the Qdrant database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Qdrant from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() qdrant = await Qdrant.afrom_texts(texts, embeddings, "localhost") """ qdrant = await cls.aconstruct_instance( texts, embedding, location, url, port, grpc_port, prefer_grpc, https, api_key, prefix, timeout, host, path, collection_name, distance_func, content_payload_key, metadata_payload_key, vector_name, shard_number, replication_factor, write_consistency_factor, on_disk_payload, hnsw_config, optimizers_config, wal_config, quantization_config, init_from, on_disk, force_recreate, **kwargs, ) await qdrant.aadd_texts(texts, metadatas, ids, batch_size) return qdrant @classmethod def construct_instance( cls: Type[Qdrant], texts: List[str], embedding: Embeddings, location: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = 6333, grpc_port: int = 6334, prefer_grpc: bool = False, https: Optional[bool] = None, api_key: Optional[str] = None, prefix: Optional[str] = None, timeout: Optional[float] = None, host: Optional[str] = None, path: Optional[str] = None, collection_name: Optional[str] = None, distance_func: str = "Cosine", content_payload_key: str = CONTENT_KEY, metadata_payload_key: str = METADATA_KEY, vector_name: Optional[str] = VECTOR_NAME, shard_number: Optional[int] = None, replication_factor: Optional[int] = None, write_consistency_factor: Optional[int] = None, on_disk_payload: Optional[bool] = None, hnsw_config: Optional[common_types.HnswConfigDiff] = None, optimizers_config: Optional[common_types.OptimizersConfigDiff] = None, wal_config: Optional[common_types.WalConfigDiff] = None, quantization_config: Optional[common_types.QuantizationConfig] = None, init_from: Optional[common_types.InitFrom] = None, on_disk: Optional[bool] = None, force_recreate: bool = False, **kwargs: Any, ) -> Qdrant: try: import qdrant_client except ImportError: raise ValueError( "Could not import qdrant-client python package. " "Please install it with `pip install qdrant-client`." ) from grpc import RpcError from qdrant_client.http import models as rest from qdrant_client.http.exceptions import UnexpectedResponse # Just do a single quick embedding to get vector size partial_embeddings = embedding.embed_documents(texts[:1]) vector_size = len(partial_embeddings[0]) collection_name = collection_name or uuid.uuid4().hex distance_func = distance_func.upper() client = qdrant_client.QdrantClient( location=location, url=url, port=port, grpc_port=grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=api_key, prefix=prefix, timeout=timeout, host=host, path=path, **kwargs, ) try: # Skip any validation in case of forced collection recreate. if force_recreate: raise ValueError # Get the vector configuration of the existing collection and vector, if it # was specified. If the old configuration does not match the current one, # an exception is being thrown. collection_info = client.get_collection(collection_name=collection_name) current_vector_config = collection_info.config.params.vectors if isinstance(current_vector_config, dict) and vector_name is not None: if vector_name not in current_vector_config: raise QdrantException( f"Existing Qdrant collection {collection_name} does not " f"contain vector named {vector_name}. Did you mean one of the " f"existing vectors: {', '.join(current_vector_config.keys())}? 
" f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." ) current_vector_config = current_vector_config.get(vector_name) # type: ignore[assignment] elif isinstance(current_vector_config, dict) and vector_name is None: raise QdrantException( f"Existing Qdrant collection {collection_name} uses named vectors. " f"If you want to reuse it, please set `vector_name` to any of the " f"existing named vectors: " f"{', '.join(current_vector_config.keys())}." # noqa f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." ) elif ( not isinstance(current_vector_config, dict) and vector_name is not None ): raise QdrantException( f"Existing Qdrant collection {collection_name} doesn't use named " f"vectors. If you want to reuse it, please set `vector_name` to " f"`None`. If you want to recreate the collection, set " f"`force_recreate` parameter to `True`." ) # Check if the vector configuration has the same dimensionality. if current_vector_config.size != vector_size: # type: ignore[union-attr] raise QdrantException( f"Existing Qdrant collection is configured for vectors with " f"{current_vector_config.size} " # type: ignore[union-attr] f"dimensions. Selected embeddings are {vector_size}-dimensional. " f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." ) current_distance_func = ( current_vector_config.distance.name.upper() # type: ignore[union-attr] ) if current_distance_func != distance_func: raise QdrantException( f"Existing Qdrant collection is configured for " f"{current_distance_func} similarity, but requested " f"{distance_func}. Please set `distance_func` parameter to " f"`{current_distance_func}` if you want to reuse it. " f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." ) except (UnexpectedResponse, RpcError, ValueError): vectors_config = rest.VectorParams( size=vector_size, distance=rest.Distance[distance_func], on_disk=on_disk, ) # If vector name was provided, we're going to use the named vectors feature # with just a single vector. 
if vector_name is not None: vectors_config = { # type: ignore[assignment] vector_name: vectors_config, } client.recreate_collection( collection_name=collection_name, vectors_config=vectors_config, shard_number=shard_number, replication_factor=replication_factor, write_consistency_factor=write_consistency_factor, on_disk_payload=on_disk_payload, hnsw_config=hnsw_config, optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, init_from=init_from, timeout=timeout, # type: ignore[arg-type] ) qdrant = cls( client=client, collection_name=collection_name, embeddings=embedding, content_payload_key=content_payload_key, metadata_payload_key=metadata_payload_key, distance_strategy=distance_func, vector_name=vector_name, ) return qdrant @classmethod async def aconstruct_instance( cls: Type[Qdrant], texts: List[str], embedding: Embeddings, location: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = 6333, grpc_port: int = 6334, prefer_grpc: bool = False, https: Optional[bool] = None, api_key: Optional[str] = None, prefix: Optional[str] = None, timeout: Optional[float] = None, host: Optional[str] = None, path: Optional[str] = None, collection_name: Optional[str] = None, distance_func: str = "Cosine", content_payload_key: str = CONTENT_KEY, metadata_payload_key: str = METADATA_KEY, vector_name: Optional[str] = VECTOR_NAME, shard_number: Optional[int] = None, replication_factor: Optional[int] = None, write_consistency_factor: Optional[int] = None, on_disk_payload: Optional[bool] = None, hnsw_config: Optional[common_types.HnswConfigDiff] = None, optimizers_config: Optional[common_types.OptimizersConfigDiff] = None, wal_config: Optional[common_types.WalConfigDiff] = None, quantization_config: Optional[common_types.QuantizationConfig] = None, init_from: Optional[common_types.InitFrom] = None, on_disk: Optional[bool] = None, force_recreate: bool = False, **kwargs: Any, ) -> Qdrant: try: import qdrant_client except ImportError: raise ValueError( "Could not import qdrant-client python package. " "Please install it with `pip install qdrant-client`." ) from grpc import RpcError from qdrant_client.http import models as rest from qdrant_client.http.exceptions import UnexpectedResponse # Just do a single quick embedding to get vector size partial_embeddings = await embedding.aembed_documents(texts[:1]) vector_size = len(partial_embeddings[0]) collection_name = collection_name or uuid.uuid4().hex distance_func = distance_func.upper() client = qdrant_client.QdrantClient( location=location, url=url, port=port, grpc_port=grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=api_key, prefix=prefix, timeout=timeout, host=host, path=path, **kwargs, ) try: # Skip any validation in case of forced collection recreate. if force_recreate: raise ValueError # Get the vector configuration of the existing collection and vector, if it # was specified. If the old configuration does not match the current one, # an exception is being thrown. collection_info = client.get_collection(collection_name=collection_name) current_vector_config = collection_info.config.params.vectors if isinstance(current_vector_config, dict) and vector_name is not None: if vector_name not in current_vector_config: raise QdrantException( f"Existing Qdrant collection {collection_name} does not " f"contain vector named {vector_name}. Did you mean one of the " f"existing vectors: {', '.join(current_vector_config.keys())}? 
" f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." ) current_vector_config = current_vector_config.get(vector_name) # type: ignore[assignment] elif isinstance(current_vector_config, dict) and vector_name is None: raise QdrantException( f"Existing Qdrant collection {collection_name} uses named vectors. " f"If you want to reuse it, please set `vector_name` to any of the " f"existing named vectors: " f"{', '.join(current_vector_config.keys())}." # noqa f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." ) elif ( not isinstance(current_vector_config, dict) and vector_name is not None ): raise QdrantException( f"Existing Qdrant collection {collection_name} doesn't use named " f"vectors. If you want to reuse it, please set `vector_name` to " f"`None`. If you want to recreate the collection, set " f"`force_recreate` parameter to `True`." ) # Check if the vector configuration has the same dimensionality. if current_vector_config.size != vector_size: # type: ignore[union-attr] raise QdrantException( f"Existing Qdrant collection is configured for vectors with " f"{current_vector_config.size} " # type: ignore[union-attr] f"dimensions. Selected embeddings are {vector_size}-dimensional. " f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." ) current_distance_func = ( current_vector_config.distance.name.upper() # type: ignore[union-attr] ) if current_distance_func != distance_func: raise QdrantException( f"Existing Qdrant collection is configured for " f"{current_vector_config.distance} " # type: ignore[union-attr] f"similarity. Please set `distance_func` parameter to " f"`{distance_func}` if you want to reuse it. If you want to " f"recreate the collection, set `force_recreate` parameter to " f"`True`." ) except (UnexpectedResponse, RpcError, ValueError): vectors_config = rest.VectorParams( size=vector_size, distance=rest.Distance[distance_func], on_disk=on_disk, ) # If vector name was provided, we're going to use the named vectors feature # with just a single vector. if vector_name is not None: vectors_config = { # type: ignore[assignment] vector_name: vectors_config, } client.recreate_collection( collection_name=collection_name, vectors_config=vectors_config, shard_number=shard_number, replication_factor=replication_factor, write_consistency_factor=write_consistency_factor, on_disk_payload=on_disk_payload, hnsw_config=hnsw_config, optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, init_from=init_from, timeout=timeout, # type: ignore[arg-type] ) qdrant = cls( client=client, collection_name=collection_name, embeddings=embedding, content_payload_key=content_payload_key, metadata_payload_key=metadata_payload_key, distance_strategy=distance_func, vector_name=vector_name, ) return qdrant @staticmethod def _cosine_relevance_score_fn(distance: float) -> float: """Normalize the distance to a score on a scale [0, 1].""" return (distance + 1.0) / 2.0 def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. 
""" if self.distance_strategy == "COSINE": return self._cosine_relevance_score_fn elif self.distance_strategy == "DOT": return self._max_inner_product_relevance_score_fn elif self.distance_strategy == "EUCLID": return self._euclidean_relevance_score_fn else: raise ValueError( "Unknown distance strategy, must be cosine, " "max_inner_product, or euclidean" ) def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores in the range [0, 1]. 0 is dissimilar, 1 is most similar. Args: query: input text k: Number of Documents to return. Defaults to 4. **kwargs: kwargs to be passed to similarity search. Should include: score_threshold: Optional, a floating point value between 0 to 1 to filter the resulting set of retrieved docs Returns: List of Tuples of (doc, similarity_score) """ return self.similarity_search_with_score(query, k, **kwargs) @classmethod def _build_payloads( cls, texts: Iterable[str], metadatas: Optional[List[dict]], content_payload_key: str, metadata_payload_key: str, ) -> List[dict]: payloads = [] for i, text in enumerate(texts): if text is None: raise ValueError( "At least one of the texts is None. Please remove it before " "calling .from_texts or .add_texts on Qdrant instance." ) metadata = metadatas[i] if metadatas is not None else None payloads.append( { content_payload_key: text, metadata_payload_key: metadata, } ) return payloads @classmethod def _document_from_scored_point( cls, scored_point: Any, content_payload_key: str, metadata_payload_key: str, ) -> Document: return Document( page_content=scored_point.payload.get(content_payload_key), metadata=scored_point.payload.get(metadata_payload_key) or {}, ) @classmethod def _document_from_scored_point_grpc( cls, scored_point: Any, content_payload_key: str, metadata_payload_key: str, ) -> Document: from qdrant_client.conversions.conversion import grpc_to_payload payload = grpc_to_payload(scored_point.payload) return Document( page_content=payload[content_payload_key], metadata=payload.get(metadata_payload_key) or {}, ) def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]: from qdrant_client.http import models as rest out = [] if isinstance(value, dict): for _key, value in value.items(): out.extend(self._build_condition(f"{key}.{_key}", value)) elif isinstance(value, list): for _value in value: if isinstance(_value, dict): out.extend(self._build_condition(f"{key}[]", _value)) else: out.extend(self._build_condition(f"{key}", _value)) else: out.append( rest.FieldCondition( key=f"{self.metadata_payload_key}.{key}", match=rest.MatchValue(value=value), ) ) return out def _qdrant_filter_from_dict( self, filter: Optional[DictFilter] ) -> Optional[rest.Filter]: from qdrant_client.http import models as rest if not filter: return None return rest.Filter( must=[ condition for key, value in filter.items() for condition in self._build_condition(key, value) ] ) def _embed_query(self, query: str) -> List[float]: """Embed query text. Used to provide backward compatibility with `embedding_function` argument. Args: query: Query text. Returns: List of floats representing the query embedding. 
""" if self.embeddings is not None: embedding = self.embeddings.embed_query(query) else: if self._embeddings_function is not None: embedding = self._embeddings_function(query) else: raise ValueError("Neither of embeddings or embedding_function is set") return embedding.tolist() if hasattr(embedding, "tolist") else embedding def _embed_texts(self, texts: Iterable[str]) -> List[List[float]]: """Embed search texts. Used to provide backward compatibility with `embedding_function` argument. Args: texts: Iterable of texts to embed. Returns: List of floats representing the texts embedding. """ if self.embeddings is not None: embeddings = self.embeddings.embed_documents(list(texts)) if hasattr(embeddings, "tolist"): embeddings = embeddings.tolist() elif self._embeddings_function is not None: embeddings = [] for text in texts: embedding = self._embeddings_function(text) if hasattr(embeddings, "tolist"): embedding = embedding.tolist() embeddings.append(embedding) else: raise ValueError("Neither of embeddings or embedding_function is set") return embeddings async def _aembed_texts(self, texts: Iterable[str]) -> List[List[float]]: """Embed search texts. Used to provide backward compatibility with `embedding_function` argument. Args: texts: Iterable of texts to embed. Returns: List of floats representing the texts embedding. """ if self.embeddings is not None: embeddings = await self.embeddings.aembed_documents(list(texts)) if hasattr(embeddings, "tolist"): embeddings = embeddings.tolist() elif self._embeddings_function is not None: embeddings = [] for text in texts: embedding = self._embeddings_function(text) if hasattr(embeddings, "tolist"): embedding = embedding.tolist() embeddings.append(embedding) else: raise ValueError("Neither of embeddings or embedding_function is set") return embeddings def _generate_rest_batches( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[Sequence[str]] = None, batch_size: int = 64, ) -> Generator[Tuple[List[str], List[rest.PointStruct]], None, None]: from qdrant_client.http import models as rest texts_iterator = iter(texts) metadatas_iterator = iter(metadatas or []) ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)]) while batch_texts := list(islice(texts_iterator, batch_size)): # Take the corresponding metadata and id for each text in a batch batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None batch_ids = list(islice(ids_iterator, batch_size)) # Generate the embeddings for all the texts in a batch batch_embeddings = self._embed_texts(batch_texts) points = [ rest.PointStruct( id=point_id, vector=vector if self.vector_name is None else {self.vector_name: vector}, payload=payload, ) for point_id, vector, payload in zip( batch_ids, batch_embeddings, self._build_payloads( batch_texts, batch_metadatas, self.content_payload_key, self.metadata_payload_key, ), ) ] yield batch_ids, points async def _agenerate_rest_batches( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[Sequence[str]] = None, batch_size: int = 64, ) -> AsyncGenerator[Tuple[List[str], List[rest.PointStruct]], None]: from qdrant_client.http import models as rest texts_iterator = iter(texts) metadatas_iterator = iter(metadatas or []) ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)]) while batch_texts := list(islice(texts_iterator, batch_size)): # Take the corresponding metadata and id for each text in a batch batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None batch_ids = 
list(islice(ids_iterator, batch_size)) # Generate the embeddings for all the texts in a batch batch_embeddings = await self._aembed_texts(batch_texts) points = [ rest.PointStruct( id=point_id, vector=vector if self.vector_name is None else {self.vector_name: vector}, payload=payload, ) for point_id, vector, payload in zip( batch_ids, batch_embeddings, self._build_payloads( batch_texts, batch_metadatas, self.content_payload_key, self.metadata_payload_key, ), ) ] yield batch_ids, points
[]
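The Qdrant wrapper dumped above documents `from_texts` and the maximal-marginal-relevance search methods in detail, but the record carries no end-to-end example outside its docstrings. Below is a minimal, hedged usage sketch, not part of the dumped file: the texts, metadata, and collection name are invented, and an in-memory Qdrant instance plus an OpenAI API key are assumed.

# Hedged usage sketch (not part of the dumped file): texts, metadata and
# collection name are invented; an in-memory Qdrant instance and an OpenAI
# API key are assumed.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Qdrant

texts = [
    "Qdrant supports named vectors inside a single collection.",
    "Maximal marginal relevance trades off relevance against diversity.",
    "Score thresholds drop weakly matching points from the result set.",
]

qdrant = Qdrant.from_texts(
    texts,
    OpenAIEmbeddings(),
    metadatas=[{"topic": "vectors"}, {"topic": "search"}, {"topic": "search"}],
    location=":memory:",        # local in-process instance, no server required
    collection_name="demo_docs",
)

# fetch_k candidates are retrieved first, then k of them are kept by MMR;
# lambda_mult=0.5 weighs similarity and diversity equally.
docs = qdrant.max_marginal_relevance_search(
    "How does MMR pick results?", k=2, fetch_k=10, lambda_mult=0.5
)
for doc in docs:
    print(doc.metadata["topic"], "-", doc.page_content)

As the docstrings above note, `fetch_k` controls how many candidates are pulled from Qdrant before MMR re-ranks them down to `k`.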
2024-01-10
axgpt/langchain
libs~langchain~langchain~schema~storage.py
from langchain_core.schema.storage import BaseStore

__all__ = ["BaseStore"]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~openai_functions~citation_fuzzy_match.py
from typing import Iterator, List

from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.schema.language_model import BaseLanguageModel
from langchain_core.schema.messages import HumanMessage, SystemMessage

from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
from langchain.output_parsers.openai_functions import (
    PydanticOutputFunctionsParser,
)


class FactWithEvidence(BaseModel):
    """Class representing a single statement.

    Each fact has a body and a list of sources.
    If there are multiple facts make sure to break them apart
    such that each one only uses a set of sources that are relevant to it.
    """

    fact: str = Field(..., description="Body of the sentence, as part of a response")
    substring_quote: List[str] = Field(
        ...,
        description=(
            "Each source should be a direct quote from the context, "
            "as a substring of the original content"
        ),
    )

    def _get_span(self, quote: str, context: str, errs: int = 100) -> Iterator[str]:
        import regex

        minor = quote
        major = context

        errs_ = 0
        s = regex.search(f"({minor}){{e<={errs_}}}", major)
        while s is None and errs_ <= errs:
            errs_ += 1
            s = regex.search(f"({minor}){{e<={errs_}}}", major)

        if s is not None:
            yield from s.spans()

    def get_spans(self, context: str) -> Iterator[str]:
        for quote in self.substring_quote:
            yield from self._get_span(quote, context)


class QuestionAnswer(BaseModel):
    """A question and its answer as a list of facts, each of which should have a source.
    Each sentence contains a body and a list of sources."""

    question: str = Field(..., description="Question that was asked")
    answer: List[FactWithEvidence] = Field(
        ...,
        description=(
            "Body of the answer, each fact should be "
            "its separate object with a body and a list of sources"
        ),
    )


def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
    """Create a citation fuzzy match chain.

    Args:
        llm: Language model to use for the chain.

    Returns:
        Chain (LLMChain) that can be used to answer questions with citations.
    """
    output_parser = PydanticOutputFunctionsParser(pydantic_schema=QuestionAnswer)
    schema = QuestionAnswer.schema()
    function = {
        "name": schema["title"],
        "description": schema["description"],
        "parameters": schema,
    }
    llm_kwargs = get_llm_kwargs(function)
    messages = [
        SystemMessage(
            content=(
                "You are a world class algorithm to answer "
                "questions with correct and exact citations."
            )
        ),
        HumanMessage(content="Answer question using the following context"),
        HumanMessagePromptTemplate.from_template("{context}"),
        HumanMessagePromptTemplate.from_template("Question: {question}"),
        HumanMessage(
            content=(
                "Tips: Make sure to cite your sources, "
                "and use the exact words from the context."
            )
        ),
    ]
    prompt = ChatPromptTemplate(messages=messages)

    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        llm_kwargs=llm_kwargs,
        output_parser=output_parser,
    )
    return chain
[ "{context}", "Question: {question}", "You are a world class algorithm to answer questions with correct and exact citations.", "Answer question using the following context", "Tips: Make sure to cite your sources, and use the exact words from the context." ]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~document_loaders~test_quip.py
from typing import Dict from unittest.mock import MagicMock, patch import pytest from langchain_core.schema import Document from langchain.document_loaders.quip import QuipLoader try: from quip_api.quip import QuipClient # noqa: F401 quip_installed = True except ImportError: quip_installed = False @pytest.fixture def mock_quip(): # type: ignore # mock quip_client with patch("quip_api.quip.QuipClient") as mock_quip: yield mock_quip @pytest.mark.requires("quip_api") class TestQuipLoader: API_URL = "https://example-api.quip.com" DOC_URL_PREFIX = ("https://example.quip.com",) ACCESS_TOKEN = "api_token" MOCK_FOLDER_IDS = ["ABC"] MOCK_THREAD_IDS = ["ABC", "DEF"] def test_quip_loader_initialization(self, mock_quip: MagicMock) -> None: QuipLoader(self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60) mock_quip.assert_called_once_with( access_token=self.ACCESS_TOKEN, base_url=self.API_URL, request_timeout=60 ) def test_quip_loader_load_date_invalid_args(self) -> None: quip_loader = QuipLoader( self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60 ) with pytest.raises( ValueError, match="Must specify at least one among `folder_ids`, `thread_ids` or " "set `include_all`_folders as True", ): quip_loader.load() def test_quip_loader_load_data_by_folder_id(self, mock_quip: MagicMock) -> None: mock_quip.get_folder.side_effect = [ self._get_mock_folder(self.MOCK_FOLDER_IDS[0]) ] mock_quip.get_thread.side_effect = [ self._get_mock_thread(self.MOCK_THREAD_IDS[0]), self._get_mock_thread(self.MOCK_THREAD_IDS[1]), ] quip_loader = self._get_mock_quip_loader(mock_quip) documents = quip_loader.load(folder_ids=[self.MOCK_FOLDER_IDS[0]]) assert mock_quip.get_folder.call_count == 1 assert mock_quip.get_thread.call_count == 2 assert len(documents) == 2 assert all(isinstance(doc, Document) for doc in documents) assert ( documents[0].metadata.get("source") == f"https://example.quip.com/{self.MOCK_THREAD_IDS[0]}" ) assert ( documents[1].metadata.get("source") == f"https://example.quip.com/{self.MOCK_THREAD_IDS[1]}" ) def test_quip_loader_load_data_all_folder(self, mock_quip: MagicMock) -> None: mock_quip.get_authenticated_user.side_effect = [ self._get_mock_authenticated_user() ] mock_quip.get_folder.side_effect = [ self._get_mock_folder(self.MOCK_FOLDER_IDS[0]), ] mock_quip.get_thread.side_effect = [ self._get_mock_thread(self.MOCK_THREAD_IDS[0]), self._get_mock_thread(self.MOCK_THREAD_IDS[1]), ] quip_loader = self._get_mock_quip_loader(mock_quip) documents = quip_loader.load(include_all_folders=True) assert mock_quip.get_folder.call_count == 1 assert mock_quip.get_thread.call_count == 2 assert len(documents) == 2 assert all(isinstance(doc, Document) for doc in documents) assert ( documents[0].metadata.get("source") == f"https://example.quip.com/{self.MOCK_THREAD_IDS[0]}" ) assert ( documents[1].metadata.get("source") == f"https://example.quip.com/{self.MOCK_THREAD_IDS[1]}" ) def test_quip_loader_load_data_by_thread_id(self, mock_quip: MagicMock) -> None: mock_quip.get_thread.side_effect = [ self._get_mock_thread(self.MOCK_THREAD_IDS[0]), self._get_mock_thread(self.MOCK_THREAD_IDS[1]), ] quip_loader = self._get_mock_quip_loader(mock_quip) documents = quip_loader.load(thread_ids=self.MOCK_THREAD_IDS) assert mock_quip.get_folder.call_count == 0 assert mock_quip.get_thread.call_count == 2 assert len(documents) == 2 assert all(isinstance(doc, Document) for doc in documents) assert ( documents[0].metadata.get("source") == f"https://example.quip.com/{self.MOCK_THREAD_IDS[0]}" ) assert ( 
documents[1].metadata.get("source") == f"https://example.quip.com/{self.MOCK_THREAD_IDS[1]}" ) def _get_mock_quip_loader(self, mock_quip: MagicMock) -> QuipLoader: quip_loader = QuipLoader( self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60 ) quip_loader.quip_client = mock_quip return quip_loader def _get_mock_folder(self, folder_id: str) -> Dict: return { "folder": { "title": "runbook", "creator_id": "testing", "folder_type": "shared", "parent_id": "ABCD", "inherit_mode": "inherit", "color": "manila", "id": f"{folder_id}", "created_usec": 1668405728528904, "updated_usec": 1697356632672453, "link": "https://example.quip.com/YPH9OAR2Eu5", }, "member_ids": [], "children": [ {"thread_id": "ABC"}, {"thread_id": "DEF"}, ], } def _get_mock_thread(self, thread_id: str) -> Dict: return { "thread": { "author_id": "testing", "thread_class": "document", "owning_company_id": "ABC", "id": f"{thread_id}", "created_usec": 1690873126670055, "updated_usec": 1690874891638991, "title": f"Unit Test Doc {thread_id}", "link": f"https://example.quip.com/{thread_id}", "document_id": "ABC", "type": "document", "is_template": False, "is_deleted": False, }, "user_ids": [], "shared_folder_ids": ["ABC"], "expanded_user_ids": ["ABCDEFG"], "invited_user_emails": [], "access_levels": {"ABCD": {"access_level": "OWN"}}, "html": "<h1 id='temp:C:ABCD'>How to write Python Test </h1>", } def _get_mock_authenticated_user(self) -> Dict: return {"shared_folder_ids": self.MOCK_FOLDER_IDS, "id": "Test"}
[]
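The mocked tests above pin down three loading modes for `QuipLoader` (explicit `folder_ids`, explicit `thread_ids`, or `include_all_folders=True`) and the `source` metadata each document carries. A hedged sketch of the corresponding real call follows; the platform URL, access token, and folder id are placeholders and not taken from the dump.

# Hedged sketch (not part of the dumped file): the platform URL, access token
# and folder id are placeholders; the `quip_api` package is assumed installed.
from langchain.document_loaders.quip import QuipLoader

loader = QuipLoader(
    "https://platform.quip.com",      # Quip API base URL
    access_token="<your-quip-api-token>",
    request_timeout=60,
)

# At least one of folder_ids / thread_ids / include_all_folders must be given,
# otherwise load() raises the ValueError asserted in the tests above.
docs = loader.load(folder_ids=["ABC"])
for doc in docs:
    print(doc.metadata["source"], len(doc.page_content))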
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~singlestoredb.py
from __future__ import annotations import json import re from typing import ( Any, Callable, Iterable, List, Optional, Tuple, Type, ) from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore, VectorStoreRetriever from sqlalchemy.pool import QueuePool from langchain.docstore.document import Document from langchain.vectorstores.utils import DistanceStrategy DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.DOT_PRODUCT ORDERING_DIRECTIVE: dict = { DistanceStrategy.EUCLIDEAN_DISTANCE: "", DistanceStrategy.DOT_PRODUCT: "DESC", } class SingleStoreDB(VectorStore): """`SingleStore DB` vector store. The prerequisite for using this class is the installation of the ``singlestoredb`` Python package. The SingleStoreDB vectorstore can be created by providing an embedding function and the relevant parameters for the database connection, connection pool, and optionally, the names of the table and the fields to use. """ def _get_connection(self: SingleStoreDB) -> Any: try: import singlestoredb as s2 except ImportError: raise ImportError( "Could not import singlestoredb python package. " "Please install it with `pip install singlestoredb`." ) return s2.connect(**self.connection_kwargs) def __init__( self, embedding: Embeddings, *, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, table_name: str = "embeddings", content_field: str = "content", metadata_field: str = "metadata", vector_field: str = "vector", pool_size: int = 5, max_overflow: int = 10, timeout: float = 30, **kwargs: Any, ): """Initialize with necessary components. Args: embedding (Embeddings): A text embedding model. distance_strategy (DistanceStrategy, optional): Determines the strategy employed for calculating the distance between vectors in the embedding space. Defaults to DOT_PRODUCT. Available options are: - DOT_PRODUCT: Computes the scalar product of two vectors. This is the default behavior - EUCLIDEAN_DISTANCE: Computes the Euclidean distance between two vectors. This metric considers the geometric distance in the vector space, and might be more suitable for embeddings that rely on spatial relationships. table_name (str, optional): Specifies the name of the table in use. Defaults to "embeddings". content_field (str, optional): Specifies the field to store the content. Defaults to "content". metadata_field (str, optional): Specifies the field to store metadata. Defaults to "metadata". vector_field (str, optional): Specifies the field to store the vector. Defaults to "vector". Following arguments pertain to the connection pool: pool_size (int, optional): Determines the number of active connections in the pool. Defaults to 5. max_overflow (int, optional): Determines the maximum number of connections allowed beyond the pool_size. Defaults to 10. timeout (float, optional): Specifies the maximum wait time in seconds for establishing a connection. Defaults to 30. Following arguments pertain to the database connection: host (str, optional): Specifies the hostname, IP address, or URL for the database connection. The default scheme is "mysql". user (str, optional): Database username. password (str, optional): Database password. port (int, optional): Database port. Defaults to 3306 for non-HTTP connections, 80 for HTTP connections, and 443 for HTTPS connections. database (str, optional): Database name. Additional optional arguments provide further customization over the database connection: pure_python (bool, optional): Toggles the connector mode. 
If True, operates in pure Python mode. local_infile (bool, optional): Allows local file uploads. charset (str, optional): Specifies the character set for string values. ssl_key (str, optional): Specifies the path of the file containing the SSL key. ssl_cert (str, optional): Specifies the path of the file containing the SSL certificate. ssl_ca (str, optional): Specifies the path of the file containing the SSL certificate authority. ssl_cipher (str, optional): Sets the SSL cipher list. ssl_disabled (bool, optional): Disables SSL usage. ssl_verify_cert (bool, optional): Verifies the server's certificate. Automatically enabled if ``ssl_ca`` is specified. ssl_verify_identity (bool, optional): Verifies the server's identity. conv (dict[int, Callable], optional): A dictionary of data conversion functions. credential_type (str, optional): Specifies the type of authentication to use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO. autocommit (bool, optional): Enables autocommits. results_type (str, optional): Determines the structure of the query results: tuples, namedtuples, dicts. results_format (str, optional): Deprecated. This option has been renamed to results_type. Examples: Basic Usage: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import SingleStoreDB vectorstore = SingleStoreDB( OpenAIEmbeddings(), host="https://user:[email protected]:3306/database" ) Advanced Usage: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import SingleStoreDB vectorstore = SingleStoreDB( OpenAIEmbeddings(), distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, host="127.0.0.1", port=3306, user="user", password="password", database="db", table_name="my_custom_table", pool_size=10, timeout=60, ) Using environment variables: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import SingleStoreDB os.environ['SINGLESTOREDB_URL'] = 'me:[email protected]/my_db' vectorstore = SingleStoreDB(OpenAIEmbeddings()) """ self.embedding = embedding self.distance_strategy = distance_strategy self.table_name = self._sanitize_input(table_name) self.content_field = self._sanitize_input(content_field) self.metadata_field = self._sanitize_input(metadata_field) self.vector_field = self._sanitize_input(vector_field) # Pass the rest of the kwargs to the connection. self.connection_kwargs = kwargs # Add program name and version to connection attributes. if "conn_attrs" not in self.connection_kwargs: self.connection_kwargs["conn_attrs"] = dict() self.connection_kwargs["conn_attrs"]["_connector_name"] = "langchain python sdk" self.connection_kwargs["conn_attrs"]["_connector_version"] = "1.0.1" # Create connection pool. 
self.connection_pool = QueuePool( self._get_connection, max_overflow=max_overflow, pool_size=pool_size, timeout=timeout, ) self._create_table() @property def embeddings(self) -> Embeddings: return self.embedding def _sanitize_input(self, input_str: str) -> str: # Remove characters that are not alphanumeric or underscores return re.sub(r"[^a-zA-Z0-9_]", "", input_str) def _select_relevance_score_fn(self) -> Callable[[float], float]: return self._max_inner_product_relevance_score_fn def _create_table(self: SingleStoreDB) -> None: """Create table if it doesn't exist.""" conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute( """CREATE TABLE IF NOT EXISTS {} ({} TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, {} BLOB, {} JSON);""".format( self.table_name, self.content_field, self.vector_field, self.metadata_field, ), ) finally: cur.close() finally: conn.close() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, embeddings: Optional[List[List[float]]] = None, **kwargs: Any, ) -> List[str]: """Add more texts to the vectorstore. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional): Optional pre-generated embeddings. Defaults to None. Returns: List[str]: empty list """ conn = self.connection_pool.connect() try: cur = conn.cursor() try: # Write data to singlestore db for i, text in enumerate(texts): # Use provided values by default or fallback metadata = metadatas[i] if metadatas else {} embedding = ( embeddings[i] if embeddings else self.embedding.embed_documents([text])[0] ) cur.execute( "INSERT INTO {} VALUES (%s, JSON_ARRAY_PACK(%s), %s)".format( self.table_name ), ( text, "[{}]".format(",".join(map(str, embedding))), json.dumps(metadata), ), ) finally: cur.close() finally: conn.close() return [] def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Document]: """Returns the most similar indexed documents to the query text. Uses cosine similarity. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. filter (dict): A dictionary of metadata fields and values to filter by. Returns: List[Document]: A list of documents that are most similar to the query text. Examples: .. code-block:: python from langchain.vectorstores import SingleStoreDB from langchain.embeddings import OpenAIEmbeddings s2 = SingleStoreDB.from_documents( docs, OpenAIEmbeddings(), host="username:password@localhost:3306/database" ) s2.similarity_search("query text", 1, {"metadata_field": "metadata_value"}) """ docs_and_scores = self.similarity_search_with_score( query=query, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Uses cosine similarity. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: A dictionary of metadata fields and values to filter by. Defaults to None. 
Returns: List of Documents most similar to the query and score for each """ # Creates embedding vector from user query embedding = self.embedding.embed_query(query) conn = self.connection_pool.connect() result = [] where_clause: str = "" where_clause_values: List[Any] = [] if filter: where_clause = "WHERE " arguments = [] def build_where_clause( where_clause_values: List[Any], sub_filter: dict, prefix_args: Optional[List[str]] = None, ) -> None: prefix_args = prefix_args or [] for key in sub_filter.keys(): if isinstance(sub_filter[key], dict): build_where_clause( where_clause_values, sub_filter[key], prefix_args + [key] ) else: arguments.append( "JSON_EXTRACT_JSON({}, {}) = %s".format( self.metadata_field, ", ".join(["%s"] * (len(prefix_args) + 1)), ) ) where_clause_values += prefix_args + [key] where_clause_values.append(json.dumps(sub_filter[key])) build_where_clause(where_clause_values, filter) where_clause += " AND ".join(arguments) try: cur = conn.cursor() try: cur.execute( """SELECT {}, {}, {}({}, JSON_ARRAY_PACK(%s)) as __score FROM {} {} ORDER BY __score {} LIMIT %s""".format( self.content_field, self.metadata_field, self.distance_strategy.name if isinstance(self.distance_strategy, DistanceStrategy) else self.distance_strategy, self.vector_field, self.table_name, where_clause, ORDERING_DIRECTIVE[self.distance_strategy], ), ("[{}]".format(",".join(map(str, embedding))),) + tuple(where_clause_values) + (k,), ) for row in cur.fetchall(): doc = Document(page_content=row[0], metadata=row[1]) result.append((doc, float(row[2]))) finally: cur.close() finally: conn.close() return result @classmethod def from_texts( cls: Type[SingleStoreDB], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, table_name: str = "embeddings", content_field: str = "content", metadata_field: str = "metadata", vector_field: str = "vector", pool_size: int = 5, max_overflow: int = 10, timeout: float = 30, **kwargs: Any, ) -> SingleStoreDB: """Create a SingleStoreDB vectorstore from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new table for the embeddings in SingleStoreDB. 3. Adds the documents to the newly created table. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import SingleStoreDB from langchain.embeddings import OpenAIEmbeddings s2 = SingleStoreDB.from_texts( texts, OpenAIEmbeddings(), host="username:password@localhost:3306/database" ) """ instance = cls( embedding, distance_strategy=distance_strategy, table_name=table_name, content_field=content_field, metadata_field=metadata_field, vector_field=vector_field, pool_size=pool_size, max_overflow=max_overflow, timeout=timeout, **kwargs, ) instance.add_texts(texts, metadatas, embedding.embed_documents(texts), **kwargs) return instance # SingleStoreDBRetriever is not needed, but we keep it for backwards compatibility SingleStoreDBRetriever = VectorStoreRetriever
[]
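The SingleStoreDB record documents connection options and metadata filtering at length, and its WHERE-clause builder shows that filters may be nested dictionaries. Below is a hedged sketch tying those pieces together; the connection URL, text, and metadata keys are invented and not part of the dumped file.

# Hedged sketch (not part of the dumped file): the connection URL, text and
# metadata keys are invented; `singlestoredb` and an OpenAI key are assumed.
import os

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB

os.environ["SINGLESTOREDB_URL"] = "user:password@localhost:3306/db"

vectorstore = SingleStoreDB.from_texts(
    ["SingleStore stores embeddings in a BLOB column next to the text."],
    OpenAIEmbeddings(),
    metadatas=[{"source": {"site": "docs", "lang": "en"}}],
    table_name="notes",
)

# Nested filter dicts are flattened into JSON_EXTRACT_JSON lookups by the
# WHERE-clause builder shown in the dumped implementation.
results = vectorstore.similarity_search_with_score(
    "Where are embeddings stored?", k=1, filter={"source": {"lang": "en"}}
)
for doc, score in results:
    print(round(score, 3), doc.page_content)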
2024-01-10
axgpt/langchain
libs~langchain~langchain~callbacks~tracers~log_stream.py
from langchain_core.callbacks.tracers.log_stream import (
    LogEntry,
    LogStreamCallbackHandler,
    RunLog,
    RunLogPatch,
    RunState,
)

__all__ = ["LogEntry", "RunState", "RunLog", "RunLogPatch", "LogStreamCallbackHandler"]
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~document_transformers~test_beautiful_soup_transformer.py
"""Unit tests for beautiful soup document transformer.""" import pytest from langchain_core.schema.document import Document from langchain.document_transformers import BeautifulSoupTransformer @pytest.mark.requires("bs4") def test_transform_empty_html() -> None: bs_transformer = BeautifulSoupTransformer() empty_html = "<html></html>" documents = [Document(page_content=empty_html)] docs_transformed = bs_transformer.transform_documents(documents) assert docs_transformed[0].page_content == "" @pytest.mark.requires("bs4") def test_extract_paragraphs() -> None: bs_transformer = BeautifulSoupTransformer() paragraphs_html = ( "<html><h1>Header</h1><p>First paragraph.</p>" "<p>Second paragraph.</p><h1>Ignore at end</h1></html>" ) documents = [Document(page_content=paragraphs_html)] docs_transformed = bs_transformer.transform_documents(documents) assert docs_transformed[0].page_content == "First paragraph. Second paragraph." @pytest.mark.requires("bs4") def test_strip_whitespace() -> None: bs_transformer = BeautifulSoupTransformer() paragraphs_html = ( "<html><h1>Header</h1><p><span>First</span> paragraph.</p>" "<p>Second paragraph. </p></html>" ) documents = [Document(page_content=paragraphs_html)] docs_transformed = bs_transformer.transform_documents(documents) assert docs_transformed[0].page_content == "First paragraph. Second paragraph." @pytest.mark.requires("bs4") def test_extract_html() -> None: bs_transformer = BeautifulSoupTransformer() paragraphs_html = ( "<html>Begin of html tag" "<h1>Header</h1>" "<p>First paragraph.</p>" "Middle of html tag" "<p>Second paragraph.</p>" "End of html tag" "</html>" ) documents = [Document(page_content=paragraphs_html)] docs_transformed = bs_transformer.transform_documents( documents, tags_to_extract=["html", "p"] ) assert docs_transformed[0].page_content == ( "Begin of html tag " "Header First paragraph. " "Middle of html tag " "Second paragraph. " "End of html tag" ) @pytest.mark.requires("bs4") def test_remove_style() -> None: bs_transformer = BeautifulSoupTransformer() with_style_html = ( "<html><style>my_funky_style</style><p>First paragraph.</p></html>" ) documents = [Document(page_content=with_style_html)] docs_transformed = bs_transformer.transform_documents( documents, tags_to_extract=["html"] ) assert docs_transformed[0].page_content == "First paragraph." @pytest.mark.requires("bs4") def test_remove_nested_tags() -> None: """ If a tag_to_extract is _inside_ an unwanted_tag, it should be removed (e.g. a <p> inside a <table> if <table> is unwanted).) If an unwanted tag is _inside_ a tag_to_extract, it should be removed, but the rest of the tag_to_extract should stay. This means that "unwanted_tags" have a higher "priority" than "tags_to_extract". """ bs_transformer = BeautifulSoupTransformer() with_style_html = ( "<html><style>my_funky_style</style>" "<table><td><p>First paragraph, inside a table.</p></td></table>" "<p>Second paragraph<table><td> with a cell </td></table>.</p>" "</html>" ) documents = [Document(page_content=with_style_html)] docs_transformed = bs_transformer.transform_documents( documents, unwanted_tags=["script", "style", "table"] ) assert docs_transformed[0].page_content == "Second paragraph." 
@pytest.mark.requires("bs4") def test_remove_unwanted_lines() -> None: bs_transformer = BeautifulSoupTransformer() with_lines_html = "<html>\n\n<p>First \n\n paragraph.</p>\n</html>\n\n" documents = [Document(page_content=with_lines_html)] docs_transformed = bs_transformer.transform_documents(documents, remove_lines=True) assert docs_transformed[0].page_content == "First paragraph." @pytest.mark.requires("bs4") def test_do_not_remove_repeated_content() -> None: bs_transformer = BeautifulSoupTransformer() with_lines_html = "<p>1\n1\n1\n1</p>" documents = [Document(page_content=with_lines_html)] docs_transformed = bs_transformer.transform_documents(documents) assert docs_transformed[0].page_content == "1 1 1 1" @pytest.mark.requires("bs4") def test_extract_nested_tags() -> None: bs_transformer = BeautifulSoupTransformer() nested_html = ( "<html><div class='some_style'>" "<p><span>First</span> paragraph.</p>" "<p>Second <div>paragraph.</div></p>" "<p><p>Third paragraph.</p></p>" "</div></html>" ) documents = [Document(page_content=nested_html)] docs_transformed = bs_transformer.transform_documents(documents) assert ( docs_transformed[0].page_content == "First paragraph. Second paragraph. Third paragraph." ) @pytest.mark.requires("bs4") def test_extract_more_nested_tags() -> None: bs_transformer = BeautifulSoupTransformer() nested_html = ( "<html><div class='some_style'>" "<p><span>First</span> paragraph.</p>" "<p>Second paragraph.</p>" "<p>Third paragraph with a list:" "<ul>" "<li>First list item.</li>" "<li>Second list item.</li>" "</ul>" "</p>" "<p>Fourth paragraph.</p>" "</div></html>" ) documents = [Document(page_content=nested_html)] docs_transformed = bs_transformer.transform_documents(documents) assert docs_transformed[0].page_content == ( "First paragraph. Second paragraph. " "Third paragraph with a list: " "First list item. Second list item. " "Fourth paragraph." ) @pytest.mark.requires("bs4") def test_transform_keeps_order() -> None: bs_transformer = BeautifulSoupTransformer() multiple_tags_html = ( "<h1>First heading.</h1>" "<p>First paragraph.</p>" "<h1>Second heading.</h1>" "<p>Second paragraph.</p>" ) documents = [Document(page_content=multiple_tags_html)] # Order of "p" and "h1" in the "tags_to_extract" parameter is NOT important here: # it will keep the order of the original HTML. docs_transformed_p_then_h1 = bs_transformer.transform_documents( documents, tags_to_extract=["p", "h1"] ) assert ( docs_transformed_p_then_h1[0].page_content == "First heading. First paragraph. Second heading. Second paragraph." ) # Recreating `documents` because transform_documents() modifies it. documents = [Document(page_content=multiple_tags_html)] # changing the order of "h1" and "p" in "tags_to_extract" does NOT flip the order # of the extracted tags: docs_transformed_h1_then_p = bs_transformer.transform_documents( documents, tags_to_extract=["h1", "p"] ) assert ( docs_transformed_h1_then_p[0].page_content == "First heading. First paragraph. Second heading. Second paragraph." 
) @pytest.mark.requires("bs4") def test_extracts_href() -> None: bs_transformer = BeautifulSoupTransformer() multiple_tags_html = ( "<h1>First heading.</h1>" "<p>First paragraph with an <a href='http://example.com'>example</a></p>" "<p>Second paragraph with an <a>a tag without href</a></p>" ) documents = [Document(page_content=multiple_tags_html)] docs_transformed = bs_transformer.transform_documents( documents, tags_to_extract=["p"] ) assert docs_transformed[0].page_content == ( "First paragraph with an example (http://example.com) " "Second paragraph with an a tag without href" ) @pytest.mark.requires("bs4") def test_invalid_html() -> None: bs_transformer = BeautifulSoupTransformer() invalid_html_1 = "<html><h1>First heading." invalid_html_2 = "<html 1234 xyz" documents = [ Document(page_content=invalid_html_1), Document(page_content=invalid_html_2), ] docs_transformed = bs_transformer.transform_documents( documents, tags_to_extract=["h1"] ) assert docs_transformed[0].page_content == "First heading." assert docs_transformed[1].page_content == ""
[]
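The tests above pin down BeautifulSoupTransformer's behaviour (tag extraction, unwanted-tag removal, href rendering). For reference, a minimal standalone sketch of the same flow, assuming only that `bs4` is installed; the HTML snippet is made up for illustration:

from langchain_core.schema.document import Document

from langchain.document_transformers import BeautifulSoupTransformer

html = (
    "<html><h1>Title</h1>"
    "<p>Visit <a href='http://example.com'>the site</a> for details.</p>"
    "<table><td>layout noise</td></table></html>"
)
transformer = BeautifulSoupTransformer()
docs = transformer.transform_documents(
    [Document(page_content=html)],
    tags_to_extract=["p"],    # keep only paragraph text; links render as "text (href)"
    unwanted_tags=["table"],  # tables are dropped entirely, as in the nested-tags test
)
print(docs[0].page_content)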
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~chat_models~test_hunyuan.py
from langchain_core.schema.messages import AIMessage, HumanMessage from langchain.chat_models.hunyuan import ChatHunyuan def test_chat_hunyuan() -> None: chat = ChatHunyuan() message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_chat_hunyuan_with_temperature() -> None: chat = ChatHunyuan(temperature=0.6) message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_extra_kwargs() -> None: chat = ChatHunyuan(temperature=0.88, top_p=0.7) assert chat.temperature == 0.88 assert chat.top_p == 0.7
[ "Hello" ]
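For reference, a minimal usage sketch mirroring the integration tests above; it assumes Tencent Hunyuan credentials are configured (e.g. via the environment), and the prompt text is purely illustrative:

from langchain_core.schema.messages import HumanMessage

from langchain.chat_models.hunyuan import ChatHunyuan

chat = ChatHunyuan(temperature=0.6)            # same constructor argument the test exercises
response = chat([HumanMessage(content="Hello")])
print(response.content)                        # an AIMessage with plain string content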
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_models~jinachat.py
"""JinaChat wrapper.""" from __future__ import annotations import logging from typing import ( Any, AsyncIterator, Callable, Dict, Iterator, List, Mapping, Optional, Tuple, Type, Union, ) from langchain_core.pydantic_v1 import Field, root_validator from langchain_core.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, FunctionMessage, HumanMessage, SystemMessage, ) from langchain_core.schema.messages import ( AIMessageChunk, BaseMessageChunk, ChatMessageChunk, HumanMessageChunk, SystemMessageChunk, ) from langchain_core.schema.output import ChatGenerationChunk from langchain_core.utils import get_pydantic_field_names from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import ( BaseChatModel, _agenerate_from_stream, _generate_from_stream, ) from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(llm: JinaChat) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) async def acompletion_with_retry(llm: JinaChat, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) def _convert_delta_to_message_chunk( _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk] ) -> BaseMessageChunk: role = _dict.get("role") content = _dict.get("content") or "" if role == "user" or default_class == HumanMessageChunk: return HumanMessageChunk(content=content) elif role == "assistant" or default_class == AIMessageChunk: return AIMessageChunk(content=content) elif role == "system" or default_class == SystemMessageChunk: return SystemMessageChunk(content=content) elif role or default_class == ChatMessageChunk: return ChatMessageChunk(content=content, role=role) else: return default_class(content=content) def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: role = _dict["role"] if role == "user": return HumanMessage(content=_dict["content"]) elif role == "assistant": content = _dict["content"] or "" return AIMessage(content=content) elif role == "system": return SystemMessage(content=_dict["content"]) else: return ChatMessage(content=_dict["content"], role=role) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": 
message.content} elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} elif isinstance(message, FunctionMessage): message_dict = { "role": "function", "name": message.name, "content": message.content, } else: raise ValueError(f"Got unknown type {message}") if "name" in message.additional_kwargs: message_dict["name"] = message.additional_kwargs["name"] return message_dict class JinaChat(BaseChatModel): """`Jina AI` Chat models API. To use, you should have the ``openai`` python package installed, and the environment variable ``JINACHAT_API_KEY`` set to your API key, which you can generate at https://chat.jina.ai/api. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.chat_models import JinaChat chat = JinaChat() """ @property def lc_secrets(self) -> Dict[str, str]: return {"jinachat_api_key": "JINACHAT_API_KEY"} @classmethod def is_lc_serializable(cls) -> bool: """Return whether this model can be serialized by Langchain.""" return True client: Any #: :meta private: temperature: float = 0.7 """What sampling temperature to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" jinachat_api_key: Optional[str] = None """Base URL path for API requests, leave blank if not using a proxy or service emulator.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to JinaChat completion API. Default is 600 seconds.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" max_tokens: Optional[int] = None """Maximum number of tokens to generate.""" class Config: """Configuration for this pydantic object.""" allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["jinachat_api_key"] = get_from_dict_or_env( values, "jinachat_api_key", "JINACHAT_API_KEY" ) try: import openai except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling JinaChat API.""" return { "request_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "temperature": self.temperature, **self.model_kwargs, } def _create_retry_decorator(self) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(self.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(self, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = self._create_retry_decorator() @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return self.client.create(**kwargs) return _completion_with_retry(**kwargs) def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: overall_token_usage: dict = {} for output in llm_outputs: if output is None: # Happens in streaming continue token_usage = output["token_usage"] for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v return {"token_usage": overall_token_usage} def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs, "stream": True} default_chunk_class = AIMessageChunk for chunk in self.completion_with_retry(messages=message_dicts, **params): delta = chunk["choices"][0]["delta"] chunk = _convert_delta_to_message_chunk(delta, default_chunk_class) default_chunk_class = chunk.__class__ yield ChatGenerationChunk(message=chunk) if run_manager: run_manager.on_llm_new_token(chunk.content) def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: stream_iter = self._stream( messages=messages, stop=stop, run_manager=run_manager, **kwargs ) return _generate_from_stream(stream_iter) message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs} response = self.completion_with_retry(messages=message_dicts, **params) return self._create_chat_result(response) def _create_message_dicts( self, messages: List[BaseMessage], stop: Optional[List[str]] ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: params = dict(self._invocation_params) if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: generations = [] for res in response["choices"]: message = _convert_dict_to_message(res["message"]) gen = ChatGeneration(message=message) generations.append(gen) llm_output = {"token_usage": 
response["usage"]} return ChatResult(generations=generations, llm_output=llm_output) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs, "stream": True} default_chunk_class = AIMessageChunk async for chunk in await acompletion_with_retry( self, messages=message_dicts, **params ): delta = chunk["choices"][0]["delta"] chunk = _convert_delta_to_message_chunk(delta, default_chunk_class) default_chunk_class = chunk.__class__ yield ChatGenerationChunk(message=chunk) if run_manager: await run_manager.on_llm_new_token(chunk.content) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: stream_iter = self._astream( messages=messages, stop=stop, run_manager=run_manager, **kwargs ) return await _agenerate_from_stream(stream_iter) message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs} response = await acompletion_with_retry(self, messages=message_dicts, **params) return self._create_chat_result(response) @property def _invocation_params(self) -> Mapping[str, Any]: """Get the parameters used to invoke the model.""" jinachat_creds: Dict[str, Any] = { "api_key": self.jinachat_api_key, "api_base": "https://api.chat.jina.ai/v1", "model": "jinachat", } return {**jinachat_creds, **self._default_params} @property def _llm_type(self) -> str: """Return type of chat model.""" return "jinachat"
[ "content" ]
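A short usage sketch for the wrapper above. It assumes the `openai` package is installed and `JINACHAT_API_KEY` is set, as the class docstring requires; the translation prompt is illustrative, and streaming is shown because the class implements `_stream`:

from langchain_core.schema import HumanMessage, SystemMessage

from langchain.chat_models import JinaChat

chat = JinaChat(temperature=0.7, max_tokens=256)

messages = [
    SystemMessage(content="You are a helpful assistant that translates English to French."),
    HumanMessage(content="I love programming."),
]
print(chat(messages).content)

# Token-by-token streaming over the same messages.
for chunk in chat.stream(messages):
    print(chunk.content, end="", flush=True)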
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~document_loaders~test_pubmed.py
"""Integration test for PubMed API Wrapper.""" from typing import List import pytest from langchain_core.schema import Document from langchain.document_loaders import PubMedLoader xmltodict = pytest.importorskip("xmltodict") def test_load_success() -> None: """Test that returns the correct answer""" api_client = PubMedLoader(query="chatgpt") docs = api_client.load() print(docs) assert len(docs) == api_client.load_max_docs == 3 assert_docs(docs) def test_load_success_load_max_docs() -> None: """Test that returns the correct answer""" api_client = PubMedLoader(query="chatgpt", load_max_docs=2) docs = api_client.load() print(docs) assert len(docs) == api_client.load_max_docs == 2 assert_docs(docs) def test_load_returns_no_result() -> None: """Test that gives no result.""" api_client = PubMedLoader(query="1605.08386WWW") docs = api_client.load() assert len(docs) == 0 def test_load_no_content() -> None: """Returns a Document without content.""" api_client = PubMedLoader(query="37548971") docs = api_client.load() print(docs) assert len(docs) > 0 assert docs[0].page_content == "" def assert_docs(docs: List[Document]) -> None: for doc in docs: assert doc.metadata assert set(doc.metadata) == { "Copyright Information", "uid", "Title", "Published", }
[]
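A minimal sketch of the loader exercised by these tests; it assumes `xmltodict` is installed and the PubMed API is reachable, and the query string is illustrative:

from langchain.document_loaders import PubMedLoader

loader = PubMedLoader(query="chatgpt", load_max_docs=2)
docs = loader.load()
for doc in docs:
    # Metadata keys checked by assert_docs above: uid, Title, Published, Copyright Information.
    print(doc.metadata["Title"], doc.metadata["Published"])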
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~chat_message_histories~singlestoredb.py
import json import logging import re from typing import ( Any, List, ) from langchain_core.schema import ( BaseChatMessageHistory, ) from langchain_core.schema.messages import ( BaseMessage, _message_to_dict, messages_from_dict, ) logger = logging.getLogger(__name__) class SingleStoreDBChatMessageHistory(BaseChatMessageHistory): """Chat message history stored in a SingleStoreDB database.""" def __init__( self, session_id: str, *, table_name: str = "message_store", id_field: str = "id", session_id_field: str = "session_id", message_field: str = "message", pool_size: int = 5, max_overflow: int = 10, timeout: float = 30, **kwargs: Any, ): """Initialize with necessary components. Args: table_name (str, optional): Specifies the name of the table in use. Defaults to "message_store". id_field (str, optional): Specifies the name of the id field in the table. Defaults to "id". session_id_field (str, optional): Specifies the name of the session_id field in the table. Defaults to "session_id". message_field (str, optional): Specifies the name of the message field in the table. Defaults to "message". Following arguments pertain to the connection pool: pool_size (int, optional): Determines the number of active connections in the pool. Defaults to 5. max_overflow (int, optional): Determines the maximum number of connections allowed beyond the pool_size. Defaults to 10. timeout (float, optional): Specifies the maximum wait time in seconds for establishing a connection. Defaults to 30. Following arguments pertain to the database connection: host (str, optional): Specifies the hostname, IP address, or URL for the database connection. The default scheme is "mysql". user (str, optional): Database username. password (str, optional): Database password. port (int, optional): Database port. Defaults to 3306 for non-HTTP connections, 80 for HTTP connections, and 443 for HTTPS connections. database (str, optional): Database name. Additional optional arguments provide further customization over the database connection: pure_python (bool, optional): Toggles the connector mode. If True, operates in pure Python mode. local_infile (bool, optional): Allows local file uploads. charset (str, optional): Specifies the character set for string values. ssl_key (str, optional): Specifies the path of the file containing the SSL key. ssl_cert (str, optional): Specifies the path of the file containing the SSL certificate. ssl_ca (str, optional): Specifies the path of the file containing the SSL certificate authority. ssl_cipher (str, optional): Sets the SSL cipher list. ssl_disabled (bool, optional): Disables SSL usage. ssl_verify_cert (bool, optional): Verifies the server's certificate. Automatically enabled if ``ssl_ca`` is specified. ssl_verify_identity (bool, optional): Verifies the server's identity. conv (dict[int, Callable], optional): A dictionary of data conversion functions. credential_type (str, optional): Specifies the type of authentication to use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO. autocommit (bool, optional): Enables autocommits. results_type (str, optional): Determines the structure of the query results: tuples, namedtuples, dicts. results_format (str, optional): Deprecated. This option has been renamed to results_type. Examples: Basic Usage: .. code-block:: python from langchain.memory.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="https://user:[email protected]:3306/database" ) Advanced Usage: .. 
code-block:: python from langchain.memory.chat_message_histories import ( SingleStoreDBChatMessageHistory ) message_history = SingleStoreDBChatMessageHistory( session_id="my-session", host="127.0.0.1", port=3306, user="user", password="password", database="db", table_name="my_custom_table", pool_size=10, timeout=60, ) Using environment variables: .. code-block:: python from langchain.memory.chat_message_histories import ( SingleStoreDBChatMessageHistory ) os.environ['SINGLESTOREDB_URL'] = 'me:[email protected]/my_db' message_history = SingleStoreDBChatMessageHistory("my-session") """ self.table_name = self._sanitize_input(table_name) self.session_id = self._sanitize_input(session_id) self.id_field = self._sanitize_input(id_field) self.session_id_field = self._sanitize_input(session_id_field) self.message_field = self._sanitize_input(message_field) # Pass the rest of the kwargs to the connection. self.connection_kwargs = kwargs # Add connection attributes to the connection kwargs. if "conn_attrs" not in self.connection_kwargs: self.connection_kwargs["conn_attrs"] = dict() self.connection_kwargs["conn_attrs"]["_connector_name"] = "langchain python sdk" self.connection_kwargs["conn_attrs"]["_connector_version"] = "1.0.1" # Create a connection pool. try: from sqlalchemy.pool import QueuePool except ImportError: raise ImportError( "Could not import sqlalchemy.pool python package. " "Please install it with `pip install singlestoredb`." ) self.connection_pool = QueuePool( self._get_connection, max_overflow=max_overflow, pool_size=pool_size, timeout=timeout, ) self.table_created = False def _sanitize_input(self, input_str: str) -> str: # Remove characters that are not alphanumeric or underscores return re.sub(r"[^a-zA-Z0-9_]", "", input_str) def _get_connection(self) -> Any: try: import singlestoredb as s2 except ImportError: raise ImportError( "Could not import singlestoredb python package. " "Please install it with `pip install singlestoredb`." 
) return s2.connect(**self.connection_kwargs) def _create_table_if_not_exists(self) -> None: """Create table if it doesn't exist.""" if self.table_created: return conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute( """CREATE TABLE IF NOT EXISTS {} ({} BIGINT PRIMARY KEY AUTO_INCREMENT, {} TEXT NOT NULL, {} JSON NOT NULL);""".format( self.table_name, self.id_field, self.session_id_field, self.message_field, ), ) self.table_created = True finally: cur.close() finally: conn.close() @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from SingleStoreDB""" self._create_table_if_not_exists() conn = self.connection_pool.connect() items = [] try: cur = conn.cursor() try: cur.execute( """SELECT {} FROM {} WHERE {} = %s""".format( self.message_field, self.table_name, self.session_id_field, ), (self.session_id), ) for row in cur.fetchall(): items.append(row[0]) finally: cur.close() finally: conn.close() messages = messages_from_dict(items) return messages def add_message(self, message: BaseMessage) -> None: """Append the message to the record in SingleStoreDB""" self._create_table_if_not_exists() conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute( """INSERT INTO {} ({}, {}) VALUES (%s, %s)""".format( self.table_name, self.session_id_field, self.message_field, ), (self.session_id, json.dumps(_message_to_dict(message))), ) finally: cur.close() finally: conn.close() def clear(self) -> None: """Clear session memory from SingleStoreDB""" self._create_table_if_not_exists() conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute( """DELETE FROM {} WHERE {} = %s""".format( self.table_name, self.session_id_field, ), (self.session_id), ) finally: cur.close() finally: conn.close()
[]
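An end-to-end sketch of the chat history class above, following the environment-variable pattern from its docstring; the connection URL, session id, and messages are placeholders:

import os

from langchain.memory.chat_message_histories import SingleStoreDBChatMessageHistory

# Placeholder DSN; point this at a real SingleStoreDB instance.
os.environ["SINGLESTOREDB_URL"] = "me:[email protected]/my_db"

history = SingleStoreDBChatMessageHistory("my-session", table_name="message_store")
history.add_user_message("Hello!")
history.add_ai_message("Hi, how can I help you today?")
print(history.messages)   # BaseMessage objects read back from the JSON column
history.clear()           # delete this session's rows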
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~tigris.py
from __future__ import annotations import itertools from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple from langchain_core.schema import Document from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore if TYPE_CHECKING: from tigrisdb import TigrisClient from tigrisdb import VectorStore as TigrisVectorStore from tigrisdb.types.filters import Filter as TigrisFilter from tigrisdb.types.vector import Document as TigrisDocument class Tigris(VectorStore): """`Tigris` vector store.""" def __init__(self, client: TigrisClient, embeddings: Embeddings, index_name: str): """Initialize Tigris vector store.""" try: import tigrisdb # noqa: F401 except ImportError: raise ImportError( "Could not import tigrisdb python package. " "Please install it with `pip install tigrisdb`" ) self._embed_fn = embeddings self._vector_store = TigrisVectorStore(client.get_search(), index_name) @property def embeddings(self) -> Embeddings: return self._embed_fn @property def search_index(self) -> TigrisVectorStore: return self._vector_store def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids for documents. Ids will be autogenerated if not provided. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ docs = self._prep_docs(texts, metadatas, ids) result = self.search_index.add_documents(docs) return [r.id for r in result] def similarity_search( self, query: str, k: int = 4, filter: Optional[TigrisFilter] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query.""" docs_with_scores = self.similarity_search_with_score(query, k, filter) return [doc for doc, _ in docs_with_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[TigrisFilter] = None, ) -> List[Tuple[Document, float]]: """Run similarity search with Chroma with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[TigrisFilter]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text with distance in float. 
""" vector = self._embed_fn.embed_query(query) result = self.search_index.similarity_search( vector=vector, k=k, filter_by=filter ) docs: List[Tuple[Document, float]] = [] for r in result: docs.append( ( Document( page_content=r.doc["text"], metadata=r.doc.get("metadata") ), r.score, ) ) return docs @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, client: Optional[TigrisClient] = None, index_name: Optional[str] = None, **kwargs: Any, ) -> Tigris: """Return VectorStore initialized from texts and embeddings.""" if not index_name: raise ValueError("`index_name` is required") if not client: client = TigrisClient() store = cls(client, embedding, index_name) store.add_texts(texts=texts, metadatas=metadatas, ids=ids) return store def _prep_docs( self, texts: Iterable[str], metadatas: Optional[List[dict]], ids: Optional[List[str]], ) -> List[TigrisDocument]: embeddings: List[List[float]] = self._embed_fn.embed_documents(list(texts)) docs: List[TigrisDocument] = [] for t, m, e, _id in itertools.zip_longest( texts, metadatas or [], embeddings or [], ids or [] ): doc: TigrisDocument = { "text": t, "embeddings": e or [], "metadata": m or {}, } if _id: doc["id"] = _id docs.append(doc) return docs
[]
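A wiring sketch for the Tigris store above. It assumes `tigrisdb` is installed and the Tigris connection settings are available in the environment (a `TigrisClient()` with no arguments is created when none is passed), that `Tigris` is importable from `langchain.vectorstores`, and it uses OpenAIEmbeddings purely as an example Embeddings implementation:

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Tigris

store = Tigris.from_texts(
    texts=["Tigris is a serverless NoSQL database", "LangChain composes LLM applications"],
    embedding=OpenAIEmbeddings(),
    index_name="my_index",   # required: from_texts raises without it
)
docs = store.similarity_search("What is Tigris?", k=1)
print(docs[0].page_content)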
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~wikipedia.py
from typing import List from langchain_core.schema import BaseRetriever, Document from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.utilities.wikipedia import WikipediaAPIWrapper class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper): """`Wikipedia API` retriever. It wraps load() to get_relevant_documents(). It uses all WikipediaAPIWrapper arguments without any change. """ def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: return self.load(query=query)
[]
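A quick sketch of the retriever above; because it subclasses WikipediaAPIWrapper, it takes the wrapper's arguments directly (e.g. `top_k_results`) and needs the `wikipedia` package plus network access:

from langchain.retrievers import WikipediaRetriever

retriever = WikipediaRetriever(top_k_results=2)
docs = retriever.get_relevant_documents("Large language model")
for doc in docs:
    print(doc.metadata.get("title"), "-", doc.page_content[:80])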
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~tfidf.py
from __future__ import annotations import pickle from pathlib import Path from typing import Any, Dict, Iterable, List, Optional from langchain_core.schema import BaseRetriever, Document from langchain.callbacks.manager import CallbackManagerForRetrieverRun class TFIDFRetriever(BaseRetriever): """`TF-IDF` retriever. Largely based on https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb """ vectorizer: Any """TF-IDF vectorizer.""" docs: List[Document] """Documents.""" tfidf_array: Any """TF-IDF array.""" k: int = 4 """Number of documents to return.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @classmethod def from_texts( cls, texts: Iterable[str], metadatas: Optional[Iterable[dict]] = None, tfidf_params: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> TFIDFRetriever: try: from sklearn.feature_extraction.text import TfidfVectorizer except ImportError: raise ImportError( "Could not import scikit-learn, please install with `pip install " "scikit-learn`." ) tfidf_params = tfidf_params or {} vectorizer = TfidfVectorizer(**tfidf_params) tfidf_array = vectorizer.fit_transform(texts) metadatas = metadatas or ({} for _ in texts) docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)] return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array, **kwargs) @classmethod def from_documents( cls, documents: Iterable[Document], *, tfidf_params: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> TFIDFRetriever: texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents)) return cls.from_texts( texts=texts, tfidf_params=tfidf_params, metadatas=metadatas, **kwargs ) def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: from sklearn.metrics.pairwise import cosine_similarity query_vec = self.vectorizer.transform( [query] ) # Ip -- (n_docs,x), Op -- (n_docs,n_Feats) results = cosine_similarity(self.tfidf_array, query_vec).reshape( (-1,) ) # Op -- (n_docs,1) -- Cosine Sim with each doc return_docs = [self.docs[i] for i in results.argsort()[-self.k :][::-1]] return return_docs def save_local( self, folder_path: str, file_name: str = "tfidf_vectorizer", ) -> None: try: import joblib except ImportError: raise ImportError( "Could not import joblib, please install with `pip install joblib`." ) path = Path(folder_path) path.mkdir(exist_ok=True, parents=True) # Save vectorizer with joblib dump. joblib.dump(self.vectorizer, path / f"{file_name}.joblib") # Save docs and tfidf array as pickle. with open(path / f"{file_name}.pkl", "wb") as f: pickle.dump((self.docs, self.tfidf_array), f) @classmethod def load_local( cls, folder_path: str, file_name: str = "tfidf_vectorizer", ) -> TFIDFRetriever: try: import joblib except ImportError: raise ImportError( "Could not import joblib, please install with `pip install joblib`." ) path = Path(folder_path) # Load vectorizer with joblib load. vectorizer = joblib.load(path / f"{file_name}.joblib") # Load docs and tfidf array as pickle. with open(path / f"{file_name}.pkl", "rb") as f: docs, tfidf_array = pickle.load(f) return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array)
[]
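A self-contained sketch of the TF-IDF retriever above; the only assumptions are that scikit-learn is installed and, for the persistence round-trip, joblib:

from langchain.retrievers import TFIDFRetriever

retriever = TFIDFRetriever.from_texts(
    ["foo", "bar", "world hello", "foo bar baz"],
    k=2,
)
print(retriever.get_relevant_documents("foo"))

# Persist the fitted vectorizer (joblib) and the docs/array (pickle), then reload.
retriever.save_local("tfidf_index")
restored = TFIDFRetriever.load_local("tfidf_index")
print(restored.get_relevant_documents("foo"))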
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~svm.py
from __future__ import annotations import concurrent.futures from typing import Any, Iterable, List, Optional import numpy as np from langchain_core.schema import BaseRetriever, Document from langchain_core.schema.embeddings import Embeddings from langchain.callbacks.manager import CallbackManagerForRetrieverRun def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray: """ Create an index of embeddings for a list of contexts. Args: contexts: List of contexts to embed. embeddings: Embeddings model to use. Returns: Index of embeddings. """ with concurrent.futures.ThreadPoolExecutor() as executor: return np.array(list(executor.map(embeddings.embed_query, contexts))) class SVMRetriever(BaseRetriever): """`SVM` retriever. Largely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb """ embeddings: Embeddings """Embeddings model to use.""" index: Any """Index of embeddings.""" texts: List[str] """List of texts to index.""" metadatas: Optional[List[dict]] = None """List of metadatas corresponding with each text.""" k: int = 4 """Number of results to return.""" relevancy_threshold: Optional[float] = None """Threshold for relevancy.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @classmethod def from_texts( cls, texts: List[str], embeddings: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> SVMRetriever: index = create_index(texts, embeddings) return cls( embeddings=embeddings, index=index, texts=texts, metadatas=metadatas, **kwargs, ) @classmethod def from_documents( cls, documents: Iterable[Document], embeddings: Embeddings, **kwargs: Any, ) -> SVMRetriever: texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents)) return cls.from_texts( texts=texts, embeddings=embeddings, metadatas=metadatas, **kwargs ) def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: try: from sklearn import svm except ImportError: raise ImportError( "Could not import scikit-learn, please install with `pip install " "scikit-learn`." ) query_embeds = np.array(self.embeddings.embed_query(query)) x = np.concatenate([query_embeds[None, ...], self.index]) y = np.zeros(x.shape[0]) y[0] = 1 clf = svm.LinearSVC( class_weight="balanced", verbose=False, max_iter=10000, tol=1e-6, C=0.1 ) clf.fit(x, y) similarities = clf.decision_function(x) sorted_ix = np.argsort(-similarities) # svm.LinearSVC in scikit-learn is non-deterministic. # if a text is the same as a query, there is no guarantee # the query will be in the first index. # this performs a simple swap, this works because anything # left of the 0 should be equivalent. zero_index = np.where(sorted_ix == 0)[0][0] if zero_index != 0: sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0] denominator = np.max(similarities) - np.min(similarities) + 1e-6 normalized_similarities = (similarities - np.min(similarities)) / denominator top_k_results = [] for row in sorted_ix[1 : self.k + 1]: if ( self.relevancy_threshold is None or normalized_similarities[row] >= self.relevancy_threshold ): metadata = self.metadatas[row - 1] if self.metadatas else {} doc = Document(page_content=self.texts[row - 1], metadata=metadata) top_k_results.append(doc) return top_k_results
[]
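A sketch of the SVM retriever above. To stay runnable without API keys it uses langchain's FakeEmbeddings (random vectors, so the ranking is meaningless; substitute a real embedding model in practice); scikit-learn is required, and the FakeEmbeddings choice is an assumption made for illustration only:

from langchain.embeddings import FakeEmbeddings  # stand-in embeddings for illustration
from langchain.retrievers import SVMRetriever

retriever = SVMRetriever.from_texts(
    ["foo", "bar", "world hello", "foo bar baz"],
    FakeEmbeddings(size=256),
    k=2,
)
# A LinearSVC is fit per query, with the query embedding as the single positive example.
print(retriever.get_relevant_documents("foo"))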
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~memory~test_neo4j.py
import json

from langchain_core.schema.messages import _message_to_dict

from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import Neo4jChatMessageHistory


def test_memory_with_message_store() -> None:
    """Test the memory with a message store."""
    # setup Neo4j as a message store
    message_history = Neo4jChatMessageHistory(session_id="test-session")
    memory = ConversationBufferMemory(
        memory_key="baz", chat_memory=message_history, return_messages=True
    )

    # add some messages
    memory.chat_memory.add_ai_message("This is me, the AI")
    memory.chat_memory.add_user_message("This is me, the human")

    # get the message history from the memory store and turn it into JSON
    messages = memory.chat_memory.messages
    messages_json = json.dumps([_message_to_dict(msg) for msg in messages])

    assert "This is me, the AI" in messages_json
    assert "This is me, the human" in messages_json

    # remove the record from Neo4j, so the next test run won't pick it up
    memory.chat_memory.clear()

    assert memory.chat_memory.messages == []
[]
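Condensed from the test above, a usage sketch for Neo4jChatMessageHistory; the test constructs it with only a session id, so the Neo4j connection details are assumed to come from defaults or environment configuration:

from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import Neo4jChatMessageHistory

history = Neo4jChatMessageHistory(session_id="demo-session")
memory = ConversationBufferMemory(chat_memory=history, return_messages=True)

memory.chat_memory.add_user_message("This is me, the human")
memory.chat_memory.add_ai_message("This is me, the AI")
print(memory.chat_memory.messages)

memory.chat_memory.clear()   # remove the session's messages from Neo4j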
2024-01-10
axgpt/langchain
libs~core~langchain_core~prompts~example_selector~semantic_similarity.py
"""Example selector that selects examples based on SemanticSimilarity.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Type from langchain_core.prompts.example_selector.base import BaseExampleSelector from langchain_core.pydantic_v1 import BaseModel, Extra from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore def sorted_values(values: Dict[str, str]) -> List[Any]: """Return a list of values in dict sorted by key.""" return [values[val] for val in sorted(values)] class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel): """Example selector that selects examples based on SemanticSimilarity.""" vectorstore: VectorStore """VectorStore than contains information about examples.""" k: int = 4 """Number of examples to select.""" example_keys: Optional[List[str]] = None """Optional keys to filter examples to.""" input_keys: Optional[List[str]] = None """Optional keys to filter input to. If provided, the search is based on the input variables instead of all variables.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def add_example(self, example: Dict[str, str]) -> str: """Add new example to vectorstore.""" if self.input_keys: string_example = " ".join( sorted_values({key: example[key] for key in self.input_keys}) ) else: string_example = " ".join(sorted_values(example)) ids = self.vectorstore.add_texts([string_example], metadatas=[example]) return ids[0] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on semantic similarity.""" # Get the docs with the highest similarity. if self.input_keys: input_variables = {key: input_variables[key] for key in self.input_keys} query = " ".join(sorted_values(input_variables)) example_docs = self.vectorstore.similarity_search(query, k=self.k) # Get the examples from the metadata. # This assumes that examples are stored in metadata. examples = [dict(e.metadata) for e in example_docs] # If example keys are provided, filter examples to those keys. if self.example_keys: examples = [{k: eg[k] for k in self.example_keys} for eg in examples] return examples @classmethod def from_examples( cls, examples: List[dict], embeddings: Embeddings, vectorstore_cls: Type[VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, **vectorstore_cls_kwargs: Any, ) -> SemanticSimilarityExampleSelector: """Create k-shot example selector using example list and embeddings. Reshuffles examples dynamically based on query similarity. Args: examples: List of examples to use in the prompt. embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings(). vectorstore_cls: A vector store DB interface class, e.g. FAISS. k: Number of examples to select input_keys: If provided, the search is based on the input variables instead of all variables. vectorstore_cls_kwargs: optional kwargs containing url for vector store Returns: The ExampleSelector instantiated, backed by a vector store. 
""" if input_keys: string_examples = [ " ".join(sorted_values({k: eg[k] for k in input_keys})) for eg in examples ] else: string_examples = [" ".join(sorted_values(eg)) for eg in examples] vectorstore = vectorstore_cls.from_texts( string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs ) return cls(vectorstore=vectorstore, k=k, input_keys=input_keys) class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector): """ExampleSelector that selects examples based on Max Marginal Relevance. This was shown to improve performance in this paper: https://arxiv.org/pdf/2211.13892.pdf """ fetch_k: int = 20 """Number of examples to fetch to rerank.""" def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on semantic similarity.""" # Get the docs with the highest similarity. if self.input_keys: input_variables = {key: input_variables[key] for key in self.input_keys} query = " ".join(sorted_values(input_variables)) example_docs = self.vectorstore.max_marginal_relevance_search( query, k=self.k, fetch_k=self.fetch_k ) # Get the examples from the metadata. # This assumes that examples are stored in metadata. examples = [dict(e.metadata) for e in example_docs] # If example keys are provided, filter examples to those keys. if self.example_keys: examples = [{k: eg[k] for k in self.example_keys} for eg in examples] return examples @classmethod def from_examples( cls, examples: List[dict], embeddings: Embeddings, vectorstore_cls: Type[VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, fetch_k: int = 20, **vectorstore_cls_kwargs: Any, ) -> MaxMarginalRelevanceExampleSelector: """Create k-shot example selector using example list and embeddings. Reshuffles examples dynamically based on query similarity. Args: examples: List of examples to use in the prompt. embeddings: An iniialized embedding API interface, e.g. OpenAIEmbeddings(). vectorstore_cls: A vector store DB interface class, e.g. FAISS. k: Number of examples to select input_keys: If provided, the search is based on the input variables instead of all variables. vectorstore_cls_kwargs: optional kwargs containing url for vector store Returns: The ExampleSelector instantiated, backed by a vector store. """ if input_keys: string_examples = [ " ".join(sorted_values({k: eg[k] for k in input_keys})) for eg in examples ] else: string_examples = [" ".join(sorted_values(eg)) for eg in examples] vectorstore = vectorstore_cls.from_texts( string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs ) return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys)
[]
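A few-shot prompting sketch built on the selector above. FAISS and OpenAIEmbeddings are used because the docstring names them as example choices (FAISS needs `faiss-cpu`, OpenAIEmbeddings needs an API key); the antonym examples and the FewShotPromptTemplate wiring are illustrative:

from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain.vectorstores import FAISS

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
    {"input": "energetic", "output": "lethargic"},
]
selector = SemanticSimilarityExampleSelector.from_examples(
    examples, OpenAIEmbeddings(), FAISS, k=1
)
prompt = FewShotPromptTemplate(
    example_selector=selector,
    example_prompt=PromptTemplate(
        input_variables=["input", "output"],
        template="Input: {input}\nOutput: {output}",
    ),
    prefix="Give the antonym of every input.",
    suffix="Input: {adjective}\nOutput:",
    input_variables=["adjective"],
)
print(prompt.format(adjective="joyful"))   # the closest stored example is inlined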
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~router~multi_retrieval_qa.py
"""Use a single chain to route an input to one of multiple retrieval qa chains.""" from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain_core.prompts import PromptTemplate from langchain_core.schema import BaseRetriever from langchain_core.schema.language_model import BaseLanguageModel from langchain.chains import ConversationChain from langchain.chains.base import Chain from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA from langchain.chains.router.base import MultiRouteChain from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser from langchain.chains.router.multi_retrieval_prompt import ( MULTI_RETRIEVAL_ROUTER_TEMPLATE, ) from langchain.chat_models import ChatOpenAI class MultiRetrievalQAChain(MultiRouteChain): """A multi-route chain that uses an LLM router chain to choose amongst retrieval qa chains.""" router_chain: LLMRouterChain """Chain for deciding a destination chain and the input to it.""" destination_chains: Mapping[str, BaseRetrievalQA] """Map of name to candidate chains that inputs can be routed to.""" default_chain: Chain """Default chain to use when router doesn't map input to one of the destinations.""" @property def output_keys(self) -> List[str]: return ["result"] @classmethod def from_retrievers( cls, llm: BaseLanguageModel, retriever_infos: List[Dict[str, Any]], default_retriever: Optional[BaseRetriever] = None, default_prompt: Optional[PromptTemplate] = None, default_chain: Optional[Chain] = None, **kwargs: Any, ) -> MultiRetrievalQAChain: if default_prompt and not default_retriever: raise ValueError( "`default_retriever` must be specified if `default_prompt` is " "provided. Received only `default_prompt`." ) destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos] destinations_str = "\n".join(destinations) router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format( destinations=destinations_str ) router_prompt = PromptTemplate( template=router_template, input_variables=["input"], output_parser=RouterOutputParser(next_inputs_inner_key="query"), ) router_chain = LLMRouterChain.from_llm(llm, router_prompt) destination_chains = {} for r_info in retriever_infos: prompt = r_info.get("prompt") retriever = r_info["retriever"] chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever) name = r_info["name"] destination_chains[name] = chain if default_chain: _default_chain = default_chain elif default_retriever: _default_chain = RetrievalQA.from_llm( llm, prompt=default_prompt, retriever=default_retriever ) else: prompt_template = DEFAULT_TEMPLATE.replace("input", "query") prompt = PromptTemplate( template=prompt_template, input_variables=["history", "query"] ) _default_chain = ConversationChain( llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result" ) return cls( router_chain=router_chain, destination_chains=destination_chains, default_chain=_default_chain, **kwargs, )
[ "input" ]
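A wiring sketch for the router chain above. The two TFIDFRetriever instances are illustrative stand-ins for real retrievers, and ChatOpenAI (also used by the fallback default chain) requires an OpenAI API key:

from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain
from langchain.chat_models import ChatOpenAI
from langchain.retrievers import TFIDFRetriever

# Toy retrievers; in practice these would typically be vector-store retrievers.
docs_retriever = TFIDFRetriever.from_texts(
    ["The service is configured through config.yaml.", "Deployments run via the CLI."]
)
tickets_retriever = TFIDFRetriever.from_texts(
    ["Ticket 42: password reset emails were delayed.", "Ticket 7: login loop on mobile."]
)

retriever_infos = [
    {
        "name": "product docs",
        "description": "Good for questions about configuration and deployment",
        "retriever": docs_retriever,
    },
    {
        "name": "support tickets",
        "description": "Good for questions about past support incidents",
        "retriever": tickets_retriever,
    },
]

chain = MultiRetrievalQAChain.from_retrievers(ChatOpenAI(), retriever_infos)
print(chain.run("How is the service configured?"))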
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~pgvector.py
from __future__ import annotations import asyncio import contextlib import enum import logging import uuid from functools import partial from typing import ( TYPE_CHECKING, Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Type, ) import numpy as np import sqlalchemy from sqlalchemy import delete from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import Session try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.utils import get_from_dict_or_env from langchain.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: from langchain.vectorstores._pgvector_data_models import CollectionStore class DistanceStrategy(str, enum.Enum): """Enumerator of the Distance strategies.""" EUCLIDEAN = "l2" COSINE = "cosine" MAX_INNER_PRODUCT = "inner" DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE Base = declarative_base() # type: Any _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain" class BaseModel(Base): """Base model for the SQL stores.""" __abstract__ = True uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) def _results_to_docs(docs_and_scores: Any) -> List[Document]: """Return docs from docs and scores.""" return [doc for doc, _ in docs_and_scores] class PGVector(VectorStore): """`Postgres`/`PGVector` vector store. To use, you should have the ``pgvector`` python package installed. Args: connection_string: Postgres connection string. embedding_function: Any embedding function implementing `langchain.embeddings.base.Embeddings` interface. collection_name: The name of the collection to use. (default: langchain) NOTE: This is not the name of the table, but the name of the collection. The tables will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. distance_strategy: The distance strategy to use. (default: COSINE) pre_delete_collection: If True, will delete the collection if it exists. (default: False). Useful for testing. engine_args: SQLAlchemy's create engine arguments. Example: .. 
code-block:: python from langchain.vectorstores import PGVector from langchain.embeddings.openai import OpenAIEmbeddings CONNECTION_STRING = "postgresql+psycopg2://hwc@localhost:5432/test3" COLLECTION_NAME = "state_of_the_union_test" embeddings = OpenAIEmbeddings() vectorestore = PGVector.from_documents( embedding=embeddings, documents=docs, collection_name=COLLECTION_NAME, connection_string=CONNECTION_STRING, ) """ def __init__( self, connection_string: str, embedding_function: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, collection_metadata: Optional[dict] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None, relevance_score_fn: Optional[Callable[[float], float]] = None, *, connection: Optional[sqlalchemy.engine.Connection] = None, engine_args: Optional[dict[str, Any]] = None, ) -> None: self.connection_string = connection_string self.embedding_function = embedding_function self.collection_name = collection_name self.collection_metadata = collection_metadata self._distance_strategy = distance_strategy self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self.engine_args = engine_args or {} # Create a connection if not provided, otherwise use the provided connection self._conn = connection if connection else self.connect() self.__post_init__() def __post_init__( self, ) -> None: """Initialize the store.""" self.create_vector_extension() from langchain.vectorstores._pgvector_data_models import ( CollectionStore, EmbeddingStore, ) self.CollectionStore = CollectionStore self.EmbeddingStore = EmbeddingStore self.create_tables_if_not_exists() self.create_collection() def __del__(self) -> None: if self._conn: self._conn.close() @property def embeddings(self) -> Embeddings: return self.embedding_function def connect(self) -> sqlalchemy.engine.Connection: engine = sqlalchemy.create_engine(self.connection_string, **self.engine_args) conn = engine.connect() return conn def create_vector_extension(self) -> None: try: with Session(self._conn) as session: # The advisor lock fixes issue arising from concurrent # creation of the vector extension. 
# https://github.com/langchain-ai/langchain/issues/12933 # For more information see: # https://www.postgresql.org/docs/16/explicit-locking.html#ADVISORY-LOCKS statement = sqlalchemy.text( "BEGIN;" "SELECT pg_advisory_xact_lock(1573678846307946496);" "CREATE EXTENSION IF NOT EXISTS vector;" "COMMIT;" ) session.execute(statement) session.commit() except Exception as e: raise Exception(f"Failed to create vector extension: {e}") from e def create_tables_if_not_exists(self) -> None: with self._conn.begin(): Base.metadata.create_all(self._conn) def drop_tables(self) -> None: with self._conn.begin(): Base.metadata.drop_all(self._conn) def create_collection(self) -> None: if self.pre_delete_collection: self.delete_collection() with Session(self._conn) as session: self.CollectionStore.get_or_create( session, self.collection_name, cmetadata=self.collection_metadata ) def delete_collection(self) -> None: self.logger.debug("Trying to delete collection") with Session(self._conn) as session: collection = self.get_collection(session) if not collection: self.logger.warning("Collection not found") return session.delete(collection) session.commit() @contextlib.contextmanager def _make_session(self) -> Generator[Session, None, None]: """Create a context manager for the session, bind to _conn string.""" yield Session(self._conn) def delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Delete vectors by ids or uuids. Args: ids: List of ids to delete. """ with Session(self._conn) as session: if ids is not None: self.logger.debug( "Trying to delete vectors by ids (represented by the model " "using the custom ids field)" ) stmt = delete(self.EmbeddingStore).where( self.EmbeddingStore.custom_id.in_(ids) ) session.execute(stmt) session.commit() def get_collection(self, session: Session) -> Optional["CollectionStore"]: return self.CollectionStore.get_by_name(session, self.collection_name) @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, connection_string: Optional[str] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] if connection_string is None: connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. 
kwargs: vectorstore specific parameters """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = self.EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, collection_id=collection.uuid, ) session.add(embedding_store) session.commit() return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding_function.embed_documents(list(texts)) return self.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with PGVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each. """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs @property def distance_strategy(self) -> Any: if self._distance_strategy == DistanceStrategy.EUCLIDEAN: return self.EmbeddingStore.embedding.l2_distance elif self._distance_strategy == DistanceStrategy.COSINE: return self.EmbeddingStore.embedding.cosine_distance elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self.EmbeddingStore.embedding.max_inner_product else: raise ValueError( f"Got unexpected value for distance: {self._distance_strategy}. " f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}." 
) def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: results = self.__query_collection(embedding=embedding, k=k, filter=filter) return self._results_to_docs_and_scores(results) def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]: """Return docs and scores from results.""" docs = [ ( Document( page_content=result.EmbeddingStore.document, metadata=result.EmbeddingStore.cmetadata, ), result.distance if self.embedding_function is not None else None, ) for result in results ] return docs def __query_collection( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, ) -> List[Any]: """Query the collection.""" with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") filter_by = self.EmbeddingStore.collection_id == collection.uuid if filter is not None: filter_clauses = [] for key, value in filter.items(): IN = "in" if isinstance(value, dict) and IN in map(str.lower, value): value_case_insensitive = { k.lower(): v for k, v in value.items() } filter_by_metadata = self.EmbeddingStore.cmetadata[ key ].astext.in_(value_case_insensitive[IN]) filter_clauses.append(filter_by_metadata) else: filter_by_metadata = self.EmbeddingStore.cmetadata[ key ].astext == str(value) filter_clauses.append(filter_by_metadata) filter_by = sqlalchemy.and_(filter_by, *filter_clauses) _type = self.EmbeddingStore results: List[Any] = ( session.query( self.EmbeddingStore, self.distance_strategy(embedding).label("distance"), # type: ignore ) .filter(filter_by) .order_by(sqlalchemy.asc("distance")) .join( self.CollectionStore, self.EmbeddingStore.collection_id == self.CollectionStore.uuid, ) .limit(k) .all() ) return results def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return _results_to_docs(docs_and_scores) @classmethod def from_texts( cls: Type[PGVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. 
""" embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """Construct PGVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. Example: .. code-block:: python from langchain.vectorstores import PGVector from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = PGVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_existing_index( cls: Type[PGVector], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Get instance of an existing PGVector store.This method will return the instance of the store without inserting any new embeddings """ connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, ) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="PGVECTOR_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the PGVECTOR_CONNECTION_STRING environment variable." ) return connection_string @classmethod def from_documents( cls: Type[PGVector], documents: List[Document], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. 
""" texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, distance_strategy=distance_strategy, metadatas=metadatas, ids=ids, collection_name=collection_name, **kwargs, ) @classmethod def connection_string_from_db_params( cls, driver: str, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}" def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn # Default strategy is to rely on distance strategy provided # in vectorstore constructor if self._distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self._distance_strategy == DistanceStrategy.EUCLIDEAN: return self._euclidean_relevance_score_fn elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn else: raise ValueError( "No supported normalization function" f" for distance_strategy of {self._distance_strategy}." "Consider providing relevance_score_fn to PGVector constructor." ) def max_marginal_relevance_search_with_score_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs selected using the maximal marginal relevance with score to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of Documents selected by maximal marginal relevance to the query and score for each. """ results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter) embedding_list = [result.EmbeddingStore.embedding for result in results] mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), embedding_list, k=k, lambda_mult=lambda_mult, ) candidates = self._results_to_docs_and_scores(results) return [r for i, r in enumerate(candidates) if i in mmr_selected] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. 
k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents selected by maximal marginal relevance. """ embedding = self.embedding_function.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) def max_marginal_relevance_search_with_score( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs selected using the maximal marginal relevance with score. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of Documents selected by maximal marginal relevance to the query and score for each. """ embedding = self.embedding_function.embed_query(query) docs = self.max_marginal_relevance_search_with_score_by_vector( embedding=embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return docs def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents selected by maximal marginal relevance. """ docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return _results_to_docs(docs_and_scores) async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. 
func = partial( self.max_marginal_relevance_search_by_vector, embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return await asyncio.get_event_loop().run_in_executor(None, func)
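# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of how the store defined above might be used:
# building a collection from texts and running an MMR search. The connection
# parameters, collection name, and texts are illustrative placeholders; a
# running Postgres instance with the pgvector extension and an OpenAI API key
# are assumed.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings

    connection_string = PGVector.connection_string_from_db_params(
        driver="psycopg2",
        host="localhost",
        port=5432,
        database="postgres",
        user="postgres",
        password="postgres",
    )
    store = PGVector.from_texts(
        texts=["cats purr", "dogs bark", "parrots talk"],
        embedding=OpenAIEmbeddings(),
        metadatas=[{"kind": "cat"}, {"kind": "dog"}, {"kind": "bird"}],
        collection_name="animals_demo",
        connection_string=connection_string,
    )
    # MMR first fetches `fetch_k` candidates, then re-ranks them to balance
    # similarity to the query against diversity among the selected documents.
    results = store.max_marginal_relevance_search(
        "household pets", k=2, fetch_k=3, lambda_mult=0.5
    )
    for doc in results:
        print(doc.page_content, doc.metadata)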
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~mlflow_gateway.py
from __future__ import annotations from typing import Any, Iterator, List, Optional from langchain_core.pydantic_v1 import BaseModel from langchain_core.schema.embeddings import Embeddings def _chunk(texts: List[str], size: int) -> Iterator[List[str]]: for i in range(0, len(texts), size): yield texts[i : i + size] class MlflowAIGatewayEmbeddings(Embeddings, BaseModel): """ Wrapper around embeddings LLMs in the MLflow AI Gateway. To use, you should have the ``mlflow[gateway]`` python package installed. For more information, see https://mlflow.org/docs/latest/gateway/index.html. Example: .. code-block:: python from langchain.embeddings import MlflowAIGatewayEmbeddings embeddings = MlflowAIGatewayEmbeddings( gateway_uri="<your-mlflow-ai-gateway-uri>", route="<your-mlflow-ai-gateway-embeddings-route>" ) """ route: str """The route to use for the MLflow AI Gateway API.""" gateway_uri: Optional[str] = None """The URI for the MLflow AI Gateway API.""" def __init__(self, **kwargs: Any): try: import mlflow.gateway except ImportError as e: raise ImportError( "Could not import `mlflow.gateway` module. " "Please install it with `pip install mlflow[gateway]`." ) from e super().__init__(**kwargs) if self.gateway_uri: mlflow.gateway.set_gateway_uri(self.gateway_uri) def _query(self, texts: List[str]) -> List[List[float]]: try: import mlflow.gateway except ImportError as e: raise ImportError( "Could not import `mlflow.gateway` module. " "Please install it with `pip install mlflow[gateway]`." ) from e embeddings = [] for txt in _chunk(texts, 20): resp = mlflow.gateway.query(self.route, data={"text": txt}) embeddings.append(resp["embeddings"]) return embeddings def embed_documents(self, texts: List[str]) -> List[List[float]]: return self._query(texts) def embed_query(self, text: str) -> List[float]: return self._query([text])[0]
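# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of the wrapper defined above. The gateway URI and
# route name are placeholders; a running MLflow AI Gateway exposing an
# embeddings route is assumed. embed_documents sends texts to the route in
# chunks of 20 per request, as implemented in _query above.
if __name__ == "__main__":
    embedder = MlflowAIGatewayEmbeddings(
        gateway_uri="http://127.0.0.1:5000",
        route="embeddings",
    )
    doc_vectors = embedder.embed_documents(["hello world", "goodbye world"])
    query_vector = embedder.embed_query("hello world")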
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~document_loaders~test_xorbits.py
import pytest from langchain_core.schema import Document from langchain.document_loaders import XorbitsLoader try: import xorbits # noqa: F401 xorbits_installed = True except ImportError: xorbits_installed = False @pytest.mark.skipif(not xorbits_installed, reason="xorbits not installed") def test_load_returns_list_of_documents() -> None: import xorbits.pandas as pd data = { "text": ["Hello", "World"], "author": ["Alice", "Bob"], "date": ["2022-01-01", "2022-01-02"], } loader = XorbitsLoader(pd.DataFrame(data)) docs = loader.load() assert isinstance(docs, list) assert all(isinstance(doc, Document) for doc in docs) assert len(docs) == 2 @pytest.mark.skipif(not xorbits_installed, reason="xorbits not installed") def test_load_converts_dataframe_columns_to_document_metadata() -> None: import xorbits.pandas as pd data = { "text": ["Hello", "World"], "author": ["Alice", "Bob"], "date": ["2022-01-01", "2022-01-02"], } loader = XorbitsLoader(pd.DataFrame(data)) docs = loader.load() expected = { "author": ["Alice", "Bob"], "date": ["2022-01-01", "2022-01-02"], } for i, doc in enumerate(docs): assert doc.metadata["author"] == expected["author"][i] assert doc.metadata["date"] == expected["date"][i] @pytest.mark.skipif(not xorbits_installed, reason="xorbits not installed") def test_load_uses_page_content_column_to_create_document_text() -> None: import xorbits.pandas as pd data = { "text": ["Hello", "World"], "author": ["Alice", "Bob"], "date": ["2022-01-01", "2022-01-02"], } sample_data_frame = pd.DataFrame(data) sample_data_frame = sample_data_frame.rename(columns={"text": "dummy_test_column"}) loader = XorbitsLoader(sample_data_frame, page_content_column="dummy_test_column") docs = loader.load() assert docs[0].page_content == "Hello" assert docs[1].page_content == "World"
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~retrievers~test_azure_cognitive_search.py
"""Test Azure Cognitive Search wrapper.""" import pytest from langchain_core.schema import Document from langchain.retrievers.azure_cognitive_search import AzureCognitiveSearchRetriever def test_azure_cognitive_search_get_relevant_documents() -> None: """Test valid call to Azure Cognitive Search.""" retriever = AzureCognitiveSearchRetriever() documents = retriever.get_relevant_documents("what is langchain") for doc in documents: assert isinstance(doc, Document) assert doc.page_content retriever = AzureCognitiveSearchRetriever(top_k=1) documents = retriever.get_relevant_documents("what is langchain") assert len(documents) <= 1 @pytest.mark.asyncio async def test_azure_cognitive_search_aget_relevant_documents() -> None: """Test valid async call to Azure Cognitive Search.""" retriever = AzureCognitiveSearchRetriever() documents = await retriever.aget_relevant_documents("what is langchain") for doc in documents: assert isinstance(doc, Document) assert doc.page_content
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~embaas.py
from typing import Any, Dict, List, Mapping, Optional import requests from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator from langchain_core.schema.embeddings import Embeddings from typing_extensions import NotRequired, TypedDict from langchain.utils import get_from_dict_or_env # Currently supported maximum batch size for embedding requests MAX_BATCH_SIZE = 256 EMBAAS_API_URL = "https://api.embaas.io/v1/embeddings/" class EmbaasEmbeddingsPayload(TypedDict): """Payload for the Embaas embeddings API.""" model: str texts: List[str] instruction: NotRequired[str] class EmbaasEmbeddings(BaseModel, Embeddings): """Embaas's embedding service. To use, you should have the environment variable ``EMBAAS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python # Initialise with default model and instruction from langchain.embeddings import EmbaasEmbeddings emb = EmbaasEmbeddings() # Initialise with custom model and instruction from langchain.embeddings import EmbaasEmbeddings emb_model = "instructor-large" emb_inst = "Represent the Wikipedia document for retrieval" emb = EmbaasEmbeddings( model=emb_model, instruction=emb_inst ) """ model: str = "e5-large-v2" """The model used for embeddings.""" instruction: Optional[str] = None """Instruction used for domain-specific embeddings.""" api_url: str = EMBAAS_API_URL """The URL for the embaas embeddings API.""" embaas_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" embaas_api_key = get_from_dict_or_env( values, "embaas_api_key", "EMBAAS_API_KEY" ) values["embaas_api_key"] = embaas_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying params.""" return {"model": self.model, "instruction": self.instruction} def _generate_payload(self, texts: List[str]) -> EmbaasEmbeddingsPayload: """Generates payload for the API request.""" payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model) if self.instruction: payload["instruction"] = self.instruction return payload def _handle_request(self, payload: EmbaasEmbeddingsPayload) -> List[List[float]]: """Sends a request to the Embaas API and handles the response.""" headers = { "Authorization": f"Bearer {self.embaas_api_key}", "Content-Type": "application/json", } response = requests.post(self.api_url, headers=headers, json=payload) response.raise_for_status() parsed_response = response.json() embeddings = [item["embedding"] for item in parsed_response["data"]] return embeddings def _generate_embeddings(self, texts: List[str]) -> List[List[float]]: """Generate embeddings using the Embaas API.""" payload = self._generate_payload(texts) try: return self._handle_request(payload) except requests.exceptions.RequestException as e: if e.response is None or not e.response.text: raise ValueError(f"Error raised by embaas embeddings API: {e}") parsed_response = e.response.json() if "message" in parsed_response: raise ValueError( "Validation Error raised by embaas embeddings API:" f"{parsed_response['message']}" ) raise def embed_documents(self, texts: List[str]) -> List[List[float]]: """Get embeddings for a list of texts. Args: texts: The list of texts to get embeddings for. Returns: List of embeddings, one for each text. 
""" batches = [ texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE) ] embeddings = [self._generate_embeddings(batch) for batch in batches] # flatten the list of lists into a single list return [embedding for batch in embeddings for embedding in batch] def embed_query(self, text: str) -> List[float]: """Get embeddings for a single text. Args: text: The text to get embeddings for. Returns: List of embeddings. """ return self.embed_documents([text])[0]
[]
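# --- Usage sketch (editor's addition, not part of the original embaas module) ---
# A minimal, hedged example of the EmbaasEmbeddings client defined in the file
# above. A valid EMBAAS_API_KEY in the environment is assumed; the model and
# instruction mirror the class docstring. embed_documents batches requests in
# groups of MAX_BATCH_SIZE texts and flattens the per-batch results.
if __name__ == "__main__":
    emb = EmbaasEmbeddings(
        model="instructor-large",
        instruction="Represent the Wikipedia document for retrieval",
    )
    doc_vectors = emb.embed_documents(["first document", "second document"])
    query_vector = emb.embed_query("a search query")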
2024-01-10
axgpt/langchain
libs~langchain~langchain~llms~deepsparse.py
# flake8: noqa from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union from langchain_core.pydantic_v1 import root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain_core.schema.output import GenerationChunk class DeepSparse(LLM): """Neural Magic DeepSparse LLM interface. To use, you should have the ``deepsparse`` or ``deepsparse-nightly`` python package installed. See https://github.com/neuralmagic/deepsparse This interface let's you deploy optimized LLMs straight from the [SparseZoo](https://sparsezoo.neuralmagic.com/?useCase=text_generation) Example: .. code-block:: python from langchain.llms import DeepSparse llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none") """ # noqa: E501 pipeline: Any #: :meta private: model: str """The path to a model file or directory or the name of a SparseZoo model stub.""" model_config: Optional[Dict[str, Any]] = None """Keyword arguments passed to the pipeline construction. Common parameters are sequence_length, prompt_sequence_length""" generation_config: Union[None, str, Dict] = None """GenerationConfig dictionary consisting of parameters used to control sequences generated for each prompt. Common parameters are: max_length, max_new_tokens, num_return_sequences, output_scores, top_p, top_k, repetition_penalty.""" streaming: bool = False """Whether to stream the results, token by token.""" @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "model": self.model, "model_config": self.model_config, "generation_config": self.generation_config, "streaming": self.streaming, } @property def _llm_type(self) -> str: """Return type of llm.""" return "deepsparse" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that ``deepsparse`` package is installed.""" try: from deepsparse import Pipeline except ImportError: raise ImportError( "Could not import `deepsparse` package. " "Please install it with `pip install deepsparse`" ) model_config = values["model_config"] or {} values["pipeline"] = Pipeline.create( task="text_generation", model_path=values["model"], **model_config, ) return values def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Generate text from a prompt. Args: prompt: The prompt to generate text from. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain.llms import DeepSparse llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none") llm("Tell me a joke.") """ if self.streaming: combined_output = "" for chunk in self._stream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs ): combined_output += chunk.text text = combined_output else: text = ( self.pipeline( sequences=prompt, generation_config=self.generation_config ) .generations[0] .text ) if stop is not None: text = enforce_stop_tokens(text, stop) return text async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Generate text from a prompt. Args: prompt: The prompt to generate text from. 
stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain.llms import DeepSparse llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none") llm("Tell me a joke.") """ if self.streaming: combined_output = "" async for chunk in self._astream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs ): combined_output += chunk.text text = combined_output else: text = ( self.pipeline( sequences=prompt, generation_config=self.generation_config ) .generations[0] .text ) if stop is not None: text = enforce_stop_tokens(text, stop) return text def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like object containing a string token. Example: .. code-block:: python from langchain.llms import DeepSparse llm = DeepSparse( model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none", streaming=True ) for chunk in llm.stream("Tell me a joke", stop=["'","\n"]): print(chunk, end='', flush=True) """ inference = self.pipeline( sequences=prompt, generation_config=self.generation_config, streaming=True ) for token in inference: chunk = GenerationChunk(text=token.generations[0].text) yield chunk if run_manager: run_manager.on_llm_new_token(token=chunk.text) async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: """Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: A dictionary like object containing a string token. Example: .. code-block:: python from langchain.llms import DeepSparse llm = DeepSparse( model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none", streaming=True ) for chunk in llm.stream("Tell me a joke", stop=["'","\n"]): print(chunk, end='', flush=True) """ inference = self.pipeline( sequences=prompt, generation_config=self.generation_config, streaming=True ) for token in inference: chunk = GenerationChunk(text=token.generations[0].text) yield chunk if run_manager: await run_manager.on_llm_new_token(token=chunk.text)
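# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of streaming generation with the class above. The
# SparseZoo model stub is the one used in the docstrings; downloading and
# compiling it with `deepsparse` locally is assumed. The generation_config
# keys follow the common parameters listed on the class (e.g. max_new_tokens).
if __name__ == "__main__":
    llm = DeepSparse(
        model=(
            "zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/"
            "bigpython_bigquery_thepile/base_quant-none"
        ),
        generation_config={"max_new_tokens": 32},
        streaming=True,
    )
    # Print tokens as they arrive instead of waiting for the full completion.
    for chunk in llm.stream("def fibonacci(n):"):
        print(chunk, end="", flush=True)
    print()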
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~callbacks~tracers~root_listeners.py
from langchain_core.callbacks.tracers.root_listeners import RootListenersTracer __all__ = ["RootListenersTracer"]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~combine_documents~stuff.py
"""Chain that combines documents by stuffing into context.""" from typing import Any, Dict, List, Optional, Tuple from langchain_core.prompts.prompt import PromptTemplate from langchain_core.pydantic_v1 import Extra, Field, root_validator from langchain_core.schema import BasePromptTemplate, format_document from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import ( BaseCombineDocumentsChain, ) from langchain.chains.llm import LLMChain from langchain.docstore.document import Document def _get_default_document_prompt() -> PromptTemplate: return PromptTemplate(input_variables=["page_content"], template="{page_content}") class StuffDocumentsChain(BaseCombineDocumentsChain): """Chain that combines documents by stuffing into context. This chain takes a list of documents and first combines them into a single string. It does this by formatting each document into a string with the `document_prompt` and then joining them together with `document_separator`. It then adds that new string to the inputs with the variable name set by `document_variable_name`. Those inputs are then passed to the `llm_chain`. Example: .. code-block:: python from langchain.chains import StuffDocumentsChain, LLMChain from langchain_core.prompts import PromptTemplate from langchain.llms import OpenAI # This controls how each document will be formatted. Specifically, # it will be passed to `format_document` - see that function for more # details. document_prompt = PromptTemplate( input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" llm = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template( "Summarize this content: {context}" ) llm_chain = LLMChain(llm=llm, prompt=prompt) chain = StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name ) """ llm_chain: LLMChain """LLM chain which is called with the formatted document string, along with any other inputs.""" document_prompt: BasePromptTemplate = Field( default_factory=_get_default_document_prompt ) """Prompt to use to format each document, gets passed to `format_document`.""" document_variable_name: str """The variable name in the llm_chain to put the documents in. If only one variable in the llm_chain, this need not be provided.""" document_separator: str = "\n\n" """The string with which to join the formatted documents""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided. If only one variable is present in the llm_chain.prompt, we can infer that the formatted documents should be passed in with this variable name. 
""" llm_chain_variables = values["llm_chain"].prompt.input_variables if "document_variable_name" not in values: if len(llm_chain_variables) == 1: values["document_variable_name"] = llm_chain_variables[0] else: raise ValueError( "document_variable_name must be provided if there are " "multiple llm_chain_variables" ) else: if values["document_variable_name"] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was " f"not found in llm_chain input_variables: {llm_chain_variables}" ) return values @property def input_keys(self) -> List[str]: extra_keys = [ k for k in self.llm_chain.input_keys if k != self.document_variable_name ] return super().input_keys + extra_keys def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict: """Construct inputs from kwargs and docs. Format and the join all the documents together into one input with name `self.document_variable_name`. The pluck any additional variables from **kwargs. Args: docs: List of documents to format and then join into single input **kwargs: additional inputs to chain, will pluck any other required arguments from here. Returns: dictionary of inputs to LLMChain """ # Format each document according to the prompt doc_strings = [format_document(doc, self.document_prompt) for doc in docs] # Join the documents together to put them in the prompt. inputs = { k: v for k, v in kwargs.items() if k in self.llm_chain.prompt.input_variables } inputs[self.document_variable_name] = self.document_separator.join(doc_strings) return inputs def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]: """Return the prompt length given the documents passed in. This can be used by a caller to determine whether passing in a list of documents would exceed a certain prompt length. This useful when trying to ensure that the size of a prompt remains below a certain context limit. Args: docs: List[Document], a list of documents to use to calculate the total prompt length. Returns: Returns None if the method does not depend on the prompt length, otherwise the length of the prompt in tokens. """ inputs = self._get_inputs(docs, **kwargs) prompt = self.llm_chain.prompt.format(**inputs) return self.llm_chain._get_num_tokens(prompt) def combine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Stuff all documents into one prompt and pass to LLM. Args: docs: List of documents to join together into one variable callbacks: Optional callbacks to pass along **kwargs: additional parameters to use to get inputs to LLMChain. Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ inputs = self._get_inputs(docs, **kwargs) # Call predict on the LLM. return self.llm_chain.predict(callbacks=callbacks, **inputs), {} async def acombine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Async stuff all documents into one prompt and pass to LLM. Args: docs: List of documents to join together into one variable callbacks: Optional callbacks to pass along **kwargs: additional parameters to use to get inputs to LLMChain. Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ inputs = self._get_inputs(docs, **kwargs) # Call predict on the LLM. 
return await self.llm_chain.apredict(callbacks=callbacks, **inputs), {} @property def _chain_type(self) -> str: return "stuff_documents_chain"
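# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of combining documents with the chain above.
# FakeListLLM is used so the sketch runs without an API key; swap in a real
# LLM (e.g. OpenAI) for actual summarization. PromptTemplate, LLMChain and
# Document are already imported at the top of this module.
if __name__ == "__main__":
    from langchain.llms.fake import FakeListLLM

    llm_chain = LLMChain(
        llm=FakeListLLM(responses=["A short summary."]),
        prompt=PromptTemplate.from_template("Summarize this content: {context}"),
    )
    chain = StuffDocumentsChain(llm_chain=llm_chain, document_variable_name="context")
    docs = [
        Document(page_content="LangChain composes calls to language models."),
        Document(page_content="StuffDocumentsChain joins documents into one prompt."),
    ]
    # combine_docs formats each document with document_prompt, joins them with
    # document_separator, and passes the result to the LLM chain.
    summary, _ = chain.combine_docs(docs)
    print(summary)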
[ "{page_content}" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~smith~evaluation~runner_utils.py
"""Utilities for running language models or Chains over datasets.""" from __future__ import annotations import functools import inspect import logging import uuid from enum import Enum from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, cast, ) from langchain_core._api import warn_deprecated from langchain_core.runnables import Runnable, RunnableConfig, RunnableLambda from langchain_core.runnables import config as runnable_config from langchain_core.runnables import utils as runnable_utils from langchain_core.schema import ChatResult, LLMResult from langchain_core.schema.language_model import BaseLanguageModel from langchain_core.schema.messages import BaseMessage, messages_from_dict from langsmith.client import Client from langsmith.evaluation import RunEvaluator from langsmith.run_helpers import as_runnable, is_traceable_function from langsmith.schemas import Dataset, DataType, Example from langsmith.utils import LangSmithError from requests import HTTPError from langchain.callbacks.manager import Callbacks from langchain.callbacks.tracers.evaluation import ( EvaluatorCallbackHandler, wait_for_all_evaluators, ) from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.chains.base import Chain from langchain.evaluation.loading import load_evaluator from langchain.evaluation.schema import ( EvaluatorType, PairwiseStringEvaluator, StringEvaluator, ) from langchain.smith import evaluation as smith_eval from langchain.smith.evaluation import config as smith_eval_config from langchain.smith.evaluation import name_generation, progress if TYPE_CHECKING: import pandas as pd logger = logging.getLogger(__name__) MODEL_OR_CHAIN_FACTORY = Union[ Callable[[], Union[Chain, Runnable]], BaseLanguageModel, Callable[[dict], Any], Runnable, Chain, ] MCF = Union[Callable[[], Union[Chain, Runnable]], BaseLanguageModel] class InputFormatError(Exception): """Raised when the input format is invalid.""" ## Shared Utilities class TestResult(dict): """A dictionary of the results of a single test run.""" def get_aggregate_feedback( self, quantiles: Optional[Sequence[float]] = None ) -> pd.DataFrame: """Return quantiles for the feedback scores. This method calculates and prints the quantiles for the feedback scores across all feedback keys. Returns: A DataFrame containing the quantiles for each feedback key. """ df = self.to_dataframe() feedback_cols = [ col for col in df.columns if col not in ["input", "output", "reference"] ] _quantiles = df[feedback_cols].quantile( quantiles or [0.25, 0.5, 0.75], numeric_only=True ) _quantiles.loc["mean"] = df[feedback_cols].mean() _quantiles.loc["mode"] = df[feedback_cols].mode().iloc[0] return _quantiles.transpose() def to_dataframe(self) -> pd.DataFrame: """Convert the results to a dataframe.""" try: import pandas as pd except ImportError as e: raise ImportError( "Pandas is required to convert the results to a dataframe." " to install pandas, run `pip install pandas`." 
) from e indices = [] records = [] for example_id, result in self["results"].items(): feedback = result["feedback"] r = { **{f.key: f.score for f in feedback}, "input": result["input"], "output": result["output"], "execution_time": result["execution_time"], } if "reference" in result: r["reference"] = result["reference"] records.append(r) indices.append(example_id) return pd.DataFrame(records, index=indices) def _wrap_in_chain_factory( llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, dataset_name: str = "<my_dataset>", ) -> MCF: """Forgive the user if they pass in a chain without memory instead of a chain factory. It's a common mistake. Raise a more helpful error message as well.""" if isinstance(llm_or_chain_factory, Chain): chain = llm_or_chain_factory chain_class = chain.__class__.__name__ if llm_or_chain_factory.memory is not None: memory_class = chain.memory.__class__.__name__ raise ValueError( "Cannot directly evaluate a chain with stateful memory." " To evaluate this chain, pass in a chain constructor" " that initializes fresh memory each time it is called." " This will safegaurd against information" " leakage between dataset examples." "\nFor example:\n\n" "def chain_constructor():\n" f" new_memory = {memory_class}(...)\n" f" return {chain_class}" "(memory=new_memory, ...)\n\n" f'run_on_dataset("{dataset_name}", chain_constructor, ...)' ) return lambda: chain elif isinstance(llm_or_chain_factory, BaseLanguageModel): return llm_or_chain_factory elif isinstance(llm_or_chain_factory, Runnable): # Memory may exist here, but it's not elegant to check all those cases. lcf = llm_or_chain_factory return lambda: lcf elif callable(llm_or_chain_factory): if is_traceable_function(llm_or_chain_factory): runnable_ = as_runnable(cast(Callable, llm_or_chain_factory)) return lambda: runnable_ try: _model = llm_or_chain_factory() # type: ignore[call-arg] except TypeError: # It's an arbitrary function, wrap it in a RunnableLambda user_func = cast(Callable, llm_or_chain_factory) sig = inspect.signature(user_func) logger.info(f"Wrapping function {sig} as RunnableLambda.") wrapped = RunnableLambda(user_func) return lambda: wrapped constructor = cast(Callable, llm_or_chain_factory) if isinstance(_model, BaseLanguageModel): # It's not uncommon to do an LLM constructor instead of raw LLM, # so we'll unpack it for the user. return _model elif is_traceable_function(cast(Callable, _model)): runnable_ = as_runnable(cast(Callable, _model)) return lambda: runnable_ elif not isinstance(_model, Runnable): # This is unlikely to happen - a constructor for a model function return lambda: RunnableLambda(constructor) else: # Typical correct case return constructor # noqa return llm_or_chain_factory def _get_prompt(inputs: Dict[str, Any]) -> str: """Get prompt from inputs. Args: inputs: The input dictionary. Returns: A string prompt. Raises: InputFormatError: If the input format is invalid. 
""" if not inputs: raise InputFormatError("Inputs should not be empty.") prompts = [] if "prompt" in inputs: if not isinstance(inputs["prompt"], str): raise InputFormatError( "Expected string for 'prompt', got" f" {type(inputs['prompt']).__name__}" ) prompts = [inputs["prompt"]] elif "prompts" in inputs: if not isinstance(inputs["prompts"], list) or not all( isinstance(i, str) for i in inputs["prompts"] ): raise InputFormatError( "Expected list of strings for 'prompts'," f" got {type(inputs['prompts']).__name__}" ) prompts = inputs["prompts"] elif len(inputs) == 1: prompt_ = next(iter(inputs.values())) if isinstance(prompt_, str): prompts = [prompt_] elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_): prompts = prompt_ else: raise InputFormatError(f"LLM Run expects string prompt input. Got {inputs}") else: raise InputFormatError( f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}" ) if len(prompts) == 1: return prompts[0] else: raise InputFormatError( f"LLM Run expects single prompt input. Got {len(prompts)} prompts." ) def _get_messages(inputs: Dict[str, Any]) -> List[BaseMessage]: """Get Chat Messages from inputs. Args: inputs: The input dictionary. Returns: A list of chat messages. Raises: InputFormatError: If the input format is invalid. """ if not inputs: raise InputFormatError("Inputs should not be empty.") if "messages" in inputs: single_input = inputs["messages"] elif len(inputs) == 1: single_input = next(iter(inputs.values())) else: raise InputFormatError( f"Chat Run expects 'messages' in inputs when example has multiple" f" input keys. Got {inputs}" ) if isinstance(single_input, list) and all( isinstance(i, dict) for i in single_input ): raw_messages = [single_input] elif isinstance(single_input, list) and all( isinstance(i, list) for i in single_input ): raw_messages = single_input else: raise InputFormatError( f"Chat Run expects List[dict] or List[List[dict]] values for" f" 'messages' key input. Got {inputs}" ) if len(raw_messages) == 1: return messages_from_dict(raw_messages[0]) else: raise InputFormatError( f"Chat Run expects single List[dict] or List[List[dict]] 'messages'" f" input. Got {len(raw_messages)} messages from inputs {inputs}" ) ## Shared data validation utilities def _validate_example_inputs_for_language_model( first_example: Example, input_mapper: Optional[Callable[[Dict], Any]], ) -> None: if input_mapper: prompt_input = input_mapper(first_example.inputs) if not isinstance(prompt_input, str) and not ( isinstance(prompt_input, list) and all(isinstance(msg, BaseMessage) for msg in prompt_input) ): raise InputFormatError( "When using an input_mapper to prepare dataset example inputs" " for an LLM or chat model, the output must a single string or" " a list of chat messages." f"\nGot: {prompt_input} of type {type(prompt_input)}." ) else: try: _get_prompt(first_example.inputs) except InputFormatError: try: _get_messages(first_example.inputs) except InputFormatError: raise InputFormatError( "Example inputs do not match language model input format. " "Expected a dictionary with messages or a single prompt." f" Got: {first_example.inputs}" " Please update your dataset OR provide an input_mapper" " to convert the example.inputs to a compatible format" " for the llm or chat model you wish to evaluate." 
) def _validate_example_inputs_for_chain( first_example: Example, chain: Chain, input_mapper: Optional[Callable[[Dict], Any]], ) -> None: """Validate that the example inputs match the chain input keys.""" if input_mapper: first_inputs = input_mapper(first_example.inputs) missing_keys = set(chain.input_keys).difference(first_inputs) if not isinstance(first_inputs, dict): raise InputFormatError( "When using an input_mapper to prepare dataset example" " inputs for a chain, the mapped value must be a dictionary." f"\nGot: {first_inputs} of type {type(first_inputs)}." ) if missing_keys: raise InputFormatError( "Missing keys after loading example using input_mapper." f"\nExpected: {chain.input_keys}. Got: {first_inputs.keys()}" ) else: first_inputs = first_example.inputs missing_keys = set(chain.input_keys).difference(first_inputs) if len(first_inputs) == 1 and len(chain.input_keys) == 1: # We can pass this through the run method. # Refrain from calling to validate. pass elif missing_keys: raise InputFormatError( "Example inputs missing expected chain input keys." " Please provide an input_mapper to convert the example.inputs" " to a compatible format for the chain you wish to evaluate." f"Expected: {chain.input_keys}. " f"Got: {first_inputs.keys()}" ) def _validate_example_inputs( example: Example, llm_or_chain_factory: MCF, input_mapper: Optional[Callable[[Dict], Any]], ) -> None: """Validate that the example inputs are valid for the model.""" if isinstance(llm_or_chain_factory, BaseLanguageModel): _validate_example_inputs_for_language_model(example, input_mapper) else: chain = llm_or_chain_factory() if isinstance(chain, Chain): # Otherwise it's a runnable _validate_example_inputs_for_chain(example, chain, input_mapper) elif isinstance(chain, Runnable): logger.debug(f"Skipping input validation for {chain}") ## Shared Evaluator Setup Utilities def _setup_evaluation( llm_or_chain_factory: MCF, examples: List[Example], evaluation: Optional[smith_eval.RunEvalConfig], data_type: DataType, ) -> Optional[List[RunEvaluator]]: """Configure the evaluators to run on the results of the chain.""" if evaluation: if isinstance(llm_or_chain_factory, BaseLanguageModel): run_inputs, run_outputs = None, None run_type = "llm" else: run_type = "chain" if data_type in (DataType.chat, DataType.llm): val = data_type.value if isinstance(data_type, Enum) else data_type raise ValueError( "Cannot evaluate a chain on dataset with " f"data_type={val}. " "Please specify a dataset with the default 'kv' data type." ) chain = llm_or_chain_factory() run_inputs = chain.input_keys if isinstance(chain, Chain) else None run_outputs = chain.output_keys if isinstance(chain, Chain) else None run_evaluators = _load_run_evaluators( evaluation, run_type, data_type, list(examples[0].outputs) if examples[0].outputs else None, run_inputs, run_outputs, ) else: # TODO: Create a default helpfulness evaluator run_evaluators = None return run_evaluators def _determine_input_key( config: smith_eval.RunEvalConfig, run_inputs: Optional[List[str]], ) -> Optional[str]: input_key = None if config.input_key: input_key = config.input_key if run_inputs and input_key not in run_inputs: logger.warning( f"Input key {input_key} not in chain's specified" f" input keys {run_inputs}. Evaluation behavior may be undefined." ) elif run_inputs and len(run_inputs) == 1: input_key = run_inputs[0] elif run_inputs is not None and len(run_inputs) > 1: logger.warning( f"Chain expects multiple input keys: {run_inputs}," f" Evaluator is likely to fail. 
Evaluation behavior may be undefined." " Specify an input_key in the RunEvalConfig to avoid this warning." ) return input_key def _determine_prediction_key( config: smith_eval.RunEvalConfig, run_outputs: Optional[List[str]], ) -> Optional[str]: prediction_key = None if config.prediction_key: prediction_key = config.prediction_key if run_outputs and prediction_key not in run_outputs: logger.warning( f"Prediction key {prediction_key} not in chain's specified" f" output keys {run_outputs}. Evaluation behavior may be undefined." ) elif run_outputs and len(run_outputs) == 1: prediction_key = run_outputs[0] elif run_outputs is not None and len(run_outputs) > 1: logger.warning( f"Chain expects multiple output keys: {run_outputs}," f" Evaluation behavior may be undefined. Specify a prediction_key" " in the RunEvalConfig to avoid this warning." ) return prediction_key def _determine_reference_key( config: smith_eval.RunEvalConfig, example_outputs: Optional[List[str]], ) -> Optional[str]: if config.reference_key: reference_key = config.reference_key if example_outputs and reference_key not in example_outputs: raise ValueError( f"Reference key {reference_key} not in Dataset" f" example outputs: {example_outputs}" ) elif example_outputs and len(example_outputs) == 1: reference_key = list(example_outputs)[0] else: reference_key = None return reference_key def _construct_run_evaluator( eval_config: Union[EvaluatorType, str, smith_eval_config.EvalConfig], eval_llm: Optional[BaseLanguageModel], run_type: str, data_type: DataType, example_outputs: Optional[List[str]], reference_key: Optional[str], input_key: Optional[str], prediction_key: Optional[str], ) -> RunEvaluator: if isinstance(eval_config, (EvaluatorType, str)): if not isinstance(eval_config, EvaluatorType): eval_config = EvaluatorType(eval_config) evaluator_ = load_evaluator(eval_config, llm=eval_llm) eval_type_tag = eval_config.value else: kwargs = {"llm": eval_llm, **eval_config.get_kwargs()} evaluator_ = load_evaluator(eval_config.evaluator_type, **kwargs) eval_type_tag = eval_config.evaluator_type.value # Override keys if specified in the config if isinstance(eval_config, smith_eval_config.SingleKeyEvalConfig): input_key = eval_config.input_key or input_key prediction_key = eval_config.prediction_key or prediction_key reference_key = eval_config.reference_key or reference_key if isinstance(evaluator_, StringEvaluator): if evaluator_.requires_reference and reference_key is None: raise ValueError( f"Must specify reference_key in smith_eval.RunEvalConfig to use" f" evaluator of type {eval_type_tag} with" f" dataset with multiple output keys: {example_outputs}." ) run_evaluator = smith_eval.StringRunEvaluatorChain.from_run_and_data_type( evaluator_, run_type, data_type, input_key=input_key, prediction_key=prediction_key, reference_key=reference_key, tags=[eval_type_tag], ) elif isinstance(evaluator_, PairwiseStringEvaluator): raise NotImplementedError( f"Run evaluator for {eval_type_tag} is not implemented." " PairwiseStringEvaluators compare the outputs of two different models" " rather than the output of a single model." " Did you mean to use a StringEvaluator instead?" 
"\nSee: https://python.langchain.com/docs/guides/evaluation/string/" ) else: raise NotImplementedError( f"Run evaluator for {eval_type_tag} is not implemented" ) return run_evaluator def _get_keys( config: smith_eval.RunEvalConfig, run_inputs: Optional[List[str]], run_outputs: Optional[List[str]], example_outputs: Optional[List[str]], ) -> Tuple[Optional[str], Optional[str], Optional[str]]: input_key = _determine_input_key(config, run_inputs) prediction_key = _determine_prediction_key(config, run_outputs) reference_key = _determine_reference_key(config, example_outputs) return input_key, prediction_key, reference_key def _load_run_evaluators( config: smith_eval.RunEvalConfig, run_type: str, data_type: DataType, example_outputs: Optional[List[str]], run_inputs: Optional[List[str]], run_outputs: Optional[List[str]], ) -> List[RunEvaluator]: """ Load run evaluators from a configuration. Args: config: Configuration for the run evaluators. Returns: A list of run evaluators. """ run_evaluators = [] input_key, prediction_key, reference_key = None, None, None if ( config.evaluators or any([isinstance(e, EvaluatorType) for e in config.evaluators]) or ( config.custom_evaluators and any([isinstance(e, StringEvaluator) for e in config.custom_evaluators]) ) ): input_key, prediction_key, reference_key = _get_keys( config, run_inputs, run_outputs, example_outputs ) for eval_config in config.evaluators: run_evaluator = _construct_run_evaluator( eval_config, config.eval_llm, run_type, data_type, example_outputs, reference_key, input_key, prediction_key, ) run_evaluators.append(run_evaluator) custom_evaluators = config.custom_evaluators or [] for custom_evaluator in custom_evaluators: if isinstance(custom_evaluator, RunEvaluator): run_evaluators.append(custom_evaluator) elif isinstance(custom_evaluator, StringEvaluator): run_evaluators.append( smith_eval.StringRunEvaluatorChain.from_run_and_data_type( custom_evaluator, run_type, data_type, input_key=input_key, prediction_key=prediction_key, reference_key=reference_key, ) ) else: raise ValueError( f"Unsupported custom evaluator: {custom_evaluator}." f" Expected RunEvaluator or StringEvaluator." ) return run_evaluators ### Async Helpers async def _arun_llm( llm: BaseLanguageModel, inputs: Dict[str, Any], *, tags: Optional[List[str]] = None, callbacks: Callbacks = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[str, BaseMessage]: """Asynchronously run the language model. Args: llm: The language model to run. inputs: The input dictionary. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. input_mapper: Optional function to map inputs to the expected format. Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid. """ if input_mapper is not None: prompt_or_messages = input_mapper(inputs) if isinstance(prompt_or_messages, str): return await llm.apredict( prompt_or_messages, callbacks=callbacks, tags=tags ) elif isinstance(prompt_or_messages, list) and all( isinstance(msg, BaseMessage) for msg in prompt_or_messages ): return await llm.apredict_messages( prompt_or_messages, callbacks=callbacks, tags=tags ) else: raise InputFormatError( "Input mapper returned invalid format" f" {prompt_or_messages}" "\nExpected a single string or list of chat messages." 
) else: try: prompt = _get_prompt(inputs) llm_output: Union[str, BaseMessage] = await llm.apredict( prompt, callbacks=callbacks, tags=tags ) except InputFormatError: messages = _get_messages(inputs) llm_output = await llm.apredict_messages( messages, callbacks=callbacks, tags=tags ) return llm_output async def _arun_chain( chain: Union[Chain, Runnable], inputs: Dict[str, Any], callbacks: Callbacks, *, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[dict, str]: """Run a chain asynchronously on inputs.""" inputs_ = inputs if input_mapper is None else input_mapper(inputs) if ( isinstance(chain, Chain) and isinstance(inputs_, dict) and len(inputs_) == 1 and chain.input_keys ): val = next(iter(inputs_.values())) output = await chain.acall(val, callbacks=callbacks, tags=tags) else: runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks) output = await chain.ainvoke(inputs_, config=runnable_config) return output async def _arun_llm_or_chain( example: Example, config: RunnableConfig, *, llm_or_chain_factory: MCF, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[dict, str, LLMResult, ChatResult]: """Asynchronously run the Chain or language model. Args: example: The example to run. llm_or_chain_factory: The Chain or language model constructor to run. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. input_mapper: Optional function to map the input to the expected format. Returns: A list of outputs. """ chain_or_llm = ( "LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain" ) result = None try: if isinstance(llm_or_chain_factory, BaseLanguageModel): output: Any = await _arun_llm( llm_or_chain_factory, example.inputs, tags=config["tags"], callbacks=config["callbacks"], input_mapper=input_mapper, ) else: chain = llm_or_chain_factory() output = await _arun_chain( chain, example.inputs, tags=config["tags"], callbacks=config["callbacks"], input_mapper=input_mapper, ) result = output except Exception as e: logger.warning( f"{chain_or_llm} failed for example {example.id} " f"with inputs {example.inputs}" f"\n{repr(e)}" ) result = {"Error": repr(e)} return result ## Sync Utilities def _run_llm( llm: BaseLanguageModel, inputs: Dict[str, Any], callbacks: Callbacks, *, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[str, BaseMessage]: """ Run the language model on the example. Args: llm: The language model to run. inputs: The input dictionary. callbacks: The callbacks to use during the run. tags: Optional tags to add to the run. input_mapper: function to map to the inputs dictionary from an Example Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid. """ if input_mapper is not None: prompt_or_messages = input_mapper(inputs) if isinstance(prompt_or_messages, str): llm_output: Union[str, BaseMessage] = llm.predict( prompt_or_messages, callbacks=callbacks, tags=tags ) elif isinstance(prompt_or_messages, list) and all( isinstance(msg, BaseMessage) for msg in prompt_or_messages ): llm_output = llm.predict_messages( prompt_or_messages, callbacks=callbacks, tags=tags ) else: raise InputFormatError( "Input mapper returned invalid format: " f" {prompt_or_messages}" "\nExpected a single string or list of chat messages." 
) else: try: llm_prompts = _get_prompt(inputs) llm_output = llm.predict(llm_prompts, callbacks=callbacks, tags=tags) except InputFormatError: llm_messages = _get_messages(inputs) llm_output = llm.predict_messages(llm_messages, callbacks=callbacks) return llm_output def _run_chain( chain: Union[Chain, Runnable], inputs: Dict[str, Any], callbacks: Callbacks, *, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[Dict, str]: """Run a chain on inputs.""" inputs_ = inputs if input_mapper is None else input_mapper(inputs) if ( isinstance(chain, Chain) and isinstance(inputs_, dict) and len(inputs_) == 1 and chain.input_keys ): val = next(iter(inputs_.values())) output = chain(val, callbacks=callbacks, tags=tags) else: runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks) output = chain.invoke(inputs_, config=runnable_config) return output def _run_llm_or_chain( example: Example, config: RunnableConfig, *, llm_or_chain_factory: MCF, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[dict, str, LLMResult, ChatResult]: """ Run the Chain or language model synchronously. Args: example: The example to run. llm_or_chain_factory: The Chain or language model constructor to run. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. Returns: Union[List[dict], List[str], List[LLMResult], List[ChatResult]]: The outputs of the model or chain. """ chain_or_llm = ( "LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain" ) result = None try: if isinstance(llm_or_chain_factory, BaseLanguageModel): output: Any = _run_llm( llm_or_chain_factory, example.inputs, config["callbacks"], tags=config["tags"], input_mapper=input_mapper, ) else: chain = llm_or_chain_factory() output = _run_chain( chain, example.inputs, config["callbacks"], tags=config["tags"], input_mapper=input_mapper, ) result = output except Exception as e: error_type = type(e).__name__ logger.warning( f"{chain_or_llm} failed for example {example.id} " f"with inputs {example.inputs}" f"\nError Type: {error_type}, Message: {e}" ) result = {"Error": repr(e)} return result ## Public API def _prepare_eval_run( client: Client, dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, project_name: str, project_metadata: Optional[Dict[str, Any]] = None, ) -> Tuple[MCF, str, Dataset, List[Example]]: wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory, dataset_name) dataset = client.read_dataset(dataset_name=dataset_name) try: project = client.create_project( project_name, reference_dataset_id=dataset.id, project_extra={"metadata": project_metadata} if project_metadata else {}, ) except (HTTPError, ValueError, LangSmithError) as e: if "already exists " not in str(e): raise e uid = uuid.uuid4() example_msg = f""" run_on_dataset( ... project_name="{project_name} - {uid}", # Update since {project_name} already exists ) """ raise ValueError( f"Test project {project_name} already exists. 
Please use a different name:" f"\n\n{example_msg}" ) print( f"View the evaluation results for project '{project_name}'" f" at:\n{project.url}?eval=true\n\n" f"View all tests for Dataset {dataset_name} at:\n{dataset.url}", flush=True, ) examples = list(client.list_examples(dataset_id=dataset.id)) if not examples: raise ValueError(f"Dataset {dataset_name} has no example rows.") return wrapped_model, project_name, dataset, examples def _prepare_run_on_dataset( client: Client, dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, project_name: Optional[str], evaluation: Optional[smith_eval.RunEvalConfig] = None, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, concurrency_level: int = 5, project_metadata: Optional[Dict[str, Any]] = None, ) -> Tuple[MCF, str, List[Example], List[RunnableConfig]]: project_name = project_name or name_generation.random_name() wrapped_model, project_name, dataset, examples = _prepare_eval_run( client, dataset_name, llm_or_chain_factory, project_name, project_metadata=project_metadata, ) wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory) run_evaluators = _setup_evaluation( wrapped_model, examples, evaluation, dataset.data_type or DataType.kv ) _validate_example_inputs(examples[0], wrapped_model, input_mapper) progress_bar = progress.ProgressBarCallback(len(examples)) configs = [ RunnableConfig( callbacks=[ LangChainTracer( project_name=project_name, client=client, use_threading=False, example_id=example.id, ), EvaluatorCallbackHandler( evaluators=run_evaluators or [], client=client, example_id=example.id, ), progress_bar, ], tags=tags or [], max_concurrency=concurrency_level, ) for example in examples ] return wrapped_model, project_name, examples, configs def _collect_test_results( examples: List[Example], batch_results: List[Union[dict, str, LLMResult, ChatResult]], configs: List[RunnableConfig], project_name: str, ) -> TestResult: wait_for_all_evaluators() all_eval_results = {} for c in configs: for callback in cast(list, c["callbacks"]): if isinstance(callback, EvaluatorCallbackHandler): eval_results = callback.logged_eval_results all_eval_results.update( {example_id: v for (_, example_id), v in eval_results.items()} ) elif isinstance(callback, LangChainTracer): run = callback.latest_run execution_time = ( (run.end_time - run.start_time).total_seconds() if run and run.end_time else None ) results = {} for example, output in zip(examples, batch_results): feedback = all_eval_results.get(str(example.id), []) results[str(example.id)] = { "output": output, "input": example.inputs, "feedback": feedback, "execution_time": execution_time, } if example.outputs: results[str(example.id)]["reference"] = example.outputs return TestResult( project_name=project_name, results=results, ) _INPUT_MAPPER_DEP_WARNING = ( "The input_mapper argument is deprecated and " "will be removed in a future release. Please add a " " RunnableLambda to your chain to map inputs to the expected format" " instead. 
Example:\n" "def construct_chain():\n" " my_chain = ...\n" " input_mapper = {'other_key': 'MyOtherInput', 'my_input_key': x}\n" " return input_mapper | my_chain\n" "run_on_dataset(..., llm_or_chain_factory=construct_chain)\n" "(See https://api.python.langchain.com/en/latest/schema/" "langchain.schema.runnable.base.RunnableLambda.html)" ) async def arun_on_dataset( client: Optional[Client], dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, *, evaluation: Optional[smith_eval.RunEvalConfig] = None, concurrency_level: int = 5, project_name: Optional[str] = None, project_metadata: Optional[Dict[str, Any]] = None, verbose: bool = False, tags: Optional[List[str]] = None, **kwargs: Any, ) -> Dict[str, Any]: input_mapper = kwargs.pop("input_mapper", None) if input_mapper: warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True) if kwargs: warn_deprecated( "0.0.305", message="The following arguments are deprecated and " "will be removed in a future release: " f"{kwargs.keys()}.", removal="0.0.305", ) client = client or Client() wrapped_model, project_name, examples, configs = _prepare_run_on_dataset( client, dataset_name, llm_or_chain_factory, project_name, evaluation, tags, input_mapper, concurrency_level, project_metadata=project_metadata, ) batch_results = await runnable_utils.gather_with_concurrency( configs[0].get("max_concurrency"), *map( functools.partial( _arun_llm_or_chain, llm_or_chain_factory=wrapped_model, input_mapper=input_mapper, ), examples, configs, ), ) results = _collect_test_results(examples, batch_results, configs, project_name) if verbose: try: agg_feedback = results.get_aggregate_feedback() print("\n Eval quantiles:") print(agg_feedback) except Exception as e: logger.debug(f"Failed to print aggregate feedback: {repr(e)}") return results def run_on_dataset( client: Optional[Client], dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, *, evaluation: Optional[smith_eval.RunEvalConfig] = None, concurrency_level: int = 5, project_name: Optional[str] = None, project_metadata: Optional[Dict[str, Any]] = None, verbose: bool = False, tags: Optional[List[str]] = None, **kwargs: Any, ) -> Dict[str, Any]: input_mapper = kwargs.pop("input_mapper", None) if input_mapper: warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True) if kwargs: warn_deprecated( "0.0.305", message="The following arguments are deprecated and " "will be removed in a future release: " f"{kwargs.keys()}.", removal="0.0.305", ) client = client or Client() wrapped_model, project_name, examples, configs = _prepare_run_on_dataset( client, dataset_name, llm_or_chain_factory, project_name, evaluation, tags, input_mapper, concurrency_level, project_metadata=project_metadata, ) if concurrency_level == 0: batch_results = [ _run_llm_or_chain( example, config, llm_or_chain_factory=wrapped_model, input_mapper=input_mapper, ) for example, config in zip(examples, configs) ] else: with runnable_config.get_executor_for_config(configs[0]) as executor: batch_results = list( executor.map( functools.partial( _run_llm_or_chain, llm_or_chain_factory=wrapped_model, input_mapper=input_mapper, ), examples, configs, ) ) results = _collect_test_results(examples, batch_results, configs, project_name) if verbose: try: agg_feedback = results.get_aggregate_feedback() print("\n Eval quantiles:") print(agg_feedback) except Exception as e: logger.debug(f"Failed to print aggregate feedback: {repr(e)}") return results _RUN_ON_DATASET_DOCSTRING = """ Run the Chain or language model on a 
dataset and store traces to the specified project name.

Args:
    dataset_name: Name of the dataset to run the chain on.
    llm_or_chain_factory: Language model or Chain constructor to run
        over the dataset. The Chain constructor is used to permit
        independent calls on each example without carrying over state.
    evaluation: Configuration for evaluators to run on the
        results of the chain.
    concurrency_level: The number of async tasks to run concurrently.
    project_name: Name of the project to store the traces in.
        Defaults to {dataset_name}-{chain class name}-{datetime}.
    project_metadata: Optional metadata to add to the project.
        Useful for storing information about the test variant
        (prompt version, model version, etc.).
    client: LangSmith client to use to access the dataset and to
        log feedback and run traces.
    verbose: Whether to print progress.
    tags: Tags to add to each run in the project.

Returns:
    A dictionary containing the run's project name and the resulting model outputs.


For the (usually faster) async version of this function, see :func:`arun_on_dataset`.

Examples
--------

.. code-block:: python

    from langsmith import Client
    from langchain.chat_models import ChatOpenAI
    from langchain.chains import LLMChain
    from langchain.smith import RunEvalConfig, run_on_dataset

    # Chains may have memory. Passing in a constructor function lets the
    # evaluation framework avoid cross-contamination between runs.
    def construct_chain():
        llm = ChatOpenAI(temperature=0)
        chain = LLMChain.from_string(
            llm,
            "What's the answer to {your_input_key}"
        )
        return chain

    # Load off-the-shelf evaluators via config or the EvaluatorType (string or enum)
    evaluation_config = RunEvalConfig(
        evaluators=[
            "qa",  # "Correctness" against a reference answer
            "embedding_distance",
            RunEvalConfig.Criteria("helpfulness"),
            RunEvalConfig.Criteria({
                "fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
            }),
        ]
    )

    client = Client()
    run_on_dataset(
        client,
        "<my_dataset_name>",
        construct_chain,
        evaluation=evaluation_config,
    )

You can also create custom evaluators by subclassing the
:class:`StringEvaluator <langchain.evaluation.schema.StringEvaluator>`
or LangSmith's `RunEvaluator` classes.

.. code-block:: python

    from typing import Optional
    from langchain.evaluation import StringEvaluator

    class MyStringEvaluator(StringEvaluator):

        @property
        def requires_input(self) -> bool:
            return False

        @property
        def requires_reference(self) -> bool:
            return True

        @property
        def evaluation_name(self) -> str:
            return "exact_match"

        def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict:
            return {"score": prediction == reference}

    evaluation_config = RunEvalConfig(
        custom_evaluators=[MyStringEvaluator()],
    )

    run_on_dataset(
        client,
        "<my_dataset_name>",
        construct_chain,
        evaluation=evaluation_config,
    )
"""  # noqa: E501

run_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING
arun_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING.replace(
    "run_on_dataset(", "await arun_on_dataset("
)
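
# Illustrative sketch of the RunnableLambda-based replacement for the deprecated
# ``input_mapper`` argument referenced above. The dataset key name ("question"),
# the prompt text, and the model choice are assumptions for this example only.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts import ChatPromptTemplate
    from langchain.schema.runnable import RunnableLambda

    def construct_demo_chain():
        prompt = ChatPromptTemplate.from_template("Answer concisely: {input}")
        llm = ChatOpenAI(temperature=0)
        # Map each dataset example ({"question": ...}) into the dict the prompt
        # expects, instead of passing input_mapper to run_on_dataset.
        to_chain_input = RunnableLambda(lambda example: {"input": example["question"]})
        return to_chain_input | prompt | llm

    # Requires a LangSmith API key and an existing dataset with that name:
    # run_on_dataset(Client(), "<my_dataset_name>", construct_demo_chain)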
[ "['PLACEHOLDER']", "[]" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~epsilla.py
"""Wrapper around Epsilla vector database.""" from __future__ import annotations import logging import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Type from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document if TYPE_CHECKING: from pyepsilla import vectordb logger = logging.getLogger() class Epsilla(VectorStore): """ Wrapper around Epsilla vector database. As a prerequisite, you need to install ``pyepsilla`` package and have a running Epsilla vector database (for example, through our docker image) See the following documentation for how to run an Epsilla vector database: https://epsilla-inc.gitbook.io/epsilladb/quick-start Args: client (Any): Epsilla client to connect to. embeddings (Embeddings): Function used to embed the texts. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". Example: .. code-block:: python from langchain.vectorstores import Epsilla from pyepsilla import vectordb client = vectordb.Client() embeddings = OpenAIEmbeddings() db_path = "/tmp/vectorstore" db_name = "langchain_store" epsilla = Epsilla(client, embeddings, db_path, db_name) """ _LANGCHAIN_DEFAULT_DB_NAME = "langchain_store" _LANGCHAIN_DEFAULT_DB_PATH = "/tmp/langchain-epsilla" _LANGCHAIN_DEFAULT_TABLE_NAME = "langchain_collection" def __init__( self, client: Any, embeddings: Embeddings, db_path: Optional[str] = _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str] = _LANGCHAIN_DEFAULT_DB_NAME, ): """Initialize with necessary components.""" try: import pyepsilla except ImportError as e: raise ImportError( "Could not import pyepsilla python package. " "Please install pyepsilla package with `pip install pyepsilla`." ) from e if not isinstance(client, pyepsilla.vectordb.Client): raise TypeError( f"client should be an instance of pyepsilla.vectordb.Client, " f"got {type(client)}" ) self._client: vectordb.Client = client self._db_name = db_name self._embeddings = embeddings self._collection_name = Epsilla._LANGCHAIN_DEFAULT_TABLE_NAME self._client.load_db(db_name=db_name, db_path=db_path) self._client.use_db(db_name=db_name) @property def embeddings(self) -> Optional[Embeddings]: return self._embeddings def use_collection(self, collection_name: str) -> None: """ Set default collection to use. Args: collection_name (str): The name of the collection. """ self._collection_name = collection_name def clear_data(self, collection_name: str = "") -> None: """ Clear data in a collection. Args: collection_name (Optional[str]): The name of the collection. If not provided, the default collection will be used. """ if not collection_name: collection_name = self._collection_name self._client.drop_table(collection_name) def get( self, collection_name: str = "", response_fields: Optional[List[str]] = None ) -> List[dict]: """Get the collection. Args: collection_name (Optional[str]): The name of the collection to retrieve data from. If not provided, the default collection will be used. response_fields (Optional[List[str]]): List of field names in the result. If not specified, all available fields will be responded. Returns: A list of the retrieved data. 
""" if not collection_name: collection_name = self._collection_name status_code, response = self._client.get( table_name=collection_name, response_fields=response_fields ) if status_code != 200: logger.error(f"Failed to get records: {response['message']}") raise Exception("Error: {}.".format(response["message"])) return response["result"] def _create_collection( self, table_name: str, embeddings: list, metadatas: Optional[list[dict]] = None ) -> None: if not embeddings: raise ValueError("Embeddings list is empty.") dim = len(embeddings[0]) fields: List[dict] = [ {"name": "id", "dataType": "INT"}, {"name": "text", "dataType": "STRING"}, {"name": "embeddings", "dataType": "VECTOR_FLOAT", "dimensions": dim}, ] if metadatas is not None: field_names = [field["name"] for field in fields] for metadata in metadatas: for key, value in metadata.items(): if key in field_names: continue d_type: str if isinstance(value, str): d_type = "STRING" elif isinstance(value, int): d_type = "INT" elif isinstance(value, float): d_type = "FLOAT" elif isinstance(value, bool): d_type = "BOOL" else: raise ValueError(f"Unsupported data type for {key}.") fields.append({"name": key, "dataType": d_type}) field_names.append(key) status_code, response = self._client.create_table( table_name, table_fields=fields ) if status_code != 200: if status_code == 409: logger.info(f"Continuing with the existing table {table_name}.") else: logger.error( f"Failed to create collection {table_name}: {response['message']}" ) raise Exception("Error: {}.".format(response["message"])) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, collection_name: Optional[str] = "", drop_old: Optional[bool] = False, **kwargs: Any, ) -> List[str]: """ Embed texts and add them to the database. Args: texts (Iterable[str]): The texts to embed. metadatas (Optional[List[dict]]): Metadata dicts attached to each of the texts. Defaults to None. collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: List of ids of the added texts. """ if not collection_name: collection_name = self._collection_name else: self._collection_name = collection_name if drop_old: self._client.drop_db(db_name=collection_name) texts = list(texts) try: embeddings = self._embeddings.embed_documents(texts) except NotImplementedError: embeddings = [self._embeddings.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug("Nothing to insert, skipping.") return [] self._create_collection( table_name=collection_name, embeddings=embeddings, metadatas=metadatas ) ids = [hash(uuid.uuid4()) for _ in texts] records = [] for index, id in enumerate(ids): record = { "id": id, "text": texts[index], "embeddings": embeddings[index], } if metadatas is not None: metadata = metadatas[index].items() for key, value in metadata: record[key] = value records.append(record) status_code, response = self._client.insert( table_name=collection_name, records=records ) if status_code != 200: logger.error( f"Failed to add records to {collection_name}: {response['message']}" ) raise Exception("Error: {}.".format(response["message"])) return [str(id) for id in ids] def similarity_search( self, query: str, k: int = 4, collection_name: str = "", **kwargs: Any ) -> List[Document]: """ Return the documents that are semantically most relevant to the query. 
Args: query (str): String to query the vectorstore with. k (Optional[int]): Number of documents to return. Defaults to 4. collection_name (Optional[str]): Collection to use. Defaults to "langchain_store" or the one provided before. Returns: List of documents that are semantically most relevant to the query """ if not collection_name: collection_name = self._collection_name query_vector = self._embeddings.embed_query(query) status_code, response = self._client.query( table_name=collection_name, query_field="embeddings", query_vector=query_vector, limit=k, ) if status_code != 200: logger.error(f"Search failed: {response['message']}.") raise Exception("Error: {}.".format(response["message"])) exclude_keys = ["id", "text", "embeddings"] return list( map( lambda item: Document( page_content=item["text"], metadata={ key: item[key] for key in item if key not in exclude_keys }, ), response["result"], ) ) @classmethod def from_texts( cls: Type[Epsilla], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Any = None, db_path: Optional[str] = _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str] = _LANGCHAIN_DEFAULT_DB_NAME, collection_name: Optional[str] = _LANGCHAIN_DEFAULT_TABLE_NAME, drop_old: Optional[bool] = False, **kwargs: Any, ) -> Epsilla: """Create an Epsilla vectorstore from raw documents. Args: texts (List[str]): List of text data to be inserted. embeddings (Embeddings): Embedding function. client (pyepsilla.vectordb.Client): Epsilla client to connect to. metadatas (Optional[List[dict]]): Metadata for each text. Defaults to None. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: Epsilla: Epsilla vector store. """ instance = Epsilla(client, embedding, db_path=db_path, db_name=db_name) instance.add_texts( texts, metadatas=metadatas, collection_name=collection_name, drop_old=drop_old, **kwargs, ) return instance @classmethod def from_documents( cls: Type[Epsilla], documents: List[Document], embedding: Embeddings, client: Any = None, db_path: Optional[str] = _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str] = _LANGCHAIN_DEFAULT_DB_NAME, collection_name: Optional[str] = _LANGCHAIN_DEFAULT_TABLE_NAME, drop_old: Optional[bool] = False, **kwargs: Any, ) -> Epsilla: """Create an Epsilla vectorstore from a list of documents. Args: texts (List[str]): List of text data to be inserted. embeddings (Embeddings): Embedding function. client (pyepsilla.vectordb.Client): Epsilla client to connect to. metadatas (Optional[List[dict]]): Metadata for each text. Defaults to None. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: Epsilla: Epsilla vector store. 
""" texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( texts, embedding, metadatas=metadatas, client=client, db_path=db_path, db_name=db_name, collection_name=collection_name, drop_old=drop_old, **kwargs, )
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~zep.py
from __future__ import annotations import logging import warnings from dataclasses import asdict, dataclass from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document if TYPE_CHECKING: from zep_python.document import Document as ZepDocument from zep_python.document import DocumentCollection logger = logging.getLogger() @dataclass class CollectionConfig: """Configuration for a `Zep Collection`. If the collection does not exist, it will be created. Attributes: name (str): The name of the collection. description (Optional[str]): An optional description of the collection. metadata (Optional[Dict[str, Any]]): Optional metadata for the collection. embedding_dimensions (int): The number of dimensions for the embeddings in the collection. This should match the Zep server configuration if auto-embed is true. is_auto_embedded (bool): A flag indicating whether the collection is automatically embedded by Zep. """ name: str description: Optional[str] metadata: Optional[Dict[str, Any]] embedding_dimensions: int is_auto_embedded: bool class ZepVectorStore(VectorStore): """`Zep` vector store. It provides methods for adding texts or documents to the store, searching for similar documents, and deleting documents. Search scores are calculated using cosine similarity normalized to [0, 1]. Args: api_url (str): The URL of the Zep API. collection_name (str): The name of the collection in the Zep store. api_key (Optional[str]): The API key for the Zep API. config (Optional[CollectionConfig]): The configuration for the collection. Required if the collection does not already exist. embedding (Optional[Embeddings]): Optional embedding function to use to embed the texts. Required if the collection is not auto-embedded. """ def __init__( self, collection_name: str, api_url: str, *, api_key: Optional[str] = None, config: Optional[CollectionConfig] = None, embedding: Optional[Embeddings] = None, ) -> None: super().__init__() if not collection_name: raise ValueError( "collection_name must be specified when using ZepVectorStore." ) try: from zep_python import ZepClient except ImportError: raise ImportError( "Could not import zep-python python package. " "Please install it with `pip install zep-python`." ) self._client = ZepClient(api_url, api_key=api_key) self.collection_name = collection_name # If for some reason the collection name is not the same as the one in the # config, update it. if config and config.name != self.collection_name: config.name = self.collection_name self._collection_config = config self._collection = self._load_collection() self._embedding = embedding # self.add_texts(texts, metadatas=metadatas, **kwargs) @property def embeddings(self) -> Optional[Embeddings]: """Access the query embedding object if available.""" return self._embedding def _load_collection(self) -> DocumentCollection: """ Load the collection from the Zep backend. """ from zep_python import NotFoundError try: collection = self._client.document.get_collection(self.collection_name) except NotFoundError: logger.info( f"Collection {self.collection_name} not found. Creating new collection." ) collection = self._create_collection() return collection def _create_collection(self) -> DocumentCollection: """ Create a new collection in the Zep backend. 
""" if not self._collection_config: raise ValueError( "Collection config must be specified when creating a new collection." ) collection = self._client.document.add_collection( **asdict(self._collection_config) ) return collection def _generate_documents_to_add( self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, document_ids: Optional[List[str]] = None, ) -> List[ZepDocument]: from zep_python.document import Document as ZepDocument embeddings = None if self._collection and self._collection.is_auto_embedded: if self._embedding is not None: warnings.warn( """The collection is set to auto-embed and an embedding function is present. Ignoring the embedding function.""", stacklevel=2, ) elif self._embedding is not None: embeddings = self._embedding.embed_documents(list(texts)) if self._collection and self._collection.embedding_dimensions != len( embeddings[0] ): raise ValueError( "The embedding dimensions of the collection and the embedding" " function do not match. Collection dimensions:" f" {self._collection.embedding_dimensions}, Embedding dimensions:" f" {len(embeddings[0])}" ) else: pass documents: List[ZepDocument] = [] for i, d in enumerate(texts): documents.append( ZepDocument( content=d, metadata=metadatas[i] if metadatas else None, document_id=document_ids[i] if document_ids else None, embedding=embeddings[i] if embeddings else None, ) ) return documents def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[str, Any]]] = None, document_ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. document_ids: Optional list of document ids associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) documents = self._generate_documents_to_add(texts, metadatas, document_ids) uuids = self._collection.add_documents(documents) return uuids async def aadd_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[str, Any]]] = None, document_ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore.""" if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) documents = self._generate_documents_to_add(texts, metadatas, document_ids) uuids = await self._collection.aadd_documents(documents) return uuids def search( self, query: str, search_type: str, metadata: Optional[Dict[str, Any]] = None, k: int = 3, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query using specified search type.""" if search_type == "similarity": return self.similarity_search(query, k=k, metadata=metadata, **kwargs) elif search_type == "mmr": return self.max_marginal_relevance_search( query, k=k, metadata=metadata, **kwargs ) else: raise ValueError( f"search_type of {search_type} not allowed. Expected " "search_type to be 'similarity' or 'mmr'." 
) async def asearch( self, query: str, search_type: str, metadata: Optional[Dict[str, Any]] = None, k: int = 3, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query using specified search type.""" if search_type == "similarity": return await self.asimilarity_search( query, k=k, metadata=metadata, **kwargs ) elif search_type == "mmr": return await self.amax_marginal_relevance_search( query, k=k, metadata=metadata, **kwargs ) else: raise ValueError( f"search_type of {search_type} not allowed. Expected " "search_type to be 'similarity' or 'mmr'." ) def similarity_search( self, query: str, k: int = 4, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query.""" results = self._similarity_search_with_relevance_scores( query, k=k, metadata=metadata, **kwargs ) return [doc for doc, _ in results] def similarity_search_with_score( self, query: str, k: int = 4, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search with distance.""" return self._similarity_search_with_relevance_scores( query, k=k, metadata=metadata, **kwargs ) def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Default similarity search with relevance scores. Modify if necessary in subclass. Return docs and relevance scores in the range [0, 1]. 0 is dissimilar, 1 is most similar. Args: query: input text k: Number of Documents to return. Defaults to 4. metadata: Optional, metadata filter **kwargs: kwargs to be passed to similarity search. Should include: score_threshold: Optional, a floating point value between 0 to 1 and filter the resulting set of retrieved docs Returns: List of Tuples of (doc, similarity_score) """ if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) if not self._collection.is_auto_embedded and self._embedding: query_vector = self._embedding.embed_query(query) results = self._collection.search( embedding=query_vector, limit=k, metadata=metadata, **kwargs ) else: results = self._collection.search( query, limit=k, metadata=metadata, **kwargs ) return [ ( Document( page_content=doc.content, metadata=doc.metadata, ), doc.score or 0.0, ) for doc in results ] async def asimilarity_search_with_relevance_scores( self, query: str, k: int = 4, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query.""" if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) if not self._collection.is_auto_embedded and self._embedding: query_vector = self._embedding.embed_query(query) results = await self._collection.asearch( embedding=query_vector, limit=k, metadata=metadata, **kwargs ) else: results = await self._collection.asearch( query, limit=k, metadata=metadata, **kwargs ) return [ ( Document( page_content=doc.content, metadata=doc.metadata, ), doc.score or 0.0, ) for doc in results ] async def asimilarity_search( self, query: str, k: int = 4, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query.""" results = await self.asimilarity_search_with_relevance_scores( query, k, metadata=metadata, **kwargs ) return [doc for doc, _ in results] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, metadata: Optional[Dict[str, 
Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. metadata: Optional, metadata filter Returns: List of Documents most similar to the query vector. """ if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) results = self._collection.search( embedding=embedding, limit=k, metadata=metadata, **kwargs ) return [ Document( page_content=doc.content, metadata=doc.metadata, ) for doc in results ] async def asimilarity_search_by_vector( self, embedding: List[float], k: int = 4, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector.""" if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) results = self._collection.search( embedding=embedding, limit=k, metadata=metadata, **kwargs ) return [ Document( page_content=doc.content, metadata=doc.metadata, ) for doc in results ] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Zep determines this automatically and this parameter is ignored. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. metadata: Optional, metadata to filter the resulting set of retrieved docs Returns: List of Documents selected by maximal marginal relevance. 
""" if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) if not self._collection.is_auto_embedded and self._embedding: query_vector = self._embedding.embed_query(query) results = self._collection.search( embedding=query_vector, limit=k, metadata=metadata, search_type="mmr", mmr_lambda=lambda_mult, **kwargs, ) else: results, query_vector = self._collection.search_return_query_vector( query, limit=k, metadata=metadata, search_type="mmr", mmr_lambda=lambda_mult, **kwargs, ) return [Document(page_content=d.content, metadata=d.metadata) for d in results] async def amax_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) if not self._collection.is_auto_embedded and self._embedding: query_vector = self._embedding.embed_query(query) results = await self._collection.asearch( embedding=query_vector, limit=k, metadata=metadata, search_type="mmr", mmr_lambda=lambda_mult, **kwargs, ) else: results, query_vector = await self._collection.asearch_return_query_vector( query, limit=k, metadata=metadata, search_type="mmr", mmr_lambda=lambda_mult, **kwargs, ) return [Document(page_content=d.content, metadata=d.metadata) for d in results] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Zep determines this automatically and this parameter is ignored. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. metadata: Optional, metadata to filter the resulting set of retrieved docs Returns: List of Documents selected by maximal marginal relevance. 
""" if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) results = self._collection.search( embedding=embedding, limit=k, metadata=metadata, search_type="mmr", mmr_lambda=lambda_mult, **kwargs, ) return [Document(page_content=d.content, metadata=d.metadata) for d in results] async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" if not self._collection: raise ValueError( "collection should be an instance of a Zep DocumentCollection" ) results = await self._collection.asearch( embedding=embedding, limit=k, metadata=metadata, search_type="mmr", mmr_lambda=lambda_mult, **kwargs, ) return [Document(page_content=d.content, metadata=d.metadata) for d in results] @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, collection_name: str = "", api_url: str = "", api_key: Optional[str] = None, config: Optional[CollectionConfig] = None, **kwargs: Any, ) -> ZepVectorStore: """ Class method that returns a ZepVectorStore instance initialized from texts. If the collection does not exist, it will be created. Args: texts (List[str]): The list of texts to add to the vectorstore. embedding (Optional[Embeddings]): Optional embedding function to use to embed the texts. metadatas (Optional[List[Dict[str, Any]]]): Optional list of metadata associated with the texts. collection_name (str): The name of the collection in the Zep store. api_url (str): The URL of the Zep API. api_key (Optional[str]): The API key for the Zep API. config (Optional[CollectionConfig]): The configuration for the collection. **kwargs: Additional parameters specific to the vectorstore. Returns: ZepVectorStore: An instance of ZepVectorStore. """ vecstore = cls( collection_name, api_url, api_key=api_key, config=config, embedding=embedding, ) vecstore.add_texts(texts, metadatas) return vecstore def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: """Delete by Zep vector UUIDs. Parameters ---------- ids : Optional[List[str]] The UUIDs of the vectors to delete. Raises ------ ValueError If no UUIDs are provided. """ if ids is None or len(ids) == 0: raise ValueError("No uuids provided to delete.") if self._collection is None: raise ValueError("No collection name provided.") for u in ids: self._collection.delete_document(u)
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~router~multi_prompt.py
"""Use a single chain to route an input to one of multiple llm chains.""" from __future__ import annotations from typing import Any, Dict, List, Optional from langchain_core.prompts import PromptTemplate from langchain_core.schema.language_model import BaseLanguageModel from langchain.chains import ConversationChain from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.router.base import MultiRouteChain from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE class MultiPromptChain(MultiRouteChain): """A multi-route chain that uses an LLM router chain to choose amongst prompts.""" @property def output_keys(self) -> List[str]: return ["text"] @classmethod def from_prompts( cls, llm: BaseLanguageModel, prompt_infos: List[Dict[str, str]], default_chain: Optional[Chain] = None, **kwargs: Any, ) -> MultiPromptChain: """Convenience constructor for instantiating from destination prompts.""" destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos] destinations_str = "\n".join(destinations) router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format( destinations=destinations_str ) router_prompt = PromptTemplate( template=router_template, input_variables=["input"], output_parser=RouterOutputParser(), ) router_chain = LLMRouterChain.from_llm(llm, router_prompt) destination_chains = {} for p_info in prompt_infos: name = p_info["name"] prompt_template = p_info["prompt_template"] prompt = PromptTemplate(template=prompt_template, input_variables=["input"]) chain = LLMChain(llm=llm, prompt=prompt) destination_chains[name] = chain _default_chain = default_chain or ConversationChain(llm=llm, output_key="text") return cls( router_chain=router_chain, destination_chains=destination_chains, default_chain=_default_chain, **kwargs, )
[ "input", "prompt_template" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~evaluation~qa~eval_chain.py
"""LLM Chains for evaluating question answering.""" from __future__ import annotations import re import string from typing import Any, List, Optional, Sequence, Tuple from langchain_core.prompts import PromptTemplate from langchain_core.pydantic_v1 import Extra from langchain_core.schema import RUN_KEY from langchain_core.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import Callbacks from langchain.chains.llm import LLMChain from langchain.evaluation.qa.eval_prompt import CONTEXT_PROMPT, COT_PROMPT, PROMPT from langchain.evaluation.schema import LLMEvalChain, StringEvaluator def _get_score(text: str) -> Optional[Tuple[str, int]]: match = re.search(r"grade:\s*(correct|incorrect)", text.strip(), re.IGNORECASE) if match: if match.group(1).upper() == "CORRECT": return "CORRECT", 1 elif match.group(1).upper() == "INCORRECT": return "INCORRECT", 0 try: first_word = ( text.strip().split()[0].translate(str.maketrans("", "", string.punctuation)) ) if first_word.upper() == "CORRECT": return "CORRECT", 1 elif first_word.upper() == "INCORRECT": return "INCORRECT", 0 last_word = ( text.strip() .split()[-1] .translate(str.maketrans("", "", string.punctuation)) ) if last_word.upper() == "CORRECT": return "CORRECT", 1 elif last_word.upper() == "INCORRECT": return "INCORRECT", 0 except IndexError: pass return None def _parse_string_eval_output(text: str) -> dict: """Parse the output text. Args: text (str): The output text to parse. Returns: Any: The parsed output. """ reasoning = text.strip() parsed_scores = _get_score(reasoning) if parsed_scores is None: value, score = None, None else: value, score = parsed_scores return { "reasoning": reasoning, "value": value, "score": score, } class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain): """LLM Chain for evaluating question answering.""" output_key: str = "results" #: :meta private: class Config: """Configuration for the QAEvalChain.""" extra = Extra.ignore @property def evaluation_name(self) -> str: return "correctness" @property def requires_reference(self) -> bool: return True @property def requires_input(self) -> bool: return True @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, **kwargs: Any, ) -> QAEvalChain: """Load QA Eval Chain from LLM. Args: llm (BaseLanguageModel): the base language model to use. prompt (PromptTemplate): A prompt template containing the input_variables: 'input', 'answer' and 'result' that will be used as the prompt for evaluation. Defaults to PROMPT. **kwargs: additional keyword arguments. Returns: QAEvalChain: the loaded QA eval chain. 
""" prompt = prompt or PROMPT expected_input_vars = {"query", "answer", "result"} if expected_input_vars != set(prompt.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt.input_variables}" ) return cls(llm=llm, prompt=prompt, **kwargs) def evaluate( self, examples: Sequence[dict], predictions: Sequence[dict], question_key: str = "query", answer_key: str = "answer", prediction_key: str = "result", *, callbacks: Callbacks = None, ) -> List[dict]: """Evaluate question answering examples and predictions.""" inputs = [ { "query": example[question_key], "answer": example[answer_key], "result": predictions[i][prediction_key], } for i, example in enumerate(examples) ] return self.apply(inputs, callbacks=callbacks) def _prepare_output(self, result: dict) -> dict: parsed_result = _parse_string_eval_output(result[self.output_key]) if RUN_KEY in result: parsed_result[RUN_KEY] = result[RUN_KEY] return parsed_result def _evaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Evaluate Chain or LLM output, based on optional input and label. Args: prediction (str): the LLM or chain prediction to evaluate. reference (Optional[str], optional): the reference label to evaluate against. input (Optional[str], optional): the input to consider during evaluation callbacks (Callbacks, optional): the callbacks to use for tracing. include_run_info (bool, optional): whether to include run info in the returned results. **kwargs: additional keyword arguments, including callbacks, tags, etc. Returns: dict: The evaluation results containing the score or value. """ result = self( { "query": input, "answer": reference, "result": prediction, }, callbacks=callbacks, include_run_info=include_run_info, ) return self._prepare_output(result) async def _aevaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: result = await self.acall( inputs={"query": input, "answer": reference, "result": prediction}, callbacks=callbacks, include_run_info=include_run_info, ) return self._prepare_output(result) class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain): """LLM Chain for evaluating QA w/o GT based on context""" @property def requires_reference(self) -> bool: """Whether the chain requires a reference string.""" return True @property def requires_input(self) -> bool: """Whether the chain requires an input string.""" return True class Config: """Configuration for the QAEvalChain.""" extra = Extra.ignore @classmethod def _validate_input_vars(cls, prompt: PromptTemplate) -> None: expected_input_vars = {"query", "context", "result"} if expected_input_vars != set(prompt.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt.input_variables}" ) @property def evaluation_name(self) -> str: return "Contextual Accuracy" @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, **kwargs: Any, ) -> ContextQAEvalChain: """Load QA Eval Chain from LLM. Args: llm (BaseLanguageModel): the base language model to use. prompt (PromptTemplate): A prompt template containing the input_variables: 'query', 'context' and 'result' that will be used as the prompt for evaluation. Defaults to PROMPT. **kwargs: additional keyword arguments. 
Returns: ContextQAEvalChain: the loaded QA eval chain. """ prompt = prompt or CONTEXT_PROMPT cls._validate_input_vars(prompt) return cls(llm=llm, prompt=prompt, **kwargs) def evaluate( self, examples: List[dict], predictions: List[dict], question_key: str = "query", context_key: str = "context", prediction_key: str = "result", *, callbacks: Callbacks = None, ) -> List[dict]: """Evaluate question answering examples and predictions.""" inputs = [ { "query": example[question_key], "context": example[context_key], "result": predictions[i][prediction_key], } for i, example in enumerate(examples) ] return self.apply(inputs, callbacks=callbacks) def _prepare_output(self, result: dict) -> dict: parsed_result = _parse_string_eval_output(result[self.output_key]) if RUN_KEY in result: parsed_result[RUN_KEY] = result[RUN_KEY] return parsed_result def _evaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: result = self( { "query": input, "context": reference, "result": prediction, }, callbacks=callbacks, include_run_info=include_run_info, ) return self._prepare_output(result) async def _aevaluate_strings( self, *, prediction: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: result = await self.acall( inputs={"query": input, "context": reference, "result": prediction}, callbacks=callbacks, include_run_info=include_run_info, ) return self._prepare_output(result) class CotQAEvalChain(ContextQAEvalChain): """LLM Chain for evaluating QA using chain of thought reasoning.""" @property def evaluation_name(self) -> str: return "COT Contextual Accuracy" @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, **kwargs: Any, ) -> CotQAEvalChain: """Load QA Eval Chain from LLM.""" prompt = prompt or COT_PROMPT cls._validate_input_vars(prompt) return cls(llm=llm, prompt=prompt, **kwargs)
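
# A hedged usage sketch for QAEvalChain. The grader model and the example
# question/answer/prediction are illustrative assumptions.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI

    eval_chain = QAEvalChain.from_llm(ChatOpenAI(temperature=0))
    graded = eval_chain.evaluate(
        examples=[{"query": "What is 2 + 2?", "answer": "4"}],
        predictions=[{"result": "2 + 2 equals 4."}],
    )
    # The parsed grade depends on the model output, e.g. {"results": "GRADE: CORRECT"}
    print(graded[0])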
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~typesense.py
from __future__ import annotations import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.utils import get_from_env if TYPE_CHECKING: from typesense.client import Client from typesense.collection import Collection class Typesense(VectorStore): """`Typesense` vector store. To use, you should have the ``typesense`` python package installed. Example: .. code-block:: python from langchain.embedding.openai import OpenAIEmbeddings from langchain.vectorstores import Typesense import typesense node = { "host": "localhost", # For Typesense Cloud use xxx.a1.typesense.net "port": "8108", # For Typesense Cloud use 443 "protocol": "http" # For Typesense Cloud use https } typesense_client = typesense.Client( { "nodes": [node], "api_key": "<API_KEY>", "connection_timeout_seconds": 2 } ) typesense_collection_name = "langchain-memory" embedding = OpenAIEmbeddings() vectorstore = Typesense( typesense_client=typesense_client, embedding=embedding, typesense_collection_name=typesense_collection_name, text_key="text", ) """ def __init__( self, typesense_client: Client, embedding: Embeddings, *, typesense_collection_name: Optional[str] = None, text_key: str = "text", ): """Initialize with Typesense client.""" try: from typesense import Client except ImportError: raise ImportError( "Could not import typesense python package. " "Please install it with `pip install typesense`." ) if not isinstance(typesense_client, Client): raise ValueError( f"typesense_client should be an instance of typesense.Client, " f"got {type(typesense_client)}" ) self._typesense_client = typesense_client self._embedding = embedding self._typesense_collection_name = ( typesense_collection_name or f"langchain-{str(uuid.uuid4())}" ) self._text_key = text_key @property def _collection(self) -> Collection: return self._typesense_client.collections[self._typesense_collection_name] @property def embeddings(self) -> Embeddings: return self._embedding def _prep_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]], ids: Optional[List[str]], ) -> List[dict]: """Embed and create the documents""" _ids = ids or (str(uuid.uuid4()) for _ in texts) _metadatas: Iterable[dict] = metadatas or ({} for _ in texts) embedded_texts = self._embedding.embed_documents(list(texts)) return [ {"id": _id, "vec": vec, f"{self._text_key}": text, "metadata": metadata} for _id, vec, text, metadata in zip(_ids, embedded_texts, texts, _metadatas) ] def _create_collection(self, num_dim: int) -> None: fields = [ {"name": "vec", "type": "float[]", "num_dim": num_dim}, {"name": f"{self._text_key}", "type": "string"}, {"name": ".*", "type": "auto"}, ] self._typesense_client.collections.create( {"name": self._typesense_collection_name, "fields": fields} ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embedding and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Returns: List of ids from adding the texts into the vectorstore. 
""" from typesense.exceptions import ObjectNotFound docs = self._prep_texts(texts, metadatas, ids) try: self._collection.documents.import_(docs, {"action": "upsert"}) except ObjectNotFound: # Create the collection if it doesn't already exist self._create_collection(len(docs[0]["vec"])) self._collection.documents.import_(docs, {"action": "upsert"}) return [doc["id"] for doc in docs] def similarity_search_with_score( self, query: str, k: int = 10, filter: Optional[str] = "", ) -> List[Tuple[Document, float]]: """Return typesense documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. Minimum 10 results would be returned. filter: typesense filter_by expression to filter documents on Returns: List of Documents most similar to the query and score for each """ embedded_query = [str(x) for x in self._embedding.embed_query(query)] query_obj = { "q": "*", "vector_query": f'vec:([{",".join(embedded_query)}], k:{k})', "filter_by": filter, "collection": self._typesense_collection_name, } docs = [] response = self._typesense_client.multi_search.perform( {"searches": [query_obj]}, {} ) for hit in response["results"][0]["hits"]: document = hit["document"] metadata = document["metadata"] text = document[self._text_key] score = hit["vector_distance"] docs.append((Document(page_content=text, metadata=metadata), score)) return docs def similarity_search( self, query: str, k: int = 10, filter: Optional[str] = "", **kwargs: Any, ) -> List[Document]: """Return typesense documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. Minimum 10 results would be returned. filter: typesense filter_by expression to filter documents on Returns: List of Documents most similar to the query and score for each """ docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter) return [doc for doc, _ in docs_and_score] @classmethod def from_client_params( cls, embedding: Embeddings, *, host: str = "localhost", port: Union[str, int] = "8108", protocol: str = "http", typesense_api_key: Optional[str] = None, connection_timeout_seconds: int = 2, **kwargs: Any, ) -> Typesense: """Initialize Typesense directly from client parameters. Example: .. code-block:: python from langchain.embedding.openai import OpenAIEmbeddings from langchain.vectorstores import Typesense # Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY". vectorstore = Typesense( OpenAIEmbeddings(), host="localhost", port="8108", protocol="http", typesense_collection_name="langchain-memory", ) """ try: from typesense import Client except ImportError: raise ValueError( "Could not import typesense python package. " "Please install it with `pip install typesense`." 
            )
        node = {
            "host": host,
            "port": str(port),
            "protocol": protocol,
        }
        typesense_api_key = typesense_api_key or get_from_env(
            "typesense_api_key", "TYPESENSE_API_KEY"
        )
        client_config = {
            "nodes": [node],
            "api_key": typesense_api_key,
            "connection_timeout_seconds": connection_timeout_seconds,
        }
        return cls(Client(client_config), embedding, **kwargs)

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        typesense_client: Optional[Client] = None,
        typesense_client_params: Optional[dict] = None,
        typesense_collection_name: Optional[str] = None,
        text_key: str = "text",
        **kwargs: Any,
    ) -> Typesense:
        """Construct Typesense wrapper from raw text."""
        if typesense_client:
            vectorstore = cls(
                typesense_client,
                embedding,
                typesense_collection_name=typesense_collection_name,
                text_key=text_key,
                **kwargs,
            )
        elif typesense_client_params:
            vectorstore = cls.from_client_params(
                embedding,
                typesense_collection_name=typesense_collection_name,
                text_key=text_key,
                **typesense_client_params,
                **kwargs,
            )
        else:
            raise ValueError(
                "Must specify one of typesense_client or typesense_client_params."
            )
        vectorstore.add_texts(texts, metadatas=metadatas, ids=ids)
        return vectorstore
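
# A hedged usage sketch for the Typesense wrapper above. It assumes a Typesense
# server on localhost:8108 and that OPENAI_API_KEY and TYPESENSE_API_KEY are set
# in the environment; the texts are illustrative.
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings

    store = Typesense.from_texts(
        ["Typesense is a typo-tolerant search engine"],
        OpenAIEmbeddings(),
        typesense_client_params={
            "host": "localhost",
            "port": "8108",
            "protocol": "http",
        },
    )
    print(store.similarity_search("typo tolerant search", k=1))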
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~elastic_vector_search.py
from __future__ import annotations import uuid import warnings from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union, ) from langchain_core._api import deprecated from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: from elasticsearch import Elasticsearch def _default_text_mapping(dim: int) -> Dict: return { "properties": { "text": {"type": "text"}, "vector": {"type": "dense_vector", "dims": dim}, } } def _default_script_query(query_vector: List[float], filter: Optional[dict]) -> Dict: if filter: ((key, value),) = filter.items() filter = {"match": {f"metadata.{key}.keyword": f"{value}"}} else: filter = {"match_all": {}} return { "script_score": { "query": filter, "script": { "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0", "params": {"query_vector": query_vector}, }, } } class ElasticVectorSearch(VectorStore): """ ElasticVectorSearch uses the brute force method of searching on vectors. Recommended to use ElasticsearchStore instead, which gives you the option to uses the approx HNSW algorithm which performs better on large datasets. ElasticsearchStore also supports metadata filtering, customising the query retriever and much more! You can read more on ElasticsearchStore: https://python.langchain.com/docs/integrations/vectorstores/elasticsearch To connect to an `Elasticsearch` instance that does not require login credentials, pass the Elasticsearch URL and index name along with the embedding object to the constructor. Example: .. code-block:: python from langchain.vectorstores import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch( elasticsearch_url="http://localhost:9200", index_name="test_index", embedding=embedding ) To connect to an Elasticsearch instance that requires login credentials, including Elastic Cloud, use the Elasticsearch URL format https://username:password@es_host:9243. For example, to connect to Elastic Cloud, create the Elasticsearch URL with the required authentication details and pass it to the ElasticVectorSearch constructor as the named parameter elasticsearch_url. You can obtain your Elastic Cloud URL and login credentials by logging in to the Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and navigating to the "Deployments" page. To obtain your Elastic Cloud password for the default "elastic" user: 1. Log in to the Elastic Cloud console at https://cloud.elastic.co 2. Go to "Security" > "Users" 3. Locate the "elastic" user and click "Edit" 4. Click "Reset password" 5. Follow the prompts to reset the password The format for Elastic Cloud URLs is https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243. Example: .. code-block:: python from langchain.vectorstores import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_host = "cluster_id.region_id.gcp.cloud.es.io" elasticsearch_url = f"https://username:password@{elastic_host}:9243" elastic_vector_search = ElasticVectorSearch( elasticsearch_url=elasticsearch_url, index_name="test_index", embedding=embedding ) Args: elasticsearch_url (str): The URL for the Elasticsearch instance. index_name (str): The name of the Elasticsearch index for the embeddings. 
embedding (Embeddings): An object that provides the ability to embed text. It should be an instance of a class that subclasses the Embeddings abstract base class, such as OpenAIEmbeddings() Raises: ValueError: If the elasticsearch python package is not installed. """ def __init__( self, elasticsearch_url: str, index_name: str, embedding: Embeddings, *, ssl_verify: Optional[Dict[str, Any]] = None, ): """Initialize with necessary components.""" warnings.warn( "ElasticVectorSearch will be removed in a future release. See" "Elasticsearch integration docs on how to upgrade." ) try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) self.embedding = embedding self.index_name = index_name _ssl_verify = ssl_verify or {} try: self.client = elasticsearch.Elasticsearch( elasticsearch_url, **_ssl_verify, headers={"user-agent": self.get_user_agent()}, ) except ValueError as e: raise ValueError( f"Your elasticsearch client string is mis-formatted. Got error: {e} " ) @staticmethod def get_user_agent() -> str: from langchain import __version__ return f"langchain-py-dvs/{__version__}" @property def embeddings(self) -> Embeddings: return self.embedding def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, refresh_indices: bool = True, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. refresh_indices: bool to refresh ElasticSearch indices Returns: List of ids from adding the texts into the vectorstore. """ try: from elasticsearch.exceptions import NotFoundError from elasticsearch.helpers import bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) requests = [] ids = ids or [str(uuid.uuid4()) for _ in texts] embeddings = self.embedding.embed_documents(list(texts)) dim = len(embeddings[0]) mapping = _default_text_mapping(dim) # check to see if the index already exists try: self.client.indices.get(index=self.index_name) except NotFoundError: # TODO would be nice to create index before embedding, # just to save expensive steps for last self.create_index(self.client, self.index_name, mapping) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} request = { "_op_type": "index", "_index": self.index_name, "vector": embeddings[i], "text": text, "metadata": metadata, "_id": ids[i], } requests.append(request) bulk(self.client, requests) if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k, filter=filter) documents = [d[0] for d in docs_and_scores] return documents def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. 
k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ embedding = self.embedding.embed_query(query) script_query = _default_script_query(embedding, filter) response = self.client_search( self.client, self.index_name, script_query, size=k ) hits = [hit for hit in response["hits"]["hits"]] docs_and_scores = [ ( Document( page_content=hit["_source"]["text"], metadata=hit["_source"]["metadata"], ), hit["_score"], ) for hit in hits ] return docs_and_scores @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, index_name: Optional[str] = None, refresh_indices: bool = True, **kwargs: Any, ) -> ElasticVectorSearch: """Construct ElasticVectorSearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Elasticsearch instance. 3. Adds the documents to the newly created Elasticsearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch.from_texts( texts, embeddings, elasticsearch_url="http://localhost:9200" ) """ elasticsearch_url = get_from_dict_or_env( kwargs, "elasticsearch_url", "ELASTICSEARCH_URL" ) if "elasticsearch_url" in kwargs: del kwargs["elasticsearch_url"] index_name = index_name or uuid.uuid4().hex vectorsearch = cls(elasticsearch_url, index_name, embedding, **kwargs) vectorsearch.add_texts( texts, metadatas=metadatas, ids=ids, refresh_indices=refresh_indices ) return vectorsearch def create_index(self, client: Any, index_name: str, mapping: Dict) -> None: version_num = client.info()["version"]["number"][0] version_num = int(version_num) if version_num >= 8: client.indices.create(index=index_name, mappings=mapping) else: client.indices.create(index=index_name, body={"mappings": mapping}) def client_search( self, client: Any, index_name: str, script_query: Dict, size: int ) -> Any: version_num = client.info()["version"]["number"][0] version_num = int(version_num) if version_num >= 8: response = client.search(index=index_name, query=script_query, size=size) else: response = client.search( index=index_name, body={"query": script_query, "size": size} ) return response def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError("No ids provided to delete.") # TODO: Check if this can be done in bulk for id in ids: self.client.delete(index=self.index_name, id=id) @deprecated("0.0.265", alternative="ElasticsearchStore class.", pending=True) class ElasticKnnSearch(VectorStore): """[DEPRECATED] `Elasticsearch` with k-nearest neighbor search (`k-NN`) vector store. Recommended to use ElasticsearchStore instead, which supports metadata filtering, customising the query retriever and much more! You can read more on ElasticsearchStore: https://python.langchain.com/docs/integrations/vectorstores/elasticsearch It creates an Elasticsearch index of text data that can be searched using k-NN search. The text data is transformed into vector embeddings using a provided embedding model, and these embeddings are stored in the Elasticsearch index. Attributes: index_name (str): The name of the Elasticsearch index. 
embedding (Embeddings): The embedding model to use for transforming text data into vector embeddings. es_connection (Elasticsearch, optional): An existing Elasticsearch connection. es_cloud_id (str, optional): The Cloud ID of your Elasticsearch Service deployment. es_user (str, optional): The username for your Elasticsearch Service deployment. es_password (str, optional): The password for your Elasticsearch Service deployment. vector_query_field (str, optional): The name of the field in the Elasticsearch index that contains the vector embeddings. query_field (str, optional): The name of the field in the Elasticsearch index that contains the original text data. Usage: >>> from embeddings import Embeddings >>> embedding = Embeddings.load('glove') >>> es_search = ElasticKnnSearch('my_index', embedding) >>> es_search.add_texts(['Hello world!', 'Another text']) >>> results = es_search.knn_search('Hello') [(Document(page_content='Hello world!', metadata={}), 0.9)] """ def __init__( self, index_name: str, embedding: Embeddings, es_connection: Optional["Elasticsearch"] = None, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, vector_query_field: Optional[str] = "vector", query_field: Optional[str] = "text", ): try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) warnings.warn( "ElasticKnnSearch will be removed in a future release." "Use ElasticsearchStore instead. See Elasticsearch " "integration docs on how to upgrade." ) self.embedding = embedding self.index_name = index_name self.query_field = query_field self.vector_query_field = vector_query_field # If a pre-existing Elasticsearch connection is provided, use it. if es_connection is not None: self.client = es_connection else: # If credentials for a new Elasticsearch connection are provided, # create a new connection. if es_cloud_id and es_user and es_password: self.client = elasticsearch.Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) else: raise ValueError( """Either provide a pre-existing Elasticsearch connection, \ or valid credentials for creating a new connection.""" ) @staticmethod def _default_knn_mapping( dims: int, similarity: Optional[str] = "dot_product" ) -> Dict: return { "properties": { "text": {"type": "text"}, "vector": { "type": "dense_vector", "dims": dims, "index": True, "similarity": similarity, }, } } def _default_knn_query( self, query_vector: Optional[List[float]] = None, query: Optional[str] = None, model_id: Optional[str] = None, k: Optional[int] = 10, num_candidates: Optional[int] = 10, ) -> Dict: knn: Dict = { "field": self.vector_query_field, "k": k, "num_candidates": num_candidates, } # Case 1: `query_vector` is provided, but not `model_id` -> use query_vector if query_vector and not model_id: knn["query_vector"] = query_vector # Case 2: `query` and `model_id` are provided, -> use query_vector_builder elif query and model_id: knn["query_vector_builder"] = { "text_embedding": { "model_id": model_id, # use 'model_id' argument "model_text": query, # use 'query' argument } } else: raise ValueError( "Either `query_vector` or `model_id` must be provided, but not both." 
) return knn def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Document]: """ Pass through to `knn_search` """ results = self.knn_search(query=query, k=k, **kwargs) return [doc for doc, score in results] def similarity_search_with_score( self, query: str, k: int = 10, **kwargs: Any ) -> List[Tuple[Document, float]]: """Pass through to `knn_search including score`""" return self.knn_search(query=query, k=k, **kwargs) def knn_search( self, query: Optional[str] = None, k: Optional[int] = 10, query_vector: Optional[List[float]] = None, model_id: Optional[str] = None, size: Optional[int] = 10, source: Optional[bool] = True, fields: Optional[ Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None] ] = None, page_content: Optional[str] = "text", ) -> List[Tuple[Document, float]]: """ Perform a k-NN search on the Elasticsearch index. Args: query (str, optional): The query text to search for. k (int, optional): The number of nearest neighbors to return. query_vector (List[float], optional): The query vector to search for. model_id (str, optional): The ID of the model to use for transforming the query text into a vector. size (int, optional): The number of search results to return. source (bool, optional): Whether to return the source of the search results. fields (List[Mapping[str, Any]], optional): The fields to return in the search results. page_content (str, optional): The name of the field that contains the page content. Returns: A list of tuples, where each tuple contains a Document object and a score. """ # if not source and (fields == None or page_content not in fields): if not source and ( fields is None or not any(page_content in field for field in fields) ): raise ValueError("If source=False `page_content` field must be in `fields`") knn_query_body = self._default_knn_query( query_vector=query_vector, query=query, model_id=model_id, k=k ) # Perform the kNN search on the Elasticsearch index and return the results. response = self.client.search( index=self.index_name, knn=knn_query_body, size=size, source=source, fields=fields, ) hits = [hit for hit in response["hits"]["hits"]] docs_and_scores = [ ( Document( page_content=hit["_source"][page_content] if source else hit["fields"][page_content][0], metadata=hit["fields"] if fields else {}, ), hit["_score"], ) for hit in hits ] return docs_and_scores def knn_hybrid_search( self, query: Optional[str] = None, k: Optional[int] = 10, query_vector: Optional[List[float]] = None, model_id: Optional[str] = None, size: Optional[int] = 10, source: Optional[bool] = True, knn_boost: Optional[float] = 0.9, query_boost: Optional[float] = 0.1, fields: Optional[ Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None] ] = None, page_content: Optional[str] = "text", ) -> List[Tuple[Document, float]]: """ Perform a hybrid k-NN and text search on the Elasticsearch index. Args: query (str, optional): The query text to search for. k (int, optional): The number of nearest neighbors to return. query_vector (List[float], optional): The query vector to search for. model_id (str, optional): The ID of the model to use for transforming the query text into a vector. size (int, optional): The number of search results to return. source (bool, optional): Whether to return the source of the search results. knn_boost (float, optional): The boost value to apply to the k-NN search results. query_boost (float, optional): The boost value to apply to the text search results. 
fields (List[Mapping[str, Any]], optional): The fields to return in the search results. page_content (str, optional): The name of the field that contains the page content. Returns: A list of tuples, where each tuple contains a Document object and a score. """ # if not source and (fields == None or page_content not in fields): if not source and ( fields is None or not any(page_content in field for field in fields) ): raise ValueError("If source=False `page_content` field must be in `fields`") knn_query_body = self._default_knn_query( query_vector=query_vector, query=query, model_id=model_id, k=k ) # Modify the knn_query_body to add a "boost" parameter knn_query_body["boost"] = knn_boost # Generate the body of the standard Elasticsearch query match_query_body = { "match": {self.query_field: {"query": query, "boost": query_boost}} } # Perform the hybrid search on the Elasticsearch index and return the results. response = self.client.search( index=self.index_name, query=match_query_body, knn=knn_query_body, fields=fields, size=size, source=source, ) hits = [hit for hit in response["hits"]["hits"]] docs_and_scores = [ ( Document( page_content=hit["_source"][page_content] if source else hit["fields"][page_content][0], metadata=hit["fields"] if fields else {}, ), hit["_score"], ) for hit in hits ] return docs_and_scores def create_knn_index(self, mapping: Dict) -> None: """ Create a new k-NN index in Elasticsearch. Args: mapping (Dict): The mapping to use for the new index. Returns: None """ self.client.indices.create(index=self.index_name, mappings=mapping) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, model_id: Optional[str] = None, refresh_indices: bool = False, **kwargs: Any, ) -> List[str]: """ Add a list of texts to the Elasticsearch index. Args: texts (Iterable[str]): The texts to add to the index. metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries to associate with the texts. model_id (str, optional): The ID of the model to use for transforming the texts into vectors. refresh_indices (bool, optional): Whether to refresh the Elasticsearch indices after adding the texts. **kwargs: Arbitrary keyword arguments. Returns: A list of IDs for the added texts. """ # Check if the index exists. if not self.client.indices.exists(index=self.index_name): dims = kwargs.get("dims") if dims is None: raise ValueError("ElasticKnnSearch requires 'dims' parameter") similarity = kwargs.get("similarity") optional_args = {} if similarity is not None: optional_args["similarity"] = similarity mapping = self._default_knn_mapping(dims=dims, **optional_args) self.create_knn_index(mapping) embeddings = self.embedding.embed_documents(list(texts)) # body = [] body: List[Mapping[str, Any]] = [] for text, vector in zip(texts, embeddings): body.extend( [ {"index": {"_index": self.index_name}}, {"text": text, "vector": vector}, ] ) responses = self.client.bulk(operations=body) ids = [ item["index"]["_id"] for item in responses["items"] if item["index"]["result"] == "created" ] if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, **kwargs: Any, ) -> ElasticKnnSearch: """ Create a new ElasticKnnSearch instance and add a list of texts to the Elasticsearch index. Args: texts (List[str]): The texts to add to the index. 
embedding (Embeddings): The embedding model to use for transforming the texts into vectors. metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries to associate with the texts. **kwargs: Arbitrary keyword arguments. Returns: A new ElasticKnnSearch instance. """ index_name = kwargs.get("index_name", str(uuid.uuid4())) es_connection = kwargs.get("es_connection") es_cloud_id = kwargs.get("es_cloud_id") es_user = kwargs.get("es_user") es_password = kwargs.get("es_password") vector_query_field = kwargs.get("vector_query_field", "vector") query_field = kwargs.get("query_field", "text") model_id = kwargs.get("model_id") dims = kwargs.get("dims") if dims is None: raise ValueError("ElasticKnnSearch requires 'dims' parameter") optional_args = {} if vector_query_field is not None: optional_args["vector_query_field"] = vector_query_field if query_field is not None: optional_args["query_field"] = query_field knnvectorsearch = cls( index_name=index_name, embedding=embedding, es_connection=es_connection, es_cloud_id=es_cloud_id, es_user=es_user, es_password=es_password, **optional_args, ) # Encode the provided texts and add them to the newly created index. knnvectorsearch.add_texts(texts, model_id=model_id, dims=dims, **optional_args) return knnvectorsearch
[]
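The ElasticVectorSearch docstring above already carries end-to-end examples, but ElasticKnnSearch only hints at usage. Below is a minimal sketch based solely on the constructor and methods defined in this file; the Elastic Cloud credentials, index name, and the 1536-dimension figure for OpenAI embeddings are placeholder assumptions, not values from the source.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.elastic_vector_search import ElasticKnnSearch

embedding = OpenAIEmbeddings()  # assumes OPENAI_API_KEY is set
knn_store = ElasticKnnSearch(
    index_name="langchain-knn-demo",   # hypothetical index name
    embedding=embedding,
    es_cloud_id="<cloud-id>",          # placeholder Elastic Cloud credentials
    es_user="elastic",
    es_password="<password>",
)
# `dims` is required here because the index is created lazily on first insert.
knn_store.add_texts(["Hello world!", "Another text"], dims=1536)
# Search with an explicit query vector; no deployed text-embedding model is needed.
hits = knn_store.knn_search(query_vector=embedding.embed_query("Hello"), k=2)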
2024-01-10
axgpt/langchain
libs~langchain~langchain~storage~file_system.py
import re from pathlib import Path from typing import Iterator, List, Optional, Sequence, Tuple, Union from langchain_core.schema import BaseStore from langchain.storage.exceptions import InvalidKeyException class LocalFileStore(BaseStore[str, bytes]): """BaseStore interface that works on the local file system. Examples: Create a LocalFileStore instance and perform operations on it: .. code-block:: python from langchain.storage import LocalFileStore # Instantiate the LocalFileStore with the root path file_store = LocalFileStore("/path/to/root") # Set values for keys file_store.mset([("key1", b"value1"), ("key2", b"value2")]) # Get values for keys values = file_store.mget(["key1", "key2"]) # Returns [b"value1", b"value2"] # Delete keys file_store.mdelete(["key1"]) # Iterate over keys for key in file_store.yield_keys(): print(key) """ def __init__(self, root_path: Union[str, Path]) -> None: """Implement the BaseStore interface for the local file system. Args: root_path (Union[str, Path]): The root path of the file store. All keys are interpreted as paths relative to this root. """ self.root_path = Path(root_path) def _get_full_path(self, key: str) -> Path: """Get the full path for a given key relative to the root path. Args: key (str): The key relative to the root path. Returns: Path: The full path for the given key. """ if not re.match(r"^[a-zA-Z0-9_.\-/]+$", key): raise InvalidKeyException(f"Invalid characters in key: {key}") return self.root_path / key def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]: """Get the values associated with the given keys. Args: keys: A sequence of keys. Returns: A sequence of optional values associated with the keys. If a key is not found, the corresponding value will be None. """ values: List[Optional[bytes]] = [] for key in keys: full_path = self._get_full_path(key) if full_path.exists(): value = full_path.read_bytes() values.append(value) else: values.append(None) return values def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None: """Set the values for the given keys. Args: key_value_pairs: A sequence of key-value pairs. Returns: None """ for key, value in key_value_pairs: full_path = self._get_full_path(key) full_path.parent.mkdir(parents=True, exist_ok=True) full_path.write_bytes(value) def mdelete(self, keys: Sequence[str]) -> None: """Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. Returns: None """ for key in keys: full_path = self._get_full_path(key) if full_path.exists(): full_path.unlink() def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]: """Get an iterator over keys that match the given prefix. Args: prefix (Optional[str]): The prefix to match. Returns: Iterator[str]: An iterator over keys that match the given prefix. """ prefix_path = self._get_full_path(prefix) if prefix else self.root_path for file in prefix_path.rglob("*"): if file.is_file(): relative_path = file.relative_to(self.root_path) yield str(relative_path)
[]
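One detail the LocalFileStore docstring above does not spell out: because _get_full_path accepts "/" in keys and mset creates parent directories, keys may address nested paths under the root. A small sketch of that usage; the root path is a placeholder.

from langchain.storage import LocalFileStore

store = LocalFileStore("/tmp/langchain-file-store")   # placeholder root path
store.mset([("embeddings/doc-1", b"\x00\x01")])       # "/" passes the key regex
print(store.mget(["embeddings/doc-1"]))               # [b'\x00\x01']
print(list(store.yield_keys(prefix="embeddings")))    # ['embeddings/doc-1']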
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~retrievers~document_compressors~test_base.py
"""Integration test for compression pipelines.""" from langchain_core.schema import Document from langchain.document_transformers import EmbeddingsRedundantFilter from langchain.embeddings import OpenAIEmbeddings from langchain.retrievers.document_compressors import ( DocumentCompressorPipeline, EmbeddingsFilter, ) from langchain.text_splitter import CharacterTextSplitter def test_document_compressor_pipeline() -> None: embeddings = OpenAIEmbeddings() splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=0, separator=". ") redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings) relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.8) pipeline_filter = DocumentCompressorPipeline( transformers=[splitter, redundant_filter, relevant_filter] ) texts = [ "This sentence is about cows", "This sentence was about cows", "foo bar baz", ] docs = [Document(page_content=". ".join(texts))] actual = pipeline_filter.compress_documents(docs, "Tell me about farm animals") assert len(actual) == 1 assert actual[0].page_content in texts[:2]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_models~everlyai.py
"""EverlyAI Endpoints chat wrapper. Relies heavily on ChatOpenAI.""" from __future__ import annotations import logging import sys from typing import TYPE_CHECKING, Dict, Optional, Set from langchain_core.pydantic_v1 import Field, root_validator from langchain_core.schema.messages import BaseMessage from langchain.adapters.openai import convert_message_to_dict from langchain.chat_models.openai import ( ChatOpenAI, _import_tiktoken, ) from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: import tiktoken logger = logging.getLogger(__name__) DEFAULT_API_BASE = "https://everlyai.xyz/hosted" DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf" class ChatEverlyAI(ChatOpenAI): """`EverlyAI` Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``EVERLYAI_API_KEY`` set with your API key. Alternatively, you can use the everlyai_api_key keyword argument. Any parameters that are valid to be passed to the `openai.create` call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.chat_models import ChatEverlyAI chat = ChatEverlyAI(model_name="meta-llama/Llama-2-7b-chat-hf") """ @property def _llm_type(self) -> str: """Return type of chat model.""" return "everlyai-chat" @property def lc_secrets(self) -> Dict[str, str]: return {"everlyai_api_key": "EVERLYAI_API_KEY"} everlyai_api_key: Optional[str] = None """EverlyAI Endpoints API keys.""" model_name: str = Field(default=DEFAULT_MODEL, alias="model") """Model name to use.""" everlyai_api_base: str = DEFAULT_API_BASE """Base URL path for API requests.""" available_models: Optional[Set[str]] = None """Available models from EverlyAI API.""" @staticmethod def get_available_models() -> Set[str]: """Get available models from EverlyAI API.""" # EverlyAI doesn't yet support dynamically query for available models. return set( [ "meta-llama/Llama-2-7b-chat-hf", "meta-llama/Llama-2-13b-chat-hf-quantized", ] ) @root_validator(pre=True) def validate_environment_override(cls, values: dict) -> dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "everlyai_api_key", "EVERLYAI_API_KEY", ) values["openai_api_base"] = DEFAULT_API_BASE try: import openai except ImportError as e: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`.", ) from e try: values["client"] = openai.ChatCompletion except AttributeError as exc: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`.", ) from exc if "model_name" not in values.keys(): values["model_name"] = DEFAULT_MODEL model_name = values["model_name"] available_models = cls.get_available_models() if model_name not in available_models: raise ValueError( f"Model name {model_name} not found in available models: " f"{available_models}.", ) values["available_models"] = available_models return values def _get_encoding_model(self) -> tuple[str, tiktoken.Encoding]: tiktoken_ = _import_tiktoken() if self.tiktoken_model_name is not None: model = self.tiktoken_model_name else: model = self.model_name # Returns the number of tokens used by a list of messages. try: encoding = tiktoken_.encoding_for_model("gpt-3.5-turbo-0301") except KeyError: logger.warning("Warning: model not found. 
Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken_.get_encoding(model) return model, encoding def get_num_tokens_from_messages(self, messages: list[BaseMessage]) -> int: """Calculate num tokens with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" if sys.version_info[1] <= 7: return super().get_num_tokens_from_messages(messages) model, encoding = self._get_encoding_model() tokens_per_message = 3 tokens_per_name = 1 num_tokens = 0 messages_dict = [convert_message_to_dict(m) for m in messages] for message in messages_dict: num_tokens += tokens_per_message for key, value in message.items(): # Cast str(value) in case the message value is not a string # This occurs with function messages num_tokens += len(encoding.encode(str(value))) if key == "name": num_tokens += tokens_per_name # every reply is primed with <im_start>assistant num_tokens += 3 return num_tokens
[]
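A short usage sketch for ChatEverlyAI with one of the two models returned by get_available_models; the API key is a placeholder and could equally be supplied through the EVERLYAI_API_KEY environment variable named in the class docstring.

from langchain_core.schema import HumanMessage

from langchain.chat_models import ChatEverlyAI

chat = ChatEverlyAI(
    model_name="meta-llama/Llama-2-7b-chat-hf",
    everlyai_api_key="<EVERLYAI_API_KEY>",   # placeholder key
)
reply = chat([HumanMessage(content="Describe the Llama 2 chat model in one sentence.")])
print(reply.content)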
2024-01-10
axgpt/langchain
libs~core~tests~unit_tests~utils~test_imports.py
from langchain_core.utils import __all__

EXPECTED_ALL = [
    "StrictFormatter",
    "check_package_version",
    "convert_to_secret_str",
    "formatter",
    "get_bolded_text",
    "get_color_mapping",
    "get_colored_text",
    "get_pydantic_field_names",
    "guard_import",
    "mock_now",
    "print_text",
    "raise_for_status_with_text",
    "xor_args",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)

[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~nucliadb.py
import os from typing import Any, Dict, Iterable, List, Optional, Type from langchain_core.schema.document import Document from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VST, VectorStore FIELD_TYPES = { "f": "files", "t": "texts", "l": "links", } class NucliaDB(VectorStore): """NucliaDB vector store.""" _config: Dict[str, Any] = {} def __init__( self, knowledge_box: str, local: bool, api_key: Optional[str] = None, backend: Optional[str] = None, ) -> None: """Initialize the NucliaDB client. Args: knowledge_box: the Knowledge Box id. local: Whether to use a local NucliaDB instance or Nuclia Cloud api_key: A contributor API key for the kb (needed when local is False) backend: The backend url to use when local is True, defaults to http://localhost:8080 """ try: from nuclia.sdk import NucliaAuth except ImportError: raise ValueError( "nuclia python package not found. " "Please install it with `pip install nuclia`." ) self._config["LOCAL"] = local zone = os.environ.get("NUCLIA_ZONE", "europe-1") self._kb = knowledge_box if local: if not backend: backend = "http://localhost:8080" self._config["BACKEND"] = f"{backend}/api/v1" self._config["TOKEN"] = None NucliaAuth().nucliadb(url=backend) NucliaAuth().kb(url=self.kb_url, interactive=False) else: self._config["BACKEND"] = f"https://{zone}.nuclia.cloud/api/v1" self._config["TOKEN"] = api_key NucliaAuth().kb( url=self.kb_url, token=self._config["TOKEN"], interactive=False ) @property def is_local(self) -> str: return self._config["LOCAL"] @property def kb_url(self) -> str: return f"{self._config['BACKEND']}/kb/{self._kb}" def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Upload texts to NucliaDB""" ids = [] from nuclia.sdk import NucliaResource factory = NucliaResource() for i, text in enumerate(texts): extra: Dict[str, Any] = {"metadata": ""} if metadatas: extra = {"metadata": metadatas[i]} id = factory.create( texts={"text": {"body": text}}, extra=extra, url=self.kb_url, api_key=self._config["TOKEN"], ) ids.append(id) return ids def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: if not ids: return None from nuclia.sdk import NucliaResource factory = NucliaResource() results: List[bool] = [] for id in ids: try: factory.delete(rid=id, url=self.kb_url, api_key=self._config["TOKEN"]) results.append(True) except ValueError: results.append(False) return all(results) def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: from nuclia.sdk import NucliaSearch from nucliadb_models.search import FindRequest, ResourceProperties request = FindRequest( query=query, page_size=k, show=[ResourceProperties.VALUES, ResourceProperties.EXTRA], ) search = NucliaSearch() results = search.find( query=request, url=self.kb_url, api_key=self._config["TOKEN"] ) paragraphs = [] for resource in results.resources.values(): for field in resource.fields.values(): for paragraph_id, paragraph in field.paragraphs.items(): info = paragraph_id.split("/") field_type = FIELD_TYPES.get(info[1], None) field_id = info[2] if not field_type: continue value = getattr(resource.data, field_type, {}).get(field_id, None) paragraphs.append( { "text": paragraph.text, "metadata": { "extra": getattr( getattr(resource, "extra", {}), "metadata", None ), "value": value, }, "order": paragraph.order, } ) sorted_paragraphs = sorted(paragraphs, key=lambda x: x["order"]) return [ Document(page_content=paragraph["text"], 
metadata=paragraph["metadata"]) for paragraph in sorted_paragraphs ] @classmethod def from_texts( cls: Type[VST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> VST: """Return VectorStore initialized from texts and embeddings.""" raise NotImplementedError
[]
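A hedged sketch of the NucliaDB store against Nuclia Cloud, using only the methods defined above; the knowledge box id and contributor API key are placeholders.

from langchain.vectorstores.nucliadb import NucliaDB

ndb = NucliaDB(knowledge_box="<kb-id>", local=False, api_key="<contributor-api-key>")
ids = ndb.add_texts(
    ["NucliaDB returns matching paragraphs for a query."],
    metadatas=[{"source": "notes"}],
)
docs = ndb.similarity_search("what does a query return?", k=2)
ndb.delete(ids=ids)   # clean up the demo resources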
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~prompts~test_ngram_overlap_example_selector.py
"""Test functionality related to ngram overlap based selector.""" import pytest from langchain_core.prompts.prompt import PromptTemplate from langchain.prompts.example_selector.ngram_overlap import ( NGramOverlapExampleSelector, ngram_overlap_score, ) EXAMPLES = [ {"input": "See Spot run.", "output": "foo1"}, {"input": "My dog barks.", "output": "foo2"}, {"input": "Spot can run.", "output": "foo3"}, ] @pytest.fixture def selector() -> NGramOverlapExampleSelector: """Get ngram overlap based selector to use in tests.""" prompts = PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}" ) selector = NGramOverlapExampleSelector( examples=EXAMPLES, example_prompt=prompts, ) return selector def test_selector_valid(selector: NGramOverlapExampleSelector) -> None: """Test NGramOverlapExampleSelector can select examples.""" sentence = "Spot can run." output = selector.select_examples({"input": sentence}) assert output == [EXAMPLES[2], EXAMPLES[0], EXAMPLES[1]] def test_selector_add_example(selector: NGramOverlapExampleSelector) -> None: """Test NGramOverlapExampleSelector can add an example.""" new_example = {"input": "Spot plays fetch.", "output": "foo4"} selector.add_example(new_example) sentence = "Spot can run." output = selector.select_examples({"input": sentence}) assert output == [EXAMPLES[2], EXAMPLES[0]] + [new_example] + [EXAMPLES[1]] def test_selector_threshold_zero(selector: NGramOverlapExampleSelector) -> None: """Tests NGramOverlapExampleSelector threshold set to 0.0.""" selector.threshold = 0.0 sentence = "Spot can run." output = selector.select_examples({"input": sentence}) assert output == [EXAMPLES[2], EXAMPLES[0]] def test_selector_threshold_more_than_one( selector: NGramOverlapExampleSelector, ) -> None: """Tests NGramOverlapExampleSelector threshold greater than 1.0.""" selector.threshold = 1.0 + 1e-9 sentence = "Spot can run." output = selector.select_examples({"input": sentence}) assert output == [] def test_ngram_overlap_score(selector: NGramOverlapExampleSelector) -> None: """Tests that ngram_overlap_score returns correct values.""" selector.threshold = 1.0 + 1e-9 none = ngram_overlap_score(["Spot can run."], ["My dog barks."]) some = ngram_overlap_score(["Spot can run."], ["See Spot run."]) complete = ngram_overlap_score(["Spot can run."], ["Spot can run."]) check = [abs(none - 0.0) < 1e-9, 0.0 < some < 1.0, abs(complete - 1.0) < 1e-9] assert check == [True, True, True]
[ "Input: {input}\nOutput: {output}", "input" ]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~cache~test_gptcache.py
import os from typing import Any, Callable, Union import pytest from langchain_core.schema import Generation from langchain.cache import GPTCache from langchain.globals import get_llm_cache, set_llm_cache from tests.unit_tests.llms.fake_llm import FakeLLM try: from gptcache import Cache # noqa: F401 from gptcache.manager.factory import get_data_manager from gptcache.processor.pre import get_prompt gptcache_installed = True except ImportError: gptcache_installed = False def init_gptcache_map(cache_obj: Any) -> None: i = getattr(init_gptcache_map, "_i", 0) cache_path = f"data_map_{i}.txt" if os.path.isfile(cache_path): os.remove(cache_path) cache_obj.init( pre_embedding_func=get_prompt, data_manager=get_data_manager(data_path=cache_path), ) init_gptcache_map._i = i + 1 # type: ignore def init_gptcache_map_with_llm(cache_obj: Any, llm: str) -> None: cache_path = f"data_map_{llm}.txt" if os.path.isfile(cache_path): os.remove(cache_path) cache_obj.init( pre_embedding_func=get_prompt, data_manager=get_data_manager(data_path=cache_path), ) @pytest.mark.skipif(not gptcache_installed, reason="gptcache not installed") @pytest.mark.parametrize( "init_func", [None, init_gptcache_map, init_gptcache_map_with_llm] ) def test_gptcache_caching( init_func: Union[Callable[[Any, str], None], Callable[[Any], None], None] ) -> None: """Test gptcache default caching behavior.""" set_llm_cache(GPTCache(init_func)) llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update("foo", llm_string, [Generation(text="fizz")]) _ = llm.generate(["foo", "bar", "foo"]) cache_output = get_llm_cache().lookup("foo", llm_string) assert cache_output == [Generation(text="fizz")] get_llm_cache().clear() assert get_llm_cache().lookup("bar", llm_string) is None
[]
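The test above updates the cache by hand around a FakeLLM; a more typical setup registers GPTCache globally and lets repeated LLM calls hit it. A sketch under that assumption, with OpenAI as a placeholder LLM and the on-disk data path invented for illustration.

from gptcache import Cache
from gptcache.manager.factory import get_data_manager
from gptcache.processor.pre import get_prompt

from langchain.cache import GPTCache
from langchain.globals import set_llm_cache
from langchain.llms import OpenAI


def init_gptcache(cache_obj: Cache) -> None:
    # Exact-match cache keyed on the prompt text, persisted to a local map file.
    cache_obj.init(
        pre_embedding_func=get_prompt,
        data_manager=get_data_manager(data_path="llm_cache_map.txt"),  # placeholder path
    )


set_llm_cache(GPTCache(init_gptcache))
llm = OpenAI()                 # placeholder LLM; assumes OPENAI_API_KEY is set
llm("What does GPTCache do?")  # first call reaches the model and fills the cache
llm("What does GPTCache do?")  # identical prompt is served from the cache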
2024-01-10
axgpt/langchain
libs~langchain~langchain~callbacks~tracers~stdout.py
from langchain_core.callbacks.tracers.stdout import (
    ConsoleCallbackHandler,
    FunctionCallbackHandler,
)

__all__ = ["FunctionCallbackHandler", "ConsoleCallbackHandler"]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~gpt4all.py
from typing import Any, Dict, List from langchain_core.pydantic_v1 import BaseModel, root_validator from langchain_core.schema.embeddings import Embeddings class GPT4AllEmbeddings(BaseModel, Embeddings): """GPT4All embedding models. To use, you should have the gpt4all python package installed Example: .. code-block:: python from langchain.embeddings import GPT4AllEmbeddings embeddings = GPT4AllEmbeddings() """ client: Any #: :meta private: @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that GPT4All library is installed.""" try: from gpt4all import Embed4All values["client"] = Embed4All() except ImportError: raise ImportError( "Could not import gpt4all library. " "Please install the gpt4all library to " "use this embedding model: pip install gpt4all" ) return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed a list of documents using GPT4All. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = [self.client.embed(text) for text in texts] return [list(map(float, e)) for e in embeddings] def embed_query(self, text: str) -> List[float]: """Embed a query using GPT4All. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed_documents([text])[0]
[]
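The docstring above stops at construction; here is a short sketch of the two embedding calls, assuming the gpt4all package and its local Embed4All model are available.

from langchain.embeddings import GPT4AllEmbeddings

emb = GPT4AllEmbeddings()        # loads the local Embed4All model via gpt4all
doc_vectors = emb.embed_documents(["first document", "second document"])
query_vector = emb.embed_query("first document")
print(len(doc_vectors), len(query_vector))   # 2 vectors, each of the model's dimension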
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~chat_loaders~test_telegram.py
"""Test the telegram chat loader.""" import pathlib import tempfile import zipfile from typing import Sequence import pytest from langchain_core.schema import AIMessage, BaseMessage, HumanMessage from langchain.chat_loaders import telegram, utils def _assert_messages_are_equal( actual_messages: Sequence[BaseMessage], expected_messages: Sequence[BaseMessage], ) -> None: assert len(actual_messages) == len(expected_messages) for actual, expected in zip(actual_messages, expected_messages): assert actual.content == expected.content assert ( actual.additional_kwargs["sender"] == expected.additional_kwargs["sender"] ) def _check_telegram_chat_loader(path: str) -> None: _data_dir = pathlib.Path(__file__).parent / "data" source_path = _data_dir / path # Create a zip file from the directory in a temp directory with tempfile.TemporaryDirectory() as temp_dir_: temp_dir = pathlib.Path(temp_dir_) if path.endswith(".zip"): # Make a new zip file zip_path = temp_dir / "telegram_chat.zip" with zipfile.ZipFile(zip_path, "w") as zip_file: original_path = _data_dir / path.replace(".zip", "") for file_path in original_path.iterdir(): zip_file.write(file_path, arcname=file_path.name) source_path = zip_path loader = telegram.TelegramChatLoader(str(source_path)) chat_sessions_ = loader.lazy_load() chat_sessions_ = utils.merge_chat_runs(chat_sessions_) chat_sessions = list( utils.map_ai_messages(chat_sessions_, sender="Batman & Robin") ) assert len(chat_sessions) == 1 session = chat_sessions[0] assert len(session["messages"]) > 0 assert session["messages"][0].content == "i refuse to converse with you" expected_content = [ HumanMessage( content="i refuse to converse with you", additional_kwargs={ "sender": "Jimmeny Marvelton", "events": [{"message_time": "23.08.2023 13:11:23 UTC-08:00"}], }, ), AIMessage( content="Hi nemesis", additional_kwargs={ "sender": "Batman & Robin", "events": [{"message_time": "23.08.2023 13:13:20 UTC-08:00"}], }, ), HumanMessage( content="we meet again\n\nyou will not trick me this time", additional_kwargs={ "sender": "Jimmeny Marvelton", "events": [{"message_time": "23.08.2023 13:15:35 UTC-08:00"}], }, ), ] _assert_messages_are_equal(session["messages"], expected_content) @pytest.mark.parametrize( "path", [ "telegram_chat_json", "telegram_chat_json.zip", "telegram_chat_json/result.json", ], ) def test_telegram_chat_loader(path: str) -> None: _check_telegram_chat_loader(path) @pytest.mark.skip(reason="requires bs4 but marking it as such doesn't seem to work") @pytest.mark.parametrize( "path", [ "telegram_chat_json", "telegram_chat_json.zip", "telegram_chat_json/result.json", ], ) def test_telegram_chat_loader_html(path: str) -> None: _check_telegram_chat_loader(path)
[ "we meet again\n\nyou will not trick me this time", "i refuse to converse with you", "Hi nemesis" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~output_parsers~fix.py
from __future__ import annotations from typing import Any, TypeVar from langchain_core.schema import ( BaseOutputParser, BasePromptTemplate, OutputParserException, ) from langchain_core.schema.language_model import BaseLanguageModel from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT T = TypeVar("T") class OutputFixingParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors.""" @classmethod def is_lc_serializable(cls) -> bool: return True parser: BaseOutputParser[T] """The parser to use to parse the output.""" # Should be an LLMChain but we want to avoid top-level imports from langchain.chains retry_chain: Any """The LLMChain to use to retry the completion.""" max_retries: int = 1 """The maximum number of times to retry the parse.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_FIX_PROMPT, max_retries: int = 1, ) -> OutputFixingParser[T]: """Create an OutputFixingParser from a language model and a parser. Args: llm: llm to use for fixing parser: parser to use for parsing prompt: prompt to use for fixing max_retries: Maximum number of retries to parse. Returns: OutputFixingParser """ from langchain.chains.llm import LLMChain chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain, max_retries=max_retries) def parse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return self.parser.parse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = self.retry_chain.run( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") async def aparse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return await self.parser.aparse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = await self.retry_chain.arun( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "output_fixing"
[]
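A minimal sketch of OutputFixingParser.from_llm wrapped around a Pydantic parser; ChatOpenAI and the Joke schema are placeholders for whatever model and parser are already in use, and the truncated JSON string is deliberately malformed to trigger the retry chain.

from langchain_core.pydantic_v1 import BaseModel

from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import OutputFixingParser, PydanticOutputParser


class Joke(BaseModel):
    setup: str
    punchline: str


base_parser = PydanticOutputParser(pydantic_object=Joke)
fixing_parser = OutputFixingParser.from_llm(
    llm=ChatOpenAI(),            # placeholder LLM; assumes OPENAI_API_KEY is set
    parser=base_parser,
    max_retries=2,
)
# The malformed completion is sent back to the LLM together with the parser's
# format instructions and the parse error, then parsed again.
joke = fixing_parser.parse('{"setup": "Why did the chicken cross the road?"')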
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~chat_message_histories~xata.py
import json from typing import List from langchain_core.schema import ( BaseChatMessageHistory, ) from langchain_core.schema.messages import ( BaseMessage, _message_to_dict, messages_from_dict, ) class XataChatMessageHistory(BaseChatMessageHistory): """Chat message history stored in a Xata database.""" def __init__( self, session_id: str, db_url: str, api_key: str, branch_name: str = "main", table_name: str = "messages", create_table: bool = True, ) -> None: """Initialize with Xata client.""" try: from xata.client import XataClient # noqa: F401 except ImportError: raise ValueError( "Could not import xata python package. " "Please install it with `pip install xata`." ) self._client = XataClient( api_key=api_key, db_url=db_url, branch_name=branch_name ) self._table_name = table_name self._session_id = session_id if create_table: self._create_table_if_not_exists() def _create_table_if_not_exists(self) -> None: r = self._client.table().get_schema(self._table_name) if r.status_code <= 299: return if r.status_code != 404: raise Exception( f"Error checking if table exists in Xata: {r.status_code} {r}" ) r = self._client.table().create(self._table_name) if r.status_code > 299: raise Exception(f"Error creating table in Xata: {r.status_code} {r}") r = self._client.table().set_schema( self._table_name, payload={ "columns": [ {"name": "sessionId", "type": "string"}, {"name": "type", "type": "string"}, {"name": "role", "type": "string"}, {"name": "content", "type": "text"}, {"name": "name", "type": "string"}, {"name": "additionalKwargs", "type": "json"}, ] }, ) if r.status_code > 299: raise Exception(f"Error setting table schema in Xata: {r.status_code} {r}") def add_message(self, message: BaseMessage) -> None: """Append the message to the Xata table""" msg = _message_to_dict(message) r = self._client.records().insert( self._table_name, { "sessionId": self._session_id, "type": msg["type"], "content": message.content, "additionalKwargs": json.dumps(message.additional_kwargs), "role": msg["data"].get("role"), "name": msg["data"].get("name"), }, ) if r.status_code > 299: raise Exception(f"Error adding message to Xata: {r.status_code} {r}") @property def messages(self) -> List[BaseMessage]: # type: ignore r = self._client.data().query( self._table_name, payload={ "filter": { "sessionId": self._session_id, }, "sort": {"xata.createdAt": "asc"}, }, ) if r.status_code != 200: raise Exception(f"Error running query: {r.status_code} {r}") msgs = messages_from_dict( [ { "type": m["type"], "data": { "content": m["content"], "role": m.get("role"), "name": m.get("name"), "additional_kwargs": json.loads(m["additionalKwargs"]), }, } for m in r["records"] ] ) return msgs def clear(self) -> None: """Delete session from Xata table.""" while True: r = self._client.data().query( self._table_name, payload={ "columns": ["id"], "filter": { "sessionId": self._session_id, }, }, ) if r.status_code != 200: raise Exception(f"Error running query: {r.status_code} {r}") ids = [rec["id"] for rec in r["records"]] if len(ids) == 0: break operations = [ {"delete": {"table": self._table_name, "id": id}} for id in ids ] self._client.records().transaction(payload={"operations": operations})
[ "content" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~xata.py
from __future__ import annotations import time from itertools import repeat from typing import Any, Dict, Iterable, List, Optional, Tuple, Type from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document class XataVectorStore(VectorStore): """`Xata` vector store. It assumes you have a Xata database created with the right schema. See the guide at: https://integrations.langchain.com/vectorstores?integration_name=XataVectorStore """ def __init__( self, api_key: str, db_url: str, embedding: Embeddings, table_name: str, ) -> None: """Initialize with Xata client.""" try: from xata.client import XataClient # noqa: F401 except ImportError: raise ImportError( "Could not import xata python package. " "Please install it with `pip install xata`." ) self._client = XataClient(api_key=api_key, db_url=db_url) self._embedding: Embeddings = embedding self._table_name = table_name or "vectors" @property def embeddings(self) -> Embeddings: return self._embedding def add_vectors( self, vectors: List[List[float]], documents: List[Document], ids: Optional[List[str]] = None, ) -> List[str]: return self._add_vectors(vectors, documents, ids) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: ids = ids docs = self._texts_to_documents(texts, metadatas) vectors = self._embedding.embed_documents(list(texts)) return self.add_vectors(vectors, docs, ids) def _add_vectors( self, vectors: List[List[float]], documents: List[Document], ids: Optional[List[str]] = None, ) -> List[str]: """Add vectors to the Xata database.""" rows: List[Dict[str, Any]] = [] for idx, embedding in enumerate(vectors): row = { "content": documents[idx].page_content, "embedding": embedding, } if ids: row["id"] = ids[idx] for key, val in documents[idx].metadata.items(): if key not in ["id", "content", "embedding"]: row[key] = val rows.append(row) # XXX: I would have liked to use the BulkProcessor here, but it # doesn't return the IDs, which we need here. Manual chunking it is. 
chunk_size = 1000 id_list: List[str] = [] for i in range(0, len(rows), chunk_size): chunk = rows[i : i + chunk_size] r = self._client.records().bulk_insert(self._table_name, {"records": chunk}) if r.status_code != 200: raise Exception(f"Error adding vectors to Xata: {r.status_code} {r}") id_list.extend(r["recordIDs"]) return id_list @staticmethod def _texts_to_documents( texts: Iterable[str], metadatas: Optional[Iterable[Dict[Any, Any]]] = None, ) -> List[Document]: """Return list of Documents from list of texts and metadatas.""" if metadatas is None: metadatas = repeat({}) docs = [ Document(page_content=text, metadata=metadata) for text, metadata in zip(texts, metadatas) ] return docs @classmethod def from_texts( cls: Type["XataVectorStore"], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, api_key: Optional[str] = None, db_url: Optional[str] = None, table_name: str = "vectors", ids: Optional[List[str]] = None, **kwargs: Any, ) -> "XataVectorStore": """Return VectorStore initialized from texts and embeddings.""" if not api_key or not db_url: raise ValueError("Xata api_key and db_url must be set.") embeddings = embedding.embed_documents(texts) ids = None # Xata will generate them for us docs = cls._texts_to_documents(texts, metadatas) vector_db = cls( api_key=api_key, db_url=db_url, embedding=embedding, table_name=table_name, ) vector_db._add_vectors(embeddings, docs, ids) return vector_db def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k, filter=filter) documents = [d[0] for d in docs_and_scores] return documents def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Run similarity search with Chroma with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text with distance in float. """ embedding = self._embedding.embed_query(query) payload = { "queryVector": embedding, "column": "embedding", "size": k, } if filter: payload["filter"] = filter r = self._client.data().vector_search(self._table_name, payload=payload) if r.status_code != 200: raise Exception(f"Error running similarity search: {r.status_code} {r}") hits = r["records"] docs_and_scores = [ ( Document( page_content=hit["content"], metadata=self._extractMetadata(hit), ), hit["xata"]["score"], ) for hit in hits ] return docs_and_scores def _extractMetadata(self, record: dict) -> dict: """Extract metadata from a record. Filters out known columns.""" metadata = {} for key, val in record.items(): if key not in ["id", "content", "embedding", "xata"]: metadata[key] = val return metadata def delete( self, ids: Optional[List[str]] = None, delete_all: Optional[bool] = None, **kwargs: Any, ) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. delete_all: Delete all records in the table. 
""" if delete_all: self._delete_all() self.wait_for_indexing(ndocs=0) elif ids is not None: chunk_size = 500 for i in range(0, len(ids), chunk_size): chunk = ids[i : i + chunk_size] operations = [ {"delete": {"table": self._table_name, "id": id}} for id in chunk ] self._client.records().transaction(payload={"operations": operations}) else: raise ValueError("Either ids or delete_all must be set.") def _delete_all(self) -> None: """Delete all records in the table.""" while True: r = self._client.data().query(self._table_name, payload={"columns": ["id"]}) if r.status_code != 200: raise Exception(f"Error running query: {r.status_code} {r}") ids = [rec["id"] for rec in r["records"]] if len(ids) == 0: break operations = [ {"delete": {"table": self._table_name, "id": id}} for id in ids ] self._client.records().transaction(payload={"operations": operations}) def wait_for_indexing(self, timeout: float = 5, ndocs: int = 1) -> None: """Wait for the search index to contain a certain number of documents. Useful in tests. """ start = time.time() while True: r = self._client.data().search_table( self._table_name, payload={"query": "", "page": {"size": 0}} ) if r.status_code != 200: raise Exception(f"Error running search: {r.status_code} {r}") if r["totalCount"] == ndocs: break if time.time() - start > timeout: raise Exception("Timed out waiting for indexing to complete.") time.sleep(0.5)
[]
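A sketch of XataVectorStore.from_texts; the credentials are placeholders and, as the class docstring notes, the target table is assumed to already exist with the expected content/embedding schema. wait_for_indexing is used because search results become visible asynchronously.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore

store = XataVectorStore.from_texts(
    texts=["Xata stores the text alongside its embedding."],
    embedding=OpenAIEmbeddings(),                   # assumes OPENAI_API_KEY is set
    api_key="<XATA_API_KEY>",                       # placeholder credentials
    db_url="https://<workspace>.xata.sh/db/<db>",
    table_name="vectors",
)
store.wait_for_indexing(ndocs=1)
docs = store.similarity_search("what does Xata store?", k=1)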
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~meilisearch.py
from __future__ import annotations import uuid from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.utils import get_from_env if TYPE_CHECKING: from meilisearch import Client def _create_client( client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, ) -> Client: try: import meilisearch except ImportError: raise ImportError( "Could not import meilisearch python package. " "Please install it with `pip install meilisearch`." ) if not client: url = url or get_from_env("url", "MEILI_HTTP_ADDR") try: api_key = api_key or get_from_env("api_key", "MEILI_MASTER_KEY") except Exception: pass client = meilisearch.Client(url=url, api_key=api_key) elif not isinstance(client, meilisearch.Client): raise ValueError( f"client should be an instance of meilisearch.Client, " f"got {type(client)}" ) try: client.version() except ValueError as e: raise ValueError(f"Failed to connect to Meilisearch: {e}") return client class Meilisearch(VectorStore): """`Meilisearch` vector store. To use this, you need to have `meilisearch` python package installed, and a running Meilisearch instance. To learn more about Meilisearch Python, refer to the in-depth Meilisearch Python documentation: https://meilisearch.github.io/meilisearch-python/. See the following documentation for how to run a Meilisearch instance: https://www.meilisearch.com/docs/learn/getting_started/quick_start. Example: .. code-block:: python from langchain.vectorstores import Meilisearch from langchain.embeddings.openai import OpenAIEmbeddings import meilisearch # api_key is optional; provide it if your meilisearch instance requires it client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() vectorstore = Meilisearch( embedding=embeddings, client=client, index_name='langchain_demo', text_key='text') """ def __init__( self, embedding: Embeddings, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", text_key: str = "text", metadata_key: str = "metadata", ): """Initialize with Meilisearch client.""" client = _create_client(client=client, url=url, api_key=api_key) self._client = client self._index_name = index_name self._embedding = embedding self._text_key = text_key self._metadata_key = metadata_key def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embedding and add them to the vector store. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]]): Optional list of metadata. Defaults to None. ids Optional[List[str]]: Optional list of IDs. Defaults to None. Returns: List[str]: List of IDs of the texts added to the vectorstore. 
""" texts = list(texts) # Embed and create the documents docs = [] if ids is None: ids = [uuid.uuid4().hex for _ in texts] if metadatas is None: metadatas = [{} for _ in texts] embedding_vectors = self._embedding.embed_documents(texts) for i, text in enumerate(texts): id = ids[i] metadata = metadatas[i] metadata[self._text_key] = text embedding = embedding_vectors[i] docs.append( { "id": id, "_vectors": embedding, f"{self._metadata_key}": metadata, } ) # Send to Meilisearch self._client.index(str(self._index_name)).add_documents(docs) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to the query. Args: query (str): Query text for which to find similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. """ docs_and_scores = self.similarity_search_with_score( query=query, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to the query, along with scores. Args: query (str): Query text for which to find similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. """ _query = self._embedding.embed_query(query) docs = self.similarity_search_by_vector_with_scores( embedding=_query, k=k, filter=filter, kwargs=kwargs, ) return docs def similarity_search_by_vector_with_scores( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. """ docs = [] results = self._client.index(str(self._index_name)).search( "", {"vector": embedding, "limit": k, "filter": filter} ) for result in results["hits"]: metadata = result[self._metadata_key] if self._text_key in metadata: text = metadata.pop(self._text_key) semantic_score = result["_semanticScore"] docs.append( (Document(page_content=text, metadata=metadata), semantic_score) ) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. 
""" docs = self.similarity_search_by_vector_with_scores( embedding=embedding, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs] @classmethod def from_texts( cls: Type[Meilisearch], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", ids: Optional[List[str]] = None, text_key: Optional[str] = "text", metadata_key: Optional[str] = "metadata", **kwargs: Any, ) -> Meilisearch: """Construct Meilisearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Meilisearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Meilisearch from langchain.embeddings import OpenAIEmbeddings import meilisearch # The environment should be the one specified next to the API key # in your Meilisearch console client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() docsearch = Meilisearch.from_texts( client=client, embeddings=embeddings, ) """ client = _create_client(client=client, url=url, api_key=api_key) vectorstore = cls( embedding=embedding, client=client, index_name=index_name, ) vectorstore.add_texts( texts=texts, metadatas=metadatas, ids=ids, text_key=text_key, metadata_key=metadata_key, ) return vectorstore
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~cache~test_momento_cache.py
"""Test Momento cache functionality. To run tests, set the environment variable MOMENTO_AUTH_TOKEN to a valid Momento auth token. This can be obtained by signing up for a free Momento account at https://gomomento.com/. """ from __future__ import annotations import uuid from datetime import timedelta from typing import Iterator import pytest from langchain_core.schema import Generation, LLMResult from langchain.cache import MomentoCache from langchain.globals import set_llm_cache from tests.unit_tests.llms.fake_llm import FakeLLM def random_string() -> str: return str(uuid.uuid4()) @pytest.fixture(scope="module") def momento_cache() -> Iterator[MomentoCache]: from momento import CacheClient, Configurations, CredentialProvider cache_name = f"langchain-test-cache-{random_string()}" client = CacheClient( Configurations.Laptop.v1(), CredentialProvider.from_environment_variable("MOMENTO_API_KEY"), default_ttl=timedelta(seconds=30), ) try: llm_cache = MomentoCache(client, cache_name) set_llm_cache(llm_cache) yield llm_cache finally: client.delete_cache(cache_name) def test_invalid_ttl() -> None: from momento import CacheClient, Configurations, CredentialProvider client = CacheClient( Configurations.Laptop.v1(), CredentialProvider.from_environment_variable("MOMENTO_API_KEY"), default_ttl=timedelta(seconds=30), ) with pytest.raises(ValueError): MomentoCache(client, cache_name=random_string(), ttl=timedelta(seconds=-1)) def test_momento_cache_miss(momento_cache: MomentoCache) -> None: llm = FakeLLM() stub_llm_output = LLMResult(generations=[[Generation(text="foo")]]) assert llm.generate([random_string()]) == stub_llm_output @pytest.mark.parametrize( "prompts, generations", [ # Single prompt, single generation ([random_string()], [[random_string()]]), # Single prompt, multiple generations ([random_string()], [[random_string(), random_string()]]), # Single prompt, multiple generations ([random_string()], [[random_string(), random_string(), random_string()]]), # Multiple prompts, multiple generations ( [random_string(), random_string()], [[random_string()], [random_string(), random_string()]], ), ], ) def test_momento_cache_hit( momento_cache: MomentoCache, prompts: list[str], generations: list[list[str]] ) -> None: llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) llm_generations = [ [ Generation(text=generation, generation_info=params) for generation in prompt_i_generations ] for prompt_i_generations in generations ] for prompt_i, llm_generations_i in zip(prompts, llm_generations): momento_cache.update(prompt_i, llm_string, llm_generations_i) assert llm.generate(prompts) == LLMResult( generations=llm_generations, llm_output={} )
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~document_loaders~parsers~test_docai.py
"""Test Google Cloud DocAI parser. You need to create a processor and enable the DocAI before running this test: https://cloud.google.com/document-ai/docs/setup """ import os from langchain_core.schema import Document from langchain.document_loaders.blob_loaders import Blob from langchain.document_loaders.parsers import DocAIParser def test_docai_parser() -> None: """In order to run this test, you should provide a processor name, output path for DocAI to store parsing results, and an input blob path to parse. Example: export BLOB_PATH=gs://... export GCS_OUTPUT_PATH=gs://... export PROCESSOR_NAME=projects/.../locations/us/processors/... """ blob_path = os.environ["BLOB_PATH"] gcs_output_path = os.environ["GCS_OUTPUT_PATH"] processor_name = os.environ["PROCESSOR_NAME"] parser = DocAIParser( location="us", processor_name=processor_name, gcs_output_path=gcs_output_path ) blob = Blob(path=blob_path) documents = list(parser.lazy_parse(blob)) assert len(documents) > 0 for i, doc in enumerate(documents): assert isinstance(doc, Document) assert doc.page_content assert doc.metadata["source"] == blob_path assert doc.metadata["page"] == i + 1
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~agents~output_parsers~test_xml.py
from langchain_core.schema.agent import AgentAction, AgentFinish

from langchain.agents.output_parsers.xml import XMLAgentOutputParser


def test_tool_usage() -> None:
    parser = XMLAgentOutputParser()
    # Test when final closing </tool_input> is included
    _input = """<tool>search</tool><tool_input>foo</tool_input>"""
    output = parser.invoke(_input)
    expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
    assert output == expected_output
    # Test when final closing </tool_input> is NOT included
    # This happens when it's used as a stop token
    _input = """<tool>search</tool><tool_input>foo"""
    output = parser.invoke(_input)
    expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
    assert output == expected_output


def test_finish() -> None:
    parser = XMLAgentOutputParser()
    # Test when final closing </final_answer> is included
    _input = """<final_answer>bar</final_answer>"""
    output = parser.invoke(_input)
    expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
    assert output == expected_output
    # Test when final closing </final_answer> is NOT included
    # This happens when it's used as a stop token
    _input = """<final_answer>bar"""
    output = parser.invoke(_input)
    expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
    assert output == expected_output
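As a quick reference, the two output shapes the parser accepts can also be exercised directly; this is a small illustrative sketch, not part of the test module, and the tool name and inputs are made up.

# Illustrative run of the parser on both output shapes covered by the tests.
from langchain.agents.output_parsers.xml import XMLAgentOutputParser

parser = XMLAgentOutputParser()

# Tool call: <tool>name</tool><tool_input>input</tool_input>
action = parser.invoke("<tool>search</tool><tool_input>weather in SF</tool_input>")
print(action.tool, action.tool_input)

# Final answer: <final_answer>text</final_answer>
finish = parser.invoke("<final_answer>It is sunny.</final_answer>")
print(finish.return_values["output"])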
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~prompts~example_selector~semantic_similarity.py
from langchain_core.prompts.example_selector.semantic_similarity import ( MaxMarginalRelevanceExampleSelector, SemanticSimilarityExampleSelector, sorted_values, ) __all__ = [ "sorted_values", "SemanticSimilarityExampleSelector", "MaxMarginalRelevanceExampleSelector", ]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~schema~exceptions.py
from langchain_core.schema.exceptions import LangChainException __all__ = ["LangChainException"]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_models~baidu_qianfan_endpoint.py
from __future__ import annotations import logging from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, cast from langchain_core.pydantic_v1 import Field, root_validator from langchain_core.schema import ChatGeneration, ChatResult from langchain_core.schema.messages import ( AIMessage, AIMessageChunk, BaseMessage, ChatMessage, FunctionMessage, HumanMessage, SystemMessage, ) from langchain_core.schema.output import ChatGenerationChunk from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def convert_message_to_dict(message: BaseMessage) -> dict: """Convert a message to a dictionary that can be passed to the API.""" message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} if "function_call" in message.additional_kwargs: message_dict["function_call"] = message.additional_kwargs["function_call"] # If function call only, content is None not empty string if message_dict["content"] == "": message_dict["content"] = None elif isinstance(message, FunctionMessage): message_dict = { "role": "function", "content": message.content, "name": message.name, } else: raise TypeError(f"Got unknown type {message}") return message_dict def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage: content = _dict.get("result", "") or "" if _dict.get("function_call"): additional_kwargs = {"function_call": dict(_dict["function_call"])} if "thoughts" in additional_kwargs["function_call"]: # align to api sample, which affects the llm function_call output additional_kwargs["function_call"].pop("thoughts") else: additional_kwargs = {} return AIMessage( content=content, additional_kwargs={**_dict.get("body", {}), **additional_kwargs}, ) class QianfanChatEndpoint(BaseChatModel): """Baidu Qianfan chat models. To use, you should have the ``qianfan`` python package installed, and the environment variable ``qianfan_ak`` and ``qianfan_sk`` set with your API key and Secret Key. ak, sk are required parameters which you could get from https://cloud.baidu.com/product/wenxinworkshop Example: .. code-block:: python from langchain.chat_models import QianfanChatEndpoint qianfan_chat = QianfanChatEndpoint(model="ERNIE-Bot", endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk") """ model_kwargs: Dict[str, Any] = Field(default_factory=dict) client: Any qianfan_ak: Optional[str] = None qianfan_sk: Optional[str] = None streaming: Optional[bool] = False """Whether to stream the results or not.""" request_timeout: Optional[int] = 60 """request timeout for chat http requests""" top_p: Optional[float] = 0.8 temperature: Optional[float] = 0.95 penalty_score: Optional[float] = 1 """Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo. In the case of other model, passing these params will not affect the result. """ model: str = "ERNIE-Bot-turbo" """Model name. you could get from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu preset models are mapping to an endpoint. `model` will be ignored if `endpoint` is set. Default is ERNIE-Bot-turbo. 
""" endpoint: Optional[str] = None """Endpoint of the Qianfan LLM, required if custom model used.""" @root_validator() def validate_enviroment(cls, values: Dict) -> Dict: values["qianfan_ak"] = get_from_dict_or_env( values, "qianfan_ak", "QIANFAN_AK", ) values["qianfan_sk"] = get_from_dict_or_env( values, "qianfan_sk", "QIANFAN_SK", ) params = { "ak": values["qianfan_ak"], "sk": values["qianfan_sk"], "model": values["model"], "stream": values["streaming"], } if values["endpoint"] is not None and values["endpoint"] != "": params["endpoint"] = values["endpoint"] try: import qianfan values["client"] = qianfan.ChatCompletion(**params) except ImportError: raise ValueError( "qianfan package not found, please install it with " "`pip install qianfan`" ) return values @property def _identifying_params(self) -> Dict[str, Any]: return { **{"endpoint": self.endpoint, "model": self.model}, **super()._identifying_params, } @property def _llm_type(self) -> str: """Return type of chat_model.""" return "baidu-qianfan-chat" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Qianfan API.""" normal_params = { "model": self.model, "endpoint": self.endpoint, "stream": self.streaming, "request_timeout": self.request_timeout, "top_p": self.top_p, "temperature": self.temperature, "penalty_score": self.penalty_score, } return {**normal_params, **self.model_kwargs} def _convert_prompt_msg_params( self, messages: List[BaseMessage], **kwargs: Any, ) -> Dict[str, Any]: """ Converts a list of messages into a dictionary containing the message content and default parameters. Args: messages (List[BaseMessage]): The list of messages. **kwargs (Any): Optional arguments to add additional parameters to the resulting dictionary. Returns: Dict[str, Any]: A dictionary containing the message content and default parameters. """ messages_dict: Dict[str, Any] = { "messages": [ convert_message_to_dict(m) for m in messages if not isinstance(m, SystemMessage) ] } for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]: if "system" not in messages_dict: messages_dict["system"] = "" messages_dict["system"] += cast(str, messages[i].content) + "\n" return { **messages_dict, **self._default_params, **kwargs, } def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Call out to an qianfan models endpoint for each generation with a prompt. Args: messages: The messages to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. 
code-block:: python response = qianfan_model("Tell me a joke.") """ if self.streaming: completion = "" for chunk in self._stream(messages, stop, run_manager, **kwargs): completion += chunk.text lc_msg = AIMessage(content=completion, additional_kwargs={}) gen = ChatGeneration( message=lc_msg, generation_info=dict(finish_reason="stop"), ) return ChatResult( generations=[gen], llm_output={"token_usage": {}, "model_name": self.model}, ) params = self._convert_prompt_msg_params(messages, **kwargs) response_payload = self.client.do(**params) lc_msg = _convert_dict_to_message(response_payload) gen = ChatGeneration( message=lc_msg, generation_info={ "finish_reason": "stop", **response_payload.get("body", {}), }, ) token_usage = response_payload.get("usage", {}) llm_output = {"token_usage": token_usage, "model_name": self.model} return ChatResult(generations=[gen], llm_output=llm_output) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: completion = "" token_usage = {} async for chunk in self._astream(messages, stop, run_manager, **kwargs): completion += chunk.text lc_msg = AIMessage(content=completion, additional_kwargs={}) gen = ChatGeneration( message=lc_msg, generation_info=dict(finish_reason="stop"), ) return ChatResult( generations=[gen], llm_output={"token_usage": {}, "model_name": self.model}, ) params = self._convert_prompt_msg_params(messages, **kwargs) response_payload = await self.client.ado(**params) lc_msg = _convert_dict_to_message(response_payload) generations = [] gen = ChatGeneration( message=lc_msg, generation_info={ "finish_reason": "stop", **response_payload.get("body", {}), }, ) generations.append(gen) token_usage = response_payload.get("usage", {}) llm_output = {"token_usage": token_usage, "model_name": self.model} return ChatResult(generations=generations, llm_output=llm_output) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: params = self._convert_prompt_msg_params(messages, **kwargs) for res in self.client.do(**params): if res: msg = _convert_dict_to_message(res) chunk = ChatGenerationChunk( text=res["result"], message=AIMessageChunk( content=msg.content, role="assistant", additional_kwargs=msg.additional_kwargs, ), ) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk) async def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: params = self._convert_prompt_msg_params(messages, **kwargs) async for res in await self.client.ado(**params): if res: msg = _convert_dict_to_message(res) chunk = ChatGenerationChunk( text=res["result"], message=AIMessageChunk( content=msg.content, role="assistant", additional_kwargs=msg.additional_kwargs, ), ) yield chunk if run_manager: await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
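A minimal sketch of calling the endpoint defined above, following the class docstring; the ak/sk values are placeholders, and with `streaming=True` the reply is accumulated from streamed chunks instead of a single blocking response.

# Usage sketch for QianfanChatEndpoint above (ak/sk are placeholders).
from langchain_core.schema.messages import HumanMessage

from langchain.chat_models import QianfanChatEndpoint

chat = QianfanChatEndpoint(
    model="ERNIE-Bot-turbo",
    qianfan_ak="your_ak",
    qianfan_sk="your_sk",
)

# A single blocking call returning an AIMessage.
response = chat([HumanMessage(content="Tell me a joke.")])
print(response.content)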
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~tiledb.py
"""Wrapper around TileDB vector database.""" from __future__ import annotations import pickle import random import sys from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple import numpy as np from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.vectorstores.utils import maximal_marginal_relevance INDEX_METRICS = frozenset(["euclidean"]) DEFAULT_METRIC = "euclidean" DOCUMENTS_ARRAY_NAME = "documents" VECTOR_INDEX_NAME = "vectors" MAX_UINT64 = np.iinfo(np.dtype("uint64")).max MAX_FLOAT_32 = np.finfo(np.dtype("float32")).max MAX_FLOAT = sys.float_info.max def dependable_tiledb_import() -> Any: """Import tiledb-vector-search if available, otherwise raise error.""" try: import tiledb as tiledb import tiledb.vector_search as tiledb_vs except ImportError: raise ValueError( "Could not import tiledb-vector-search python package. " "Please install it with `conda install -c tiledb tiledb-vector-search` " "or `pip install tiledb-vector-search`" ) return tiledb_vs, tiledb def get_vector_index_uri_from_group(group: Any) -> str: return group[VECTOR_INDEX_NAME].uri def get_documents_array_uri_from_group(group: Any) -> str: return group[DOCUMENTS_ARRAY_NAME].uri def get_vector_index_uri(uri: str) -> str: return f"{uri}/{VECTOR_INDEX_NAME}" def get_documents_array_uri(uri: str) -> str: return f"{uri}/{DOCUMENTS_ARRAY_NAME}" class TileDB(VectorStore): """Wrapper around TileDB vector database. To use, you should have the ``tiledb-vector-search`` python package installed. Example: .. code-block:: python from langchain import TileDB embeddings = OpenAIEmbeddings() db = TileDB(embeddings, index_uri, metric) """ def __init__( self, embedding: Embeddings, index_uri: str, metric: str, *, vector_index_uri: str = "", docs_array_uri: str = "", config: Optional[Mapping[str, Any]] = None, timestamp: Any = None, **kwargs: Any, ): """Initialize with necessary components.""" self.embedding = embedding self.embedding_function = embedding.embed_query self.index_uri = index_uri self.metric = metric self.config = config tiledb_vs, tiledb = dependable_tiledb_import() with tiledb.scope_ctx(ctx_or_config=config): index_group = tiledb.Group(self.index_uri, "r") self.vector_index_uri = ( vector_index_uri if vector_index_uri != "" else get_vector_index_uri_from_group(index_group) ) self.docs_array_uri = ( docs_array_uri if docs_array_uri != "" else get_documents_array_uri_from_group(index_group) ) index_group.close() group = tiledb.Group(self.vector_index_uri, "r") self.index_type = group.meta.get("index_type") group.close() self.timestamp = timestamp if self.index_type == "FLAT": self.vector_index = tiledb_vs.flat_index.FlatIndex( uri=self.vector_index_uri, config=self.config, timestamp=self.timestamp, **kwargs, ) elif self.index_type == "IVF_FLAT": self.vector_index = tiledb_vs.ivf_flat_index.IVFFlatIndex( uri=self.vector_index_uri, config=self.config, timestamp=self.timestamp, **kwargs, ) @property def embeddings(self) -> Optional[Embeddings]: return self.embedding def process_index_results( self, ids: List[int], scores: List[float], *, k: int = 4, filter: Optional[Dict[str, Any]] = None, score_threshold: float = MAX_FLOAT, ) -> List[Tuple[Document, float]]: """Turns TileDB results into a list of documents and scores. Args: ids: List of indices of the documents in the index. scores: List of distances of the documents in the index. k: Number of Documents to return. Defaults to 4. 
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None. score_threshold: Optional, a floating point value to filter the resulting set of retrieved docs Returns: List of Documents and scores. """ tiledb_vs, tiledb = dependable_tiledb_import() docs = [] docs_array = tiledb.open( self.docs_array_uri, "r", timestamp=self.timestamp, config=self.config ) for idx, score in zip(ids, scores): if idx == 0 and score == 0: continue if idx == MAX_UINT64 and score == MAX_FLOAT_32: continue doc = docs_array[idx] if doc is None or len(doc["text"]) == 0: raise ValueError(f"Could not find document for id {idx}, got {doc}") pickled_metadata = doc.get("metadata") result_doc = Document(page_content=str(doc["text"][0])) if pickled_metadata is not None: metadata = pickle.loads( np.array(pickled_metadata.tolist()).astype(np.uint8).tobytes() ) result_doc.metadata = metadata if filter is not None: filter = { key: [value] if not isinstance(value, list) else value for key, value in filter.items() } if all( result_doc.metadata.get(key) in value for key, value in filter.items() ): docs.append((result_doc, score)) else: docs.append((result_doc, score)) docs_array.close() docs = [(doc, score) for doc, score in docs if score <= score_threshold] return docs[:k] def similarity_search_with_score_by_vector( self, embedding: List[float], *, k: int = 4, filter: Optional[Dict[str, Any]] = None, fetch_k: int = 20, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: embedding: Embedding vector to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. **kwargs: kwargs to be passed to similarity search. Can include: nprobe: Optional, number of partitions to check if using IVF_FLAT index score_threshold: Optional, a floating point value to filter the resulting set of retrieved docs Returns: List of documents most similar to the query text and distance in float for each. Lower score represents more similarity. """ if "score_threshold" in kwargs: score_threshold = kwargs.pop("score_threshold") else: score_threshold = MAX_FLOAT d, i = self.vector_index.query( np.array([np.array(embedding).astype(np.float32)]).astype(np.float32), k=k if filter is None else fetch_k, **kwargs, ) return self.process_index_results( ids=i[0], scores=d[0], filter=filter, k=k, score_threshold=score_threshold ) def similarity_search_with_score( self, query: str, *, k: int = 4, filter: Optional[Dict[str, Any]] = None, fetch_k: int = 20, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of documents most similar to the query text with Distance as float. Lower score represents more similarity. """ embedding = self.embedding_function(query) docs = self.similarity_search_with_score_by_vector( embedding, k=k, filter=filter, fetch_k=fetch_k, **kwargs, ) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, fetch_k: int = 20, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. 
Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding, k=k, filter=filter, fetch_k=fetch_k, **kwargs, ) return [doc for doc, _ in docs_and_scores] def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, Any]] = None, fetch_k: int = 20, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score( query, k=k, filter=filter, fetch_k=fetch_k, **kwargs ) return [doc for doc, _ in docs_and_scores] def max_marginal_relevance_search_with_score_by_vector( self, embedding: List[float], *, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and their similarity scores selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch before filtering to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents and similarity scores selected by maximal marginal relevance and score for each. """ if "score_threshold" in kwargs: score_threshold = kwargs.pop("score_threshold") else: score_threshold = MAX_FLOAT scores, indices = self.vector_index.query( np.array([np.array(embedding).astype(np.float32)]).astype(np.float32), k=fetch_k if filter is None else fetch_k * 2, **kwargs, ) results = self.process_index_results( ids=indices[0], scores=scores[0], filter=filter, k=fetch_k if filter is None else fetch_k * 2, score_threshold=score_threshold, ) embeddings = [ self.embedding.embed_documents([doc.page_content])[0] for doc, _ in results ] mmr_selected = maximal_marginal_relevance( np.array([embedding], dtype=np.float32), embeddings, k=k, lambda_mult=lambda_mult, ) docs_and_scores = [] for i in mmr_selected: docs_and_scores.append(results[i]) return docs_and_scores def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch before filtering to pass to MMR algorithm. 
lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return [doc for doc, _ in docs_and_scores] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch before filtering (if needed) to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self.embedding_function(query) docs = self.max_marginal_relevance_search_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return docs @classmethod def create( cls, index_uri: str, index_type: str, dimensions: int, vector_type: np.dtype, *, metadatas: bool = True, config: Optional[Mapping[str, Any]] = None, ) -> None: tiledb_vs, tiledb = dependable_tiledb_import() with tiledb.scope_ctx(ctx_or_config=config): try: tiledb.group_create(index_uri) except tiledb.TileDBError as err: raise err group = tiledb.Group(index_uri, "w") vector_index_uri = get_vector_index_uri(group.uri) docs_uri = get_documents_array_uri(group.uri) if index_type == "FLAT": tiledb_vs.flat_index.create( uri=vector_index_uri, dimensions=dimensions, vector_type=vector_type, config=config, ) elif index_type == "IVF_FLAT": tiledb_vs.ivf_flat_index.create( uri=vector_index_uri, dimensions=dimensions, vector_type=vector_type, config=config, ) group.add(vector_index_uri, name=VECTOR_INDEX_NAME) # Create TileDB array to store Documents # TODO add a Document store API to tiledb-vector-search to allow storing # different types of objects and metadata in a more generic way. dim = tiledb.Dim( name="id", domain=(0, MAX_UINT64 - 1), dtype=np.dtype(np.uint64), ) dom = tiledb.Domain(dim) text_attr = tiledb.Attr(name="text", dtype=np.dtype("U1"), var=True) attrs = [text_attr] if metadatas: metadata_attr = tiledb.Attr(name="metadata", dtype=np.uint8, var=True) attrs.append(metadata_attr) schema = tiledb.ArraySchema( domain=dom, sparse=True, allows_duplicates=False, attrs=attrs, ) tiledb.Array.create(docs_uri, schema) group.add(docs_uri, name=DOCUMENTS_ARRAY_NAME) group.close() @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, index_uri: str, *, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, metric: str = DEFAULT_METRIC, index_type: str = "FLAT", config: Optional[Mapping[str, Any]] = None, index_timestamp: int = 0, **kwargs: Any, ) -> TileDB: if metric not in INDEX_METRICS: raise ValueError( ( f"Unsupported distance metric: {metric}. 
" f"Expected one of {list(INDEX_METRICS)}" ) ) tiledb_vs, tiledb = dependable_tiledb_import() input_vectors = np.array(embeddings).astype(np.float32) cls.create( index_uri=index_uri, index_type=index_type, dimensions=input_vectors.shape[1], vector_type=input_vectors.dtype, metadatas=metadatas is not None, config=config, ) with tiledb.scope_ctx(ctx_or_config=config): if not embeddings: raise ValueError("embeddings must be provided to build a TileDB index") vector_index_uri = get_vector_index_uri(index_uri) docs_uri = get_documents_array_uri(index_uri) if ids is None: ids = [str(random.randint(0, MAX_UINT64 - 1)) for _ in texts] external_ids = np.array(ids).astype(np.uint64) tiledb_vs.ingestion.ingest( index_type=index_type, index_uri=vector_index_uri, input_vectors=input_vectors, external_ids=external_ids, index_timestamp=index_timestamp if index_timestamp != 0 else None, config=config, **kwargs, ) with tiledb.open(docs_uri, "w") as A: if external_ids is None: external_ids = np.zeros(len(texts), dtype=np.uint64) for i in range(len(texts)): external_ids[i] = i data = {} data["text"] = np.array(texts) if metadatas is not None: metadata_attr = np.empty([len(metadatas)], dtype=object) i = 0 for metadata in metadatas: metadata_attr[i] = np.frombuffer( pickle.dumps(metadata), dtype=np.uint8 ) i += 1 data["metadata"] = metadata_attr A[external_ids] = data return cls( embedding=embedding, index_uri=index_uri, metric=metric, config=config, **kwargs, ) def delete( self, ids: Optional[List[str]] = None, timestamp: int = 0, **kwargs: Any ) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. timestamp: Optional timestamp to delete with. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ external_ids = np.array(ids).astype(np.uint64) self.vector_index.delete_batch( external_ids=external_ids, timestamp=timestamp if timestamp != 0 else None ) return True def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, timestamp: int = 0, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional ids of each text object. timestamp: Optional timestamp to write new texts with. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. 
""" tiledb_vs, tiledb = dependable_tiledb_import() embeddings = self.embedding.embed_documents(list(texts)) if ids is None: ids = [str(random.randint(0, MAX_UINT64 - 1)) for _ in texts] external_ids = np.array(ids).astype(np.uint64) vectors = np.empty((len(embeddings)), dtype="O") for i in range(len(embeddings)): vectors[i] = np.array(embeddings[i], dtype=np.float32) self.vector_index.update_batch( vectors=vectors, external_ids=external_ids, timestamp=timestamp if timestamp != 0 else None, ) docs = {} docs["text"] = np.array(texts) if metadatas is not None: metadata_attr = np.empty([len(metadatas)], dtype=object) i = 0 for metadata in metadatas: metadata_attr[i] = np.frombuffer(pickle.dumps(metadata), dtype=np.uint8) i += 1 docs["metadata"] = metadata_attr docs_array = tiledb.open( self.docs_array_uri, "w", timestamp=timestamp if timestamp != 0 else None, config=self.config, ) docs_array[external_ids] = docs docs_array.close() return ids @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, metric: str = DEFAULT_METRIC, index_uri: str = "/tmp/tiledb_array", index_type: str = "FLAT", config: Optional[Mapping[str, Any]] = None, index_timestamp: int = 0, **kwargs: Any, ) -> TileDB: """Construct a TileDB index from raw documents. Args: texts: List of documents to index. embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. ids: Optional ids of each text object. metric: Metric to use for indexing. Defaults to "euclidean". index_uri: The URI to write the TileDB arrays index_type: Optional, Vector index type ("FLAT", IVF_FLAT") config: Optional, TileDB config index_timestamp: Optional, timestamp to write new texts with. Example: .. code-block:: python from langchain import TileDB from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() index = TileDB.from_texts(texts, embeddings) """ embeddings = [] embeddings = embedding.embed_documents(texts) return cls.__from( texts=texts, embeddings=embeddings, embedding=embedding, metadatas=metadatas, ids=ids, metric=metric, index_uri=index_uri, index_type=index_type, config=config, index_timestamp=index_timestamp, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, index_uri: str, *, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, metric: str = DEFAULT_METRIC, index_type: str = "FLAT", config: Optional[Mapping[str, Any]] = None, index_timestamp: int = 0, **kwargs: Any, ) -> TileDB: """Construct TileDB index from embeddings. Args: text_embeddings: List of tuples of (text, embedding) embedding: Embedding function to use. index_uri: The URI to write the TileDB arrays metadatas: List of metadata dictionaries to associate with documents. metric: Optional, Metric to use for indexing. Defaults to "euclidean". index_type: Optional, Vector index type ("FLAT", IVF_FLAT") config: Optional, TileDB config index_timestamp: Optional, timestamp to write new texts with. Example: .. 
code-block:: python from langchain import TileDB from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) db = TileDB.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts=texts, embeddings=embeddings, embedding=embedding, metadatas=metadatas, ids=ids, metric=metric, index_uri=index_uri, index_type=index_type, config=config, index_timestamp=index_timestamp, **kwargs, ) @classmethod def load( cls, index_uri: str, embedding: Embeddings, *, metric: str = DEFAULT_METRIC, config: Optional[Mapping[str, Any]] = None, timestamp: Any = None, **kwargs: Any, ) -> TileDB: """Load a TileDB index from a URI. Args: index_uri: The URI of the TileDB vector index. embedding: Embeddings to use when generating queries. metric: Optional, Metric to use for indexing. Defaults to "euclidean". config: Optional, TileDB config timestamp: Optional, timestamp to use for opening the arrays. """ return cls( embedding=embedding, index_uri=index_uri, metric=metric, config=config, timestamp=timestamp, **kwargs, ) def consolidate_updates(self, **kwargs: Any) -> None: self.vector_index = self.vector_index.consolidate_updates(**kwargs)
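To tie the pieces above together, a minimal indexing-and-querying sketch follows; the index URI, sample texts, and metadata are placeholder assumptions, and `tiledb-vector-search` must be installed.

# Build a FLAT index on disk and query it (placeholder URI and sample texts).
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.tiledb import TileDB

embeddings = OpenAIEmbeddings()
texts = [
    "TileDB stores vectors and documents in array groups.",
    "FLAT and IVF_FLAT are the supported index types.",
]

db = TileDB.from_texts(
    texts,
    embeddings,
    metadatas=[{"topic": "storage"}, {"topic": "indexing"}],
    index_uri="/tmp/tiledb_langchain_demo",
    index_type="FLAT",
)

# Nearest-neighbour search, then a maximal-marginal-relevance search.
print(db.similarity_search("which index types exist?", k=1))
print(db.max_marginal_relevance_search("which index types exist?", k=1, fetch_k=2))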
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~chat_message_histories~rocksetdb.py
from datetime import datetime from time import sleep from typing import Any, Callable, List, Union from uuid import uuid4 from langchain_core.schema import BaseChatMessageHistory from langchain_core.schema.messages import ( BaseMessage, _message_to_dict, messages_from_dict, ) class RocksetChatMessageHistory(BaseChatMessageHistory): """Uses Rockset to store chat messages. To use, ensure that the `rockset` python package installed. Example: .. code-block:: python from langchain.memory.chat_message_histories import ( RocksetChatMessageHistory ) from rockset import RocksetClient history = RocksetChatMessageHistory( session_id="MySession", client=RocksetClient(), collection="langchain_demo", sync=True ) history.add_user_message("hi!") history.add_ai_message("whats up?") print(history.messages) """ # You should set these values based on your VI. # These values are configured for the typical # free VI. Read more about VIs here: # https://rockset.com/docs/instances SLEEP_INTERVAL_MS: int = 5 ADD_TIMEOUT_MS: int = 5000 CREATE_TIMEOUT_MS: int = 20000 def _wait_until(self, method: Callable, timeout: int, **method_params: Any) -> None: """Sleeps until meth() evaluates to true. Passes kwargs into meth. """ start = datetime.now() while not method(**method_params): curr = datetime.now() if (curr - start).total_seconds() * 1000 > timeout: raise TimeoutError(f"{method} timed out at {timeout} ms") sleep(RocksetChatMessageHistory.SLEEP_INTERVAL_MS / 1000) def _query(self, query: str, **query_params: Any) -> List[Any]: """Executes an SQL statement and returns the result Args: - query: The SQL string - **query_params: Parameters to pass into the query """ return self.client.sql(query, params=query_params).results def _create_collection(self) -> None: """Creates a collection for this message history""" self.client.Collections.create_s3_collection( name=self.collection, workspace=self.workspace ) def _collection_exists(self) -> bool: """Checks whether a collection exists for this message history""" try: self.client.Collections.get(collection=self.collection) except self.rockset.exceptions.NotFoundException: return False return True def _collection_is_ready(self) -> bool: """Checks whether the collection for this message history is ready to be queried """ return ( self.client.Collections.get(collection=self.collection).data.status == "READY" ) def _document_exists(self) -> bool: return ( len( self._query( f""" SELECT 1 FROM {self.location} WHERE _id=:session_id LIMIT 1 """, session_id=self.session_id, ) ) != 0 ) def _wait_until_collection_created(self) -> None: """Sleeps until the collection for this message history is ready to be queried """ self._wait_until( lambda: self._collection_is_ready(), RocksetChatMessageHistory.CREATE_TIMEOUT_MS, ) def _wait_until_message_added(self, message_id: str) -> None: """Sleeps until a message is added to the messages list""" self._wait_until( lambda message_id: len( self._query( f""" SELECT * FROM UNNEST(( SELECT {self.messages_key} FROM {self.location} WHERE _id = :session_id )) AS message WHERE message.data.additional_kwargs.id = :message_id LIMIT 1 """, session_id=self.session_id, message_id=message_id, ), ) != 0, RocksetChatMessageHistory.ADD_TIMEOUT_MS, message_id=message_id, ) def _create_empty_doc(self) -> None: """Creates or replaces a document for this message history with no messages""" self.client.Documents.add_documents( collection=self.collection, workspace=self.workspace, data=[{"_id": self.session_id, self.messages_key: []}], ) def __init__( self, session_id: 
str, client: Any, collection: str, workspace: str = "commons", messages_key: str = "messages", sync: bool = False, message_uuid_method: Callable[[], Union[str, int]] = lambda: str(uuid4()), ) -> None: """Constructs a new RocksetChatMessageHistory. Args: - session_id: The ID of the chat session - client: The RocksetClient object to use to query - collection: The name of the collection to use to store chat messages. If a collection with the given name does not exist in the workspace, it is created. - workspace: The workspace containing `collection`. Defaults to `"commons"` - messages_key: The DB column containing message history. Defaults to `"messages"` - sync: Whether to wait for messages to be added. Defaults to `False`. NOTE: setting this to `True` will slow down performance. - message_uuid_method: The method that generates message IDs. If set, all messages will have an `id` field within the `additional_kwargs` property. If this param is not set and `sync` is `False`, message IDs will not be created. If this param is not set and `sync` is `True`, the `uuid.uuid4` method will be used to create message IDs. """ try: import rockset except ImportError: raise ImportError( "Could not import rockset client python package. " "Please install it with `pip install rockset`." ) if not isinstance(client, rockset.RocksetClient): raise ValueError( f"client should be an instance of rockset.RocksetClient, " f"got {type(client)}" ) self.session_id = session_id self.client = client self.collection = collection self.workspace = workspace self.location = f'"{self.workspace}"."{self.collection}"' self.rockset = rockset self.messages_key = messages_key self.message_uuid_method = message_uuid_method self.sync = sync try: self.client.set_application("langchain") except AttributeError: # ignore pass if not self._collection_exists(): self._create_collection() self._wait_until_collection_created() self._create_empty_doc() elif not self._document_exists(): self._create_empty_doc() @property def messages(self) -> List[BaseMessage]: # type: ignore """Messages in this chat history.""" return messages_from_dict( self._query( f""" SELECT * FROM UNNEST (( SELECT "{self.messages_key}" FROM {self.location} WHERE _id = :session_id )) """, session_id=self.session_id, ) ) def add_message(self, message: BaseMessage) -> None: """Add a Message object to the history. Args: message: A BaseMessage object to store. """ if self.sync and "id" not in message.additional_kwargs: message.additional_kwargs["id"] = self.message_uuid_method() self.client.Documents.patch_documents( collection=self.collection, workspace=self.workspace, data=[ self.rockset.model.patch_document.PatchDocument( id=self.session_id, patch=[ self.rockset.model.patch_operation.PatchOperation( op="ADD", path=f"/{self.messages_key}/-", value=_message_to_dict(message), ) ], ) ], ) if self.sync: self._wait_until_message_added(message.additional_kwargs["id"]) def clear(self) -> None: """Removes all messages from the chat history""" self._create_empty_doc() if self.sync: self._wait_until( lambda: not self.messages, RocksetChatMessageHistory.ADD_TIMEOUT_MS, )
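The history class above can also back a standard LangChain memory object; the sketch below mirrors the class docstring, with the session id and collection name as placeholders and the Rockset client configured as in that docstring.

# Plug the Rockset-backed history into a ConversationBufferMemory (placeholders below).
from rockset import RocksetClient

from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import RocksetChatMessageHistory

history = RocksetChatMessageHistory(
    session_id="user-42",
    client=RocksetClient(),  # configured via credentials/environment, as in the docstring
    collection="langchain_demo",
    sync=True,
)

# The memory object reads and writes messages through the shared Rockset collection.
memory = ConversationBufferMemory(chat_memory=history, return_messages=True)
memory.chat_memory.add_user_message("hi!")
memory.chat_memory.add_ai_message("whats up?")
print(memory.load_memory_variables({}))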
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~token_buffer.py
from typing import Any, Dict, List from langchain_core.schema.language_model import BaseLanguageModel from langchain_core.schema.messages import BaseMessage, get_buffer_string from langchain.memory.chat_memory import BaseChatMemory class ConversationTokenBufferMemory(BaseChatMemory): """Conversation chat memory with token limit.""" human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel memory_key: str = "history" max_token_limit: int = 2000 @property def buffer(self) -> Any: """String buffer of memory.""" return self.buffer_as_messages if self.return_messages else self.buffer_as_str @property def buffer_as_str(self) -> str: """Exposes the buffer as a string in case return_messages is False.""" return get_buffer_string( self.chat_memory.messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) @property def buffer_as_messages(self) -> List[BaseMessage]: """Exposes the buffer as a list of messages in case return_messages is True.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" return {self.memory_key: self.buffer} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer. Pruned.""" super().save_context(inputs, outputs) # Prune buffer if it exceeds max token limit buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_length > self.max_token_limit: pruned_memory = [] while curr_buffer_length > self.max_token_limit: pruned_memory.append(buffer.pop(0)) curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
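Because the pruning in `save_context` is the interesting part of this memory class, here is a brief sketch of it in action; the model choice and token limit are arbitrary assumptions.

# Sketch: old turns are dropped once the buffer exceeds max_token_limit tokens.
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationTokenBufferMemory

memory = ConversationTokenBufferMemory(llm=ChatOpenAI(temperature=0), max_token_limit=60)

memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "summarize our chat so far"}, {"output": "we said hello"})

# Only the most recent messages that fit under the limit remain in the buffer.
print(memory.load_memory_variables({}))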
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~chat_models~test_vertexai.py
"""Test Vertex AI API wrapper. In order to run this test, you need to install VertexAI SDK (that is is the private preview) and be whitelisted to list the models themselves: In order to run this test, you need to install VertexAI SDK pip install google-cloud-aiplatform>=1.35.0 Your end-user credentials would be used to make the calls (make sure you've run `gcloud auth login` first). """ from typing import Optional from unittest.mock import MagicMock, Mock, patch import pytest from langchain_core.schema import LLMResult from langchain_core.schema.messages import AIMessage, HumanMessage, SystemMessage from langchain.chat_models import ChatVertexAI from langchain.chat_models.vertexai import _parse_chat_history, _parse_examples @pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"]) def test_vertexai_instantiation(model_name: str) -> None: if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() assert model._llm_type == "vertexai" assert model.model_name == model.client._model_id @pytest.mark.scheduled @pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"]) def test_vertexai_single_call(model_name: str) -> None: if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() message = HumanMessage(content="Hello") response = model([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_candidates() -> None: model = ChatVertexAI(model_name="chat-bison@001", temperature=0.3, n=2) message = HumanMessage(content="Hello") response = model.generate(messages=[[message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 1 assert len(response.generations[0]) == 2 @pytest.mark.scheduled @pytest.mark.asyncio async def test_vertexai_agenerate() -> None: model = ChatVertexAI(temperature=0) message = HumanMessage(content="Hello") response = await model.agenerate([[message]]) assert isinstance(response, LLMResult) assert isinstance(response.generations[0][0].message, AIMessage) # type: ignore sync_response = model.generate([[message]]) assert response.generations[0][0] == sync_response.generations[0][0] @pytest.mark.scheduled def test_vertexai_single_call_with_context() -> None: model = ChatVertexAI() raw_context = ( "My name is Ned. You are my personal assistant. My favorite movies " "are Lord of the Rings and Hobbit." ) question = ( "Hello, could you recommend a good movie for me to watch this evening, please?" ) context = SystemMessage(content=raw_context) message = HumanMessage(content=question) response = model([context, message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @pytest.mark.scheduled def test_vertexai_single_call_with_examples() -> None: model = ChatVertexAI() raw_context = "My name is Ned. You are my personal assistant." 
question = "2+2" text_question, text_answer = "4+4", "8" inp = HumanMessage(content=text_question) output = AIMessage(content=text_answer) context = SystemMessage(content=raw_context) message = HumanMessage(content=question) response = model([context, message], examples=[inp, output]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @pytest.mark.scheduled @pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"]) def test_vertexai_single_call_with_history(model_name: str) -> None: if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() text_question1, text_answer1 = "How much is 2+2?", "4" text_question2 = "How much is 3+3?" message1 = HumanMessage(content=text_question1) message2 = AIMessage(content=text_answer1) message3 = HumanMessage(content=text_question2) response = model([message1, message2, message3]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_parse_chat_history_correct() -> None: from vertexai.language_models import ChatMessage text_context = ( "My name is Ned. You are my personal assistant. My " "favorite movies are Lord of the Rings and Hobbit." ) context = SystemMessage(content=text_context) text_question = ( "Hello, could you recommend a good movie for me to watch this evening, please?" ) question = HumanMessage(content=text_question) text_answer = ( "Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring " "(2001): This is the first movie in the Lord of the Rings trilogy." ) answer = AIMessage(content=text_answer) history = _parse_chat_history([context, question, answer, question, answer]) assert history.context == context.content assert len(history.history) == 4 assert history.history == [ ChatMessage(content=text_question, author="user"), ChatMessage(content=text_answer, author="bot"), ChatMessage(content=text_question, author="user"), ChatMessage(content=text_answer, author="bot"), ] def test_vertexai_single_call_fails_no_message() -> None: chat = ChatVertexAI() with pytest.raises(ValueError) as exc_info: _ = chat([]) assert ( str(exc_info.value) == "You should provide at least one message to start the chat!" ) @pytest.mark.parametrize("stop", [None, "stop1"]) def test_vertexai_args_passed(stop: Optional[str]) -> None: response_text = "Goodbye" user_prompt = "Hello" prompt_params = { "max_output_tokens": 1, "temperature": 10000.0, "top_k": 10, "top_p": 0.5, } # Mock the library to ensure the args are passed correctly with patch( "vertexai.language_models._language_models.ChatModel.start_chat" ) as start_chat: mock_response = MagicMock() mock_response.candidates = [Mock(text=response_text)] mock_chat = MagicMock() start_chat.return_value = mock_chat mock_send_message = MagicMock(return_value=mock_response) mock_chat.send_message = mock_send_message model = ChatVertexAI(**prompt_params) message = HumanMessage(content=user_prompt) if stop: response = model([message], stop=[stop]) else: response = model([message]) assert response.content == response_text mock_send_message.assert_called_once_with(user_prompt, candidate_count=1) expected_stop_sequence = [stop] if stop else None start_chat.assert_called_once_with( context=None, message_history=[], **prompt_params, stop_sequences=expected_stop_sequence, ) def test_parse_examples_correct() -> None: from vertexai.language_models import InputOutputTextPair text_question = ( "Hello, could you recommend a good movie for me to watch this evening, please?" 
) question = HumanMessage(content=text_question) text_answer = ( "Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring " "(2001): This is the first movie in the Lord of the Rings trilogy." ) answer = AIMessage(content=text_answer) examples = _parse_examples([question, answer, question, answer]) assert len(examples) == 2 assert examples == [ InputOutputTextPair(input_text=text_question, output_text=text_answer), InputOutputTextPair(input_text=text_question, output_text=text_answer), ] def test_parse_examples_failes_wrong_sequence() -> None: with pytest.raises(ValueError) as exc_info: _ = _parse_examples([AIMessage(content="a")]) print(str(exc_info.value)) assert ( str(exc_info.value) == "Expect examples to have an even amount of messages, got 1." )
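For orientation, the call patterns these tests exercise look like this in application code; the model name and prompts are placeholders, and the usual Google Cloud authentication setup described in the module docstring is assumed.

# Sketch of the ChatVertexAI usage covered by the tests above (placeholder prompts).
from langchain_core.schema.messages import HumanMessage, SystemMessage

from langchain.chat_models import ChatVertexAI

chat = ChatVertexAI(model_name="chat-bison", temperature=0.3)

context = SystemMessage(content="You are my personal assistant.")
question = HumanMessage(content="Recommend a movie for this evening, please.")

# The system message becomes the chat context; the reply is an AIMessage.
response = chat([context, question])
print(response.content)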
[ "{'max_output_tokens': 1, 'temperature': 10000.0, 'top_k': 10, 'top_p': 0.5}", "a", "My name is Ned. You are my personal assistant.", "2+2", "Hello", "How much is 3+3?" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~google_vertex_ai_search.py
"""Retriever wrapper for Google Vertex AI Search.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator from langchain_core.schema import BaseRetriever, Document from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.utilities.vertexai import get_client_info from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: from google.api_core.client_options import ClientOptions from google.cloud.discoveryengine_v1beta import ( ConversationalSearchServiceClient, SearchRequest, SearchResult, SearchServiceClient, ) class _BaseGoogleVertexAISearchRetriever(BaseModel): project_id: str """Google Cloud Project ID.""" data_store_id: str """Vertex AI Search data store ID.""" location_id: str = "global" """Vertex AI Search data store location.""" serving_config_id: str = "default_config" """Vertex AI Search serving config ID.""" credentials: Any = None """The default custom credentials (google.auth.credentials.Credentials) to use when making API calls. If not provided, credentials will be ascertained from the environment.""" engine_data_type: int = Field(default=0, ge=0, le=2) """ Defines the Vertex AI Search data type 0 - Unstructured data 1 - Structured data 2 - Website data """ @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validates the environment.""" try: from google.cloud import discoveryengine_v1beta # noqa: F401 except ImportError as exc: raise ImportError( "google.cloud.discoveryengine is not installed." "Please install it with pip install " "google-cloud-discoveryengine>=0.11.0" ) from exc try: from google.api_core.exceptions import InvalidArgument # noqa: F401 except ImportError as exc: raise ImportError( "google.api_core.exceptions is not installed. " "Please install it with pip install google-api-core" ) from exc values["project_id"] = get_from_dict_or_env(values, "project_id", "PROJECT_ID") try: # For backwards compatibility search_engine_id = get_from_dict_or_env( values, "search_engine_id", "SEARCH_ENGINE_ID" ) if search_engine_id: import warnings warnings.warn( "The `search_engine_id` parameter is deprecated. 
Use `data_store_id` instead.", # noqa: E501 DeprecationWarning, ) values["data_store_id"] = search_engine_id except: # noqa: E722 pass values["data_store_id"] = get_from_dict_or_env( values, "data_store_id", "DATA_STORE_ID" ) return values @property def client_options(self) -> "ClientOptions": from google.api_core.client_options import ClientOptions return ClientOptions( api_endpoint=f"{self.location_id}-discoveryengine.googleapis.com" if self.location_id != "global" else None ) def _convert_structured_search_response( self, results: Sequence[SearchResult] ) -> List[Document]: """Converts a sequence of search results to a list of LangChain documents.""" import json from google.protobuf.json_format import MessageToDict documents: List[Document] = [] for result in results: document_dict = MessageToDict( result.document._pb, preserving_proto_field_name=True ) documents.append( Document( page_content=json.dumps(document_dict.get("struct_data", {})), metadata={"id": document_dict["id"], "name": document_dict["name"]}, ) ) return documents def _convert_unstructured_search_response( self, results: Sequence[SearchResult], chunk_type: str ) -> List[Document]: """Converts a sequence of search results to a list of LangChain documents.""" from google.protobuf.json_format import MessageToDict documents: List[Document] = [] for result in results: document_dict = MessageToDict( result.document._pb, preserving_proto_field_name=True ) derived_struct_data = document_dict.get("derived_struct_data") if not derived_struct_data: continue doc_metadata = document_dict.get("struct_data", {}) doc_metadata["id"] = document_dict["id"] if chunk_type not in derived_struct_data: continue for chunk in derived_struct_data[chunk_type]: doc_metadata["source"] = derived_struct_data.get("link", "") if chunk_type == "extractive_answers": doc_metadata["source"] += f":{chunk.get('pageNumber', '')}" documents.append( Document( page_content=chunk.get("content", ""), metadata=doc_metadata ) ) return documents def _convert_website_search_response( self, results: Sequence[SearchResult], chunk_type: str ) -> List[Document]: """Converts a sequence of search results to a list of LangChain documents.""" from google.protobuf.json_format import MessageToDict documents: List[Document] = [] chunk_type = "extractive_answers" for result in results: document_dict = MessageToDict( result.document._pb, preserving_proto_field_name=True ) derived_struct_data = document_dict.get("derived_struct_data") if not derived_struct_data: continue doc_metadata = document_dict.get("struct_data", {}) doc_metadata["id"] = document_dict["id"] doc_metadata["source"] = derived_struct_data.get("link", "") if chunk_type not in derived_struct_data: continue text_field = "snippet" if chunk_type == "snippets" else "content" for chunk in derived_struct_data[chunk_type]: documents.append( Document( page_content=chunk.get(text_field, ""), metadata=doc_metadata ) ) if not documents: print(f"No {chunk_type} could be found.") if chunk_type == "extractive_answers": print( "Make sure that your data store is using Advanced Website " "Indexing.\n" "https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing" # noqa: E501 ) return documents class GoogleVertexAISearchRetriever(BaseRetriever, _BaseGoogleVertexAISearchRetriever): """`Google Vertex AI Search` retriever. For a detailed explanation of the Vertex AI Search concepts and configuration parameters, refer to the product documentation. 
https://cloud.google.com/generative-ai-app-builder/docs/enterprise-search-introduction
    """

    filter: Optional[str] = None
    """Filter expression."""
    get_extractive_answers: bool = False
    """If True return Extractive Answers, otherwise return Extractive Segments or Snippets."""  # noqa: E501
    max_documents: int = Field(default=5, ge=1, le=100)
    """The maximum number of documents to return."""
    max_extractive_answer_count: int = Field(default=1, ge=1, le=5)
    """The maximum number of extractive answers returned in each search result.
    At most 5 answers will be returned for each SearchResult.
    """
    max_extractive_segment_count: int = Field(default=1, ge=1, le=1)
    """The maximum number of extractive segments returned in each search result.
    Currently one segment will be returned for each SearchResult.
    """
    query_expansion_condition: int = Field(default=1, ge=0, le=2)
    """Specification to determine under which conditions query expansion should occur.
    0 - Unspecified query expansion condition. In this case, server behavior defaults
        to disabled
    1 - Disabled query expansion. Only the exact search query is used, even if
        SearchResponse.total_size is zero.
    2 - Automatic query expansion built by the Search API.
    """
    spell_correction_mode: int = Field(default=2, ge=0, le=2)
    """Specification to determine under which conditions spell correction should occur.
    0 - Unspecified spell correction mode. In this case, server behavior defaults
        to auto.
    1 - Suggestion only. Search API will try to find a spell suggestion if there is
        any and put in the `SearchResponse.corrected_query`.
        The spell suggestion will not be used as the search query.
    2 - Automatic spell correction built by the Search API.
        Search will be based on the corrected query if found.
    """

    _client: SearchServiceClient
    _serving_config: str

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.ignore
        arbitrary_types_allowed = True
        underscore_attrs_are_private = True

    def __init__(self, **kwargs: Any) -> None:
        """Initializes private fields."""
        try:
            from google.cloud.discoveryengine_v1beta import SearchServiceClient
        except ImportError as exc:
            raise ImportError(
                "google.cloud.discoveryengine is not installed. "
"Please install it with pip install google-cloud-discoveryengine" ) from exc super().__init__(**kwargs) # For more information, refer to: # https://cloud.google.com/generative-ai-app-builder/docs/locations#specify_a_multi-region_for_your_data_store self._client = SearchServiceClient( credentials=self.credentials, client_options=self.client_options, client_info=get_client_info(module="vertex-ai-search"), ) self._serving_config = self._client.serving_config_path( project=self.project_id, location=self.location_id, data_store=self.data_store_id, serving_config=self.serving_config_id, ) def _create_search_request(self, query: str) -> SearchRequest: """Prepares a SearchRequest object.""" from google.cloud.discoveryengine_v1beta import SearchRequest query_expansion_spec = SearchRequest.QueryExpansionSpec( condition=self.query_expansion_condition, ) spell_correction_spec = SearchRequest.SpellCorrectionSpec( mode=self.spell_correction_mode ) if self.engine_data_type == 0: if self.get_extractive_answers: extractive_content_spec = ( SearchRequest.ContentSearchSpec.ExtractiveContentSpec( max_extractive_answer_count=self.max_extractive_answer_count, ) ) else: extractive_content_spec = ( SearchRequest.ContentSearchSpec.ExtractiveContentSpec( max_extractive_segment_count=self.max_extractive_segment_count, ) ) content_search_spec = SearchRequest.ContentSearchSpec( extractive_content_spec=extractive_content_spec ) elif self.engine_data_type == 1: content_search_spec = None elif self.engine_data_type == 2: content_search_spec = SearchRequest.ContentSearchSpec( extractive_content_spec=SearchRequest.ContentSearchSpec.ExtractiveContentSpec( max_extractive_answer_count=self.max_extractive_answer_count, ), snippet_spec=SearchRequest.ContentSearchSpec.SnippetSpec( return_snippet=True ), ) else: raise NotImplementedError( "Only data store type 0 (Unstructured), 1 (Structured)," "or 2 (Website) are supported currently." + f" Got {self.engine_data_type}" ) return SearchRequest( query=query, filter=self.filter, serving_config=self._serving_config, page_size=self.max_documents, content_search_spec=content_search_spec, query_expansion_spec=query_expansion_spec, spell_correction_spec=spell_correction_spec, ) def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Get documents relevant for a query.""" from google.api_core.exceptions import InvalidArgument search_request = self._create_search_request(query) try: response = self._client.search(search_request) except InvalidArgument as exc: raise type(exc)( exc.message + " This might be due to engine_data_type not set correctly." ) if self.engine_data_type == 0: chunk_type = ( "extractive_answers" if self.get_extractive_answers else "extractive_segments" ) documents = self._convert_unstructured_search_response( response.results, chunk_type ) elif self.engine_data_type == 1: documents = self._convert_structured_search_response(response.results) elif self.engine_data_type == 2: chunk_type = ( "extractive_answers" if self.get_extractive_answers else "snippets" ) documents = self._convert_website_search_response( response.results, chunk_type ) else: raise NotImplementedError( "Only data store type 0 (Unstructured), 1 (Structured)," "or 2 (Website) are supported currently." 
+ f" Got {self.engine_data_type}" ) return documents class GoogleVertexAIMultiTurnSearchRetriever( BaseRetriever, _BaseGoogleVertexAISearchRetriever ): """`Google Vertex AI Search` retriever for multi-turn conversations.""" conversation_id: str = "-" """Vertex AI Search Conversation ID.""" _client: ConversationalSearchServiceClient _serving_config: str class Config: """Configuration for this pydantic object.""" extra = Extra.ignore arbitrary_types_allowed = True underscore_attrs_are_private = True def __init__(self, **kwargs: Any): super().__init__(**kwargs) from google.cloud.discoveryengine_v1beta import ( ConversationalSearchServiceClient, ) self._client = ConversationalSearchServiceClient( credentials=self.credentials, client_options=self.client_options, client_info=get_client_info(module="vertex-ai-search"), ) self._serving_config = self._client.serving_config_path( project=self.project_id, location=self.location_id, data_store=self.data_store_id, serving_config=self.serving_config_id, ) if self.engine_data_type == 1: raise NotImplementedError( "Data store type 1 (Structured)" "is not currently supported for multi-turn search." + f" Got {self.engine_data_type}" ) def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Get documents relevant for a query.""" from google.cloud.discoveryengine_v1beta import ( ConverseConversationRequest, TextInput, ) request = ConverseConversationRequest( name=self._client.conversation_path( self.project_id, self.location_id, self.data_store_id, self.conversation_id, ), serving_config=self._serving_config, query=TextInput(input=query), ) response = self._client.converse_conversation(request) if self.engine_data_type == 2: return self._convert_website_search_response( response.search_results, "extractive_answers" ) return self._convert_unstructured_search_response( response.search_results, "extractive_answers" ) class GoogleCloudEnterpriseSearchRetriever(GoogleVertexAISearchRetriever): """`Google Vertex Search API` retriever alias for backwards compatibility. DEPRECATED: Use `GoogleVertexAISearchRetriever` instead. """ def __init__(self, **data: Any): import warnings warnings.warn( "GoogleCloudEnterpriseSearchRetriever is deprecated, use GoogleVertexAISearchRetriever", # noqa: E501 DeprecationWarning, ) super().__init__(**data)
[]
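A minimal usage sketch for the single-turn retriever defined above. It assumes the class is re-exported as ``langchain.retrievers.GoogleVertexAISearchRetriever``, that ``google-cloud-discoveryengine`` is installed, and that the project and data store IDs below are placeholders for an existing unstructured data store; treat it as a sketch, not the canonical setup.

.. code-block:: python

    from langchain.retrievers import GoogleVertexAISearchRetriever

    # Unstructured data store (engine_data_type=0) returning extractive answers.
    retriever = GoogleVertexAISearchRetriever(
        project_id="<PROJECT_ID>",        # placeholder
        data_store_id="<DATA_STORE_ID>",  # placeholder
        location_id="global",
        engine_data_type=0,
        get_extractive_answers=True,
        max_documents=5,
    )

    docs = retriever.get_relevant_documents("What is Vertex AI Search?")
    for doc in docs:
        print(doc.metadata.get("source"), doc.page_content[:80])

The multi-turn variant takes the same project and data store arguments plus an optional ``conversation_id``; structured data stores (``engine_data_type=1``) are not supported there, as its constructor above enforces.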
2024-01-10
axgpt/langchain
libs~langchain~langchain~agents~initialize.py
"""Load agent.""" from typing import Any, Optional, Sequence from langchain_core.schema.language_model import BaseLanguageModel from langchain.agents.agent import AgentExecutor from langchain.agents.agent_types import AgentType from langchain.agents.loading import AGENT_TO_CLASS, load_agent from langchain.callbacks.base import BaseCallbackManager from langchain.tools.base import BaseTool def initialize_agent( tools: Sequence[BaseTool], llm: BaseLanguageModel, agent: Optional[AgentType] = None, callback_manager: Optional[BaseCallbackManager] = None, agent_path: Optional[str] = None, agent_kwargs: Optional[dict] = None, *, tags: Optional[Sequence[str]] = None, **kwargs: Any, ) -> AgentExecutor: """Load an agent executor given tools and LLM. Args: tools: List of tools this agent has access to. llm: Language model to use as the agent. agent: Agent type to use. If None and agent_path is also None, will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. callback_manager: CallbackManager to use. Global callback manager is used if not provided. Defaults to None. agent_path: Path to serialized agent to use. agent_kwargs: Additional keyword arguments to pass to the underlying agent tags: Tags to apply to the traced runs. **kwargs: Additional keyword arguments passed to the agent executor Returns: An agent executor """ tags_ = list(tags) if tags else [] if agent is None and agent_path is None: agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION if agent is not None and agent_path is not None: raise ValueError( "Both `agent` and `agent_path` are specified, " "but at most only one should be." ) if agent is not None: if agent not in AGENT_TO_CLASS: raise ValueError( f"Got unknown agent type: {agent}. " f"Valid types are: {AGENT_TO_CLASS.keys()}." ) tags_.append(agent.value if isinstance(agent, AgentType) else agent) agent_cls = AGENT_TO_CLASS[agent] agent_kwargs = agent_kwargs or {} agent_obj = agent_cls.from_llm_and_tools( llm, tools, callback_manager=callback_manager, **agent_kwargs ) elif agent_path is not None: agent_obj = load_agent( agent_path, llm=llm, tools=tools, callback_manager=callback_manager ) try: # TODO: Add tags from the serialized object directly. tags_.append(agent_obj._agent_type) except NotImplementedError: pass else: raise ValueError( "Somehow both `agent` and `agent_path` are None, " "this should never happen." ) return AgentExecutor.from_agent_and_tools( agent=agent_obj, tools=tools, callback_manager=callback_manager, tags=tags_, **kwargs, )
[]
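A brief sketch of how ``initialize_agent`` is typically called. The OpenAI LLM and the ``llm-math`` tool are assumptions chosen for illustration; any ``BaseLanguageModel`` and any sequence of ``BaseTool`` objects work, and ``verbose=True`` is simply forwarded to the ``AgentExecutor`` through ``**kwargs``.

.. code-block:: python

    from langchain.agents import AgentType, initialize_agent, load_tools
    from langchain.llms import OpenAI  # assumed model; any BaseLanguageModel works

    llm = OpenAI(temperature=0)
    tools = load_tools(["llm-math"], llm=llm)  # assumed tool selection

    agent_executor = initialize_agent(
        tools,
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,  # also the default when agent is None
        tags=["demo"],  # merged with the agent-type tag on traced runs
        verbose=True,   # forwarded to AgentExecutor via **kwargs
    )
    print(agent_executor.run("What is 7 raised to the 0.5 power?"))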
2024-01-10
axgpt/langchain
libs~langchain~langchain~document_loaders~sitemap.py
import itertools import re from typing import Any, Callable, Generator, Iterable, List, Optional, Tuple from urllib.parse import urlparse from langchain_core.schema import Document from langchain.document_loaders.web_base import WebBaseLoader def _default_parsing_function(content: Any) -> str: return str(content.get_text()) def _default_meta_function(meta: dict, _content: Any) -> dict: return {"source": meta["loc"], **meta} def _batch_block(iterable: Iterable, size: int) -> Generator[List[dict], None, None]: it = iter(iterable) while item := list(itertools.islice(it, size)): yield item def _extract_scheme_and_domain(url: str) -> Tuple[str, str]: """Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain """ parsed_uri = urlparse(url) return parsed_uri.scheme, parsed_uri.netloc class SitemapLoader(WebBaseLoader): """Load a sitemap and its URLs. **Security Note**: This loader can be used to load all URLs specified in a sitemap. If a malicious actor gets access to the sitemap, they could force the server to load URLs from other domains by modifying the sitemap. This could lead to server-side request forgery (SSRF) attacks; e.g., with the attacker forcing the server to load URLs from internal service endpoints that are not publicly accessible. While the attacker may not immediately gain access to this data, this data could leak into downstream systems (e.g., data loader is used to load data for indexing). This loader is a crawler and web crawlers should generally NOT be deployed with network access to any internal servers. Control access to who can submit crawling requests and what network access the crawler has. By default, the loader will only load URLs from the same domain as the sitemap if the site map is not a local file. This can be disabled by setting restrict_to_same_domain to False (not recommended). If the site map is a local file, no such risk mitigation is applied by default. Use the filter URLs argument to limit which URLs can be loaded. See https://python.langchain.com/docs/security """ def __init__( self, web_path: str, filter_urls: Optional[List[str]] = None, parsing_function: Optional[Callable] = None, blocksize: Optional[int] = None, blocknum: int = 0, meta_function: Optional[Callable] = None, is_local: bool = False, continue_on_failure: bool = False, restrict_to_same_domain: bool = True, **kwargs: Any, ): """Initialize with webpage path and optional filter URLs. Args: web_path: url of the sitemap. can also be a local path filter_urls: a list of regexes. If specified, only URLS that match one of the filter URLs will be loaded. *WARNING* The filter URLs are interpreted as regular expressions. Remember to escape special characters if you do not want them to be interpreted as regular expression syntax. For example, `.` appears frequently in URLs and should be escaped if you want to match a literal `.` rather than any character. restrict_to_same_domain takes precedence over filter_urls when restrict_to_same_domain is True and the sitemap is not a local file. parsing_function: Function to parse bs4.Soup output blocksize: number of sitemap locations per block blocknum: the number of the block that should be loaded - zero indexed. Default: 0 meta_function: Function to parse bs4.Soup output for metadata remember when setting this method to also copy metadata["loc"] to metadata["source"] if you are using this field is_local: whether the sitemap is a local file. 
Default: False continue_on_failure: whether to continue loading the sitemap if an error occurs loading a url, emitting a warning instead of raising an exception. Setting this to True makes the loader more robust, but also may result in missing data. Default: False restrict_to_same_domain: whether to restrict loading to URLs to the same domain as the sitemap. Attention: This is only applied if the sitemap is not a local file! """ if blocksize is not None and blocksize < 1: raise ValueError("Sitemap blocksize should be at least 1") if blocknum < 0: raise ValueError("Sitemap blocknum can not be lower then 0") try: import lxml # noqa:F401 except ImportError: raise ImportError( "lxml package not found, please install it with `pip install lxml`" ) super().__init__(web_paths=[web_path], **kwargs) # Define a list of URL patterns (interpreted as regular expressions) that # will be allowed to be loaded. # restrict_to_same_domain takes precedence over filter_urls when # restrict_to_same_domain is True and the sitemap is not a local file. self.allow_url_patterns = filter_urls self.restrict_to_same_domain = restrict_to_same_domain self.parsing_function = parsing_function or _default_parsing_function self.meta_function = meta_function or _default_meta_function self.blocksize = blocksize self.blocknum = blocknum self.is_local = is_local self.continue_on_failure = continue_on_failure def parse_sitemap(self, soup: Any) -> List[dict]: """Parse sitemap xml and load into a list of dicts. Args: soup: BeautifulSoup object. Returns: List of dicts. """ els = [] for url in soup.find_all("url"): loc = url.find("loc") if not loc: continue # Strip leading and trailing whitespace and newlines loc_text = loc.text.strip() if self.restrict_to_same_domain and not self.is_local: if _extract_scheme_and_domain(loc_text) != _extract_scheme_and_domain( self.web_path ): continue if self.allow_url_patterns and not any( re.match(regexp_pattern, loc_text) for regexp_pattern in self.allow_url_patterns ): continue els.append( { tag: prop.text for tag in ["loc", "lastmod", "changefreq", "priority"] if (prop := url.find(tag)) } ) for sitemap in soup.find_all("sitemap"): loc = sitemap.find("loc") if not loc: continue soup_child = self.scrape_all([loc.text], "xml")[0] els.extend(self.parse_sitemap(soup_child)) return els def load(self) -> List[Document]: """Load sitemap.""" if self.is_local: try: import bs4 except ImportError: raise ImportError( "beautifulsoup4 package not found, please install it" " with `pip install beautifulsoup4`" ) fp = open(self.web_path) soup = bs4.BeautifulSoup(fp, "xml") else: soup = self._scrape(self.web_path, parser="xml") els = self.parse_sitemap(soup) if self.blocksize is not None: elblocks = list(_batch_block(els, self.blocksize)) blockcount = len(elblocks) if blockcount - 1 < self.blocknum: raise ValueError( "Selected sitemap does not contain enough blocks for given blocknum" ) else: els = elblocks[self.blocknum] results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el]) return [ Document( page_content=self.parsing_function(results[i]), metadata=self.meta_function(els[i], results[i]), ) for i in range(len(results)) ]
[]
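A short usage sketch for ``SitemapLoader``. The sitemap URL and filter pattern are illustrative placeholders; the point is that ``filter_urls`` entries are regular expressions, so literal dots are escaped, and that same-domain restriction applies by default for remote sitemaps.

.. code-block:: python

    from langchain.document_loaders.sitemap import SitemapLoader

    loader = SitemapLoader(
        web_path="https://example.com/sitemap.xml",  # placeholder sitemap
        # Entries are regexes: escape dots to match them literally.
        filter_urls=[r"https://example\.com/docs/.*"],
        continue_on_failure=True,  # warn and skip pages that fail to load
    )

    docs = loader.load()
    for doc in docs[:3]:
        print(doc.metadata["source"])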
2024-01-10
axgpt/langchain
libs~langchain~langchain~document_loaders~generic.py
from __future__ import annotations from pathlib import Path from typing import Iterator, List, Literal, Optional, Sequence, Union from langchain_core.schema import Document from langchain.document_loaders.base import BaseBlobParser, BaseLoader from langchain.document_loaders.blob_loaders import BlobLoader, FileSystemBlobLoader from langchain.document_loaders.parsers.registry import get_parser from langchain.text_splitter import TextSplitter _PathLike = Union[str, Path] DEFAULT = Literal["default"] class GenericLoader(BaseLoader): """Generic Document Loader. A generic document loader that allows combining an arbitrary blob loader with a blob parser. Examples: .. code-block:: python from langchain.document_loaders import GenericLoader from langchain.document_loaders.blob_loaders import FileSystemBlobLoader loader = GenericLoader.from_filesystem( path="path/to/directory", glob="**/[!.]*", suffixes=[".pdf"], show_progress=True, ) docs = loader.lazy_load() next(docs) Example instantiations to change which files are loaded: .. code-block:: python # Recursively load all text files in a directory. loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/*.txt") # Recursively load all non-hidden files in a directory. loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/[!.]*") # Load all files in a directory without recursion. loader = GenericLoader.from_filesystem("/path/to/dir", glob="*") Example instantiations to change which parser is used: .. code-block:: python from langchain.document_loaders.parsers.pdf import PyPDFParser # Recursively load all text files in a directory. loader = GenericLoader.from_filesystem( "/path/to/dir", glob="**/*.pdf", parser=PyPDFParser() ) """ def __init__( self, blob_loader: BlobLoader, blob_parser: BaseBlobParser, ) -> None: """A generic document loader. Args: blob_loader: A blob loader which knows how to yield blobs blob_parser: A blob parser which knows how to parse blobs into documents """ self.blob_loader = blob_loader self.blob_parser = blob_parser def lazy_load( self, ) -> Iterator[Document]: """Load documents lazily. Use this when working at a large scale.""" for blob in self.blob_loader.yield_blobs(): yield from self.blob_parser.lazy_parse(blob) def load(self) -> List[Document]: """Load all documents.""" return list(self.lazy_load()) def load_and_split( self, text_splitter: Optional[TextSplitter] = None ) -> List[Document]: """Load all documents and split them into sentences.""" raise NotImplementedError( "Loading and splitting is not yet implemented for generic loaders. " "When they will be implemented they will be added via the initializer. " "This method should not be used going forward." ) @classmethod def from_filesystem( cls, path: _PathLike, *, glob: str = "**/[!.]*", exclude: Sequence[str] = (), suffixes: Optional[Sequence[str]] = None, show_progress: bool = False, parser: Union[DEFAULT, BaseBlobParser] = "default", ) -> GenericLoader: """Create a generic document loader using a filesystem blob loader. Args: path: The path to the directory to load documents from. glob: The glob pattern to use to find documents. suffixes: The suffixes to use to filter documents. If None, all files matching the glob will be loaded. exclude: A list of patterns to exclude from the loader. show_progress: Whether to show a progress bar or not (requires tqdm). Proxies to the file system loader. parser: A blob parser which knows how to parse blobs into documents Returns: A generic document loader. 
""" blob_loader = FileSystemBlobLoader( path, glob=glob, exclude=exclude, suffixes=suffixes, show_progress=show_progress, ) if isinstance(parser, str): blob_parser = get_parser(parser) else: blob_parser = parser return cls(blob_loader, blob_parser)
[]
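The class docstring above covers ``from_filesystem``; the sketch below wires a blob loader and a blob parser together through the constructor instead. ``PyPDFParser`` is the parser already mentioned in the docstring and assumes ``pypdf`` is installed; the directory path is a placeholder.

.. code-block:: python

    from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
    from langchain.document_loaders.generic import GenericLoader
    from langchain.document_loaders.parsers.pdf import PyPDFParser

    blob_loader = FileSystemBlobLoader(
        "/path/to/dir",      # placeholder directory
        glob="**/*.pdf",
        show_progress=True,  # requires tqdm
    )
    loader = GenericLoader(blob_loader=blob_loader, blob_parser=PyPDFParser())

    # Lazy iteration avoids materializing every document at once.
    for doc in loader.lazy_load():
        print(doc.metadata)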
2024-01-10
axgpt/langchain
libs~langchain~langchain~llms~baidu_qianfan_endpoint.py
from __future__ import annotations

import logging
from typing import (
    Any,
    AsyncIterator,
    Dict,
    Iterator,
    List,
    Optional,
)

from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.schema.output import GenerationChunk

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class QianfanLLMEndpoint(LLM):
    """Baidu Qianfan hosted open source or customized models.

    To use, you should have the ``qianfan`` python package installed, and
    the environment variables ``qianfan_ak`` and ``qianfan_sk`` set with your
    API key and Secret Key.

    ak and sk are required parameters, which you can get from
    https://cloud.baidu.com/product/wenxinworkshop

    Example:
        .. code-block:: python

            from langchain.llms import QianfanLLMEndpoint
            qianfan_model = QianfanLLMEndpoint(model="ERNIE-Bot",
                endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk")
    """

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)

    client: Any

    qianfan_ak: Optional[str] = None
    qianfan_sk: Optional[str] = None

    streaming: Optional[bool] = False
    """Whether to stream the results or not."""

    model: str = "ERNIE-Bot-turbo"
    """Model name.
    See https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu for available models.

    Preset models are mapped to an endpoint.
    `model` will be ignored if `endpoint` is set.
    """

    endpoint: Optional[str] = None
    """Endpoint of the Qianfan LLM, required if a custom model is used."""

    request_timeout: Optional[int] = 60
    """Request timeout for chat HTTP requests."""

    top_p: Optional[float] = 0.8
    temperature: Optional[float] = 0.95
    penalty_score: Optional[float] = 1
    """Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo.
    For other models, passing these params will not affect the result.
""" @root_validator() def validate_enviroment(cls, values: Dict) -> Dict: values["qianfan_ak"] = get_from_dict_or_env( values, "qianfan_ak", "QIANFAN_AK", ) values["qianfan_sk"] = get_from_dict_or_env( values, "qianfan_sk", "QIANFAN_SK", ) params = { "ak": values["qianfan_ak"], "sk": values["qianfan_sk"], "model": values["model"], } if values["endpoint"] is not None and values["endpoint"] != "": params["endpoint"] = values["endpoint"] try: import qianfan values["client"] = qianfan.Completion(**params) except ImportError: raise ImportError( "qianfan package not found, please install it with " "`pip install qianfan`" ) return values @property def _identifying_params(self) -> Dict[str, Any]: return { **{"endpoint": self.endpoint, "model": self.model}, **super()._identifying_params, } @property def _llm_type(self) -> str: """Return type of llm.""" return "baidu-qianfan-endpoint" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Qianfan API.""" normal_params = { "model": self.model, "endpoint": self.endpoint, "stream": self.streaming, "request_timeout": self.request_timeout, "top_p": self.top_p, "temperature": self.temperature, "penalty_score": self.penalty_score, } return {**normal_params, **self.model_kwargs} def _convert_prompt_msg_params( self, prompt: str, **kwargs: Any, ) -> dict: if "streaming" in kwargs: kwargs["stream"] = kwargs.pop("streaming") return { **{"prompt": prompt, "model": self.model}, **self._default_params, **kwargs, } def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to an qianfan models endpoint for each generation with a prompt. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = qianfan_model("Tell me a joke.") """ if self.streaming: completion = "" for chunk in self._stream(prompt, stop, run_manager, **kwargs): completion += chunk.text return completion params = self._convert_prompt_msg_params(prompt, **kwargs) response_payload = self.client.do(**params) return response_payload["result"] async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: if self.streaming: completion = "" async for chunk in self._astream(prompt, stop, run_manager, **kwargs): completion += chunk.text return completion params = self._convert_prompt_msg_params(prompt, **kwargs) response_payload = await self.client.ado(**params) return response_payload["result"] def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True}) for res in self.client.do(**params): if res: chunk = GenerationChunk(text=res["result"]) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text) async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True}) async for res in await self.client.ado(**params): if res: chunk = GenerationChunk(text=res["result"]) yield chunk if run_manager: await run_manager.on_llm_new_token(chunk.text)
[]
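A small end-to-end sketch for ``QianfanLLMEndpoint``. The credentials are placeholders, the import assumes the class is re-exported from ``langchain.llms``, and the streaming loop relies on the standard ``stream`` interface of the base LLM class, which wraps the ``_stream`` generator above.

.. code-block:: python

    import os

    from langchain.llms import QianfanLLMEndpoint

    # Placeholders; real values come from the Qianfan console.
    os.environ["QIANFAN_AK"] = "your_ak"
    os.environ["QIANFAN_SK"] = "your_sk"

    llm = QianfanLLMEndpoint(model="ERNIE-Bot-turbo", temperature=0.95)

    # Blocking call, as in the class docstring.
    print(llm("Tell me a joke."))

    # Token-by-token streaming.
    for chunk in llm.stream("Write one sentence about rivers."):
        print(chunk, end="", flush=True)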
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~document_compressors~embeddings_filter.py
from typing import Callable, Dict, Optional, Sequence import numpy as np from langchain_core.pydantic_v1 import root_validator from langchain_core.schema import Document from langchain_core.schema.embeddings import Embeddings from langchain.callbacks.manager import Callbacks from langchain.document_transformers.embeddings_redundant_filter import ( _get_embeddings_from_stateful_docs, get_stateful_documents, ) from langchain.retrievers.document_compressors.base import ( BaseDocumentCompressor, ) from langchain.utils.math import cosine_similarity class EmbeddingsFilter(BaseDocumentCompressor): """Document compressor that uses embeddings to drop documents unrelated to the query.""" embeddings: Embeddings """Embeddings to use for embedding document contents and queries.""" similarity_fn: Callable = cosine_similarity """Similarity function for comparing documents. Function expected to take as input two matrices (List[List[float]]) and return a matrix of scores where higher values indicate greater similarity.""" k: Optional[int] = 20 """The number of relevant documents to return. Can be set to None, in which case `similarity_threshold` must be specified. Defaults to 20.""" similarity_threshold: Optional[float] """Threshold for determining when two documents are similar enough to be considered redundant. Defaults to None, must be specified if `k` is set to None.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator() def validate_params(cls, values: Dict) -> Dict: """Validate similarity parameters.""" if values["k"] is None and values["similarity_threshold"] is None: raise ValueError("Must specify one of `k` or `similarity_threshold`.") return values def compress_documents( self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks] = None, ) -> Sequence[Document]: """Filter documents based on similarity of their embeddings to the query.""" stateful_documents = get_stateful_documents(documents) embedded_documents = _get_embeddings_from_stateful_docs( self.embeddings, stateful_documents ) embedded_query = self.embeddings.embed_query(query) similarity = self.similarity_fn([embedded_query], embedded_documents)[0] included_idxs = np.arange(len(embedded_documents)) if self.k is not None: included_idxs = np.argsort(similarity)[::-1][: self.k] if self.similarity_threshold is not None: similar_enough = np.where( similarity[included_idxs] > self.similarity_threshold ) included_idxs = included_idxs[similar_enough] for i in included_idxs: stateful_documents[i].state["query_similarity_score"] = similarity[i] return [stateful_documents[i] for i in included_idxs]
[]
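A compact sketch of the filter above used on its own, outside a retriever pipeline. ``OpenAIEmbeddings`` is an assumed embedding model and the 0.76 threshold is illustrative; setting ``k=None`` makes the threshold the only selection criterion, which the validator above permits.

.. code-block:: python

    from langchain.embeddings.openai import OpenAIEmbeddings  # assumed embedding model
    from langchain.retrievers.document_compressors.embeddings_filter import (
        EmbeddingsFilter,
    )
    from langchain_core.schema import Document

    docs = [
        Document(page_content="LangChain ships several document compressors."),
        Document(page_content="Bananas are rich in potassium."),
    ]

    embeddings_filter = EmbeddingsFilter(
        embeddings=OpenAIEmbeddings(),
        k=None,                     # disable top-k selection
        similarity_threshold=0.76,  # illustrative cutoff
    )

    relevant = embeddings_filter.compress_documents(
        docs, query="What does LangChain provide?"
    )
    for doc in relevant:
        print(doc.page_content)

In practice the same object is often plugged into a contextual compression retriever, but calling ``compress_documents`` directly, as above, is enough to observe the filtering behaviour.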