Dataset columns:
  date_collected: string (1 distinct value)
  repo_name: string (6 to 116 characters)
  file_name: string (2 to 220 characters)
  file_contents: string (13 to 357k characters)
  prompts: sequence of strings
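Rows with this schema can be loaded and inspected directly; the sketch below uses the `datasets` library in streaming mode. The dataset ID "your-org/langchain-source-files" is a hypothetical placeholder, since the real Hub ID is not given here.

# Minimal sketch: stream a few rows of a dataset with the columns listed above.
# "your-org/langchain-source-files" is a placeholder, not the real dataset ID.
from datasets import load_dataset

ds = load_dataset("your-org/langchain-source-files", split="train", streaming=True)

for row in ds.take(3):
    print(row["date_collected"], row["repo_name"], row["file_name"])
    print(f"  {len(row['file_contents'])} characters, prompts: {row['prompts']}")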
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~combine_documents~refine.py
"""Combine documents by doing a first pass and then refining on more documents.""" from __future__ import annotations from typing import Any, Dict, List, Tuple from langchain_core.prompts.prompt import PromptTemplate from langchain_core.pydantic_v1 import Extra, Field, root_validator from langchain_core.schema import BasePromptTemplate, format_document from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import ( BaseCombineDocumentsChain, ) from langchain.chains.llm import LLMChain from langchain.docstore.document import Document def _get_default_document_prompt() -> PromptTemplate: return PromptTemplate(input_variables=["page_content"], template="{page_content}") class RefineDocumentsChain(BaseCombineDocumentsChain): """Combine documents by doing a first pass and then refining on more documents. This algorithm first calls `initial_llm_chain` on the first document, passing that first document in with the variable name `document_variable_name`, and produces a new variable with the variable name `initial_response_name`. Then, it loops over every remaining document. This is called the "refine" step. It calls `refine_llm_chain`, passing in that document with the variable name `document_variable_name` as well as the previous response with the variable name `initial_response_name`. Example: .. code-block:: python from langchain.chains import RefineDocumentsChain, LLMChain from langchain_core.prompts import PromptTemplate from langchain.llms import OpenAI # This controls how each document will be formatted. Specifically, # it will be passed to `format_document` - see that function for more # details. document_prompt = PromptTemplate( input_variables=["page_content"], template="{page_content}" ) document_variable_name = "context" llm = OpenAI() # The prompt here should take as an input variable the # `document_variable_name` prompt = PromptTemplate.from_template( "Summarize this content: {context}" ) initial_llm_chain = LLMChain(llm=llm, prompt=prompt) initial_response_name = "prev_response" # The prompt here should take as an input variable the # `document_variable_name` as well as `initial_response_name` prompt_refine = PromptTemplate.from_template( "Here's your first summary: {prev_response}. " "Now add to it based on the following context: {context}" ) refine_llm_chain = LLMChain(llm=llm, prompt=prompt_refine) chain = RefineDocumentsChain( initial_llm_chain=initial_llm_chain, refine_llm_chain=refine_llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name, initial_response_name=initial_response_name, ) """ initial_llm_chain: LLMChain """LLM chain to use on initial document.""" refine_llm_chain: LLMChain """LLM chain to use when refining.""" document_variable_name: str """The variable name in the initial_llm_chain to put the documents in. If only one variable in the initial_llm_chain, this need not be provided.""" initial_response_name: str """The variable name to format the initial response in when refining.""" document_prompt: BasePromptTemplate = Field( default_factory=_get_default_document_prompt ) """Prompt to use to format each document, gets passed to `format_document`.""" return_intermediate_steps: bool = False """Return the results of the refine steps in the output.""" @property def output_keys(self) -> List[str]: """Expect input key. 
:meta private: """ _output_keys = super().output_keys if self.return_intermediate_steps: _output_keys = _output_keys + ["intermediate_steps"] return _output_keys class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def get_return_intermediate_steps(cls, values: Dict) -> Dict: """For backwards compatibility.""" if "return_refine_steps" in values: values["return_intermediate_steps"] = values["return_refine_steps"] del values["return_refine_steps"] return values @root_validator(pre=True) def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided.""" if "document_variable_name" not in values: llm_chain_variables = values["initial_llm_chain"].prompt.input_variables if len(llm_chain_variables) == 1: values["document_variable_name"] = llm_chain_variables[0] else: raise ValueError( "document_variable_name must be provided if there are " "multiple llm_chain input_variables" ) else: llm_chain_variables = values["initial_llm_chain"].prompt.input_variables if values["document_variable_name"] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was " f"not found in llm_chain input_variables: {llm_chain_variables}" ) return values def combine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Combine by mapping first chain over all, then stuffing into final chain. Args: docs: List of documents to combine callbacks: Callbacks to be passed through **kwargs: additional parameters to be passed to LLM calls (like other input variables besides the documents) Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. """ inputs = self._construct_initial_inputs(docs, **kwargs) res = self.initial_llm_chain.predict(callbacks=callbacks, **inputs) refine_steps = [res] for doc in docs[1:]: base_inputs = self._construct_refine_inputs(doc, res) inputs = {**base_inputs, **kwargs} res = self.refine_llm_chain.predict(callbacks=callbacks, **inputs) refine_steps.append(res) return self._construct_result(refine_steps, res) async def acombine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Async combine by mapping a first chain over all, then stuffing into a final chain. Args: docs: List of documents to combine callbacks: Callbacks to be passed through **kwargs: additional parameters to be passed to LLM calls (like other input variables besides the documents) Returns: The first element returned is the single string output. The second element returned is a dictionary of other keys to return. 
""" inputs = self._construct_initial_inputs(docs, **kwargs) res = await self.initial_llm_chain.apredict(callbacks=callbacks, **inputs) refine_steps = [res] for doc in docs[1:]: base_inputs = self._construct_refine_inputs(doc, res) inputs = {**base_inputs, **kwargs} res = await self.refine_llm_chain.apredict(callbacks=callbacks, **inputs) refine_steps.append(res) return self._construct_result(refine_steps, res) def _construct_result(self, refine_steps: List[str], res: str) -> Tuple[str, dict]: if self.return_intermediate_steps: extra_return_dict = {"intermediate_steps": refine_steps} else: extra_return_dict = {} return res, extra_return_dict def _construct_refine_inputs(self, doc: Document, res: str) -> Dict[str, Any]: return { self.document_variable_name: format_document(doc, self.document_prompt), self.initial_response_name: res, } def _construct_initial_inputs( self, docs: List[Document], **kwargs: Any ) -> Dict[str, Any]: base_info = {"page_content": docs[0].page_content} base_info.update(docs[0].metadata) document_info = {k: base_info[k] for k in self.document_prompt.input_variables} base_inputs: dict = { self.document_variable_name: self.document_prompt.format(**document_info) } inputs = {**base_inputs, **kwargs} return inputs @property def _chain_type(self) -> str: return "refine_documents_chain"
[ "{page_content}" ]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~cache~test_upstash_redis_cache.py
"""Test Upstash Redis cache functionality.""" import uuid import pytest from langchain_core.schema import Generation, LLMResult import langchain from langchain.cache import UpstashRedisCache from tests.unit_tests.llms.fake_chat_model import FakeChatModel from tests.unit_tests.llms.fake_llm import FakeLLM URL = "<UPSTASH_REDIS_REST_URL>" TOKEN = "<UPSTASH_REDIS_REST_TOKEN>" def random_string() -> str: return str(uuid.uuid4()) @pytest.mark.requires("upstash_redis") def test_redis_cache_ttl() -> None: from upstash_redis import Redis langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1) langchain.llm_cache.update("foo", "bar", [Generation(text="fizz")]) key = langchain.llm_cache._key("foo", "bar") assert langchain.llm_cache.redis.pttl(key) > 0 @pytest.mark.requires("upstash_redis") def test_redis_cache() -> None: from upstash_redis import Redis langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1) llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) langchain.llm_cache.update("foo", llm_string, [Generation(text="fizz")]) output = llm.generate(["foo"]) expected_output = LLMResult( generations=[[Generation(text="fizz")]], llm_output={}, ) assert output == expected_output lookup_output = langchain.llm_cache.lookup("foo", llm_string) if lookup_output and len(lookup_output) > 0: assert lookup_output == expected_output.generations[0] langchain.llm_cache.clear() output = llm.generate(["foo"]) assert output != expected_output langchain.llm_cache.redis.flushall() def test_redis_cache_multi() -> None: from upstash_redis import Redis langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1) llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) langchain.llm_cache.update( "foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")] ) output = llm.generate( ["foo"] ) # foo and bar will have the same embedding produced by FakeEmbeddings expected_output = LLMResult( generations=[[Generation(text="fizz"), Generation(text="Buzz")]], llm_output={}, ) assert output == expected_output # clear the cache langchain.llm_cache.clear() @pytest.mark.requires("upstash_redis") def test_redis_cache_chat() -> None: from upstash_redis import Redis langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1) llm = FakeChatModel() params = llm.dict() params["stop"] = None with pytest.warns(): llm.predict("foo") langchain.llm_cache.redis.flushall()
[]
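Outside the test harness, the same wiring shown in these tests drives the cache directly: assign `langchain.llm_cache`, then `update` and `lookup` entries keyed by prompt and LLM string. A minimal sketch under the same assumptions as the tests, i.e. the `upstash_redis` package is installed and real credentials replace the `<UPSTASH_REDIS_REST_URL>` / `<UPSTASH_REDIS_REST_TOKEN>` placeholders:

import langchain
from langchain.cache import UpstashRedisCache
from langchain_core.schema import Generation
from upstash_redis import Redis

# Placeholder credentials, as in the test file above.
redis = Redis(url="<UPSTASH_REDIS_REST_URL>", token="<UPSTASH_REDIS_REST_TOKEN>")
langchain.llm_cache = UpstashRedisCache(redis_=redis, ttl=60)  # entries expire after 60 seconds

# Cache a generation under a (prompt, llm_string) pair and read it back.
langchain.llm_cache.update("foo", "fake-llm-config", [Generation(text="fizz")])
cached = langchain.llm_cache.lookup("foo", "fake-llm-config")
print(cached)  # [Generation(text="fizz")] until the TTL expires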
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~vectara.py
from __future__ import annotations import json import logging import os from hashlib import md5 from typing import Any, Iterable, List, Optional, Tuple, Type import requests from langchain_core.pydantic_v1 import Field from langchain_core.schema import Document from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore, VectorStoreRetriever logger = logging.getLogger(__name__) class Vectara(VectorStore): """`Vectara API` vector store. See (https://vectara.com). Example: .. code-block:: python from langchain.vectorstores import Vectara vectorstore = Vectara( vectara_customer_id=vectara_customer_id, vectara_corpus_id=vectara_corpus_id, vectara_api_key=vectara_api_key ) """ def __init__( self, vectara_customer_id: Optional[str] = None, vectara_corpus_id: Optional[str] = None, vectara_api_key: Optional[str] = None, vectara_api_timeout: int = 120, source: str = "langchain", ): """Initialize with Vectara API.""" self._vectara_customer_id = vectara_customer_id or os.environ.get( "VECTARA_CUSTOMER_ID" ) self._vectara_corpus_id = vectara_corpus_id or os.environ.get( "VECTARA_CORPUS_ID" ) self._vectara_api_key = vectara_api_key or os.environ.get("VECTARA_API_KEY") if ( self._vectara_customer_id is None or self._vectara_corpus_id is None or self._vectara_api_key is None ): logger.warning( "Can't find Vectara credentials, customer_id or corpus_id in " "environment." ) else: logger.debug(f"Using corpus id {self._vectara_corpus_id}") self._source = source self._session = requests.Session() # to reuse connections adapter = requests.adapters.HTTPAdapter(max_retries=3) self._session.mount("http://", adapter) self.vectara_api_timeout = vectara_api_timeout @property def embeddings(self) -> Optional[Embeddings]: return None def _get_post_headers(self) -> dict: """Returns headers that should be attached to each post request.""" return { "x-api-key": self._vectara_api_key, "customer-id": self._vectara_customer_id, "Content-Type": "application/json", "X-Source": self._source, } def _delete_doc(self, doc_id: str) -> bool: """ Delete a document from the Vectara corpus. Args: url (str): URL of the page to delete. doc_id (str): ID of the document to delete. Returns: bool: True if deletion was successful, False otherwise. 
""" body = { "customer_id": self._vectara_customer_id, "corpus_id": self._vectara_corpus_id, "document_id": doc_id, } response = self._session.post( "https://api.vectara.io/v1/delete-doc", data=json.dumps(body), verify=True, headers=self._get_post_headers(), timeout=self.vectara_api_timeout, ) if response.status_code != 200: logger.error( f"Delete request failed for doc_id = {doc_id} with status code " f"{response.status_code}, reason {response.reason}, text " f"{response.text}" ) return False return True def _index_doc(self, doc: dict) -> str: request: dict[str, Any] = {} request["customer_id"] = self._vectara_customer_id request["corpus_id"] = self._vectara_corpus_id request["document"] = doc response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/index", data=json.dumps(request), timeout=self.vectara_api_timeout, verify=True, ) status_code = response.status_code result = response.json() status_str = result["status"]["code"] if "status" in result else None if status_code == 409 or status_str and (status_str == "ALREADY_EXISTS"): return "E_ALREADY_EXISTS" elif status_str and (status_str == "FORBIDDEN"): return "E_NO_PERMISSIONS" else: return "E_SUCCEEDED" def add_files( self, files_list: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """ Vectara provides a way to add documents directly via our API where pre-processing and chunking occurs internally in an optimal way This method provides a way to use that API in LangChain Args: files_list: Iterable of strings, each representing a local file path. Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc. see API docs for full list metadatas: Optional list of metadatas associated with each file Returns: List of ids associated with each of the files indexed """ doc_ids = [] for inx, file in enumerate(files_list): if not os.path.exists(file): logger.error(f"File {file} does not exist, skipping") continue md = metadatas[inx] if metadatas else {} files: dict = { "file": (file, open(file, "rb")), "doc_metadata": json.dumps(md), } headers = self._get_post_headers() headers.pop("Content-Type") response = self._session.post( f"https://api.vectara.io/upload?c={self._vectara_customer_id}&o={self._vectara_corpus_id}&d=True", files=files, verify=True, headers=headers, timeout=self.vectara_api_timeout, ) if response.status_code == 409: doc_id = response.json()["document"]["documentId"] logger.info( f"File {file} already exists on Vectara (doc_id={doc_id}), skipping" ) elif response.status_code == 200: doc_id = response.json()["document"]["documentId"] doc_ids.append(doc_id) else: logger.info(f"Error indexing file {file}: {response.json()}") return doc_ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, doc_metadata: Optional[dict] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. doc_metadata: optional metadata for the document This function indexes all the input text strings in the Vectara corpus as a single Vectara document, where each input text is considered a "section" and the metadata are associated with each section. if 'doc_metadata' is provided, it is associated with the Vectara document. 
Returns: document ID of the document added """ doc_hash = md5() for t in texts: doc_hash.update(t.encode()) doc_id = doc_hash.hexdigest() if metadatas is None: metadatas = [{} for _ in texts] if doc_metadata: doc_metadata["source"] = "langchain" else: doc_metadata = {"source": "langchain"} doc = { "document_id": doc_id, "metadataJson": json.dumps(doc_metadata), "section": [ {"text": text, "metadataJson": json.dumps(md)} for text, md in zip(texts, metadatas) ], } success_str = self._index_doc(doc) if success_str == "E_ALREADY_EXISTS": self._delete_doc(doc_id) self._index_doc(doc) elif success_str == "E_NO_PERMISSIONS": print( """No permissions to add document to Vectara. Check your corpus ID, customer ID and API key""" ) return [doc_id] def similarity_search_with_score( self, query: str, k: int = 5, lambda_val: float = 0.025, filter: Optional[str] = None, score_threshold: Optional[float] = None, n_sentence_context: int = 2, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. score_threshold: minimal score threshold for the result. If defined, results with score less than this value will be filtered out. n_sentence_context: number of sentences before/after the matching segment to add, defaults to 2 Returns: List of Documents most similar to the query and score for each. """ data = json.dumps( { "query": [ { "query": query, "start": 0, "num_results": k, "context_config": { "sentences_before": n_sentence_context, "sentences_after": n_sentence_context, }, "corpus_key": [ { "customer_id": self._vectara_customer_id, "corpus_id": self._vectara_corpus_id, "metadataFilter": filter, "lexical_interpolation_config": {"lambda": lambda_val}, } ], } ] } ) response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/query", data=data, timeout=self.vectara_api_timeout, ) if response.status_code != 200: logger.error( "Query failed %s", f"(code {response.status_code}, reason {response.reason}, details " f"{response.text})", ) return [] result = response.json() if score_threshold: responses = [ r for r in result["responseSet"][0]["response"] if r["score"] > score_threshold ] else: responses = result["responseSet"][0]["response"] documents = result["responseSet"][0]["document"] metadatas = [] for x in responses: md = {m["name"]: m["value"] for m in x["metadata"]} doc_num = x["documentIndex"] doc_md = {m["name"]: m["value"] for m in documents[doc_num]["metadata"]} md.update(doc_md) metadatas.append(md) docs_with_score = [ ( Document( page_content=x["text"], metadata=md, ), x["score"], ) for x, md in zip(responses, metadatas) ] return docs_with_score def similarity_search( self, query: str, k: int = 5, lambda_val: float = 0.025, filter: Optional[str] = None, n_sentence_context: int = 2, **kwargs: Any, ) -> List[Document]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. filter: Dictionary of argument(s) to filter on metadata. 
For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. n_sentence_context: number of sentences before/after the matching segment to add, defaults to 2 Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_with_score( query, k=k, lambda_val=lambda_val, filter=filter, score_threshold=None, n_sentence_context=n_sentence_context, **kwargs, ) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls: Type[Vectara], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Vectara vectara = Vectara.from_texts( texts, vectara_customer_id=customer_id, vectara_corpus_id=corpus_id, vectara_api_key=api_key, ) """ # Notes: # * Vectara generates its own embeddings, so we ignore the provided # embeddings (required by interface) # * when metadatas[] are provided they are associated with each "part" # in Vectara. doc_metadata can be used to provide additional metadata # for the document itself (applies to all "texts" in this call) doc_metadata = kwargs.pop("doc_metadata", {}) vectara = cls(**kwargs) vectara.add_texts(texts, metadatas, doc_metadata=doc_metadata, **kwargs) return vectara @classmethod def from_files( cls: Type[Vectara], files: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Vectara vectara = Vectara.from_files( files_list, vectara_customer_id=customer_id, vectara_corpus_id=corpus_id, vectara_api_key=api_key, ) """ # Note: Vectara generates its own embeddings, so we ignore the provided # embeddings (required by interface) vectara = cls(**kwargs) vectara.add_files(files, metadatas) return vectara def as_retriever(self, **kwargs: Any) -> VectaraRetriever: tags = kwargs.pop("tags", None) or [] tags.extend(self._get_retriever_tags()) return VectaraRetriever(vectorstore=self, search_kwargs=kwargs, tags=tags) class VectaraRetriever(VectorStoreRetriever): """Retriever class for `Vectara`.""" vectorstore: Vectara """Vectara vectorstore.""" search_kwargs: dict = Field( default_factory=lambda: { "lambda_val": 0.0, "k": 5, "filter": "", "n_sentence_context": "2", } ) """Search params. k: Number of Documents to return. Defaults to 5. lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. n_sentence_context: number of sentences before/after the matching segment to add """ def add_texts( self, texts: List[str], metadatas: Optional[List[dict]] = None, doc_metadata: Optional[dict] = None, ) -> None: """Add text to the Vectara vectorstore. Args: texts (List[str]): The text metadatas (List[dict]): Metadata dicts, must line up with existing store """ self.vectorstore.add_texts(texts, metadatas, doc_metadata or {})
[]
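In practice the class above is usually driven through `from_texts` plus the similarity-search and retriever surface shown in its docstrings; Vectara computes embeddings server-side, so no `Embeddings` object is passed. A minimal sketch with placeholder credentials (they can also be supplied via the VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and VECTARA_API_KEY environment variables):

from langchain.vectorstores import Vectara

# Placeholder credentials for illustration only.
vectara = Vectara.from_texts(
    ["Vectara chunks and embeds text server-side.", "LangChain wraps its REST API."],
    vectara_customer_id="<CUSTOMER_ID>",
    vectara_corpus_id="<CORPUS_ID>",
    vectara_api_key="<API_KEY>",
    doc_metadata={"topic": "demo"},  # attached to the Vectara document as a whole
)

# Hybrid search: lambda_val mixes lexical and semantic matching (see similarity_search_with_score).
docs = vectara.similarity_search("How does Vectara index documents?", k=2, lambda_val=0.025)

# Or expose the store as a retriever for use inside chains.
retriever = vectara.as_retriever()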
2024-01-10
axgpt/langchain
libs~core~langchain_core~callbacks~manager.py
from __future__ import annotations import asyncio import functools import logging import os import uuid from concurrent.futures import ThreadPoolExecutor from contextlib import asynccontextmanager, contextmanager from contextvars import ContextVar from typing import ( TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Dict, Generator, List, Optional, Sequence, Tuple, Type, TypeVar, Union, cast, ) from uuid import UUID from langsmith import utils as ls_utils from langsmith.run_helpers import get_run_tree_context from tenacity import RetryCallState from langchain_core.callbacks.base import ( BaseCallbackHandler, BaseCallbackManager, Callbacks, ChainManagerMixin, LLMManagerMixin, RetrieverManagerMixin, RunManagerMixin, ToolManagerMixin, ) from langchain_core.callbacks.stdout import StdOutCallbackHandler from langchain_core.callbacks.tracers import run_collector from langchain_core.callbacks.tracers.langchain import ( LangChainTracer, ) from langchain_core.callbacks.tracers.langchain_v1 import ( LangChainTracerV1, TracerSessionV1, ) from langchain_core.callbacks.tracers.stdout import ConsoleCallbackHandler from langchain_core.schema import ( AgentAction, AgentFinish, Document, LLMResult, ) from langchain_core.schema.messages import BaseMessage, get_buffer_string from langchain_core.schema.output import ChatGenerationChunk, GenerationChunk if TYPE_CHECKING: from langsmith import Client as LangSmithClient logger = logging.getLogger(__name__) tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar( # noqa: E501 "tracing_callback", default=None ) tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501 "tracing_callback_v2", default=None ) run_collector_var: ContextVar[ Optional[run_collector.RunCollectorCallbackHandler] ] = ContextVar( # noqa: E501 "run_collector", default=None ) def _get_debug() -> bool: from langchain_core.globals import get_debug return get_debug() @contextmanager def tracing_enabled( session_name: str = "default", ) -> Generator[TracerSessionV1, None, None]: """Get the Deprecated LangChainTracer in a context manager. Args: session_name (str, optional): The name of the session. Defaults to "default". Returns: TracerSessionV1: The LangChainTracer session. Example: >>> with tracing_enabled() as session: ... # Use the LangChainTracer session """ cb = LangChainTracerV1() session = cast(TracerSessionV1, cb.load_session(session_name)) try: tracing_callback_var.set(cb) yield session finally: tracing_callback_var.set(None) @contextmanager def tracing_v2_enabled( project_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, tags: Optional[List[str]] = None, client: Optional[LangSmithClient] = None, ) -> Generator[LangChainTracer, None, None]: """Instruct LangChain to log all runs in context to LangSmith. Args: project_name (str, optional): The name of the project. Defaults to "default". example_id (str or UUID, optional): The ID of the example. Defaults to None. tags (List[str], optional): The tags to add to the run. Defaults to None. Returns: None Example: >>> with tracing_v2_enabled(): ... # LangChain code will automatically be traced You can use this to fetch the LangSmith run URL: >>> with tracing_v2_enabled() as cb: ... chain.invoke("foo") ... 
run_url = cb.get_run_url() """ if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer( example_id=example_id, project_name=project_name, tags=tags, client=client, ) try: tracing_v2_callback_var.set(cb) yield cb finally: tracing_v2_callback_var.set(None) @contextmanager def collect_runs() -> Generator[run_collector.RunCollectorCallbackHandler, None, None]: """Collect all run traces in context. Returns: run_collector.RunCollectorCallbackHandler: The run collector callback handler. Example: >>> with collect_runs() as runs_cb: chain.invoke("foo") run_id = runs_cb.traced_runs[0].id """ cb = run_collector.RunCollectorCallbackHandler() run_collector_var.set(cb) yield cb run_collector_var.set(None) def _get_trace_callbacks( project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, callback_manager: Optional[Union[CallbackManager, AsyncCallbackManager]] = None, ) -> Callbacks: if _tracing_v2_is_enabled(): project_name_ = project_name or _get_tracer_project() tracer = tracing_v2_callback_var.get() or LangChainTracer( project_name=project_name_, example_id=example_id, ) if callback_manager is None: cb = cast(Callbacks, [tracer]) else: if not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): callback_manager.add_handler(tracer, True) # If it already has a LangChainTracer, we don't need to add another one. # this would likely mess up the trace hierarchy. cb = callback_manager else: cb = None return cb @contextmanager def trace_as_chain_group( group_name: str, callback_manager: Optional[CallbackManager] = None, *, inputs: Optional[Dict[str, Any]] = None, project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, ) -> Generator[CallbackManagerForChainGroup, None, None]: """Get a callback manager for a chain group in a context manager. Useful for grouping different calls together as a single run even if they aren't composed in a single chain. Args: group_name (str): The name of the chain group. callback_manager (CallbackManager, optional): The callback manager to use. inputs (Dict[str, Any], optional): The inputs to the chain group. project_name (str, optional): The name of the project. Defaults to None. example_id (str or UUID, optional): The ID of the example. Defaults to None. run_id (UUID, optional): The ID of the run. tags (List[str], optional): The inheritable tags to apply to all runs. Defaults to None. Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith. Returns: CallbackManagerForChainGroup: The callback manager for the chain group. Example: .. 
code-block:: python llm_input = "Foo" with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager: # Use the callback manager for the chain group res = llm.predict(llm_input, callbacks=manager) manager.on_chain_end({"output": res}) """ # noqa: E501 cb = _get_trace_callbacks( project_name, example_id, callback_manager=callback_manager ) cm = CallbackManager.configure( inheritable_callbacks=cb, inheritable_tags=tags, ) run_manager = cm.on_chain_start({"name": group_name}, inputs or {}, run_id=run_id) child_cm = run_manager.get_child() group_cm = CallbackManagerForChainGroup( child_cm.handlers, child_cm.inheritable_handlers, child_cm.parent_run_id, parent_run_manager=run_manager, tags=child_cm.tags, inheritable_tags=child_cm.inheritable_tags, metadata=child_cm.metadata, inheritable_metadata=child_cm.inheritable_metadata, ) try: yield group_cm except Exception as e: if not group_cm.ended: run_manager.on_chain_error(e) raise e else: if not group_cm.ended: run_manager.on_chain_end({}) @asynccontextmanager async def atrace_as_chain_group( group_name: str, callback_manager: Optional[AsyncCallbackManager] = None, *, inputs: Optional[Dict[str, Any]] = None, project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, ) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]: """Get an async callback manager for a chain group in a context manager. Useful for grouping different async calls together as a single run even if they aren't composed in a single chain. Args: group_name (str): The name of the chain group. callback_manager (AsyncCallbackManager, optional): The async callback manager to use, which manages tracing and other callback behavior. project_name (str, optional): The name of the project. Defaults to None. example_id (str or UUID, optional): The ID of the example. Defaults to None. run_id (UUID, optional): The ID of the run. tags (List[str], optional): The inheritable tags to apply to all runs. Defaults to None. Returns: AsyncCallbackManager: The async callback manager for the chain group. Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith. Example: .. code-block:: python llm_input = "Foo" async with atrace_as_chain_group("group_name", inputs={"input": llm_input}) as manager: # Use the async callback manager for the chain group res = await llm.apredict(llm_input, callbacks=manager) await manager.on_chain_end({"output": res}) """ # noqa: E501 cb = _get_trace_callbacks( project_name, example_id, callback_manager=callback_manager ) cm = AsyncCallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags) run_manager = await cm.on_chain_start( {"name": group_name}, inputs or {}, run_id=run_id ) child_cm = run_manager.get_child() group_cm = AsyncCallbackManagerForChainGroup( child_cm.handlers, child_cm.inheritable_handlers, child_cm.parent_run_id, parent_run_manager=run_manager, tags=child_cm.tags, inheritable_tags=child_cm.inheritable_tags, metadata=child_cm.metadata, inheritable_metadata=child_cm.inheritable_metadata, ) try: yield group_cm except Exception as e: if not group_cm.ended: await run_manager.on_chain_error(e) raise e else: if not group_cm.ended: await run_manager.on_chain_end({}) def handle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for CallbackManager. 
Note: This function is used by langserve to handle events. Args: handlers: The list of handlers that will handle the event event_name: The name of the event (e.g., "on_llm_start") ignore_condition_name: Name of the attribute defined on handler that if True will cause the handler to be skipped for the given event *args: The arguments to pass to the event handler **kwargs: The keyword arguments to pass to the event handler """ coros: List[Coroutine[Any, Any, Any]] = [] try: message_strings: Optional[List[str]] = None for handler in handlers: try: if ignore_condition_name is None or not getattr( handler, ignore_condition_name ): event = getattr(handler, event_name)(*args, **kwargs) if asyncio.iscoroutine(event): coros.append(event) except NotImplementedError as e: if event_name == "on_chat_model_start": if message_strings is None: message_strings = [get_buffer_string(m) for m in args[1]] handle_event( [handler], "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: handler_name = handler.__class__.__name__ logger.warning( f"NotImplementedError in {handler_name}.{event_name}" f" callback: {repr(e)}" ) except Exception as e: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback:" f" {repr(e)}" ) if handler.raise_error: raise e finally: if coros: try: # Raises RuntimeError if there is no current event loop. asyncio.get_running_loop() loop_running = True except RuntimeError: loop_running = False if loop_running: # If we try to submit this coroutine to the running loop # we end up in a deadlock, as we'd have gotten here from a # running coroutine, which we cannot interrupt to run this one. # The solution is to create a new loop in a new thread. with ThreadPoolExecutor(1) as executor: executor.submit(_run_coros, coros).result() else: _run_coros(coros) def _run_coros(coros: List[Coroutine[Any, Any, Any]]) -> None: if hasattr(asyncio, "Runner"): # Python 3.11+ # Run the coroutines in a new event loop, taking care to # - install signal handlers # - run pending tasks scheduled by `coros` # - close asyncgens and executors # - close the loop with asyncio.Runner() as runner: # Run the coroutine, get the result for coro in coros: runner.run(coro) # Run pending tasks scheduled by coros until they are all done while pending := asyncio.all_tasks(runner.get_loop()): runner.run(asyncio.wait(pending)) else: # Before Python 3.11 we need to run each coroutine in a new event loop # as the Runner api is not available. 
for coro in coros: asyncio.run(coro) async def _ahandle_event_for_handler( handler: BaseCallbackHandler, event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name): event = getattr(handler, event_name) if asyncio.iscoroutinefunction(event): await event(*args, **kwargs) else: if handler.run_inline: event(*args, **kwargs) else: await asyncio.get_event_loop().run_in_executor( None, functools.partial(event, *args, **kwargs) ) except NotImplementedError as e: if event_name == "on_chat_model_start": message_strings = [get_buffer_string(m) for m in args[1]] await _ahandle_event_for_handler( handler, "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning( f"NotImplementedError in {handler.__class__.__name__}.{event_name}" f" callback: {repr(e)}" ) except Exception as e: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback:" f" {repr(e)}" ) if handler.raise_error: raise e async def ahandle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for AsyncCallbackManager. Note: This function is used by langserve to handle events. Args: handlers: The list of handlers that will handle the event event_name: The name of the event (e.g., "on_llm_start") ignore_condition_name: Name of the attribute defined on handler that if True will cause the handler to be skipped for the given event *args: The arguments to pass to the event handler **kwargs: The keyword arguments to pass to the event handler """ for handler in [h for h in handlers if h.run_inline]: await _ahandle_event_for_handler( handler, event_name, ignore_condition_name, *args, **kwargs ) await asyncio.gather( *( _ahandle_event_for_handler( handler, event_name, ignore_condition_name, *args, **kwargs ) for handler in handlers if not handler.run_inline ) ) BRM = TypeVar("BRM", bound="BaseRunManager") class BaseRunManager(RunManagerMixin): """Base class for run manager (a bound callback manager).""" def __init__( self, *, run_id: UUID, handlers: List[BaseCallbackHandler], inheritable_handlers: List[BaseCallbackHandler], parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, inheritable_tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, inheritable_metadata: Optional[Dict[str, Any]] = None, ) -> None: """Initialize the run manager. Args: run_id (UUID): The ID of the run. handlers (List[BaseCallbackHandler]): The list of handlers. inheritable_handlers (List[BaseCallbackHandler]): The list of inheritable handlers. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. tags (Optional[List[str]]): The list of tags. inheritable_tags (Optional[List[str]]): The list of inheritable tags. metadata (Optional[Dict[str, Any]]): The metadata. inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata. """ self.run_id = run_id self.handlers = handlers self.inheritable_handlers = inheritable_handlers self.parent_run_id = parent_run_id self.tags = tags or [] self.inheritable_tags = inheritable_tags or [] self.metadata = metadata or {} self.inheritable_metadata = inheritable_metadata or {} @classmethod def get_noop_manager(cls: Type[BRM]) -> BRM: """Return a manager that doesn't perform any operations. Returns: BaseRunManager: The noop manager. 
""" return cls( run_id=uuid.uuid4(), handlers=[], inheritable_handlers=[], tags=[], inheritable_tags=[], metadata={}, inheritable_metadata={}, ) class RunManager(BaseRunManager): """Sync Run Manager.""" def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. """ handle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_retry( self, retry_state: RetryCallState, **kwargs: Any, ) -> None: handle_event( self.handlers, "on_retry", "ignore_retry", retry_state, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class ParentRunManager(RunManager): """Sync Parent Run Manager.""" def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: CallbackManager: The child callback manager. """ manager = CallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) manager.add_metadata(self.inheritable_metadata) if tag is not None: manager.add_tags([tag], False) return manager class AsyncRunManager(BaseRunManager): """Async Run Manager.""" async def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. """ await ahandle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_retry( self, retry_state: RetryCallState, **kwargs: Any, ) -> None: await ahandle_event( self.handlers, "on_retry", "ignore_retry", retry_state, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncParentRunManager(AsyncRunManager): """Async Parent Run Manager.""" def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: AsyncCallbackManager: The child callback manager. """ manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) manager.add_metadata(self.inheritable_metadata) if tag is not None: manager.add_tags([tag], False) return manager class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): """Callback manager for LLM run.""" def on_llm_new_token( self, token: str, *, chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, **kwargs: Any, ) -> None: """Run when LLM generates a new token. Args: token (str): The new token. """ handle_event( self.handlers, "on_llm_new_token", "ignore_llm", token=token, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, chunk=chunk, **kwargs, ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ handle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_llm_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. 
""" handle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): """Async callback manager for LLM run.""" async def on_llm_new_token( self, token: str, *, chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, **kwargs: Any, ) -> None: """Run when LLM generates a new token. Args: token (str): The new token. """ await ahandle_event( self.handlers, "on_llm_new_token", "ignore_llm", token, chunk=chunk, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ await ahandle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_llm_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. """ await ahandle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin): """Callback manager for chain run.""" def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None: """Run when chain ends running. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. """ handle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_chain_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ handle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received. Args: action (AgentAction): The agent action. Returns: Any: The result of the callback. """ handle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. """ handle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin): """Async callback manager for chain run.""" async def on_chain_end( self, outputs: Union[Dict[str, Any], Any], **kwargs: Any ) -> None: """Run when chain ends running. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. """ await ahandle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_chain_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. 
""" await ahandle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received. Args: action (AgentAction): The agent action. Returns: Any: The result of the callback. """ await ahandle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. """ await ahandle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin): """Callback manager for tool run.""" def on_tool_end( self, output: str, **kwargs: Any, ) -> None: """Run when tool ends running. Args: output (str): The output of the tool. """ handle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_tool_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. """ handle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin): """Async callback manager for tool run.""" async def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running. Args: output (str): The output of the tool. """ await ahandle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_tool_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. 
""" await ahandle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin): """Callback manager for retriever run.""" def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any, ) -> None: """Run when retriever ends running.""" handle_event( self.handlers, "on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_retriever_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when retriever errors.""" handle_event( self.handlers, "on_retriever_error", "ignore_retriever", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncCallbackManagerForRetrieverRun( AsyncParentRunManager, RetrieverManagerMixin, ): """Async callback manager for retriever run.""" async def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any ) -> None: """Run when retriever ends running.""" await ahandle_event( self.handlers, "on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_retriever_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when retriever errors.""" await ahandle_event( self.handlers, "on_retriever_error", "ignore_retriever", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class CallbackManager(BaseCallbackManager): """Callback manager that handles callbacks from LangChain.""" def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each prompt as an LLM run. """ managers = [] for prompt in prompts: run_id_ = uuid.uuid4() handle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) ) return managers def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each list of messages as an LLM run. 
""" managers = [] for message_list in messages: run_id_ = uuid.uuid4() handle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) ) return managers def on_chain_start( self, serialized: Dict[str, Any], inputs: Union[Dict[str, Any], Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain. inputs (Union[Dict[str, Any], Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: CallbackManagerForChainRun: The callback manager for the chain run. """ if run_id is None: run_id = uuid.uuid4() handle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return CallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: CallbackManagerForToolRun: The callback manager for the tool run. 
""" if run_id is None: run_id = uuid.uuid4() handle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return CallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) def on_retriever_start( self, serialized: Dict[str, Any], query: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForRetrieverRun: """Run when retriever starts running.""" if run_id is None: run_id = uuid.uuid4() handle_event( self.handlers, "on_retriever_start", "ignore_retriever", serialized, query, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return CallbackManagerForRetrieverRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, inheritable_metadata: Optional[Dict[str, Any]] = None, local_metadata: Optional[Dict[str, Any]] = None, ) -> CallbackManager: """Configure the callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable metadata. Defaults to None. local_metadata (Optional[Dict[str, Any]], optional): The local metadata. Defaults to None. Returns: CallbackManager: The configured callback manager. """ return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, inheritable_metadata, local_metadata, ) class CallbackManagerForChainGroup(CallbackManager): """Callback manager for the chain group.""" def __init__( self, handlers: List[BaseCallbackHandler], inheritable_handlers: Optional[List[BaseCallbackHandler]] = None, parent_run_id: Optional[UUID] = None, *, parent_run_manager: CallbackManagerForChainRun, **kwargs: Any, ) -> None: super().__init__( handlers, inheritable_handlers, parent_run_id, **kwargs, ) self.parent_run_manager = parent_run_manager self.ended = False def copy(self) -> CallbackManagerForChainGroup: return self.__class__( handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, parent_run_manager=self.parent_run_manager, ) def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None: """Run when traced chain group ends. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. 
""" self.ended = True return self.parent_run_manager.on_chain_end(outputs, **kwargs) def on_chain_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ self.ended = True return self.parent_run_manager.on_chain_error(error, **kwargs) class AsyncCallbackManager(BaseCallbackManager): """Async callback manager that handles callbacks from LangChain.""" @property def is_async(self) -> bool: """Return whether the handler is async.""" return True async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[AsyncCallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each prompt. """ tasks = [] managers = [] for prompt in prompts: run_id_ = uuid.uuid4() tasks.append( ahandle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) ) await asyncio.gather(*tasks) return managers async def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> List[AsyncCallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each inner message list. """ tasks = [] managers = [] for message_list in messages: run_id_ = uuid.uuid4() tasks.append( ahandle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) ) await asyncio.gather(*tasks) return managers async def on_chain_start( self, serialized: Dict[str, Any], inputs: Union[Dict[str, Any], Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain. inputs (Union[Dict[str, Any], Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: AsyncCallbackManagerForChainRun: The async callback manager for the chain run. 
""" if run_id is None: run_id = uuid.uuid4() await ahandle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return AsyncCallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) async def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: AsyncCallbackManagerForToolRun: The async callback manager for the tool run. """ if run_id is None: run_id = uuid.uuid4() await ahandle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return AsyncCallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) async def on_retriever_start( self, serialized: Dict[str, Any], query: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForRetrieverRun: """Run when retriever starts running.""" if run_id is None: run_id = uuid.uuid4() await ahandle_event( self.handlers, "on_retriever_start", "ignore_retriever", serialized, query, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return AsyncCallbackManagerForRetrieverRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, inheritable_metadata: Optional[Dict[str, Any]] = None, local_metadata: Optional[Dict[str, Any]] = None, ) -> AsyncCallbackManager: """Configure the async callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable metadata. Defaults to None. local_metadata (Optional[Dict[str, Any]], optional): The local metadata. Defaults to None. Returns: AsyncCallbackManager: The configured async callback manager. 
""" return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, inheritable_metadata, local_metadata, ) class AsyncCallbackManagerForChainGroup(AsyncCallbackManager): """Async callback manager for the chain group.""" def __init__( self, handlers: List[BaseCallbackHandler], inheritable_handlers: Optional[List[BaseCallbackHandler]] = None, parent_run_id: Optional[UUID] = None, *, parent_run_manager: AsyncCallbackManagerForChainRun, **kwargs: Any, ) -> None: super().__init__( handlers, inheritable_handlers, parent_run_id, **kwargs, ) self.parent_run_manager = parent_run_manager self.ended = False def copy(self) -> AsyncCallbackManagerForChainGroup: return self.__class__( handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, parent_run_manager=self.parent_run_manager, ) async def on_chain_end( self, outputs: Union[Dict[str, Any], Any], **kwargs: Any ) -> None: """Run when traced chain group ends. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. """ self.ended = True await self.parent_run_manager.on_chain_end(outputs, **kwargs) async def on_chain_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ self.ended = True await self.parent_run_manager.on_chain_error(error, **kwargs) T = TypeVar("T", CallbackManager, AsyncCallbackManager) def env_var_is_set(env_var: str) -> bool: """Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise. """ return env_var in os.environ and os.environ[env_var] not in ( "", "0", "false", "False", ) def _tracing_v2_is_enabled() -> bool: return ( env_var_is_set("LANGCHAIN_TRACING_V2") or tracing_v2_callback_var.get() is not None or get_run_tree_context() is not None ) def _get_tracer_project() -> str: run_tree = get_run_tree_context() return getattr( run_tree, "session_name", getattr( # Note, if people are trying to nest @traceable functions and the # tracing_v2_enabled context manager, this will likely mess up the # tree structure. tracing_v2_callback_var.get(), "project", # Have to set this to a string even though it always will return # a string because `get_tracer_project` technically can return # None, but only when a specific argument is supplied. # Therefore, this just tricks the mypy type checker str(ls_utils.get_tracer_project()), ), ) _configure_hooks: List[ Tuple[ ContextVar[Optional[BaseCallbackHandler]], bool, Optional[Type[BaseCallbackHandler]], Optional[str], ] ] = [] H = TypeVar("H", bound=BaseCallbackHandler, covariant=True) def register_configure_hook( context_var: ContextVar[Optional[Any]], inheritable: bool, handle_class: Optional[Type[BaseCallbackHandler]] = None, env_var: Optional[str] = None, ) -> None: if env_var is not None and handle_class is None: raise ValueError( "If env_var is set, handle_class must also be set to a non-None value." 
) _configure_hooks.append( ( # the typings of ContextVar do not have the generic arg set as covariant # so we have to cast it cast(ContextVar[Optional[BaseCallbackHandler]], context_var), inheritable, handle_class, env_var, ) ) register_configure_hook(run_collector_var, False) def _configure( callback_manager_cls: Type[T], inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, inheritable_metadata: Optional[Dict[str, Any]] = None, local_metadata: Optional[Dict[str, Any]] = None, ) -> T: """Configure the callback manager. Args: callback_manager_cls (Type[T]): The callback manager class. inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable metadata. Defaults to None. local_metadata (Optional[Dict[str, Any]], optional): The local metadata. Defaults to None. Returns: T: The configured callback manager. """ run_tree = get_run_tree_context() parent_run_id = None if run_tree is None else getattr(run_tree, "id") callback_manager = callback_manager_cls(handlers=[], parent_run_id=parent_run_id) if inheritable_callbacks or local_callbacks: if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: inheritable_callbacks_ = inheritable_callbacks or [] callback_manager = callback_manager_cls( handlers=inheritable_callbacks_.copy(), inheritable_handlers=inheritable_callbacks_.copy(), parent_run_id=parent_run_id, ) else: callback_manager = callback_manager_cls( handlers=inheritable_callbacks.handlers.copy(), inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(), parent_run_id=inheritable_callbacks.parent_run_id, tags=inheritable_callbacks.tags.copy(), inheritable_tags=inheritable_callbacks.inheritable_tags.copy(), metadata=inheritable_callbacks.metadata.copy(), inheritable_metadata=inheritable_callbacks.inheritable_metadata.copy(), ) local_handlers_ = ( local_callbacks if isinstance(local_callbacks, list) else (local_callbacks.handlers if local_callbacks else []) ) for handler in local_handlers_: callback_manager.add_handler(handler, False) if inheritable_tags or local_tags: callback_manager.add_tags(inheritable_tags or []) callback_manager.add_tags(local_tags or [], False) if inheritable_metadata or local_metadata: callback_manager.add_metadata(inheritable_metadata or {}) callback_manager.add_metadata(local_metadata or {}, False) tracer = tracing_callback_var.get() tracing_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING") or tracer is not None or env_var_is_set("LANGCHAIN_HANDLER") ) tracer_v2 = tracing_v2_callback_var.get() tracing_v2_enabled_ = _tracing_v2_is_enabled() tracer_project = _get_tracer_project() debug = _get_debug() if verbose or debug or tracing_enabled_ or tracing_v2_enabled_: if verbose and not any( isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers ): if debug: pass else: callback_manager.add_handler(StdOutCallbackHandler(), False) if debug and not any( isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers ): 
callback_manager.add_handler(ConsoleCallbackHandler(), True) if tracing_enabled_ and not any( isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers ): if tracer: callback_manager.add_handler(tracer, True) else: handler = LangChainTracerV1() handler.load_session(tracer_project) callback_manager.add_handler(handler, True) if tracing_v2_enabled_ and not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): if tracer_v2: callback_manager.add_handler(tracer_v2, True) else: try: handler = LangChainTracer(project_name=tracer_project) callback_manager.add_handler(handler, True) except Exception as e: logger.warning( "Unable to load requested LangChainTracer." " To disable this warning," " unset the LANGCHAIN_TRACING_V2 environment variables.", e, ) for var, inheritable, handler_class, env_var in _configure_hooks: create_one = ( env_var is not None and env_var_is_set(env_var) and handler_class is not None ) if var.get() is not None or create_one: var_handler = var.get() or cast(Type[BaseCallbackHandler], handler_class)() if handler_class is None: if not any( handler is var_handler # direct pointer comparison for handler in callback_manager.handlers ): callback_manager.add_handler(var_handler, inheritable) else: if not any( isinstance(handler, handler_class) for handler in callback_manager.handlers ): callback_manager.add_handler(var_handler, inheritable) return callback_manager
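
# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how `CallbackManager.configure` merges
# inheritable and local callbacks, tags, and metadata via the `_configure`
# helper above. The tag and metadata values are illustrative.
if __name__ == "__main__":
    from langchain.callbacks.stdout import StdOutCallbackHandler

    manager = CallbackManager.configure(
        inheritable_callbacks=[StdOutCallbackHandler()],  # propagated to child runs
        local_callbacks=None,                             # attached only to this manager
        inheritable_tags=["pipeline"],                    # illustrative tag
        local_metadata={"component": "example"},          # illustrative metadata
    )
    # The configured manager can start a run; the returned run manager emits the
    # matching end/error events and produces child managers via get_child().
    run_manager = manager.on_chain_start({"name": "example_chain"}, {"question": "hi"})
    child_manager = run_manager.get_child(tag="step_1")
    run_manager.on_chain_end({"answer": "ok"})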
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~ensemble.py
""" Ensemble retriever that ensemble the results of multiple retrievers by using weighted Reciprocal Rank Fusion """ from typing import Any, Dict, List from langchain_core.pydantic_v1 import root_validator from langchain_core.schema import BaseRetriever, Document from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) class EnsembleRetriever(BaseRetriever): """Retriever that ensembles the multiple retrievers. It uses a rank fusion. Args: retrievers: A list of retrievers to ensemble. weights: A list of weights corresponding to the retrievers. Defaults to equal weighting for all retrievers. c: A constant added to the rank, controlling the balance between the importance of high-ranked items and the consideration given to lower-ranked items. Default is 60. """ retrievers: List[BaseRetriever] weights: List[float] c: int = 60 @root_validator(pre=True) def set_weights(cls, values: Dict[str, Any]) -> Dict[str, Any]: if not values.get("weights"): n_retrievers = len(values["retrievers"]) values["weights"] = [1 / n_retrievers] * n_retrievers return values def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, ) -> List[Document]: """ Get the relevant documents for a given query. Args: query: The query to search for. Returns: A list of reranked documents. """ # Get fused result of the retrievers. fused_documents = self.rank_fusion(query, run_manager) return fused_documents async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, ) -> List[Document]: """ Asynchronously get the relevant documents for a given query. Args: query: The query to search for. Returns: A list of reranked documents. """ # Get fused result of the retrievers. fused_documents = await self.arank_fusion(query, run_manager) return fused_documents def rank_fusion( self, query: str, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """ Retrieve the results of the retrievers and use rank_fusion_func to get the final result. Args: query: The query to search for. Returns: A list of reranked documents. """ # Get the results of all retrievers. retriever_docs = [ retriever.get_relevant_documents( query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}") ) for i, retriever in enumerate(self.retrievers) ] # apply rank fusion fused_documents = self.weighted_reciprocal_rank(retriever_docs) return fused_documents async def arank_fusion( self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun ) -> List[Document]: """ Asynchronously retrieve the results of the retrievers and use rank_fusion_func to get the final result. Args: query: The query to search for. Returns: A list of reranked documents. """ # Get the results of all retrievers. retriever_docs = [ await retriever.aget_relevant_documents( query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}") ) for i, retriever in enumerate(self.retrievers) ] # apply rank fusion fused_documents = self.weighted_reciprocal_rank(retriever_docs) return fused_documents def weighted_reciprocal_rank( self, doc_lists: List[List[Document]] ) -> List[Document]: """ Perform weighted Reciprocal Rank Fusion on multiple rank lists. You can find more details about RRF here: https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf Args: doc_lists: A list of rank lists, where each rank list contains unique items. Returns: list: The final aggregated list of items sorted by their weighted RRF scores in descending order. 
""" if len(doc_lists) != len(self.weights): raise ValueError( "Number of rank lists must be equal to the number of weights." ) # Create a union of all unique documents in the input doc_lists all_documents = set() for doc_list in doc_lists: for doc in doc_list: all_documents.add(doc.page_content) # Initialize the RRF score dictionary for each document rrf_score_dic = {doc: 0.0 for doc in all_documents} # Calculate RRF scores for each document for doc_list, weight in zip(doc_lists, self.weights): for rank, doc in enumerate(doc_list, start=1): rrf_score = weight * (1 / (rank + self.c)) rrf_score_dic[doc.page_content] += rrf_score # Sort documents by their RRF scores in descending order sorted_documents = sorted( rrf_score_dic.keys(), key=lambda x: rrf_score_dic[x], reverse=True ) # Map the sorted page_content back to the original document objects page_content_to_doc_map = { doc.page_content: doc for doc_list in doc_lists for doc in doc_list } sorted_docs = [ page_content_to_doc_map[page_content] for page_content in sorted_documents ] return sorted_docs
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~chat_models~test_jinachat.py
"""Test JinaChat wrapper.""" import pytest from langchain_core.schema import ( BaseMessage, ChatGeneration, HumanMessage, LLMResult, SystemMessage, ) from langchain.callbacks.manager import CallbackManager from langchain.chat_models.jinachat import JinaChat from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_jinachat() -> None: """Test JinaChat wrapper.""" chat = JinaChat(max_tokens=10) message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_jinachat_system_message() -> None: """Test JinaChat wrapper with system message.""" chat = JinaChat(max_tokens=10) system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") response = chat([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_jinachat_generate() -> None: """Test JinaChat wrapper with generate.""" chat = JinaChat(max_tokens=10) message = HumanMessage(content="Hello") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content def test_jinachat_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = JinaChat( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) message = HumanMessage(content="Hello") response = chat([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) @pytest.mark.asyncio async def test_async_jinachat() -> None: """Test async generation.""" chat = JinaChat(max_tokens=102) message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content @pytest.mark.asyncio async def test_async_jinachat_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = JinaChat( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert callback_handler.llm_streams > 0 assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content def test_jinachat_extra_kwargs() -> None: """Test extra kwargs to chat openai.""" # Check that foo is saved in extra_kwargs. 
    llm = JinaChat(foo=3, max_tokens=10)
    assert llm.max_tokens == 10
    assert llm.model_kwargs == {"foo": 3}

    # Test that explicitly provided model_kwargs are merged with extra kwargs.
    llm = JinaChat(foo=3, model_kwargs={"bar": 2})
    assert llm.model_kwargs == {"foo": 3, "bar": 2}

    # Test that supplying the same key both ways raises an error.
    with pytest.raises(ValueError):
        JinaChat(foo=3, model_kwargs={"foo": 2})

    # Test that passing an explicit class param via model_kwargs raises an error.
    with pytest.raises(ValueError):
        JinaChat(model_kwargs={"temperature": 0.2})
[ "Hello", "You are to chat with the user." ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~chat_message_histories~dynamodb.py
from __future__ import annotations import logging from typing import TYPE_CHECKING, Dict, List, Optional from langchain_core.schema import ( BaseChatMessageHistory, ) from langchain_core.schema.messages import ( BaseMessage, _message_to_dict, messages_from_dict, messages_to_dict, ) if TYPE_CHECKING: from boto3.session import Session logger = logging.getLogger(__name__) class DynamoDBChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in AWS DynamoDB. This class expects that a DynamoDB table exists with name `table_name` Args: table_name: name of the DynamoDB table session_id: arbitrary key that is used to store the messages of a single chat session. endpoint_url: URL of the AWS endpoint to connect to. This argument is optional and useful for test purposes, like using Localstack. If you plan to use AWS cloud service, you normally don't have to worry about setting the endpoint_url. primary_key_name: name of the primary key of the DynamoDB table. This argument is optional, defaulting to "SessionId". key: an optional dictionary with a custom primary and secondary key. This argument is optional, but useful when using composite dynamodb keys, or isolating records based off of application details such as a user id. This may also contain global and local secondary index keys. kms_key_id: an optional AWS KMS Key ID, AWS KMS Key ARN, or AWS KMS Alias for client-side encryption """ def __init__( self, table_name: str, session_id: str, endpoint_url: Optional[str] = None, primary_key_name: str = "SessionId", key: Optional[Dict[str, str]] = None, boto3_session: Optional[Session] = None, kms_key_id: Optional[str] = None, ): if boto3_session: client = boto3_session.resource("dynamodb", endpoint_url=endpoint_url) else: try: import boto3 except ImportError as e: raise ImportError( "Unable to import boto3, please install with `pip install boto3`." ) from e if endpoint_url: client = boto3.resource("dynamodb", endpoint_url=endpoint_url) else: client = boto3.resource("dynamodb") self.table = client.Table(table_name) self.session_id = session_id self.key: Dict = key or {primary_key_name: session_id} if kms_key_id: try: from dynamodb_encryption_sdk.encrypted.table import EncryptedTable from dynamodb_encryption_sdk.identifiers import CryptoAction from dynamodb_encryption_sdk.material_providers.aws_kms import ( AwsKmsCryptographicMaterialsProvider, ) from dynamodb_encryption_sdk.structures import AttributeActions except ImportError as e: raise ImportError( "Unable to import dynamodb_encryption_sdk, please install with " "`pip install dynamodb-encryption-sdk`." ) from e actions = AttributeActions( default_action=CryptoAction.DO_NOTHING, attribute_actions={"History": CryptoAction.ENCRYPT_AND_SIGN}, ) aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=kms_key_id) self.table = EncryptedTable( table=self.table, materials_provider=aws_kms_cmp, attribute_actions=actions, auto_refresh_table_indexes=False, ) @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from DynamoDB""" try: from botocore.exceptions import ClientError except ImportError as e: raise ImportError( "Unable to import botocore, please install with `pip install botocore`." 
) from e response = None try: response = self.table.get_item(Key=self.key) except ClientError as error: if error.response["Error"]["Code"] == "ResourceNotFoundException": logger.warning("No record found with session id: %s", self.session_id) else: logger.error(error) if response and "Item" in response: items = response["Item"]["History"] else: items = [] messages = messages_from_dict(items) return messages def add_message(self, message: BaseMessage) -> None: """Append the message to the record in DynamoDB""" try: from botocore.exceptions import ClientError except ImportError as e: raise ImportError( "Unable to import botocore, please install with `pip install botocore`." ) from e messages = messages_to_dict(self.messages) _message = _message_to_dict(message) messages.append(_message) try: self.table.put_item(Item={**self.key, "History": messages}) except ClientError as err: logger.error(err) def clear(self) -> None: """Clear session memory from DynamoDB""" try: from botocore.exceptions import ClientError except ImportError as e: raise ImportError( "Unable to import botocore, please install with `pip install botocore`." ) from e try: self.table.delete_item(Key=self.key) except ClientError as err: logger.error(err)
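
# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of the class above. It assumes boto3 credentials are
# configured and that a DynamoDB table already exists; the table name and
# session id below are illustrative.
if __name__ == "__main__":
    history = DynamoDBChatMessageHistory(
        table_name="SessionTable",     # illustrative table with "SessionId" primary key
        session_id="user-123",         # arbitrary per-conversation key
    )
    history.add_user_message("Hello")  # persisted under the "History" attribute
    history.add_ai_message("Hi, how can I help?")
    print(history.messages)            # round-trips through messages_from_dict
    history.clear()                    # deletes the session record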
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~chat_models~test_qianfan_endpoint.py
"""Test Baidu Qianfan Chat Endpoint.""" from typing import Any from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate from langchain_core.schema import ( AIMessage, BaseMessage, ChatGeneration, FunctionMessage, HumanMessage, LLMResult, ) from langchain.callbacks.manager import CallbackManager from langchain.chains.openai_functions import ( create_openai_fn_chain, ) from langchain.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler _FUNCTIONS: Any = [ { "name": "format_person_info", "description": ( "Output formatter. Should always be used to format your response to the" " user." ), "parameters": { "title": "Person", "description": "Identifying information about a person.", "type": "object", "properties": { "name": { "title": "Name", "description": "The person's name", "type": "string", }, "age": { "title": "Age", "description": "The person's age", "type": "integer", }, "fav_food": { "title": "Fav Food", "description": "The person's favorite food", "type": "string", }, }, "required": ["name", "age"], }, }, { "name": "get_current_temperature", "description": ("Used to get the location's temperature."), "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "city name", }, "unit": { "type": "string", "enum": ["centigrade", "Fahrenheit"], }, }, "required": ["location", "unit"], }, "responses": { "type": "object", "properties": { "temperature": { "type": "integer", "description": "city temperature", }, "unit": { "type": "string", "enum": ["centigrade", "Fahrenheit"], }, }, }, }, ] def test_default_call() -> None: """Test default model(`ERNIE-Bot`) call.""" chat = QianfanChatEndpoint() response = chat(messages=[HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_model() -> None: """Test model kwarg works.""" chat = QianfanChatEndpoint(model="BLOOMZ-7B") response = chat(messages=[HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_model_param() -> None: """Test model params works.""" chat = QianfanChatEndpoint() response = chat(model="BLOOMZ-7B", messages=[HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_endpoint() -> None: """Test user custom model deployments like some open source models.""" chat = QianfanChatEndpoint(endpoint="qianfan_bloomz_7b_compressed") response = chat(messages=[HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_endpoint_param() -> None: """Test user custom model deployments like some open source models.""" chat = QianfanChatEndpoint() response = chat( messages=[ HumanMessage(endpoint="qianfan_bloomz_7b_compressed", content="Hello") ] ) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_multiple_history() -> None: """Tests multiple history works.""" chat = QianfanChatEndpoint() response = chat( messages=[ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="How are you doing?"), ] ) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_stream() -> None: """Test that stream works.""" chat = QianfanChatEndpoint(streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) response 
= chat( messages=[ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="Who are you?"), ], stream=True, callbacks=callback_manager, ) assert callback_handler.llm_streams > 0 assert isinstance(response.content, str) def test_multiple_messages() -> None: """Tests multiple messages works.""" chat = QianfanChatEndpoint() message = HumanMessage(content="Hi, how are you.") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content def test_functions_call_thoughts() -> None: chat = QianfanChatEndpoint(model="ERNIE-Bot") prompt_tmpl = "Use the given functions to answer following question: {input}" prompt_msgs = [ HumanMessagePromptTemplate.from_template(prompt_tmpl), ] prompt = ChatPromptTemplate(messages=prompt_msgs) chain = create_openai_fn_chain( _FUNCTIONS, chat, prompt, output_parser=None, ) message = HumanMessage(content="What's the temperature in Shanghai today?") response = chain.generate([{"input": message}]) assert isinstance(response.generations[0][0], ChatGeneration) assert isinstance(response.generations[0][0].message, AIMessage) assert "function_call" in response.generations[0][0].message.additional_kwargs def test_functions_call() -> None: chat = QianfanChatEndpoint(model="ERNIE-Bot") prompt = ChatPromptTemplate( messages=[ HumanMessage(content="What's the temperature in Shanghai today?"), AIMessage( content="", additional_kwargs={ "function_call": { "name": "get_current_temperature", "thoughts": "i will use get_current_temperature " "to resolve the questions", "arguments": '{"location":"Shanghai","unit":"centigrade"}', } }, ), FunctionMessage( name="get_current_weather", content='{"temperature": "25", \ "unit": "摄氏度", "description": "晴朗"}', ), ] ) llm_chain = create_openai_fn_chain( _FUNCTIONS, chat, prompt, output_parser=None, ) resp = llm_chain.generate([{}]) assert isinstance(resp, LLMResult)
[ "function_call", "Who are you?", "{\"temperature\": \"25\", \"unit\": \"摄氏度\", \"description\": \"晴朗\"}", "Use the given functions to answer following question: {input}", "get_current_weather", "get_current_temperature", "to resolve the questions", "name", "Hello.", "Hello!", "How are you doing?", "{\"location\":\"Shanghai\",\"unit\":\"centigrade\"}", "i will use get_current_temperature ", "Hi, how are you.", "What's the temperature in Shanghai today?", "arguments", "Hello" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~storage~exceptions.py
from langchain_core.schema import LangChainException class InvalidKeyException(LangChainException): """Raised when a key is invalid; e.g., uses incorrect characters."""
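
# --- Usage sketch (not part of the original module) ---
# A small, hedged illustration of how a key-value store might use this exception
# to reject malformed keys; the allowed character set and helper below are
# illustrative, not part of the library.
import re

_VALID_KEY = re.compile(r"^[A-Za-z0-9_.\-/]+$")


def _validate_key(key: str) -> None:
    """Raise InvalidKeyException if the key uses characters outside the allowed set."""
    if not _VALID_KEY.match(key):
        raise InvalidKeyException(f"Invalid key: {key!r}")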
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_models~yandex.py
"""Wrapper around YandexGPT chat models.""" import logging from typing import Any, Dict, List, Optional, Tuple, cast from langchain_core.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.llms.utils import enforce_stop_tokens from langchain.llms.yandex import _BaseYandexGPT logger = logging.getLogger(__name__) def _parse_message(role: str, text: str) -> Dict: return {"role": role, "text": text} def _parse_chat_history(history: List[BaseMessage]) -> Tuple[List[Dict[str, str]], str]: """Parse a sequence of messages into history. Returns: A tuple of a list of parsed messages and an instruction message for the model. """ chat_history = [] instruction = "" for message in history: content = cast(str, message.content) if isinstance(message, HumanMessage): chat_history.append(_parse_message("user", content)) if isinstance(message, AIMessage): chat_history.append(_parse_message("assistant", content)) if isinstance(message, SystemMessage): instruction = content return chat_history, instruction class ChatYandexGPT(_BaseYandexGPT, BaseChatModel): """Wrapper around YandexGPT large language models. There are two authentication options for the service account with the ``ai.languageModels.user`` role: - You can specify the token in a constructor parameter `iam_token` or in an environment variable `YC_IAM_TOKEN`. - You can specify the key in a constructor parameter `api_key` or in an environment variable `YC_API_KEY`. Example: .. code-block:: python from langchain.chat_models import ChatYandexGPT chat_model = ChatYandexGPT(iam_token="t1.9eu...") """ def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """ try: import grpc from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value from yandex.cloud.ai.llm.v1alpha.llm_pb2 import GenerationOptions, Message from yandex.cloud.ai.llm.v1alpha.llm_service_pb2 import ChatRequest from yandex.cloud.ai.llm.v1alpha.llm_service_pb2_grpc import ( TextGenerationServiceStub, ) except ImportError as e: raise ImportError( "Please install YandexCloud SDK" " with `pip install yandexcloud`." ) from e if not messages: raise ValueError( "You should provide at least one message to start the chat!" 
) message_history, instruction = _parse_chat_history(messages) channel_credentials = grpc.ssl_channel_credentials() channel = grpc.secure_channel(self.url, channel_credentials) request = ChatRequest( model=self.model_name, generation_options=GenerationOptions( temperature=DoubleValue(value=self.temperature), max_tokens=Int64Value(value=self.max_tokens), ), instruction_text=instruction, messages=[Message(**message) for message in message_history], ) stub = TextGenerationServiceStub(channel) if self.iam_token: metadata = (("authorization", f"Bearer {self.iam_token}"),) else: metadata = (("authorization", f"Api-Key {self.api_key}"),) res = stub.Chat(request, metadata=metadata) text = list(res)[0].message.text text = text if stop is None else enforce_stop_tokens(text, stop) message = AIMessage(content=text) return ChatResult(generations=[ChatGeneration(message=message)]) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: raise NotImplementedError( """YandexGPT doesn't support async requests at the moment.""" )
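
# --- Behavior sketch (not part of the original module) ---
# A small, hedged illustration of how `_parse_chat_history` above separates the
# SystemMessage into the instruction string and maps the remaining turns into
# the {"role", "text"} dicts sent in the YandexGPT ChatRequest.
if __name__ == "__main__":
    history, instruction = _parse_chat_history(
        [
            SystemMessage(content="You are a helpful assistant."),
            HumanMessage(content="Hello"),
            AIMessage(content="Hi! How can I help?"),
        ]
    )
    # history == [{"role": "user", "text": "Hello"},
    #             {"role": "assistant", "text": "Hi! How can I help?"}]
    # instruction == "You are a helpful assistant."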
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~agents~agent_toolkits~nla~toolkit.py
from __future__ import annotations from typing import Any, List, Optional, Sequence from langchain_core.pydantic_v1 import Field from langchain_core.schema.language_model import BaseLanguageModel from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.agents.agent_toolkits.nla.tool import NLATool from langchain.tools.base import BaseTool from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec from langchain.tools.plugin import AIPlugin from langchain.utilities.requests import Requests class NLAToolkit(BaseToolkit): """Natural Language API Toolkit. *Security Note*: This toolkit creates tools that enable making calls to an Open API compliant API. The tools created by this toolkit may be able to make GET, POST, PATCH, PUT, DELETE requests to any of the exposed endpoints on the API. Control access to who can use this toolkit. See https://python.langchain.com/docs/security for more information. """ nla_tools: Sequence[NLATool] = Field(...) """List of API Endpoint Tools.""" def get_tools(self) -> List[BaseTool]: """Get the tools for all the API operations.""" return list(self.nla_tools) @staticmethod def _get_http_operation_tools( llm: BaseLanguageModel, spec: OpenAPISpec, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> List[NLATool]: """Get the tools for all the API operations.""" if not spec.paths: return [] http_operation_tools = [] for path in spec.paths: for method in spec.get_methods_for_path(path): endpoint_tool = NLATool.from_llm_and_method( llm=llm, path=path, method=method, spec=spec, requests=requests, verbose=verbose, **kwargs, ) http_operation_tools.append(endpoint_tool) return http_operation_tools @classmethod def from_llm_and_spec( cls, llm: BaseLanguageModel, spec: OpenAPISpec, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit by creating tools for each operation.""" http_operation_tools = cls._get_http_operation_tools( llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs ) return cls(nla_tools=http_operation_tools) @classmethod def from_llm_and_url( cls, llm: BaseLanguageModel, open_api_url: str, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit from an OpenAPI Spec URL""" spec = OpenAPISpec.from_url(open_api_url) return cls.from_llm_and_spec( llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs ) @classmethod def from_llm_and_ai_plugin( cls, llm: BaseLanguageModel, ai_plugin: AIPlugin, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit from an OpenAPI Spec URL""" spec = OpenAPISpec.from_url(ai_plugin.api.url) # TODO: Merge optional Auth information with the `requests` argument return cls.from_llm_and_spec( llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs, ) @classmethod def from_llm_and_ai_plugin_url( cls, llm: BaseLanguageModel, ai_plugin_url: str, requests: Optional[Requests] = None, verbose: bool = False, **kwargs: Any, ) -> NLAToolkit: """Instantiate the toolkit from an OpenAPI Spec URL""" plugin = AIPlugin.from_url(ai_plugin_url) return cls.from_llm_and_ai_plugin( llm=llm, ai_plugin=plugin, requests=requests, verbose=verbose, **kwargs )
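
# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of building the toolkit from an OpenAPI spec URL via
# `from_llm_and_url` above. The spec URL is a placeholder, and the OpenAI LLM is
# only one possible BaseLanguageModel; both are assumptions.
if __name__ == "__main__":
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0)  # requires OPENAI_API_KEY in the environment
    toolkit = NLAToolkit.from_llm_and_url(
        llm,
        "https://example.com/openapi.json",  # placeholder OpenAPI spec URL
    )
    # One NLATool is created per exposed operation; hand them to an agent as needed.
    for tool in toolkit.get_tools():
        print(tool.name)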
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~llama_index.py
from typing import Any, Dict, List, cast from langchain_core.pydantic_v1 import Field from langchain_core.schema import BaseRetriever, Document from langchain.callbacks.manager import CallbackManagerForRetrieverRun class LlamaIndexRetriever(BaseRetriever): """`LlamaIndex` retriever. It is used for the question-answering with sources over an LlamaIndex data structure.""" index: Any """LlamaIndex index to query.""" query_kwargs: Dict = Field(default_factory=dict) """Keyword arguments to pass to the query method.""" def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Get documents relevant for a query.""" try: from llama_index.indices.base import BaseGPTIndex from llama_index.response.schema import Response except ImportError: raise ImportError( "You need to install `pip install llama-index` to use this retriever." ) index = cast(BaseGPTIndex, self.index) response = index.query(query, response_mode="no_text", **self.query_kwargs) response = cast(Response, response) # parse source nodes docs = [] for source_node in response.source_nodes: metadata = source_node.extra_info or {} docs.append( Document(page_content=source_node.source_text, metadata=metadata) ) return docs class LlamaIndexGraphRetriever(BaseRetriever): """`LlamaIndex` graph data structure retriever. It is used for question-answering with sources over an LlamaIndex graph data structure.""" graph: Any """LlamaIndex graph to query.""" query_configs: List[Dict] = Field(default_factory=list) """List of query configs to pass to the query method.""" def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Get documents relevant for a query.""" try: from llama_index.composability.graph import ( QUERY_CONFIG_TYPE, ComposableGraph, ) from llama_index.response.schema import Response except ImportError: raise ImportError( "You need to install `pip install llama-index` to use this retriever." ) graph = cast(ComposableGraph, self.graph) # for now, inject response_mode="no_text" into query configs for query_config in self.query_configs: query_config["response_mode"] = "no_text" query_configs = cast(List[QUERY_CONFIG_TYPE], self.query_configs) response = graph.query(query, query_configs=query_configs) response = cast(Response, response) # parse source nodes docs = [] for source_node in response.source_nodes: metadata = source_node.extra_info or {} docs.append( Document(page_content=source_node.source_text, metadata=metadata) ) return docs
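
# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of wrapping an already-built LlamaIndex index with
# the retriever above. How `index` is constructed depends on the installed
# llama-index version and is assumed to happen elsewhere; the query_kwargs shown
# are illustrative.
def _make_retriever(index: Any) -> LlamaIndexRetriever:
    """Wrap a pre-built LlamaIndex index that supports `.query(..., response_mode=...)`."""
    return LlamaIndexRetriever(index=index, query_kwargs={"similarity_top_k": 3})


# docs = _make_retriever(index).get_relevant_documents("What is LangChain?")
# Each returned Document carries the source node text plus its extra_info metadata.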
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~motorhead_memory.py
from typing import Any, Dict, List, Optional import requests from langchain_core.schema.messages import get_buffer_string from langchain.memory.chat_memory import BaseChatMemory MANAGED_URL = "https://api.getmetal.io/v1/motorhead" # LOCAL_URL = "http://localhost:8080" class MotorheadMemory(BaseChatMemory): """Chat message memory backed by Motorhead service.""" url: str = MANAGED_URL timeout: int = 3000 memory_key: str = "history" session_id: str context: Optional[str] = None # Managed Params api_key: Optional[str] = None client_id: Optional[str] = None def __get_headers(self) -> Dict[str, str]: is_managed = self.url == MANAGED_URL headers = { "Content-Type": "application/json", } if is_managed and not (self.api_key and self.client_id): raise ValueError( """ You must provide an API key or a client ID to use the managed version of Motorhead. Visit https://getmetal.io for more information. """ ) if is_managed and self.api_key and self.client_id: headers["x-metal-api-key"] = self.api_key headers["x-metal-client-id"] = self.client_id return headers async def init(self) -> None: res = requests.get( f"{self.url}/sessions/{self.session_id}/memory", timeout=self.timeout, headers=self.__get_headers(), ) res_data = res.json() res_data = res_data.get("data", res_data) # Handle Managed Version messages = res_data.get("messages", []) context = res_data.get("context", "NONE") for message in reversed(messages): if message["role"] == "AI": self.chat_memory.add_ai_message(message["content"]) else: self.chat_memory.add_user_message(message["content"]) if context and context != "NONE": self.context = context def load_memory_variables(self, values: Dict[str, Any]) -> Dict[str, Any]: if self.return_messages: return {self.memory_key: self.chat_memory.messages} else: return {self.memory_key: get_buffer_string(self.chat_memory.messages)} @property def memory_variables(self) -> List[str]: return [self.memory_key] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: input_str, output_str = self._get_input_output(inputs, outputs) requests.post( f"{self.url}/sessions/{self.session_id}/memory", timeout=self.timeout, json={ "messages": [ {"role": "Human", "content": f"{input_str}"}, {"role": "AI", "content": f"{output_str}"}, ] }, headers=self.__get_headers(), ) super().save_context(inputs, outputs) def delete_session(self) -> None: """Delete a session""" requests.delete(f"{self.url}/sessions/{self.session_id}/memory")
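
# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of the memory class above against a self-hosted
# Motorhead server. The localhost URL and session id are illustrative; the
# managed service would instead require `api_key` and `client_id`.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        memory = MotorheadMemory(
            url="http://localhost:8080",  # illustrative self-hosted endpoint
            session_id="chat-session-1",  # arbitrary session key
        )
        await memory.init()  # pulls any existing messages and context
        memory.save_context({"input": "hi"}, {"output": "hello there"})
        print(memory.load_memory_variables({}))

    asyncio.run(_demo())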
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~text_splitter.py
"""**Text Splitters** are classes for splitting text. **Class hierarchy:** .. code-block:: BaseDocumentTransformer --> TextSplitter --> <name>TextSplitter # Example: CharacterTextSplitter RecursiveCharacterTextSplitter --> <name>TextSplitter Note: **MarkdownHeaderTextSplitter** and **HTMLHeaderTextSplitter do not derive from TextSplitter. **Main helpers:** .. code-block:: Document, Tokenizer, Language, LineType, HeaderType """ # noqa: E501 from __future__ import annotations import asyncio import copy import logging import pathlib import re from abc import ABC, abstractmethod from dataclasses import dataclass from enum import Enum from functools import partial from io import BytesIO, StringIO from typing import ( AbstractSet, Any, Callable, Collection, Dict, Iterable, List, Literal, Optional, Sequence, Tuple, Type, TypedDict, TypeVar, Union, cast, ) import requests from langchain_core.schema import BaseDocumentTransformer from langchain.docstore.document import Document logger = logging.getLogger(__name__) TS = TypeVar("TS", bound="TextSplitter") def _make_spacy_pipeline_for_splitting(pipeline: str) -> Any: # avoid importing spacy try: import spacy except ImportError: raise ImportError( "Spacy is not installed, please install it with `pip install spacy`." ) if pipeline == "sentencizer": from spacy.lang.en import English sentencizer = English() sentencizer.add_pipe("sentencizer") else: sentencizer = spacy.load(pipeline, exclude=["ner", "tagger"]) return sentencizer def _split_text_with_regex( text: str, separator: str, keep_separator: bool ) -> List[str]: # Now that we have the separator, split the text if separator: if keep_separator: # The parentheses in the pattern keep the delimiters in the result. _splits = re.split(f"({separator})", text) splits = [_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)] if len(_splits) % 2 == 0: splits += _splits[-1:] splits = [_splits[0]] + splits else: splits = re.split(separator, text) else: splits = list(text) return [s for s in splits if s != ""] class TextSplitter(BaseDocumentTransformer, ABC): """Interface for splitting text into chunks.""" def __init__( self, chunk_size: int = 4000, chunk_overlap: int = 200, length_function: Callable[[str], int] = len, keep_separator: bool = False, add_start_index: bool = False, strip_whitespace: bool = True, ) -> None: """Create a new TextSplitter. Args: chunk_size: Maximum size of chunks to return chunk_overlap: Overlap in characters between chunks length_function: Function that measures the length of given chunks keep_separator: Whether to keep the separator in the chunks add_start_index: If `True`, includes chunk's start index in metadata strip_whitespace: If `True`, strips whitespace from the start and end of every document """ if chunk_overlap > chunk_size: raise ValueError( f"Got a larger chunk overlap ({chunk_overlap}) than chunk size " f"({chunk_size}), should be smaller." 
) self._chunk_size = chunk_size self._chunk_overlap = chunk_overlap self._length_function = length_function self._keep_separator = keep_separator self._add_start_index = add_start_index self._strip_whitespace = strip_whitespace @abstractmethod def split_text(self, text: str) -> List[str]: """Split text into multiple components.""" def create_documents( self, texts: List[str], metadatas: Optional[List[dict]] = None ) -> List[Document]: """Create documents from a list of texts.""" _metadatas = metadatas or [{}] * len(texts) documents = [] for i, text in enumerate(texts): index = -1 for chunk in self.split_text(text): metadata = copy.deepcopy(_metadatas[i]) if self._add_start_index: index = text.find(chunk, index + 1) metadata["start_index"] = index new_doc = Document(page_content=chunk, metadata=metadata) documents.append(new_doc) return documents def split_documents(self, documents: Iterable[Document]) -> List[Document]: """Split documents.""" texts, metadatas = [], [] for doc in documents: texts.append(doc.page_content) metadatas.append(doc.metadata) return self.create_documents(texts, metadatas=metadatas) def _join_docs(self, docs: List[str], separator: str) -> Optional[str]: text = separator.join(docs) if self._strip_whitespace: text = text.strip() if text == "": return None else: return text def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]: # We now want to combine these smaller pieces into medium size # chunks to send to the LLM. separator_len = self._length_function(separator) docs = [] current_doc: List[str] = [] total = 0 for d in splits: _len = self._length_function(d) if ( total + _len + (separator_len if len(current_doc) > 0 else 0) > self._chunk_size ): if total > self._chunk_size: logger.warning( f"Created a chunk of size {total}, " f"which is longer than the specified {self._chunk_size}" ) if len(current_doc) > 0: doc = self._join_docs(current_doc, separator) if doc is not None: docs.append(doc) # Keep on popping if: # - we have a larger chunk than in the chunk overlap # - or if we still have any chunks and the length is long while total > self._chunk_overlap or ( total + _len + (separator_len if len(current_doc) > 0 else 0) > self._chunk_size and total > 0 ): total -= self._length_function(current_doc[0]) + ( separator_len if len(current_doc) > 1 else 0 ) current_doc = current_doc[1:] current_doc.append(d) total += _len + (separator_len if len(current_doc) > 1 else 0) doc = self._join_docs(current_doc, separator) if doc is not None: docs.append(doc) return docs @classmethod def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter: """Text splitter that uses HuggingFace tokenizer to count length.""" try: from transformers import PreTrainedTokenizerBase if not isinstance(tokenizer, PreTrainedTokenizerBase): raise ValueError( "Tokenizer received was not an instance of PreTrainedTokenizerBase" ) def _huggingface_tokenizer_length(text: str) -> int: return len(tokenizer.encode(text)) except ImportError: raise ValueError( "Could not import transformers python package. " "Please install it with `pip install transformers`." 
) return cls(length_function=_huggingface_tokenizer_length, **kwargs) @classmethod def from_tiktoken_encoder( cls: Type[TS], encoding_name: str = "gpt2", model_name: Optional[str] = None, allowed_special: Union[Literal["all"], AbstractSet[str]] = set(), disallowed_special: Union[Literal["all"], Collection[str]] = "all", **kwargs: Any, ) -> TS: """Text splitter that uses tiktoken encoder to count length.""" try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to calculate max_tokens_for_prompt. " "Please install it with `pip install tiktoken`." ) if model_name is not None: enc = tiktoken.encoding_for_model(model_name) else: enc = tiktoken.get_encoding(encoding_name) def _tiktoken_encoder(text: str) -> int: return len( enc.encode( text, allowed_special=allowed_special, disallowed_special=disallowed_special, ) ) if issubclass(cls, TokenTextSplitter): extra_kwargs = { "encoding_name": encoding_name, "model_name": model_name, "allowed_special": allowed_special, "disallowed_special": disallowed_special, } kwargs = {**kwargs, **extra_kwargs} return cls(length_function=_tiktoken_encoder, **kwargs) def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: """Transform sequence of documents by splitting them.""" return self.split_documents(list(documents)) async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: """Asynchronously transform a sequence of documents by splitting them.""" return await asyncio.get_running_loop().run_in_executor( None, partial(self.transform_documents, **kwargs), documents ) class CharacterTextSplitter(TextSplitter): """Splitting text that looks at characters.""" def __init__( self, separator: str = "\n\n", is_separator_regex: bool = False, **kwargs: Any ) -> None: """Create a new TextSplitter.""" super().__init__(**kwargs) self._separator = separator self._is_separator_regex = is_separator_regex def split_text(self, text: str) -> List[str]: """Split incoming text and return chunks.""" # First we naively split the large input into a bunch of smaller ones. separator = ( self._separator if self._is_separator_regex else re.escape(self._separator) ) splits = _split_text_with_regex(text, separator, self._keep_separator) _separator = "" if self._keep_separator else self._separator return self._merge_splits(splits, _separator) class LineType(TypedDict): """Line type as typed dict.""" metadata: Dict[str, str] content: str class HeaderType(TypedDict): """Header type as typed dict.""" level: int name: str data: str class MarkdownHeaderTextSplitter: """Splitting markdown files based on specified headers.""" def __init__( self, headers_to_split_on: List[Tuple[str, str]], return_each_line: bool = False ): """Create a new MarkdownHeaderTextSplitter. 
Args: headers_to_split_on: Headers we want to track return_each_line: Return each line w/ associated headers """ # Output line-by-line or aggregated into chunks w/ common headers self.return_each_line = return_each_line # Given the headers we want to split on, # (e.g., "#, ##, etc") order by length self.headers_to_split_on = sorted( headers_to_split_on, key=lambda split: len(split[0]), reverse=True ) def aggregate_lines_to_chunks(self, lines: List[LineType]) -> List[Document]: """Combine lines with common metadata into chunks Args: lines: Line of text / associated header metadata """ aggregated_chunks: List[LineType] = [] for line in lines: if ( aggregated_chunks and aggregated_chunks[-1]["metadata"] == line["metadata"] ): # If the last line in the aggregated list # has the same metadata as the current line, # append the current content to the last lines's content aggregated_chunks[-1]["content"] += " \n" + line["content"] else: # Otherwise, append the current line to the aggregated list aggregated_chunks.append(line) return [ Document(page_content=chunk["content"], metadata=chunk["metadata"]) for chunk in aggregated_chunks ] def split_text(self, text: str) -> List[Document]: """Split markdown file Args: text: Markdown file""" # Split the input text by newline character ("\n"). lines = text.split("\n") # Final output lines_with_metadata: List[LineType] = [] # Content and metadata of the chunk currently being processed current_content: List[str] = [] current_metadata: Dict[str, str] = {} # Keep track of the nested header structure # header_stack: List[Dict[str, Union[int, str]]] = [] header_stack: List[HeaderType] = [] initial_metadata: Dict[str, str] = {} in_code_block = False for line in lines: stripped_line = line.strip() if stripped_line.startswith("```"): # code block in one row if stripped_line.count("```") >= 2: in_code_block = False else: in_code_block = not in_code_block if in_code_block: current_content.append(stripped_line) continue # Check each line against each of the header types (e.g., #, ##) for sep, name in self.headers_to_split_on: # Check if line starts with a header that we intend to split on if stripped_line.startswith(sep) and ( # Header with no text OR header is followed by space # Both are valid conditions that sep is being used a header len(stripped_line) == len(sep) or stripped_line[len(sep)] == " " ): # Ensure we are tracking the header as metadata if name is not None: # Get the current header level current_header_level = sep.count("#") # Pop out headers of lower or same level from the stack while ( header_stack and header_stack[-1]["level"] >= current_header_level ): # We have encountered a new header # at the same or higher level popped_header = header_stack.pop() # Clear the metadata for the # popped header in initial_metadata if popped_header["name"] in initial_metadata: initial_metadata.pop(popped_header["name"]) # Push the current header to the stack header: HeaderType = { "level": current_header_level, "name": name, "data": stripped_line[len(sep) :].strip(), } header_stack.append(header) # Update initial_metadata with the current header initial_metadata[name] = header["data"] # Add the previous line to the lines_with_metadata # only if current_content is not empty if current_content: lines_with_metadata.append( { "content": "\n".join(current_content), "metadata": current_metadata.copy(), } ) current_content.clear() break else: if stripped_line: current_content.append(stripped_line) elif current_content: lines_with_metadata.append( { "content": 
"\n".join(current_content), "metadata": current_metadata.copy(), } ) current_content.clear() current_metadata = initial_metadata.copy() if current_content: lines_with_metadata.append( {"content": "\n".join(current_content), "metadata": current_metadata} ) # lines_with_metadata has each line with associated header metadata # aggregate these into chunks based on common metadata if not self.return_each_line: return self.aggregate_lines_to_chunks(lines_with_metadata) else: return [ Document(page_content=chunk["content"], metadata=chunk["metadata"]) for chunk in lines_with_metadata ] class ElementType(TypedDict): """Element type as typed dict.""" url: str xpath: str content: str metadata: Dict[str, str] class HTMLHeaderTextSplitter: """ Splitting HTML files based on specified headers. Requires lxml package. """ def __init__( self, headers_to_split_on: List[Tuple[str, str]], return_each_element: bool = False, ): """Create a new HTMLHeaderTextSplitter. Args: headers_to_split_on: list of tuples of headers we want to track mapped to (arbitrary) keys for metadata. Allowed header values: h1, h2, h3, h4, h5, h6 e.g. [("h1", "Header 1"), ("h2", "Header 2)]. return_each_element: Return each element w/ associated headers. """ # Output element-by-element or aggregated into chunks w/ common headers self.return_each_element = return_each_element self.headers_to_split_on = sorted(headers_to_split_on) def aggregate_elements_to_chunks( self, elements: List[ElementType] ) -> List[Document]: """Combine elements with common metadata into chunks Args: elements: HTML element content with associated identifying info and metadata """ aggregated_chunks: List[ElementType] = [] for element in elements: if ( aggregated_chunks and aggregated_chunks[-1]["metadata"] == element["metadata"] ): # If the last element in the aggregated list # has the same metadata as the current element, # append the current content to the last element's content aggregated_chunks[-1]["content"] += " \n" + element["content"] else: # Otherwise, append the current element to the aggregated list aggregated_chunks.append(element) return [ Document(page_content=chunk["content"], metadata=chunk["metadata"]) for chunk in aggregated_chunks ] def split_text_from_url(self, url: str) -> List[Document]: """Split HTML from web URL Args: url: web URL """ r = requests.get(url) return self.split_text_from_file(BytesIO(r.content)) def split_text(self, text: str) -> List[Document]: """Split HTML text string Args: text: HTML text """ return self.split_text_from_file(StringIO(text)) def split_text_from_file(self, file: Any) -> List[Document]: """Split HTML file Args: file: HTML file """ try: from lxml import etree except ImportError as e: raise ImportError( "Unable to import lxml, please install with `pip install lxml`." ) from e # use lxml library to parse html document and return xml ElementTree parser = etree.HTMLParser() tree = etree.parse(file, parser) # document transformation for "structure-aware" chunking is handled with xsl. # see comments in html_chunks_with_headers.xslt for more detailed information. 
xslt_path = ( pathlib.Path(__file__).parent / "document_transformers/xsl/html_chunks_with_headers.xslt" ) xslt_tree = etree.parse(xslt_path) transform = etree.XSLT(xslt_tree) result = transform(tree) result_dom = etree.fromstring(str(result)) # create filter and mapping for header metadata header_filter = [header[0] for header in self.headers_to_split_on] header_mapping = dict(self.headers_to_split_on) # map xhtml namespace prefix ns_map = {"h": "http://www.w3.org/1999/xhtml"} # build list of elements from DOM elements = [] for element in result_dom.findall("*//*", ns_map): if element.findall("*[@class='headers']") or element.findall( "*[@class='chunk']" ): elements.append( ElementType( url=file, xpath="".join( [ node.text for node in element.findall("*[@class='xpath']", ns_map) ] ), content="".join( [ node.text for node in element.findall("*[@class='chunk']", ns_map) ] ), metadata={ # Add text of specified headers to metadata using header # mapping. header_mapping[node.tag]: node.text for node in filter( lambda x: x.tag in header_filter, element.findall("*[@class='headers']/*", ns_map), ) }, ) ) if not self.return_each_element: return self.aggregate_elements_to_chunks(elements) else: return [ Document(page_content=chunk["content"], metadata=chunk["metadata"]) for chunk in elements ] # should be in newer Python versions (3.10+) # @dataclass(frozen=True, kw_only=True, slots=True) @dataclass(frozen=True) class Tokenizer: """Tokenizer data class.""" chunk_overlap: int """Overlap in tokens between chunks""" tokens_per_chunk: int """Maximum number of tokens per chunk""" decode: Callable[[List[int]], str] """ Function to decode a list of token ids to a string""" encode: Callable[[str], List[int]] """ Function to encode a string to a list of token ids""" def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]: """Split incoming text and return chunks using tokenizer.""" splits: List[str] = [] input_ids = tokenizer.encode(text) start_idx = 0 cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids)) chunk_ids = input_ids[start_idx:cur_idx] while start_idx < len(input_ids): splits.append(tokenizer.decode(chunk_ids)) start_idx += tokenizer.tokens_per_chunk - tokenizer.chunk_overlap cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids)) chunk_ids = input_ids[start_idx:cur_idx] return splits class TokenTextSplitter(TextSplitter): """Splitting text to tokens using model tokenizer.""" def __init__( self, encoding_name: str = "gpt2", model_name: Optional[str] = None, allowed_special: Union[Literal["all"], AbstractSet[str]] = set(), disallowed_special: Union[Literal["all"], Collection[str]] = "all", **kwargs: Any, ) -> None: """Create a new TextSplitter.""" super().__init__(**kwargs) try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to for TokenTextSplitter. " "Please install it with `pip install tiktoken`." 
) if model_name is not None: enc = tiktoken.encoding_for_model(model_name) else: enc = tiktoken.get_encoding(encoding_name) self._tokenizer = enc self._allowed_special = allowed_special self._disallowed_special = disallowed_special def split_text(self, text: str) -> List[str]: def _encode(_text: str) -> List[int]: return self._tokenizer.encode( _text, allowed_special=self._allowed_special, disallowed_special=self._disallowed_special, ) tokenizer = Tokenizer( chunk_overlap=self._chunk_overlap, tokens_per_chunk=self._chunk_size, decode=self._tokenizer.decode, encode=_encode, ) return split_text_on_tokens(text=text, tokenizer=tokenizer) class SentenceTransformersTokenTextSplitter(TextSplitter): """Splitting text to tokens using sentence model tokenizer.""" def __init__( self, chunk_overlap: int = 50, model_name: str = "sentence-transformers/all-mpnet-base-v2", tokens_per_chunk: Optional[int] = None, **kwargs: Any, ) -> None: """Create a new TextSplitter.""" super().__init__(**kwargs, chunk_overlap=chunk_overlap) try: from sentence_transformers import SentenceTransformer except ImportError: raise ImportError( "Could not import sentence_transformer python package. " "This is needed in order to for SentenceTransformersTokenTextSplitter. " "Please install it with `pip install sentence-transformers`." ) self.model_name = model_name self._model = SentenceTransformer(self.model_name) self.tokenizer = self._model.tokenizer self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk) def _initialize_chunk_configuration( self, *, tokens_per_chunk: Optional[int] ) -> None: self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length) if tokens_per_chunk is None: self.tokens_per_chunk = self.maximum_tokens_per_chunk else: self.tokens_per_chunk = tokens_per_chunk if self.tokens_per_chunk > self.maximum_tokens_per_chunk: raise ValueError( f"The token limit of the models '{self.model_name}'" f" is: {self.maximum_tokens_per_chunk}." f" Argument tokens_per_chunk={self.tokens_per_chunk}" f" > maximum token limit." ) def split_text(self, text: str) -> List[str]: def encode_strip_start_and_stop_token_ids(text: str) -> List[int]: return self._encode(text)[1:-1] tokenizer = Tokenizer( chunk_overlap=self._chunk_overlap, tokens_per_chunk=self.tokens_per_chunk, decode=self.tokenizer.decode, encode=encode_strip_start_and_stop_token_ids, ) return split_text_on_tokens(text=text, tokenizer=tokenizer) def count_tokens(self, *, text: str) -> int: return len(self._encode(text)) _max_length_equal_32_bit_integer: int = 2**32 def _encode(self, text: str) -> List[int]: token_ids_with_start_and_end_token_ids = self.tokenizer.encode( text, max_length=self._max_length_equal_32_bit_integer, truncation="do_not_truncate", ) return token_ids_with_start_and_end_token_ids class Language(str, Enum): """Enum of the programming languages.""" CPP = "cpp" GO = "go" JAVA = "java" KOTLIN = "kotlin" JS = "js" TS = "ts" PHP = "php" PROTO = "proto" PYTHON = "python" RST = "rst" RUBY = "ruby" RUST = "rust" SCALA = "scala" SWIFT = "swift" MARKDOWN = "markdown" LATEX = "latex" HTML = "html" SOL = "sol" CSHARP = "csharp" COBOL = "cobol" class RecursiveCharacterTextSplitter(TextSplitter): """Splitting text by recursively look at characters. Recursively tries to split by different characters to find one that works. 
""" def __init__( self, separators: Optional[List[str]] = None, keep_separator: bool = True, is_separator_regex: bool = False, **kwargs: Any, ) -> None: """Create a new TextSplitter.""" super().__init__(keep_separator=keep_separator, **kwargs) self._separators = separators or ["\n\n", "\n", " ", ""] self._is_separator_regex = is_separator_regex def _split_text(self, text: str, separators: List[str]) -> List[str]: """Split incoming text and return chunks.""" final_chunks = [] # Get appropriate separator to use separator = separators[-1] new_separators = [] for i, _s in enumerate(separators): _separator = _s if self._is_separator_regex else re.escape(_s) if _s == "": separator = _s break if re.search(_separator, text): separator = _s new_separators = separators[i + 1 :] break _separator = separator if self._is_separator_regex else re.escape(separator) splits = _split_text_with_regex(text, _separator, self._keep_separator) # Now go merging things, recursively splitting longer texts. _good_splits = [] _separator = "" if self._keep_separator else separator for s in splits: if self._length_function(s) < self._chunk_size: _good_splits.append(s) else: if _good_splits: merged_text = self._merge_splits(_good_splits, _separator) final_chunks.extend(merged_text) _good_splits = [] if not new_separators: final_chunks.append(s) else: other_info = self._split_text(s, new_separators) final_chunks.extend(other_info) if _good_splits: merged_text = self._merge_splits(_good_splits, _separator) final_chunks.extend(merged_text) return final_chunks def split_text(self, text: str) -> List[str]: return self._split_text(text, self._separators) @classmethod def from_language( cls, language: Language, **kwargs: Any ) -> RecursiveCharacterTextSplitter: separators = cls.get_separators_for_language(language) return cls(separators=separators, is_separator_regex=True, **kwargs) @staticmethod def get_separators_for_language(language: Language) -> List[str]: if language == Language.CPP: return [ # Split along class definitions "\nclass ", # Split along function definitions "\nvoid ", "\nint ", "\nfloat ", "\ndouble ", # Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nswitch ", "\ncase ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.GO: return [ # Split along function definitions "\nfunc ", "\nvar ", "\nconst ", "\ntype ", # Split along control flow statements "\nif ", "\nfor ", "\nswitch ", "\ncase ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.JAVA: return [ # Split along class definitions "\nclass ", # Split along method definitions "\npublic ", "\nprotected ", "\nprivate ", "\nstatic ", # Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nswitch ", "\ncase ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.KOTLIN: return [ # Split along class definitions "\nclass ", # Split along method definitions "\npublic ", "\nprotected ", "\nprivate ", "\ninternal ", "\ncompanion ", "\nfun ", "\nval ", "\nvar ", # Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nwhen ", "\ncase ", "\nelse ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.JS: return [ # Split along function definitions "\nfunction ", "\nconst ", "\nlet ", "\nvar ", "\nclass ", # Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nswitch ", "\ncase ", "\ndefault ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif 
language == Language.TS: return [ "\nenum ", "\ninterface ", "\nnamespace ", "\ntype ", # Split along class definitions "\nclass ", # Split along function definitions "\nfunction ", "\nconst ", "\nlet ", "\nvar ", # Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nswitch ", "\ncase ", "\ndefault ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.PHP: return [ # Split along function definitions "\nfunction ", # Split along class definitions "\nclass ", # Split along control flow statements "\nif ", "\nforeach ", "\nwhile ", "\ndo ", "\nswitch ", "\ncase ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.PROTO: return [ # Split along message definitions "\nmessage ", # Split along service definitions "\nservice ", # Split along enum definitions "\nenum ", # Split along option definitions "\noption ", # Split along import statements "\nimport ", # Split along syntax declarations "\nsyntax ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.PYTHON: return [ # First, try to split along class definitions "\nclass ", "\ndef ", "\n\tdef ", # Now split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.RST: return [ # Split along section titles "\n=+\n", "\n-+\n", "\n\\*+\n", # Split along directive markers "\n\n.. *\n\n", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.RUBY: return [ # Split along method definitions "\ndef ", "\nclass ", # Split along control flow statements "\nif ", "\nunless ", "\nwhile ", "\nfor ", "\ndo ", "\nbegin ", "\nrescue ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.RUST: return [ # Split along function definitions "\nfn ", "\nconst ", "\nlet ", # Split along control flow statements "\nif ", "\nwhile ", "\nfor ", "\nloop ", "\nmatch ", "\nconst ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.SCALA: return [ # Split along class definitions "\nclass ", "\nobject ", # Split along method definitions "\ndef ", "\nval ", "\nvar ", # Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\nmatch ", "\ncase ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.SWIFT: return [ # Split along function definitions "\nfunc ", # Split along class definitions "\nclass ", "\nstruct ", "\nenum ", # Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\ndo ", "\nswitch ", "\ncase ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.MARKDOWN: return [ # First, try to split along Markdown headings (starting with level 2) "\n#{1,6} ", # Note the alternative syntax for headings (below) is not handled here # Heading level 2 # --------------- # End of code block "```\n", # Horizontal lines "\n\\*\\*\\*+\n", "\n---+\n", "\n___+\n", # Note that this splitter doesn't handle horizontal lines defined # by *three or more* of ***, ---, or ___, but this is not handled "\n\n", "\n", " ", "", ] elif language == Language.LATEX: return [ # First, try to split along Latex sections "\n\\\\chapter{", "\n\\\\section{", "\n\\\\subsection{", "\n\\\\subsubsection{", # Now split by environments "\n\\\\begin{enumerate}", "\n\\\\begin{itemize}", "\n\\\\begin{description}", "\n\\\\begin{list}", "\n\\\\begin{quote}", "\n\\\\begin{quotation}", "\n\\\\begin{verse}", "\n\\\\begin{verbatim}", # Now split by math environments 
"\n\\\begin{align}", "$$", "$", # Now split by the normal type of lines " ", "", ] elif language == Language.HTML: return [ # First, try to split along HTML tags "<body", "<div", "<p", "<br", "<li", "<h1", "<h2", "<h3", "<h4", "<h5", "<h6", "<span", "<table", "<tr", "<td", "<th", "<ul", "<ol", "<header", "<footer", "<nav", # Head "<head", "<style", "<script", "<meta", "<title", "", ] elif language == Language.CSHARP: return [ "\ninterface ", "\nenum ", "\nimplements ", "\ndelegate ", "\nevent ", # Split along class definitions "\nclass ", "\nabstract ", # Split along method definitions "\npublic ", "\nprotected ", "\nprivate ", "\nstatic ", "\nreturn ", # Split along control flow statements "\nif ", "\ncontinue ", "\nfor ", "\nforeach ", "\nwhile ", "\nswitch ", "\nbreak ", "\ncase ", "\nelse ", # Split by exceptions "\ntry ", "\nthrow ", "\nfinally ", "\ncatch ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.SOL: return [ # Split along compiler information definitions "\npragma ", "\nusing ", # Split along contract definitions "\ncontract ", "\ninterface ", "\nlibrary ", # Split along method definitions "\nconstructor ", "\ntype ", "\nfunction ", "\nevent ", "\nmodifier ", "\nerror ", "\nstruct ", "\nenum ", # Split along control flow statements "\nif ", "\nfor ", "\nwhile ", "\ndo while ", "\nassembly ", # Split by the normal type of lines "\n\n", "\n", " ", "", ] elif language == Language.COBOL: return [ # Split along divisions "\nIDENTIFICATION DIVISION.", "\nENVIRONMENT DIVISION.", "\nDATA DIVISION.", "\nPROCEDURE DIVISION.", # Split along sections within DATA DIVISION "\nWORKING-STORAGE SECTION.", "\nLINKAGE SECTION.", "\nFILE SECTION.", # Split along sections within PROCEDURE DIVISION "\nINPUT-OUTPUT SECTION.", # Split along paragraphs and common statements "\nOPEN ", "\nCLOSE ", "\nREAD ", "\nWRITE ", "\nIF ", "\nELSE ", "\nMOVE ", "\nPERFORM ", "\nUNTIL ", "\nVARYING ", "\nACCEPT ", "\nDISPLAY ", "\nSTOP RUN.", # Split by the normal type of lines "\n", " ", "", ] else: raise ValueError( f"Language {language} is not supported! " f"Please choose from {list(Language)}" ) class NLTKTextSplitter(TextSplitter): """Splitting text using NLTK package.""" def __init__( self, separator: str = "\n\n", language: str = "english", **kwargs: Any ) -> None: """Initialize the NLTK splitter.""" super().__init__(**kwargs) try: from nltk.tokenize import sent_tokenize self._tokenizer = sent_tokenize except ImportError: raise ImportError( "NLTK is not installed, please install it with `pip install nltk`." ) self._separator = separator self._language = language def split_text(self, text: str) -> List[str]: """Split incoming text and return chunks.""" # First we naively split the large input into a bunch of smaller ones. splits = self._tokenizer(text, language=self._language) return self._merge_splits(splits, self._separator) class SpacyTextSplitter(TextSplitter): """Splitting text using Spacy package. Per default, Spacy's `en_core_web_sm` model is used. For a faster, but potentially less accurate splitting, you can use `pipeline='sentencizer'`. 
""" def __init__( self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any ) -> None: """Initialize the spacy text splitter.""" super().__init__(**kwargs) self._tokenizer = _make_spacy_pipeline_for_splitting(pipeline) self._separator = separator def split_text(self, text: str) -> List[str]: """Split incoming text and return chunks.""" splits = (s.text for s in self._tokenizer(text).sents) return self._merge_splits(splits, self._separator) # For backwards compatibility class PythonCodeTextSplitter(RecursiveCharacterTextSplitter): """Attempts to split the text along Python syntax.""" def __init__(self, **kwargs: Any) -> None: """Initialize a PythonCodeTextSplitter.""" separators = self.get_separators_for_language(Language.PYTHON) super().__init__(separators=separators, **kwargs) class MarkdownTextSplitter(RecursiveCharacterTextSplitter): """Attempts to split the text along Markdown-formatted headings.""" def __init__(self, **kwargs: Any) -> None: """Initialize a MarkdownTextSplitter.""" separators = self.get_separators_for_language(Language.MARKDOWN) super().__init__(separators=separators, **kwargs) class LatexTextSplitter(RecursiveCharacterTextSplitter): """Attempts to split the text along Latex-formatted layout elements.""" def __init__(self, **kwargs: Any) -> None: """Initialize a LatexTextSplitter.""" separators = self.get_separators_for_language(Language.LATEX) super().__init__(separators=separators, **kwargs)
[ "\n" ]
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~agents~format_scratchpad~test_xml.py
from langchain_core.schema.agent import AgentAction from langchain.agents.format_scratchpad.xml import format_xml def test_single_agent_action_observation() -> None: # Arrange agent_action = AgentAction(tool="Tool1", tool_input="Input1", log="Log1") observation = "Observation1" intermediate_steps = [(agent_action, observation)] # Act result = format_xml(intermediate_steps) expected_result = """<tool>Tool1</tool><tool_input>Input1\ </tool_input><observation>Observation1</observation>""" # Assert assert result == expected_result def test_multiple_agent_actions_observations() -> None: # Arrange agent_action1 = AgentAction(tool="Tool1", tool_input="Input1", log="Log1") agent_action2 = AgentAction(tool="Tool2", tool_input="Input2", log="Log2") observation1 = "Observation1" observation2 = "Observation2" intermediate_steps = [(agent_action1, observation1), (agent_action2, observation2)] # Act result = format_xml(intermediate_steps) # Assert expected_result = """<tool>Tool1</tool><tool_input>Input1\ </tool_input><observation>Observation1</observation><tool>\ Tool2</tool><tool_input>Input2</tool_input><observation>\ Observation2</observation>""" assert result == expected_result def test_empty_list_agent_actions() -> None: result = format_xml([]) assert result == ""
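# --- Illustrative sketch ------------------------------------------------------
# `format_xml` flattens (AgentAction, observation) pairs into the XML-ish
# scratchpad string asserted in the tests above; a quick manual check mirrors
# the single-step case. The tool name and observation are made-up values.
if __name__ == "__main__":
    step = (AgentAction(tool="Search", tool_input="weather", log=""), "Sunny")
    print(format_xml([step]))
    # <tool>Search</tool><tool_input>weather</tool_input><observation>Sunny</observation>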
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~chat_models~test_baichuan.py
import pytest from langchain_core.pydantic_v1 import SecretStr from langchain_core.schema.messages import ( AIMessage, AIMessageChunk, ChatMessage, FunctionMessage, HumanMessage, HumanMessageChunk, SystemMessage, ) from langchain.chat_models.baichuan import ( _convert_delta_to_message_chunk, _convert_dict_to_message, _convert_message_to_dict, _signature, ) def test__convert_message_to_dict_human() -> None: message = HumanMessage(content="foo") result = _convert_message_to_dict(message) expected_output = {"role": "user", "content": "foo"} assert result == expected_output def test__convert_message_to_dict_ai() -> None: message = AIMessage(content="foo") result = _convert_message_to_dict(message) expected_output = {"role": "assistant", "content": "foo"} assert result == expected_output def test__convert_message_to_dict_system() -> None: message = SystemMessage(content="foo") with pytest.raises(TypeError) as e: _convert_message_to_dict(message) assert "Got unknown type" in str(e) def test__convert_message_to_dict_function() -> None: message = FunctionMessage(name="foo", content="bar") with pytest.raises(TypeError) as e: _convert_message_to_dict(message) assert "Got unknown type" in str(e) def test__convert_dict_to_message_human() -> None: message_dict = {"role": "user", "content": "foo"} result = _convert_dict_to_message(message_dict) expected_output = HumanMessage(content="foo") assert result == expected_output def test__convert_dict_to_message_ai() -> None: message_dict = {"role": "assistant", "content": "foo"} result = _convert_dict_to_message(message_dict) expected_output = AIMessage(content="foo") assert result == expected_output def test__convert_dict_to_message_other_role() -> None: message_dict = {"role": "system", "content": "foo"} result = _convert_dict_to_message(message_dict) expected_output = ChatMessage(role="system", content="foo") assert result == expected_output def test__convert_delta_to_message_assistant() -> None: delta = {"role": "assistant", "content": "foo"} result = _convert_delta_to_message_chunk(delta, AIMessageChunk) expected_output = AIMessageChunk(content="foo") assert result == expected_output def test__convert_delta_to_message_human() -> None: delta = {"role": "user", "content": "foo"} result = _convert_delta_to_message_chunk(delta, HumanMessageChunk) expected_output = HumanMessageChunk(content="foo") assert result == expected_output def test__signature() -> None: secret_key = SecretStr("YOUR_SECRET_KEY") result = _signature( secret_key=secret_key, payload={ "model": "Baichuan2-53B", "messages": [{"role": "user", "content": "Hi"}], }, timestamp=1697734335, ) # The signature was generated by the demo provided by Baichuan. # https://platform.baichuan-ai.com/docs/api#4 expected_output = "24a50b2db1648e25a244c67c5ab57d3f" assert result == expected_output
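# --- Illustrative sketch ------------------------------------------------------
# The conversion helpers exercised above are symmetric for the user/assistant
# roles, so converting a message to a dict and back preserves it. This only
# restates behaviour already pinned down by the unit tests.
if __name__ == "__main__":
    original = HumanMessage(content="foo")
    as_dict = _convert_message_to_dict(original)  # {"role": "user", "content": "foo"}
    assert _convert_dict_to_message(as_dict) == original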
[ "bar", "foo", "Hi" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~hippo.py
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document if TYPE_CHECKING: from transwarp_hippo_api.hippo_client import HippoClient # Default connection DEFAULT_HIPPO_CONNECTION = { "host": "localhost", "port": "7788", "username": "admin", "password": "admin", } logger = logging.getLogger(__name__) class Hippo(VectorStore): """`Hippo` vector store. You need to install `hippo-api` and run Hippo. Please visit our official website for how to run a Hippo instance: https://www.transwarp.cn/starwarp Args: embedding_function (Embeddings): Function used to embed the text. table_name (str): Which Hippo table to use. Defaults to "test". database_name (str): Which Hippo database to use. Defaults to "default". number_of_shards (int): The number of shards for the Hippo table.Defaults to 1. number_of_replicas (int): The number of replicas for the Hippo table.Defaults to 1. connection_args (Optional[dict[str, any]]): The connection args used for this class comes in the form of a dict. index_params (Optional[dict]): Which index params to use. Defaults to IVF_FLAT. drop_old (Optional[bool]): Whether to drop the current collection. Defaults to False. primary_field (str): Name of the primary key field. Defaults to "pk". text_field (str): Name of the text field. Defaults to "text". vector_field (str): Name of the vector field. Defaults to "vector". The connection args used for this class comes in the form of a dict, here are a few of the options: host (str): The host of Hippo instance. Default at "localhost". port (str/int): The port of Hippo instance. Default at 7788. user (str): Use which user to connect to Hippo instance. If user and password are provided, we will add related header in every RPC call. password (str): Required when user is provided. The password corresponding to the user. Example: .. code-block:: python from langchain.vectorstores import Hippo from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() # Connect to a hippo instance on localhost vector_store = Hippo.from_documents( docs, embedding=embeddings, table_name="langchain_test", connection_args=HIPPO_CONNECTION ) Raises: ValueError: If the hippo-api python package is not installed. 
""" def __init__( self, embedding_function: Embeddings, table_name: str = "test", database_name: str = "default", number_of_shards: int = 1, number_of_replicas: int = 1, connection_args: Optional[Dict[str, Any]] = None, index_params: Optional[dict] = None, drop_old: Optional[bool] = False, ): self.number_of_shards = number_of_shards self.number_of_replicas = number_of_replicas self.embedding_func = embedding_function self.table_name = table_name self.database_name = database_name self.index_params = index_params # In order for a collection to be compatible, # 'pk' should be an auto-increment primary key and string self._primary_field = "pk" # In order for compatibility, the text field will need to be called "text" self._text_field = "text" # In order for compatibility, the vector field needs to be called "vector" self._vector_field = "vector" self.fields: List[str] = [] # Create the connection to the server if connection_args is None: connection_args = DEFAULT_HIPPO_CONNECTION self.hc = self._create_connection_alias(connection_args) self.col: Any = None # If the collection exists, delete it try: if ( self.hc.check_table_exists(self.table_name, self.database_name) and drop_old ): self.hc.delete_table(self.table_name, self.database_name) except Exception as e: logging.error( f"An error occurred while deleting the table " f"{self.table_name}: {e}" ) raise try: if self.hc.check_table_exists(self.table_name, self.database_name): self.col = self.hc.get_table(self.table_name, self.database_name) except Exception as e: logging.error( f"An error occurred while getting the table " f"{self.table_name}: {e}" ) raise # Initialize the vector database self._get_env() def _create_connection_alias(self, connection_args: dict) -> HippoClient: """Create the connection to the Hippo server.""" # Grab the connection arguments that are used for checking existing connection try: from transwarp_hippo_api.hippo_client import HippoClient except ImportError as e: raise ImportError( "Unable to import transwarp_hipp_api, please install with " "`pip install hippo-api`." 
) from e host: str = connection_args.get("host", None) port: int = connection_args.get("port", None) username: str = connection_args.get("username", "shiva") password: str = connection_args.get("password", "shiva") # Order of use is host/port, uri, address if host is not None and port is not None: if "," in host: hosts = host.split(",") given_address = ",".join([f"{h}:{port}" for h in hosts]) else: given_address = str(host) + ":" + str(port) else: raise ValueError("Missing standard address type for reuse attempt") try: logger.info(f"create HippoClient[{given_address}]") return HippoClient([given_address], username=username, pwd=password) except Exception as e: logger.error("Failed to create new connection") raise e def _get_env( self, embeddings: Optional[list] = None, metadatas: Optional[List[dict]] = None ) -> None: logger.info("init ...") if embeddings is not None: logger.info("create collection") self._create_collection(embeddings, metadatas) self._extract_fields() self._create_index() def _create_collection( self, embeddings: list, metadatas: Optional[List[dict]] = None ) -> None: from transwarp_hippo_api.hippo_client import HippoField from transwarp_hippo_api.hippo_type import HippoType # Determine embedding dim dim = len(embeddings[0]) logger.debug(f"[_create_collection] dim: {dim}") fields = [] # Create the primary key field fields.append(HippoField(self._primary_field, True, HippoType.STRING)) # Create the text field fields.append(HippoField(self._text_field, False, HippoType.STRING)) # Create the vector field, supports binary or float vectors # to The binary vector type is to be developed. fields.append( HippoField( self._vector_field, False, HippoType.FLOAT_VECTOR, type_params={"dimension": dim}, ) ) # to In Hippo,there is no method similar to the infer_type_data # types, so currently all non-vector data is converted to string type. if metadatas: # # Create FieldSchema for each entry in metadata. for key, value in metadatas[0].items(): # # Infer the corresponding datatype of the metadata if isinstance(value, list): value_dim = len(value) fields.append( HippoField( key, False, HippoType.FLOAT_VECTOR, type_params={"dimension": value_dim}, ) ) else: fields.append(HippoField(key, False, HippoType.STRING)) logger.debug(f"[_create_collection] fields: {fields}") # Create the collection self.hc.create_table( name=self.table_name, auto_id=True, fields=fields, database_name=self.database_name, number_of_shards=self.number_of_shards, number_of_replicas=self.number_of_replicas, ) self.col = self.hc.get_table(self.table_name, self.database_name) logger.info( f"[_create_collection] : " f"create table {self.table_name} in {self.database_name} successfully" ) def _extract_fields(self) -> None: """Grab the existing fields from the Collection""" from transwarp_hippo_api.hippo_client import HippoTable if isinstance(self.col, HippoTable): schema = self.col.schema logger.debug(f"[_extract_fields] schema:{schema}") for x in schema: self.fields.append(x.name) logger.debug(f"04 [_extract_fields] fields:{self.fields}") # TO CAN: Translated into English, your statement would be: "Currently, # only the field named 'vector' (the automatically created vector field) # is checked for indexing. Indexes need to be created manually for other # vector type columns. 
def _get_index(self) -> Optional[Dict[str, Any]]: """Return the vector index information if it exists""" from transwarp_hippo_api.hippo_client import HippoTable if isinstance(self.col, HippoTable): table_info = self.hc.get_table_info( self.table_name, self.database_name ).get(self.table_name, {}) embedding_indexes = table_info.get("embedding_indexes", None) if embedding_indexes is None: return None else: for x in self.hc.get_table_info(self.table_name, self.database_name)[ self.table_name ]["embedding_indexes"]: logger.debug(f"[_get_index] embedding_indexes {embedding_indexes}") if x["column"] == self._vector_field: return x return None # TO Indexes can only be created for the self._vector_field field. def _create_index(self) -> None: """Create a index on the collection""" from transwarp_hippo_api.hippo_client import HippoTable from transwarp_hippo_api.hippo_type import IndexType, MetricType if isinstance(self.col, HippoTable) and self._get_index() is None: if self._get_index() is None: if self.index_params is None: self.index_params = { "index_name": "langchain_auto_create", "metric_type": MetricType.L2, "index_type": IndexType.IVF_FLAT, "nlist": 10, } self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], nlist=self.index_params["nlist"], ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) logger.info("create index successfully") else: index_dict = { "IVF_FLAT": IndexType.IVF_FLAT, "FLAT": IndexType.FLAT, "IVF_SQ": IndexType.IVF_SQ, "IVF_PQ": IndexType.IVF_PQ, "HNSW": IndexType.HNSW, } metric_dict = { "ip": MetricType.IP, "IP": MetricType.IP, "l2": MetricType.L2, "L2": MetricType.L2, } self.index_params["metric_type"] = metric_dict[ self.index_params["metric_type"] ] if self.index_params["index_type"] == "FLAT": self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) elif ( self.index_params["index_type"] == "IVF_FLAT" or self.index_params["index_type"] == "IVF_SQ" ): self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], nlist=self.index_params.get("nlist", 10), nprobe=self.index_params.get("nprobe", 10), ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) elif self.index_params["index_type"] == "IVF_PQ": self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], nlist=self.index_params.get("nlist", 10), nprobe=self.index_params.get("nprobe", 10), nbits=self.index_params.get("nbits", 8), m=self.index_params.get("m"), ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) elif self.index_params["index_type"] == "HNSW": self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], M=self.index_params.get("M"), ef_construction=self.index_params.get("ef_construction"), ef_search=self.index_params.get("ef_search"), ) 
logger.debug( self.col.activate_index(self.index_params["index_name"]) ) else: raise ValueError( "Index name does not match, " "please enter the correct index name. " "(FLAT, IVF_FLAT, IVF_PQ,IVF_SQ, HNSW)" ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, timeout: Optional[int] = None, batch_size: int = 1000, **kwargs: Any, ) -> List[str]: """ Add text to the collection. Args: texts: An iterable that contains the text to be added. metadatas: An optional list of dictionaries, each dictionary contains the metadata associated with a text. timeout: Optional timeout, in seconds. batch_size: The number of texts inserted in each batch, defaults to 1000. **kwargs: Other optional parameters. Returns: A list of strings, containing the unique identifiers of the inserted texts. Note: If the collection has not yet been created, this method will create a new collection. """ from transwarp_hippo_api.hippo_client import HippoTable if not texts or all(t == "" for t in texts): logger.debug("Nothing to insert, skipping.") return [] texts = list(texts) logger.debug(f"[add_texts] texts: {texts}") try: embeddings = self.embedding_func.embed_documents(texts) except NotImplementedError: embeddings = [self.embedding_func.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug("Nothing to insert, skipping.") return [] logger.debug(f"[add_texts] len_embeddings:{len(embeddings)}") # 如果还没有创建collection则创建collection if not isinstance(self.col, HippoTable): self._get_env(embeddings, metadatas) # Dict to hold all insert columns insert_dict: Dict[str, list] = { self._text_field: texts, self._vector_field: embeddings, } logger.debug(f"[add_texts] metadatas:{metadatas}") logger.debug(f"[add_texts] fields:{self.fields}") if metadatas is not None: for d in metadatas: for key, value in d.items(): if key in self.fields: insert_dict.setdefault(key, []).append(value) logger.debug(insert_dict[self._text_field]) # Total insert count vectors: list = insert_dict[self._vector_field] total_count = len(vectors) if "pk" in self.fields: self.fields.remove("pk") logger.debug(f"[add_texts] total_count:{total_count}") for i in range(0, total_count, batch_size): # Grab end index end = min(i + batch_size, total_count) # Convert dict to list of lists batch for insertion insert_list = [insert_dict[x][i:end] for x in self.fields] try: res = self.col.insert_rows(insert_list) logger.info(f"05 [add_texts] insert {res}") except Exception as e: logger.error( "Failed to insert batch starting at entity: %s/%s", i, total_count ) raise e return [""] def similarity_search( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """ Perform a similarity search on the query string. Args: query (str): The text to search for. k (int, optional): The number of results to return. Default is 4. param (dict, optional): Specifies the search parameters for the index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): Time to wait before a timeout error. Defaults to None. kwargs: Keyword arguments for Collection.search(). Returns: List[Document]: The document results of the search. 
""" if self.col is None: logger.debug("No existing collection to search.") return [] res = self.similarity_search_with_score( query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return [doc for doc, _ in res] def similarity_search_with_score( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Performs a search on the query string and returns results with scores. Args: query (str): The text being searched. k (int, optional): The number of results to return. Default is 4. param (dict): Specifies the search parameters for the index. Default is None. expr (str, optional): Filtering expression. Default is None. timeout (int, optional): The waiting time before a timeout error. Default is None. kwargs: Keyword arguments for Collection.search(). Returns: List[float], List[Tuple[Document, any, any]]: """ if self.col is None: logger.debug("No existing collection to search.") return [] # Embed the query text. embedding = self.embedding_func.embed_query(query) ret = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return ret def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Performs a search on the query string and returns results with scores. Args: embedding (List[float]): The embedding vector being searched. k (int, optional): The number of results to return. Default is 4. param (dict): Specifies the search parameters for the index. Default is None. expr (str, optional): Filtering expression. Default is None. timeout (int, optional): The waiting time before a timeout error. Default is None. kwargs: Keyword arguments for Collection.search(). Returns: List[Tuple[Document, float]]: Resulting documents and scores. """ if self.col is None: logger.debug("No existing collection to search.") return [] # if param is None: # param = self.search_params # Determine result metadata fields. output_fields = self.fields[:] output_fields.remove(self._vector_field) # Perform the search. logger.debug(f"search_field:{self._vector_field}") logger.debug(f"vectors:{[embedding]}") logger.debug(f"output_fields:{output_fields}") logger.debug(f"topk:{k}") logger.debug(f"dsl:{expr}") res = self.col.query( search_field=self._vector_field, vectors=[embedding], output_fields=output_fields, topk=k, dsl=expr, ) # Organize results. 
logger.debug(f"[similarity_search_with_score_by_vector] res:{res}") score_col = self._text_field + "%scores" ret = [] count = 0 for items in zip(*[res[0][field] for field in output_fields]): meta = {field: value for field, value in zip(output_fields, items)} doc = Document(page_content=meta.pop(self._text_field), metadata=meta) logger.debug( f"[similarity_search_with_score_by_vector] " f"res[0][score_col]:{res[0][score_col]}" ) score = res[0][score_col][count] count += 1 ret.append((doc, score)) return ret @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, table_name: str = "test", database_name: str = "default", connection_args: Dict[str, Any] = DEFAULT_HIPPO_CONNECTION, index_params: Optional[Dict[Any, Any]] = None, search_params: Optional[Dict[str, Any]] = None, drop_old: bool = False, **kwargs: Any, ) -> "Hippo": """ Creates an instance of the VST class from the given texts. Args: texts (List[str]): List of texts to be added. embedding (Embeddings): Embedding model for the texts. metadatas (List[dict], optional): List of metadata dictionaries for each text.Defaults to None. table_name (str): Name of the table. Defaults to "test". database_name (str): Name of the database. Defaults to "default". connection_args (dict[str, Any]): Connection parameters. Defaults to DEFAULT_HIPPO_CONNECTION. index_params (dict): Indexing parameters. Defaults to None. search_params (dict): Search parameters. Defaults to an empty dictionary. drop_old (bool): Whether to drop the old collection. Defaults to False. kwargs: Other arguments. Returns: Hippo: An instance of the VST class. """ if search_params is None: search_params = {} logger.info("00 [from_texts] init the class of Hippo") vector_db = cls( embedding_function=embedding, table_name=table_name, database_name=database_name, connection_args=connection_args, index_params=index_params, drop_old=drop_old, **kwargs, ) logger.debug(f"[from_texts] texts:{texts}") logger.debug(f"[from_texts] metadatas:{metadatas}") vector_db.add_texts(texts=texts, metadatas=metadatas) return vector_db
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~llms~gigachat.py
from __future__ import annotations import logging from functools import cached_property from typing import Any, AsyncIterator, Dict, Iterator, List, Optional from langchain_core.load.serializable import Serializable from langchain_core.pydantic_v1 import root_validator from langchain_core.schema.output import Generation, GenerationChunk, LLMResult from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import BaseLLM logger = logging.getLogger(__name__) class _BaseGigaChat(Serializable): base_url: Optional[str] = None """ Base API URL """ auth_url: Optional[str] = None """ Auth URL """ credentials: Optional[str] = None """ Auth Token """ scope: Optional[str] = None """ Permission scope for access token """ access_token: Optional[str] = None """ Access token for GigaChat """ model: Optional[str] = None """Model name to use.""" user: Optional[str] = None """ Username for authenticate """ password: Optional[str] = None """ Password for authenticate """ timeout: Optional[float] = None """ Timeout for request """ verify_ssl_certs: Optional[bool] = None """ Check certificates for all requests """ ca_bundle_file: Optional[str] = None cert_file: Optional[str] = None key_file: Optional[str] = None key_file_password: Optional[str] = None # Support for connection to GigaChat through SSL certificates profanity: bool = True """ Check for profanity """ streaming: bool = False """ Whether to stream the results or not. """ temperature: Optional[float] = None """What sampling temperature to use.""" max_tokens: Optional[int] = None """ Maximum number of tokens to generate """ @property def _llm_type(self) -> str: return "giga-chat-model" @property def lc_secrets(self) -> Dict[str, str]: return { "credentials": "GIGACHAT_CREDENTIALS", "access_token": "GIGACHAT_ACCESS_TOKEN", "password": "GIGACHAT_PASSWORD", "key_file_password": "GIGACHAT_KEY_FILE_PASSWORD", } @property def lc_serializable(self) -> bool: return True @cached_property def _client(self) -> Any: """Returns GigaChat API client""" import gigachat return gigachat.GigaChat( base_url=self.base_url, auth_url=self.auth_url, credentials=self.credentials, scope=self.scope, access_token=self.access_token, model=self.model, user=self.user, password=self.password, timeout=self.timeout, verify_ssl_certs=self.verify_ssl_certs, ca_bundle_file=self.ca_bundle_file, cert_file=self.cert_file, key_file=self.key_file, key_file_password=self.key_file_password, ) @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate authenticate data in environment and python package is installed.""" try: import gigachat # noqa: F401 except ImportError: raise ImportError( "Could not import gigachat python package. " "Please install it with `pip install gigachat`." ) return values @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "temperature": self.temperature, "model": self.model, "profanity": self.profanity, "streaming": self.streaming, "max_tokens": self.max_tokens, } class GigaChat(_BaseGigaChat, BaseLLM): """`GigaChat` large language models API. To use, you should pass login and password to access GigaChat API or use token. Example: .. 
code-block:: python from langchain.llms import GigaChat giga = GigaChat(credentials=..., verify_ssl_certs=False) """ def _build_payload(self, messages: List[str]) -> Dict[str, Any]: payload: Dict[str, Any] = { "messages": [{"role": "user", "content": m} for m in messages], "profanity_check": self.profanity, } if self.temperature is not None: payload["temperature"] = self.temperature if self.max_tokens is not None: payload["max_tokens"] = self.max_tokens if self.model: payload["model"] = self.model if self.verbose: logger.info("Giga request: %s", payload) return payload def _create_llm_result(self, response: Any) -> LLMResult: generations = [] for res in response.choices: finish_reason = res.finish_reason gen = Generation( text=res.message.content, generation_info={"finish_reason": finish_reason}, ) generations.append([gen]) if finish_reason != "stop": logger.warning( "Giga generation stopped with reason: %s", finish_reason, ) if self.verbose: logger.info("Giga response: %s", res.message.content) token_usage = response.usage llm_output = {"token_usage": token_usage, "model_name": response.model} return LLMResult(generations=generations, llm_output=llm_output) def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> LLMResult: should_stream = stream if stream is not None else self.streaming if should_stream: generation: Optional[GenerationChunk] = None stream_iter = self._stream( prompts[0], stop=stop, run_manager=run_manager, **kwargs ) for chunk in stream_iter: if generation is None: generation = chunk else: generation += chunk assert generation is not None return LLMResult(generations=[[generation]]) payload = self._build_payload(prompts) response = self._client.chat(payload) return self._create_llm_result(response) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> LLMResult: should_stream = stream if stream is not None else self.streaming if should_stream: generation: Optional[GenerationChunk] = None stream_iter = self._astream( prompts[0], stop=stop, run_manager=run_manager, **kwargs ) async for chunk in stream_iter: if generation is None: generation = chunk else: generation += chunk assert generation is not None return LLMResult(generations=[[generation]]) payload = self._build_payload(prompts) response = await self._client.achat(payload) return self._create_llm_result(response) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: payload = self._build_payload([prompt]) for chunk in self._client.stream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield GenerationChunk(text=content) if run_manager: run_manager.on_llm_new_token(content) async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: payload = self._build_payload([prompt]) async for chunk in self._client.astream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield GenerationChunk(text=content) if run_manager: await run_manager.on_llm_new_token(content) def get_num_tokens(self, text: str) -> int: """Count approximate number of tokens""" return round(len(text) / 4.6)
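# --- Illustrative usage sketch ------------------------------------------------
# Driving the wrapper above: the credentials value is a placeholder, and
# `verify_ssl_certs=False` simply mirrors the docstring example. The direct
# call and `.stream()` interfaces come from the shared LLM base class and are
# assumed rather than defined in this file.
if __name__ == "__main__":
    giga = GigaChat(credentials="<auth-token>", verify_ssl_certs=False)

    print(giga("Tell me a one-line joke."))       # blocking completion

    for token in giga.stream("Count to three."):  # token-by-token streaming
        print(token, end="", flush=True)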
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~graph_qa~cypher.py
"""Question answering over a graph.""" from __future__ import annotations import re from typing import Any, Dict, List, Optional from langchain_core.pydantic_v1 import Field from langchain_core.schema import BasePromptTemplate from langchain_core.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.graph_store import GraphStore INTERMEDIATE_STEPS_KEY = "intermediate_steps" def extract_cypher(text: str) -> str: """Extract Cypher code from a text. Args: text: Text to extract Cypher code from. Returns: Cypher code extracted from the text. """ # The pattern to find Cypher code enclosed in triple backticks pattern = r"```(.*?)```" # Find all matches in the input text matches = re.findall(pattern, text, re.DOTALL) return matches[0] if matches else text def construct_schema( structured_schema: Dict[str, Any], include_types: List[str], exclude_types: List[str], ) -> str: """Filter the schema based on included or excluded types""" def filter_func(x: str) -> bool: return x in include_types if include_types else x not in exclude_types filtered_schema = { "node_props": { k: v for k, v in structured_schema.get("node_props", {}).items() if filter_func(k) }, "rel_props": { k: v for k, v in structured_schema.get("rel_props", {}).items() if filter_func(k) }, "relationships": [ r for r in structured_schema.get("relationships", []) if all(filter_func(r[t]) for t in ["start", "end", "type"]) ], } return ( f"Node properties are the following: \n {filtered_schema['node_props']}\n" f"Relationships properties are the following: \n {filtered_schema['rel_props']}" "\nRelationships are: \n" + str( [ f"(:{el['start']})-[:{el['type']}]->(:{el['end']})" for el in filtered_schema["relationships"] ] ) ) class GraphCypherQAChain(Chain): """Chain for question-answering against a graph by generating Cypher statements. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ graph: GraphStore = Field(exclude=True) cypher_generation_chain: LLMChain qa_chain: LLMChain graph_schema: str input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: top_k: int = 10 """Number of results to return from the query""" return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" return_direct: bool = False """Whether or not to return the result of querying the graph directly.""" cypher_query_corrector: Optional[CypherQueryCorrector] = None """Optional cypher validation tool""" @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. 
:meta private: """ _output_keys = [self.output_key] return _output_keys @property def _chain_type(self) -> str: return "graph_cypher_chain" @classmethod def from_llm( cls, llm: Optional[BaseLanguageModel] = None, *, qa_prompt: Optional[BasePromptTemplate] = None, cypher_prompt: Optional[BasePromptTemplate] = None, cypher_llm: Optional[BaseLanguageModel] = None, qa_llm: Optional[BaseLanguageModel] = None, exclude_types: List[str] = [], include_types: List[str] = [], validate_cypher: bool = False, qa_llm_kwargs: Optional[Dict[str, Any]] = None, cypher_llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> GraphCypherQAChain: """Initialize from LLM.""" if not cypher_llm and not llm: raise ValueError("Either `llm` or `cypher_llm` parameters must be provided") if not qa_llm and not llm: raise ValueError("Either `llm` or `qa_llm` parameters must be provided") if cypher_llm and qa_llm and llm: raise ValueError( "You can specify up to two of 'cypher_llm', 'qa_llm'" ", and 'llm', but not all three simultaneously." ) if cypher_prompt and cypher_llm_kwargs: raise ValueError( "Specifying cypher_prompt and cypher_llm_kwargs together is" " not allowed. Please pass prompt via cypher_llm_kwargs." ) if qa_prompt and qa_llm_kwargs: raise ValueError( "Specifying qa_prompt and qa_llm_kwargs together is" " not allowed. Please pass prompt via qa_llm_kwargs." ) use_qa_llm_kwargs = qa_llm_kwargs if qa_llm_kwargs is not None else {} use_cypher_llm_kwargs = ( cypher_llm_kwargs if cypher_llm_kwargs is not None else {} ) if "prompt" not in use_qa_llm_kwargs: use_qa_llm_kwargs["prompt"] = ( qa_prompt if qa_prompt is not None else CYPHER_QA_PROMPT ) if "prompt" not in use_cypher_llm_kwargs: use_cypher_llm_kwargs["prompt"] = ( cypher_prompt if cypher_prompt is not None else CYPHER_GENERATION_PROMPT ) qa_chain = LLMChain(llm=qa_llm or llm, **use_qa_llm_kwargs) cypher_generation_chain = LLMChain( llm=cypher_llm or llm, **use_cypher_llm_kwargs ) if exclude_types and include_types: raise ValueError( "Either `exclude_types` or `include_types` " "can be provided, but not both" ) graph_schema = construct_schema( kwargs["graph"].get_structured_schema, include_types, exclude_types ) cypher_query_corrector = None if validate_cypher: corrector_schema = [ Schema(el["start"], el["type"], el["end"]) for el in kwargs["graph"].structured_schema.get("relationships") ] cypher_query_corrector = CypherQueryCorrector(corrector_schema) return cls( graph_schema=graph_schema, qa_chain=qa_chain, cypher_generation_chain=cypher_generation_chain, cypher_query_corrector=cypher_query_corrector, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] intermediate_steps: List = [] generated_cypher = self.cypher_generation_chain.run( {"question": question, "schema": self.graph_schema}, callbacks=callbacks ) # Extract Cypher code if it is wrapped in backticks generated_cypher = extract_cypher(generated_cypher) # Correct Cypher query if enabled if self.cypher_query_corrector: generated_cypher = self.cypher_query_corrector(generated_cypher) _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_cypher, color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"query": 
generated_cypher})

        # Retrieve and limit the number of results
        # Generated Cypher may be null if the query corrector identifies an
        # invalid schema
        if generated_cypher:
            context = self.graph.query(generated_cypher)[: self.top_k]
        else:
            context = []

        if self.return_direct:
            final_result = context
        else:
            _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
            _run_manager.on_text(
                str(context), color="green", end="\n", verbose=self.verbose
            )

            intermediate_steps.append({"context": context})

            result = self.qa_chain(
                {"question": question, "context": context},
                callbacks=callbacks,
            )
            final_result = result[self.qa_chain.output_key]

        chain_result: Dict[str, Any] = {self.output_key: final_result}
        if self.return_intermediate_steps:
            chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps

        return chain_result
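# --- Illustrative usage sketch ------------------------------------------------
# Wiring the chain above to a graph store. `Neo4jGraph`, the chat model and the
# credentials are assumptions for the sketch; `validate_cypher=True` enables
# the optional CypherQueryCorrector pass created in `from_llm`.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI
    from langchain.graphs import Neo4jGraph  # assumed graph store implementation

    graph = Neo4jGraph(
        url="bolt://localhost:7687", username="neo4j", password="password"
    )
    chain = GraphCypherQAChain.from_llm(
        llm=ChatOpenAI(temperature=0),
        graph=graph,
        validate_cypher=True,
        return_intermediate_steps=True,
    )
    result = chain({"query": "Which actors played in Top Gun?"})
    print(result["result"])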
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~llms~pai_eas_endpoint.py
import json import logging from typing import Any, Dict, Iterator, List, Mapping, Optional import requests from langchain_core.pydantic_v1 import root_validator from langchain_core.schema.output import GenerationChunk from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class PaiEasEndpoint(LLM): """Langchain LLM class to help to access eass llm service. To use this endpoint, must have a deployed eas chat llm service on PAI AliCloud. One can set the environment variable ``eas_service_url`` and ``eas_service_token``. The environment variables can set with your eas service url and service token. Example: .. code-block:: python from langchain.llms.pai_eas_endpoint import PaiEasEndpoint eas_chat_endpoint = PaiEasChatEndpoint( eas_service_url="your_service_url", eas_service_token="your_service_token" ) """ """PAI-EAS Service URL""" eas_service_url: str """PAI-EAS Service TOKEN""" eas_service_token: str """PAI-EAS Service Infer Params""" max_new_tokens: Optional[int] = 512 temperature: Optional[float] = 0.95 top_p: Optional[float] = 0.1 top_k: Optional[int] = 0 stop_sequences: Optional[List[str]] = None """Enable stream chat mode.""" streaming: bool = False """Key/value arguments to pass to the model. Reserved for future use""" model_kwargs: Optional[dict] = None version: Optional[str] = "2.0" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["eas_service_url"] = get_from_dict_or_env( values, "eas_service_url", "EAS_SERVICE_URL" ) values["eas_service_token"] = get_from_dict_or_env( values, "eas_service_token", "EAS_SERVICE_TOKEN" ) return values @property def _llm_type(self) -> str: """Return type of llm.""" return "pai_eas_endpoint" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_new_tokens": self.max_new_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "stop_sequences": [], } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { "eas_service_url": self.eas_service_url, "eas_service_token": self.eas_service_token, **_model_kwargs, } def _invocation_params( self, stop_sequences: Optional[List[str]], **kwargs: Any ) -> dict: params = self._default_params if self.stop_sequences is not None and stop_sequences is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop_sequences is not None: params["stop"] = self.stop_sequences else: params["stop"] = stop_sequences if self.model_kwargs: params.update(self.model_kwargs) return {**params, **kwargs} @staticmethod def _process_response( response: Any, stop: Optional[List[str]], version: Optional[str] ) -> str: if version == "1.0": text = response else: text = response["response"] if stop: text = enforce_stop_tokens(text, stop) return "".join(text) def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: params = self._invocation_params(stop, **kwargs) prompt = prompt.strip() response = None try: if self.streaming: completion = "" for chunk in self._stream(prompt, stop, run_manager, **params): completion += chunk.text return completion else: 
response = self._call_eas(prompt, params) _stop = params.get("stop") return self._process_response(response, _stop, self.version) except Exception as error: raise ValueError(f"Error raised by the service: {error}") def _call_eas(self, prompt: str = "", params: Dict = {}) -> Any: """Generate text from the eas service.""" headers = { "Content-Type": "application/json", "Authorization": f"{self.eas_service_token}", } if self.version == "1.0": body = { "input_ids": f"{prompt}", } else: body = { "prompt": f"{prompt}", } # add params to body for key, value in params.items(): body[key] = value # make request response = requests.post(self.eas_service_url, headers=headers, json=body) if response.status_code != 200: raise Exception( f"Request failed with status code {response.status_code}" f" and message {response.text}" ) try: return json.loads(response.text) except Exception as e: if isinstance(e, json.decoder.JSONDecodeError): return response.text raise e def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: invocation_params = self._invocation_params(stop, **kwargs) headers = { "User-Agent": "Test Client", "Authorization": f"{self.eas_service_token}", } if self.version == "1.0": pload = {"input_ids": prompt, **invocation_params} response = requests.post( self.eas_service_url, headers=headers, json=pload, stream=True ) res = GenerationChunk(text=response.text) if run_manager: run_manager.on_llm_new_token(res.text) # yield text, if any yield res else: pload = {"prompt": prompt, "use_stream_chat": "True", **invocation_params} response = requests.post( self.eas_service_url, headers=headers, json=pload, stream=True ) for chunk in response.iter_lines( chunk_size=8192, decode_unicode=False, delimiter=b"\0" ): if chunk: data = json.loads(chunk.decode("utf-8")) output = data["response"] # identify stop sequence in generated text, if any stop_seq_found: Optional[str] = None for stop_seq in invocation_params["stop"]: if stop_seq in output: stop_seq_found = stop_seq # identify text to yield text: Optional[str] = None if stop_seq_found: text = output[: output.index(stop_seq_found)] else: text = output # yield text, if any if text: res = GenerationChunk(text=text) yield res if run_manager: run_manager.on_llm_new_token(res.text) # break if stop sequence found if stop_seq_found: break
[]
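A minimal usage sketch for PaiEasEndpoint (the URL and token below are placeholders for your own PAI-EAS deployment; alternatively set EAS_SERVICE_URL and EAS_SERVICE_TOKEN in the environment):

from langchain.llms.pai_eas_endpoint import PaiEasEndpoint

# Placeholder credentials for a hypothetical deployed EAS LLM service.
llm = PaiEasEndpoint(
    eas_service_url="your_service_url",
    eas_service_token="your_service_token",
    temperature=0.8,
)
print(llm("Say hello in one short sentence."))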
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~mosaicml.py
from typing import Any, Dict, List, Mapping, Optional, Tuple import requests from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator from langchain_core.schema.embeddings import Embeddings from langchain.utils import get_from_dict_or_env class MosaicMLInstructorEmbeddings(BaseModel, Embeddings): """MosaicML embedding service. To use, you should have the environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import MosaicMLInstructorEmbeddings endpoint_url = ( "https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict" ) mosaic_llm = MosaicMLInstructorEmbeddings( endpoint_url=endpoint_url, mosaicml_api_token="my-api-key" ) """ endpoint_url: str = ( "https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict" ) """Endpoint URL to use.""" embed_instruction: str = "Represent the document for retrieval: " """Instruction used to embed documents.""" query_instruction: str = ( "Represent the question for retrieving supporting documents: " ) """Instruction used to embed the query.""" retry_sleep: float = 1.0 """How long to try sleeping for if a rate limit is encountered""" mosaicml_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env( values, "mosaicml_api_token", "MOSAICML_API_TOKEN" ) values["mosaicml_api_token"] = mosaicml_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"endpoint_url": self.endpoint_url} def _embed( self, input: List[Tuple[str, str]], is_retry: bool = False ) -> List[List[float]]: payload = {"inputs": input} # HTTP headers for authorization headers = { "Authorization": f"{self.mosaicml_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post(self.endpoint_url, headers=headers, json=payload) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") try: if response.status_code == 429: if not is_retry: import time time.sleep(self.retry_sleep) return self._embed(input, is_retry=True) raise ValueError( f"Error raised by inference API: rate limit exceeded.\nResponse: " f"{response.text}" ) parsed_response = response.json() # The inference API has changed a couple of times, so we add some handling # to be robust to multiple response formats. if isinstance(parsed_response, dict): output_keys = ["data", "output", "outputs"] for key in output_keys: if key in parsed_response: output_item = parsed_response[key] break else: raise ValueError( f"No key data or output in response: {parsed_response}" ) if isinstance(output_item, list) and isinstance(output_item[0], list): embeddings = output_item else: embeddings = [output_item] else: raise ValueError(f"Unexpected response type: {parsed_response}") except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {response.text}" ) return embeddings def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a MosaicML deployed instructor embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. 
""" instruction_pairs = [(self.embed_instruction, text) for text in texts] embeddings = self._embed(instruction_pairs) return embeddings def embed_query(self, text: str) -> List[float]: """Embed a query using a MosaicML deployed instructor embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = (self.query_instruction, text) embedding = self._embed([instruction_pair])[0] return embedding
[]
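A short usage sketch for MosaicMLInstructorEmbeddings, assuming MOSAICML_API_TOKEN is set in the environment (the class is exported from langchain.embeddings, even though the docstring example above imports it from langchain.llms):

from langchain.embeddings import MosaicMLInstructorEmbeddings

embeddings = MosaicMLInstructorEmbeddings()
# One vector per input text; embed_query uses the query instruction instead.
doc_vectors = embeddings.embed_documents(["MosaicML hosts instructor models."])
query_vector = embeddings.embed_query("Which models does MosaicML host?")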
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~chat_message_histories~in_memory.py
from typing import List

from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.schema import (
    BaseChatMessageHistory,
)
from langchain_core.schema.messages import BaseMessage


class ChatMessageHistory(BaseChatMessageHistory, BaseModel):
    """In memory implementation of chat message history.

    Stores messages in an in memory list.
    """

    messages: List[BaseMessage] = Field(default_factory=list)

    def add_message(self, message: BaseMessage) -> None:
        """Add a self-created message to the store"""
        self.messages.append(message)

    def clear(self) -> None:
        self.messages = []
[]
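A quick usage sketch for the in-memory history (add_user_message and add_ai_message are convenience helpers inherited from BaseChatMessageHistory):

from langchain.memory import ChatMessageHistory

history = ChatMessageHistory()
history.add_user_message("Hi! Can you summarize our last meeting?")
history.add_ai_message("Sure - you agreed to ship the Q3 roadmap next week.")
print(history.messages)  # [HumanMessage(...), AIMessage(...)]
history.clear()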
2024-01-10
axgpt/langchain
libs~langchain~langchain~tools~yahoo_finance_news.py
from typing import Iterable, Optional

from langchain_core.schema import Document
from requests.exceptions import HTTPError, ReadTimeout
from urllib3.exceptions import ConnectionError

from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.document_loaders.web_base import WebBaseLoader
from langchain.tools.base import BaseTool


class YahooFinanceNewsTool(BaseTool):
    """Tool that searches financial news on Yahoo Finance."""

    name: str = "yahoo_finance_news"
    description: str = (
        "Useful for when you need to find financial news "
        "about a public company. "
        "Input should be a company ticker. "
        "For example, AAPL for Apple, MSFT for Microsoft."
    )
    top_k: int = 10
    """The number of results to return."""

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Yahoo Finance News tool."""
        try:
            import yfinance
        except ImportError:
            raise ImportError(
                "Could not import yfinance python package. "
                "Please install it with `pip install yfinance`."
            )
        company = yfinance.Ticker(query)
        try:
            if company.isin is None:
                return f"Company ticker {query} not found."
        except (HTTPError, ReadTimeout, ConnectionError):
            return f"Company ticker {query} not found."

        links = []
        try:
            links = [n["link"] for n in company.news if n["type"] == "STORY"]
        except (HTTPError, ReadTimeout, ConnectionError):
            if not links:
                return f"No news found for company that searched with {query} ticker."
        if not links:
            return f"No news found for company that searched with {query} ticker."
        loader = WebBaseLoader(web_paths=links)
        docs = loader.load()
        result = self._format_results(docs, query)
        if not result:
            return f"No news found for company that searched with {query} ticker."
        return result

    @staticmethod
    def _format_results(docs: Iterable[Document], query: str) -> str:
        doc_strings = [
            "\n".join([doc.metadata["title"], doc.metadata["description"]])
            for doc in docs
            if query in doc.metadata["description"] or query in doc.metadata["title"]
        ]
        return "\n\n".join(doc_strings)
[]
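A minimal usage sketch for the tool above (requires the optional yfinance package and network access):

from langchain.tools.yahoo_finance_news import YahooFinanceNewsTool

tool = YahooFinanceNewsTool()
# Input is a ticker symbol; the tool returns formatted title/description pairs.
print(tool.run("AAPL"))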
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~example_generator.py
from typing import List

from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.schema.language_model import BaseLanguageModel

from langchain.chains.llm import LLMChain

TEST_GEN_TEMPLATE_SUFFIX = "Add another example."


def generate_example(
    examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
    """Return another example given a list of examples for a prompt."""
    prompt = FewShotPromptTemplate(
        examples=examples,
        suffix=TEST_GEN_TEMPLATE_SUFFIX,
        input_variables=[],
        example_prompt=prompt_template,
    )
    chain = LLMChain(llm=llm, prompt=prompt)
    return chain.predict()
[ "Add another example." ]
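A small usage sketch for generate_example; the OpenAI LLM here is just one possible BaseLanguageModel and assumes OPENAI_API_KEY is set:

from langchain_core.prompts.prompt import PromptTemplate

from langchain.chains.example_generator import generate_example
from langchain.llms import OpenAI

example_prompt = PromptTemplate.from_template("Question: {question}\nAnswer: {answer}")
examples = [
    {"question": "What is 2 + 2?", "answer": "4"},
    {"question": "What is the capital of France?", "answer": "Paris"},
]
# Returns one more example in the same format, generated by the LLM.
new_example = generate_example(examples, OpenAI(), example_prompt)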
2024-01-10
axgpt/langchain
libs~langchain~langchain~schema~callbacks~tracers~schemas.py
from langchain_core.callbacks.tracers.schemas import (
    BaseRun,
    ChainRun,
    LLMRun,
    Run,
    RunTypeEnum,
    ToolRun,
    TracerSession,
    TracerSessionBase,
    TracerSessionV1,
    TracerSessionV1Base,
    TracerSessionV1Create,
)

__all__ = [
    "RunTypeEnum",
    "TracerSessionV1Base",
    "TracerSessionV1Create",
    "TracerSessionV1",
    "TracerSessionBase",
    "TracerSession",
    "BaseRun",
    "LLMRun",
    "ChainRun",
    "ToolRun",
    "Run",
]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~agents~output_parsers~xml.py
from typing import Union

from langchain_core.schema import AgentAction, AgentFinish

from langchain.agents import AgentOutputParser


class XMLAgentOutputParser(AgentOutputParser):
    """Parses tool invocations and final answers in XML format.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an AgentAction
    being returned.

    ```
    <tool>search</tool>
    <tool_input>what is 2 + 2</tool_input>
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    <final_answer>Foo</final_answer>
    ```
    """

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        if "</tool>" in text:
            tool, tool_input = text.split("</tool>")
            _tool = tool.split("<tool>")[1]
            _tool_input = tool_input.split("<tool_input>")[1]
            if "</tool_input>" in _tool_input:
                _tool_input = _tool_input.split("</tool_input>")[0]
            return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
        elif "<final_answer>" in text:
            _, answer = text.split("<final_answer>")
            if "</final_answer>" in answer:
                answer = answer.split("</final_answer>")[0]
            return AgentFinish(return_values={"output": answer}, log=text)
        else:
            raise ValueError

    def get_format_instructions(self) -> str:
        raise NotImplementedError

    @property
    def _type(self) -> str:
        return "xml-agent"
[]
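A usage sketch showing how the parser maps the two XML formats onto AgentAction and AgentFinish:

from langchain.agents.output_parsers.xml import XMLAgentOutputParser

parser = XMLAgentOutputParser()
action = parser.parse("<tool>search</tool><tool_input>what is 2 + 2</tool_input>")
print(action.tool, action.tool_input)  # search / what is 2 + 2
finish = parser.parse("<final_answer>4</final_answer>")
print(finish.return_values["output"])  # 4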
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~google_cloud_documentai_warehouse.py
"""Retriever wrapper for Google Cloud Document AI Warehouse.""" from typing import TYPE_CHECKING, Any, Dict, List, Optional from langchain_core.pydantic_v1 import root_validator from langchain_core.schema import BaseRetriever from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.docstore.document import Document from langchain.utilities.vertexai import get_client_info from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: from google.cloud.contentwarehouse_v1 import ( DocumentServiceClient, RequestMetadata, SearchDocumentsRequest, ) from google.cloud.contentwarehouse_v1.services.document_service.pagers import ( SearchDocumentsPager, ) class GoogleDocumentAIWarehouseRetriever(BaseRetriever): """A retriever based on Document AI Warehouse. Documents should be created and documents should be uploaded in a separate flow, and this retriever uses only Document AI schema_id provided to search for revelant documents. More info: https://cloud.google.com/document-ai-warehouse. """ location: str = "us" """Google Cloud location where Document AI Warehouse is placed.""" project_number: str """Google Cloud project number, should contain digits only.""" schema_id: Optional[str] = None """Document AI Warehouse schema to query against. If nothing is provided, all documents in the project will be searched.""" qa_size_limit: int = 5 """The limit on the number of documents returned.""" client: "DocumentServiceClient" = None #: :meta private: @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validates the environment.""" try: # noqa: F401 from google.cloud.contentwarehouse_v1 import DocumentServiceClient except ImportError as exc: raise ImportError( "google.cloud.contentwarehouse is not installed." "Please install it with pip install google-cloud-contentwarehouse" ) from exc values["project_number"] = get_from_dict_or_env( values, "project_number", "PROJECT_NUMBER" ) values["client"] = DocumentServiceClient( client_info=get_client_info(module="document-ai-warehouse") ) return values def _prepare_request_metadata(self, user_ldap: str) -> "RequestMetadata": from google.cloud.contentwarehouse_v1 import RequestMetadata, UserInfo user_info = UserInfo(id=f"user:{user_ldap}") return RequestMetadata(user_info=user_info) def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any ) -> List[Document]: request = self._prepare_search_request(query, **kwargs) response = self.client.search_documents(request=request) return self._parse_search_response(response=response) def _prepare_search_request( self, query: str, **kwargs: Any ) -> "SearchDocumentsRequest": from google.cloud.contentwarehouse_v1 import ( DocumentQuery, SearchDocumentsRequest, ) try: user_ldap = kwargs["user_ldap"] except KeyError: raise ValueError("Argument user_ldap should be provided!") request_metadata = self._prepare_request_metadata(user_ldap=user_ldap) schemas = [] if self.schema_id: schemas.append( self.client.document_schema_path( project=self.project_number, location=self.location, document_schema=self.schema_id, ) ) return SearchDocumentsRequest( parent=self.client.common_location_path(self.project_number, self.location), request_metadata=request_metadata, document_query=DocumentQuery( query=query, is_nl_query=True, document_schema_names=schemas ), qa_size_limit=self.qa_size_limit, ) def _parse_search_response( self, response: "SearchDocumentsPager" ) -> List[Document]: documents = [] for doc in response.matching_documents: metadata 
= { "title": doc.document.title, "source": doc.document.raw_document_path, } documents.append( Document(page_content=doc.search_text_snippet, metadata=metadata) ) return documents
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~lancedb.py
from __future__ import annotations import uuid from typing import Any, Iterable, List, Optional from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document class LanceDB(VectorStore): """`LanceDB` vector store. To use, you should have ``lancedb`` python package installed. Example: .. code-block:: python db = lancedb.connect('./lancedb') table = db.open_table('my_table') vectorstore = LanceDB(table, embedding_function) vectorstore.add_texts(['text1', 'text2']) result = vectorstore.similarity_search('text1') """ def __init__( self, connection: Any, embedding: Embeddings, vector_key: Optional[str] = "vector", id_key: Optional[str] = "id", text_key: Optional[str] = "text", ): """Initialize with Lance DB connection""" try: import lancedb except ImportError: raise ImportError( "Could not import lancedb python package. " "Please install it with `pip install lancedb`." ) if not isinstance(connection, lancedb.db.LanceTable): raise ValueError( "connection should be an instance of lancedb.db.LanceTable, ", f"got {type(connection)}", ) self._connection = connection self._embedding = embedding self._vector_key = vector_key self._id_key = id_key self._text_key = text_key @property def embeddings(self) -> Embeddings: return self._embedding def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Turn texts into embedding and add it to the database Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Returns: List of ids of the added texts. """ # Embed texts and create documents docs = [] ids = ids or [str(uuid.uuid4()) for _ in texts] embeddings = self._embedding.embed_documents(list(texts)) for idx, text in enumerate(texts): embedding = embeddings[idx] metadata = metadatas[idx] if metadatas else {} docs.append( { self._vector_key: embedding, self._id_key: ids[idx], self._text_key: text, **metadata, } ) self._connection.add(docs) return ids def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return documents most similar to the query Args: query: String to query the vectorstore with. k: Number of documents to return. Returns: List of documents most similar to the query. """ embedding = self._embedding.embed_query(query) docs = self._connection.search(embedding).limit(k).to_df() return [ Document( page_content=row[self._text_key], metadata=row[docs.columns != self._text_key], ) for _, row in docs.iterrows() ] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, connection: Any = None, vector_key: Optional[str] = "vector", id_key: Optional[str] = "id", text_key: Optional[str] = "text", **kwargs: Any, ) -> LanceDB: instance = LanceDB( connection, embedding, vector_key, id_key, text_key, ) instance.add_texts(texts, metadatas=metadatas, **kwargs) return instance
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_loaders~whatsapp.py
import logging import os import re import zipfile from typing import Iterator, List, Union from langchain_core.schema import AIMessage, HumanMessage from langchain_core.schema.chat import ChatSession from langchain.chat_loaders.base import BaseChatLoader logger = logging.getLogger(__name__) class WhatsAppChatLoader(BaseChatLoader): """Load `WhatsApp` conversations from a dump zip file or directory.""" def __init__(self, path: str): """Initialize the WhatsAppChatLoader. Args: path (str): Path to the exported WhatsApp chat zip directory, folder, or file. To generate the dump, open the chat, click the three dots in the top right corner, and select "More". Then select "Export chat" and choose "Without media". """ self.path = path ignore_lines = [ "This message was deleted", "<Media omitted>", "image omitted", "Messages and calls are end-to-end encrypted. No one outside of this chat," " not even WhatsApp, can read or listen to them.", ] self._ignore_lines = re.compile( r"(" + "|".join([r"\u200E*" + line for line in ignore_lines]) + r")", flags=re.IGNORECASE, ) self._message_line_regex = re.compile( r"\u200E*\[?(\d{1,2}/\d{1,2}/\d{2,4}, \d{1,2}:\d{2}:\d{2} (?:AM|PM))\]?[ \u200E]*([^:]+): (.+)", # noqa flags=re.IGNORECASE, ) def _load_single_chat_session(self, file_path: str) -> ChatSession: """Load a single chat session from a file. Args: file_path (str): Path to the chat file. Returns: ChatSession: The loaded chat session. """ with open(file_path, "r", encoding="utf-8") as file: txt = file.read() # Split messages by newlines, but keep multi-line messages grouped chat_lines: List[str] = [] current_message = "" for line in txt.split("\n"): if self._message_line_regex.match(line): if current_message: chat_lines.append(current_message) current_message = line else: current_message += " " + line.strip() if current_message: chat_lines.append(current_message) results: List[Union[HumanMessage, AIMessage]] = [] for line in chat_lines: result = self._message_line_regex.match(line.strip()) if result: timestamp, sender, text = result.groups() if not self._ignore_lines.match(text.strip()): results.append( HumanMessage( role=sender, content=text, additional_kwargs={ "sender": sender, "events": [{"message_time": timestamp}], }, ) ) else: logger.debug(f"Could not parse line: {line}") return ChatSession(messages=results) def _iterate_files(self, path: str) -> Iterator[str]: """Iterate over the files in a directory or zip file. Args: path (str): Path to the directory or zip file. Yields: str: The path to each file. """ if os.path.isfile(path): yield path elif os.path.isdir(path): for root, _, files in os.walk(path): for file in files: if file.endswith(".txt"): yield os.path.join(root, file) elif zipfile.is_zipfile(path): with zipfile.ZipFile(path) as zip_file: for file in zip_file.namelist(): if file.endswith(".txt"): yield zip_file.extract(file) def lazy_load(self) -> Iterator[ChatSession]: """Lazy load the messages from the chat file and yield them as chat sessions. Yields: Iterator[ChatSession]: The loaded chat sessions. """ yield self._load_single_chat_session(self.path)
[]
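A usage sketch for the loader, pointing at a hypothetical chat exported via "Export chat" -> "Without media":

from langchain.chat_loaders.whatsapp import WhatsAppChatLoader

loader = WhatsAppChatLoader(path="./whatsapp_chat.txt")  # hypothetical export file
sessions = loader.load()
print(len(sessions[0]["messages"]))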
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~pgvecto_rs.py
from __future__ import annotations import uuid from typing import Any, Iterable, List, Literal, Optional, Tuple, Type import numpy as np import sqlalchemy from langchain_core.schema import Document from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from sqlalchemy import insert, select from sqlalchemy.dialects import postgresql from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column from sqlalchemy.orm.session import Session class _ORMBase(DeclarativeBase): __tablename__: str id: Mapped[uuid.UUID] text: Mapped[str] meta: Mapped[dict] embedding: Mapped[np.ndarray] class PGVecto_rs(VectorStore): _engine: sqlalchemy.engine.Engine _table: Type[_ORMBase] _embedding: Embeddings def __init__( self, embedding: Embeddings, dimension: int, db_url: str, collection_name: str, new_table: bool = False, ) -> None: try: from pgvecto_rs.sqlalchemy import Vector except ImportError as e: raise ImportError( "Unable to import pgvector_rs, please install with " "`pip install pgvector_rs`." ) from e class _Table(_ORMBase): __tablename__ = f"collection_{collection_name}" id: Mapped[uuid.UUID] = mapped_column( postgresql.UUID(as_uuid=True), primary_key=True, default=uuid.uuid4 ) text: Mapped[str] = mapped_column(sqlalchemy.String) meta: Mapped[dict] = mapped_column(postgresql.JSONB) embedding: Mapped[np.ndarray] = mapped_column(Vector(dimension)) self._engine = sqlalchemy.create_engine(db_url) self._table = _Table self._table.__table__.create(self._engine, checkfirst=not new_table) # type: ignore self._embedding = embedding # ================ Create interface ================= @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, db_url: str = "", collection_name: str = str(uuid.uuid4().hex), **kwargs: Any, ) -> PGVecto_rs: """Return VectorStore initialized from texts and optional metadatas.""" sample_embedding = embedding.embed_query("Hello pgvecto_rs!") dimension = len(sample_embedding) if db_url is None: raise ValueError("db_url must be provided") _self: PGVecto_rs = cls( embedding=embedding, dimension=dimension, db_url=db_url, collection_name=collection_name, new_table=True, ) _self.add_texts(texts, metadatas, **kwargs) return _self @classmethod def from_documents( cls, documents: List[Document], embedding: Embeddings, db_url: str = "", collection_name: str = str(uuid.uuid4().hex), **kwargs: Any, ) -> PGVecto_rs: """Return VectorStore initialized from documents.""" texts = [document.page_content for document in documents] metadatas = [document.metadata for document in documents] return cls.from_texts( texts, embedding, metadatas, db_url, collection_name, **kwargs ) @classmethod def from_collection_name( cls, embedding: Embeddings, db_url: str, collection_name: str, ) -> PGVecto_rs: """Create new empty vectorstore with collection_name. Or connect to an existing vectorstore in database if exists. Arguments should be the same as when the vectorstore was created.""" sample_embedding = embedding.embed_query("Hello pgvecto_rs!") return cls( embedding=embedding, dimension=len(sample_embedding), db_url=db_url, collection_name=collection_name, ) # ================ Insert interface ================= def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. 
metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids of the added texts. """ embeddings = self._embedding.embed_documents(list(texts)) with Session(self._engine) as _session: results: List[str] = [] for text, embedding, metadata in zip( texts, embeddings, metadatas or [dict()] * len(list(texts)) ): t = insert(self._table).values( text=text, meta=metadata, embedding=embedding ) id = _session.execute(t).inserted_primary_key[0] # type: ignore results.append(str(id)) _session.commit() return results def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Run more documents through the embeddings and add to the vectorstore. Args: documents (List[Document]): List of documents to add to the vectorstore. Returns: List of ids of the added documents. """ return self.add_texts( [document.page_content for document in documents], [document.metadata for document in documents], **kwargs, ) # ================ Query interface ================= def similarity_search_with_score_by_vector( self, query_vector: List[float], k: int = 4, distance_func: Literal[ "sqrt_euclid", "neg_dot_prod", "ned_cos" ] = "sqrt_euclid", **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query vector, with its score.""" with Session(self._engine) as _session: real_distance_func = ( self._table.embedding.squared_euclidean_distance if distance_func == "sqrt_euclid" else self._table.embedding.negative_dot_product_distance if distance_func == "neg_dot_prod" else self._table.embedding.negative_cosine_distance if distance_func == "ned_cos" else None ) if real_distance_func is None: raise ValueError("Invalid distance function") t = ( select(self._table, real_distance_func(query_vector).label("score")) .order_by("score") .limit(k) # type: ignore ) return [ (Document(page_content=row[0].text, metadata=row[0].meta), row[1]) for row in _session.execute(t) ] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, distance_func: Literal[ "sqrt_euclid", "neg_dot_prod", "ned_cos" ] = "sqrt_euclid", **kwargs: Any, ) -> List[Document]: return [ doc for doc, score in self.similarity_search_with_score_by_vector( embedding, k, distance_func, **kwargs ) ] def similarity_search_with_score( self, query: str, k: int = 4, distance_func: Literal[ "sqrt_euclid", "neg_dot_prod", "ned_cos" ] = "sqrt_euclid", **kwargs: Any, ) -> List[Tuple[Document, float]]: query_vector = self._embedding.embed_query(query) return self.similarity_search_with_score_by_vector( query_vector, k, distance_func, **kwargs ) def similarity_search( self, query: str, k: int = 4, distance_func: Literal[ "sqrt_euclid", "neg_dot_prod", "ned_cos" ] = "sqrt_euclid", **kwargs: Any, ) -> List[Document]: """Return docs most similar to query.""" query_vector = self._embedding.embed_query(query) return [ doc for doc, score in self.similarity_search_with_score_by_vector( query_vector, k, distance_func, **kwargs ) ]
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~vectorstores~qdrant~test_add_texts.py
import uuid from typing import Optional import pytest from langchain_core.schema import Document from langchain.vectorstores import Qdrant from tests.integration_tests.vectorstores.fake_embeddings import ( ConsistentFakeEmbeddings, ) @pytest.mark.parametrize("batch_size", [1, 64]) @pytest.mark.parametrize("vector_name", [None, "my-vector"]) def test_qdrant_add_documents_extends_existing_collection( batch_size: int, vector_name: Optional[str] ) -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch: Qdrant = Qdrant.from_texts( texts, ConsistentFakeEmbeddings(), location=":memory:", batch_size=batch_size, vector_name=vector_name, ) new_texts = ["foobar", "foobaz"] docsearch.add_documents( [Document(page_content=content) for content in new_texts], batch_size=batch_size ) output = docsearch.similarity_search("foobar", k=1) # ConsistentFakeEmbeddings return the same query embedding as the first document # embedding computed in `embedding.embed_documents`. Thus, "foo" embedding is the # same as "foobar" embedding assert output == [Document(page_content="foobar")] @pytest.mark.parametrize("batch_size", [1, 64]) def test_qdrant_add_texts_returns_all_ids(batch_size: int) -> None: """Test end to end Qdrant.add_texts returns unique ids.""" docsearch: Qdrant = Qdrant.from_texts( ["foobar"], ConsistentFakeEmbeddings(), location=":memory:", batch_size=batch_size, ) ids = docsearch.add_texts(["foo", "bar", "baz"]) assert 3 == len(ids) assert 3 == len(set(ids)) @pytest.mark.parametrize("vector_name", [None, "my-vector"]) def test_qdrant_add_texts_stores_duplicated_texts(vector_name: Optional[str]) -> None: """Test end to end Qdrant.add_texts stores duplicated texts separately.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest client = QdrantClient(":memory:") collection_name = uuid.uuid4().hex vectors_config = rest.VectorParams(size=10, distance=rest.Distance.COSINE) if vector_name is not None: vectors_config = {vector_name: vectors_config} # type: ignore[assignment] client.recreate_collection(collection_name, vectors_config=vectors_config) vec_store = Qdrant( client, collection_name, embeddings=ConsistentFakeEmbeddings(), vector_name=vector_name, ) ids = vec_store.add_texts(["abc", "abc"], [{"a": 1}, {"a": 2}]) assert 2 == len(set(ids)) assert 2 == client.count(collection_name).count @pytest.mark.parametrize("batch_size", [1, 64]) def test_qdrant_add_texts_stores_ids(batch_size: int) -> None: """Test end to end Qdrant.add_texts stores provided ids.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest ids = [ "fa38d572-4c31-4579-aedc-1960d79df6df", "cdc1aa36-d6ab-4fb2-8a94-56674fd27484", ] client = QdrantClient(":memory:") collection_name = uuid.uuid4().hex client.recreate_collection( collection_name, vectors_config=rest.VectorParams(size=10, distance=rest.Distance.COSINE), ) vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings()) returned_ids = vec_store.add_texts(["abc", "def"], ids=ids, batch_size=batch_size) assert all(first == second for first, second in zip(ids, returned_ids)) assert 2 == client.count(collection_name).count stored_ids = [point.id for point in client.scroll(collection_name)[0]] assert set(ids) == set(stored_ids) @pytest.mark.parametrize("vector_name", ["custom-vector"]) def test_qdrant_add_texts_stores_embeddings_as_named_vectors(vector_name: str) -> None: """Test end to end Qdrant.add_texts stores named vectors if name is provided.""" from qdrant_client 
import QdrantClient from qdrant_client.http import models as rest collection_name = uuid.uuid4().hex client = QdrantClient(":memory:") client.recreate_collection( collection_name, vectors_config={ vector_name: rest.VectorParams(size=10, distance=rest.Distance.COSINE) }, ) vec_store = Qdrant( client, collection_name, ConsistentFakeEmbeddings(), vector_name=vector_name, ) vec_store.add_texts(["lorem", "ipsum", "dolor", "sit", "amet"]) assert 5 == client.count(collection_name).count assert all( vector_name in point.vector # type: ignore[operator] for point in client.scroll(collection_name, with_vectors=True)[0] )
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~modelscope_hub.py
from typing import Any, List, Optional

from langchain_core.pydantic_v1 import BaseModel, Extra
from langchain_core.schema.embeddings import Embeddings


class ModelScopeEmbeddings(BaseModel, Embeddings):
    """ModelScopeHub embedding models.

    To use, you should have the ``modelscope`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import ModelScopeEmbeddings
            model_id = "damo/nlp_corom_sentence-embedding_english-base"
            embed = ModelScopeEmbeddings(model_id=model_id, model_revision="v1.0.0")
    """

    embed: Any
    model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
    """Model name to use."""
    model_revision: Optional[str] = None

    def __init__(self, **kwargs: Any):
        """Initialize the modelscope embedding pipeline."""
        super().__init__(**kwargs)
        try:
            from modelscope.pipelines import pipeline
            from modelscope.utils.constant import Tasks
        except ImportError as e:
            raise ImportError(
                "Could not import the modelscope python package. "
                "Please install it with `pip install modelscope`."
            ) from e
        self.embed = pipeline(
            Tasks.sentence_embedding,
            model=self.model_id,
            model_revision=self.model_revision,
        )

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a modelscope embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        inputs = {"source_sentence": texts}
        embeddings = self.embed(input=inputs)["text_embedding"]
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a modelscope embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        inputs = {"source_sentence": [text]}
        embedding = self.embed(input=inputs)["text_embedding"][0]
        return embedding.tolist()
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~agents~output_parsers~react_single_input.py
import re
from typing import Union

from langchain_core.schema import AgentAction, AgentFinish, OutputParserException

from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS

FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action:' after 'Thought:'"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
    "Parsing LLM output produced both a final answer and a parse-able action:"
)


class ReActSingleInputOutputParser(AgentOutputParser):
    """Parses ReAct-style LLM calls that have a single tool input.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an AgentAction
    being returned.

    ```
    Thought: agent thought here
    Action: search
    Action Input: what is the temperature in SF?
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    Thought: agent thought here
    Final Answer: The temperature is 100 degrees
    ```
    """

    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        includes_answer = FINAL_ANSWER_ACTION in text
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        )
        action_match = re.search(regex, text, re.DOTALL)
        if action_match:
            if includes_answer:
                raise OutputParserException(
                    f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
                )
            action = action_match.group(1).strip()
            action_input = action_match.group(2)
            tool_input = action_input.strip(" ")
            tool_input = tool_input.strip('"')

            return AgentAction(action, tool_input, text)

        elif includes_answer:
            return AgentFinish(
                {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
            )

        if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        elif not re.search(
            r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
        ):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        else:
            raise OutputParserException(f"Could not parse LLM output: `{text}`")

    @property
    def _type(self) -> str:
        return "react-single-input"
[]
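A usage sketch showing the single-input ReAct format this parser expects:

from langchain.agents.output_parsers.react_single_input import (
    ReActSingleInputOutputParser,
)

parser = ReActSingleInputOutputParser()
action = parser.parse(
    "Thought: I need to look up the weather\n"
    "Action: search\n"
    "Action Input: what is the temperature in SF?"
)
print(action.tool, action.tool_input)  # search / what is the temperature in SF?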
2024-01-10
axgpt/langchain
libs~langchain~langchain~utilities~wikipedia.py
"""Util that calls Wikipedia.""" import logging from typing import Any, Dict, List, Optional from langchain_core.pydantic_v1 import BaseModel, root_validator from langchain_core.schema import Document logger = logging.getLogger(__name__) WIKIPEDIA_MAX_QUERY_LENGTH = 300 class WikipediaAPIWrapper(BaseModel): """Wrapper around WikipediaAPI. To use, you should have the ``wikipedia`` python package installed. This wrapper will use the Wikipedia API to conduct searches and fetch page summaries. By default, it will return the page summaries of the top-k results. It limits the Document content by doc_content_chars_max. """ wiki_client: Any #: :meta private: top_k_results: int = 3 lang: str = "en" load_all_available_meta: bool = False doc_content_chars_max: int = 4000 @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: import wikipedia wikipedia.set_lang(values["lang"]) values["wiki_client"] = wikipedia except ImportError: raise ImportError( "Could not import wikipedia python package. " "Please install it with `pip install wikipedia`." ) return values def run(self, query: str) -> str: """Run Wikipedia search and get page summaries.""" page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH]) summaries = [] for page_title in page_titles[: self.top_k_results]: if wiki_page := self._fetch_page(page_title): if summary := self._formatted_page_summary(page_title, wiki_page): summaries.append(summary) if not summaries: return "No good Wikipedia Search Result was found" return "\n\n".join(summaries)[: self.doc_content_chars_max] @staticmethod def _formatted_page_summary(page_title: str, wiki_page: Any) -> Optional[str]: return f"Page: {page_title}\nSummary: {wiki_page.summary}" def _page_to_document(self, page_title: str, wiki_page: Any) -> Document: main_meta = { "title": page_title, "summary": wiki_page.summary, "source": wiki_page.url, } add_meta = ( { "categories": wiki_page.categories, "page_url": wiki_page.url, "image_urls": wiki_page.images, "related_titles": wiki_page.links, "parent_id": wiki_page.parent_id, "references": wiki_page.references, "revision_id": wiki_page.revision_id, "sections": wiki_page.sections, } if self.load_all_available_meta else {} ) doc = Document( page_content=wiki_page.content[: self.doc_content_chars_max], metadata={ **main_meta, **add_meta, }, ) return doc def _fetch_page(self, page: str) -> Optional[str]: try: return self.wiki_client.page(title=page, auto_suggest=False) except ( self.wiki_client.exceptions.PageError, self.wiki_client.exceptions.DisambiguationError, ): return None def load(self, query: str) -> List[Document]: """ Run Wikipedia search and get the article text plus the meta information. See Returns: a list of documents. """ page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH]) docs = [] for page_title in page_titles[: self.top_k_results]: if wiki_page := self._fetch_page(page_title): if doc := self._page_to_document(page_title, wiki_page): docs.append(doc) return docs
[]
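A usage sketch for the wrapper (requires the optional wikipedia package, `pip install wikipedia`):

from langchain.utilities.wikipedia import WikipediaAPIWrapper

wiki = WikipediaAPIWrapper(top_k_results=2, doc_content_chars_max=1000)
print(wiki.run("Large language model"))   # concatenated page summaries
docs = wiki.load("Large language model")  # Documents with summary/source metadata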
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~remote_retriever.py
from typing import List, Optional

import aiohttp
import requests
from langchain_core.schema import BaseRetriever, Document

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)


class RemoteLangChainRetriever(BaseRetriever):
    """`LangChain API` retriever."""

    url: str
    """URL of the remote LangChain API."""
    headers: Optional[dict] = None
    """Headers to use for the request."""
    input_key: str = "message"
    """Key to use for the input in the request."""
    response_key: str = "response"
    """Key to use for the response in the request."""
    page_content_key: str = "page_content"
    """Key to use for the page content in the response."""
    metadata_key: str = "metadata"
    """Key to use for the metadata in the response."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        response = requests.post(
            self.url, json={self.input_key: query}, headers=self.headers
        )
        result = response.json()
        return [
            Document(
                page_content=r[self.page_content_key], metadata=r[self.metadata_key]
            )
            for r in result[self.response_key]
        ]

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        async with aiohttp.ClientSession() as session:
            async with session.request(
                "POST", self.url, headers=self.headers, json={self.input_key: query}
            ) as response:
                result = await response.json()
        return [
            Document(
                page_content=r[self.page_content_key], metadata=r[self.metadata_key]
            )
            for r in result[self.response_key]
        ]
[]
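A usage sketch with a hypothetical endpoint; with the default keys the remote API is expected to accept {"message": ...} and return {"response": [{"page_content": ..., "metadata": ...}, ...]}:

from langchain.retrievers import RemoteLangChainRetriever

retriever = RemoteLangChainRetriever(url="http://localhost:8000/retrieve")  # hypothetical URL
docs = retriever.get_relevant_documents("What is the refund policy?")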
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_models~ernie.py
import json import logging import threading from typing import Any, Dict, List, Mapping, Optional import requests from langchain_core.pydantic_v1 import root_validator from langchain_core.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.chat_models.base import BaseChatModel from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} else: raise ValueError(f"Got unknown type {message}") return message_dict class ErnieBotChat(BaseChatModel): """`ERNIE-Bot` large language model. ERNIE-Bot is a large language model developed by Baidu, covering a huge amount of Chinese data. To use, you should have the `ernie_client_id` and `ernie_client_secret` set, or set the environment variable `ERNIE_CLIENT_ID` and `ERNIE_CLIENT_SECRET`. Note: access_token will be automatically generated based on client_id and client_secret, and will be regenerated after expiration (30 days). Default model is `ERNIE-Bot-turbo`, currently supported models are `ERNIE-Bot-turbo`, `ERNIE-Bot` Example: .. code-block:: python from langchain.chat_models import ErnieBotChat chat = ErnieBotChat(model_name='ERNIE-Bot') """ ernie_api_base: Optional[str] = None """Baidu application custom endpoints""" ernie_client_id: Optional[str] = None """Baidu application client id""" ernie_client_secret: Optional[str] = None """Baidu application client secret""" access_token: Optional[str] = None """access token is generated by client id and client secret, setting this value directly will cause an error""" model_name: str = "ERNIE-Bot-turbo" """model name of ernie, default is `ERNIE-Bot-turbo`. Currently supported `ERNIE-Bot-turbo`, `ERNIE-Bot`""" request_timeout: Optional[int] = 60 """request timeout for chat http requests""" streaming: Optional[bool] = False """streaming mode. 
not supported yet.""" top_p: Optional[float] = 0.8 temperature: Optional[float] = 0.95 penalty_score: Optional[float] = 1 _lock = threading.Lock() @root_validator() def validate_environment(cls, values: Dict) -> Dict: values["ernie_api_base"] = get_from_dict_or_env( values, "ernie_api_base", "ERNIE_API_BASE", "https://aip.baidubce.com" ) values["ernie_client_id"] = get_from_dict_or_env( values, "ernie_client_id", "ERNIE_CLIENT_ID", ) values["ernie_client_secret"] = get_from_dict_or_env( values, "ernie_client_secret", "ERNIE_CLIENT_SECRET", ) return values def _chat(self, payload: object) -> dict: base_url = f"{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/chat" model_paths = { "ERNIE-Bot-turbo": "eb-instant", "ERNIE-Bot": "completions", "ERNIE-Bot-4": "completions_pro", "BLOOMZ-7B": "bloomz_7b1", "Llama-2-7b-chat": "llama_2_7b", "Llama-2-13b-chat": "llama_2_13b", "Llama-2-70b-chat": "llama_2_70b", } if self.model_name in model_paths: url = f"{base_url}/{model_paths[self.model_name]}" else: raise ValueError(f"Got unknown model_name {self.model_name}") resp = requests.post( url, timeout=self.request_timeout, headers={ "Content-Type": "application/json", }, params={"access_token": self.access_token}, json=payload, ) return resp.json() def _refresh_access_token_with_lock(self) -> None: with self._lock: logger.debug("Refreshing access token") base_url: str = f"{self.ernie_api_base}/oauth/2.0/token" resp = requests.post( base_url, timeout=10, headers={ "Content-Type": "application/json", "Accept": "application/json", }, params={ "grant_type": "client_credentials", "client_id": self.ernie_client_id, "client_secret": self.ernie_client_secret, }, ) self.access_token = str(resp.json().get("access_token")) def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: raise ValueError("`streaming` option currently unsupported.") if not self.access_token: self._refresh_access_token_with_lock() payload = { "messages": [_convert_message_to_dict(m) for m in messages], "top_p": self.top_p, "temperature": self.temperature, "penalty_score": self.penalty_score, **kwargs, } logger.debug(f"Payload for ernie api is {payload}") resp = self._chat(payload) if resp.get("error_code"): if resp.get("error_code") == 111: logger.debug("access_token expired, refresh it") self._refresh_access_token_with_lock() resp = self._chat(payload) else: raise ValueError(f"Error from ErnieChat api response: {resp}") return self._create_chat_result(resp) def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: if "function_call" in response: fc_str = '{{"function_call": {}}}'.format( json.dumps(response.get("function_call")) ) generations = [ChatGeneration(message=AIMessage(content=fc_str))] else: generations = [ ChatGeneration(message=AIMessage(content=response.get("result"))) ] token_usage = response.get("usage", {}) llm_output = {"token_usage": token_usage, "model_name": self.model_name} return ChatResult(generations=generations, llm_output=llm_output) @property def _llm_type(self) -> str: return "ernie-bot-chat"
[]
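A usage sketch for ErnieBotChat, assuming ERNIE_CLIENT_ID and ERNIE_CLIENT_SECRET are set in the environment:

from langchain_core.schema import HumanMessage

from langchain.chat_models import ErnieBotChat

chat = ErnieBotChat(model_name="ERNIE-Bot")
message = chat([HumanMessage(content="Introduce ERNIE-Bot in one sentence.")])
print(message.content)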
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~vespa.py
from __future__ import annotations from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union from langchain_core.schema.embeddings import Embeddings from langchain.docstore.document import Document from langchain.vectorstores.base import VectorStore, VectorStoreRetriever class VespaStore(VectorStore): """ `Vespa` vector store. To use, you should have the python client library ``pyvespa`` installed. Example: .. code-block:: python from langchain.vectorstores import VespaStore from langchain.embeddings.openai import OpenAIEmbeddings from vespa.application import Vespa # Create a vespa client dependent upon your application, # e.g. either connecting to Vespa Cloud or a local deployment # such as Docker. Please refer to the PyVespa documentation on # how to initialize the client. vespa_app = Vespa(url="...", port=..., application_package=...) # You need to instruct LangChain on which fields to use for embeddings vespa_config = dict( page_content_field="text", embedding_field="embedding", input_field="query_embedding", metadata_fields=["date", "rating", "author"] ) embedding_function = OpenAIEmbeddings() vectorstore = VespaStore(vespa_app, embedding_function, **vespa_config) """ def __init__( self, app: Any, embedding_function: Optional[Embeddings] = None, page_content_field: Optional[str] = None, embedding_field: Optional[str] = None, input_field: Optional[str] = None, metadata_fields: Optional[List[str]] = None, ) -> None: """ Initialize with a PyVespa client. """ try: from vespa.application import Vespa except ImportError: raise ImportError( "Could not import Vespa python package. " "Please install it with `pip install pyvespa`." ) if not isinstance(app, Vespa): raise ValueError( f"app should be an instance of vespa.application.Vespa, got {type(app)}" ) self._vespa_app = app self._embedding_function = embedding_function self._page_content_field = page_content_field self._embedding_field = embedding_field self._input_field = input_field self._metadata_fields = metadata_fields def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """ Add texts to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = None if self._embedding_function is not None: embeddings = self._embedding_function.embed_documents(list(texts)) if ids is None: ids = [str(f"{i+1}") for i, _ in enumerate(texts)] batch = [] for i, text in enumerate(texts): fields: Dict[str, Union[str, List[float]]] = {} if self._page_content_field is not None: fields[self._page_content_field] = text if self._embedding_field is not None and embeddings is not None: fields[self._embedding_field] = embeddings[i] if metadatas is not None and self._metadata_fields is not None: for metadata_field in self._metadata_fields: if metadata_field in metadatas[i]: fields[metadata_field] = metadatas[i][metadata_field] batch.append({"id": ids[i], "fields": fields}) results = self._vespa_app.feed_batch(batch) for result in results: if not (str(result.status_code).startswith("2")): raise RuntimeError( f"Could not add document to Vespa. " f"Error code: {result.status_code}. 
" f"Message: {result.json['message']}" ) return ids def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: if ids is None: return False batch = [{"id": id} for id in ids] result = self._vespa_app.delete_batch(batch) return sum([0 if r.status_code == 200 else 1 for r in result]) == 0 def _create_query( self, query_embedding: List[float], k: int = 4, **kwargs: Any ) -> Dict: hits = k doc_embedding_field = self._embedding_field input_embedding_field = self._input_field ranking_function = kwargs["ranking"] if "ranking" in kwargs else "default" filter = kwargs["filter"] if "filter" in kwargs else None approximate = kwargs["approximate"] if "approximate" in kwargs else False approximate = "true" if approximate else "false" yql = "select * from sources * where " yql += f"{{targetHits: {hits}, approximate: {approximate}}}" yql += f"nearestNeighbor({doc_embedding_field}, {input_embedding_field})" if filter is not None: yql += f" and {filter}" query = { "yql": yql, f"input.query({input_embedding_field})": query_embedding, "ranking": ranking_function, "hits": hits, } return query def similarity_search_by_vector_with_score( self, query_embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """ Performs similarity search from a embeddings vector. Args: query_embedding: Embeddings vector to search for. k: Number of results to return. custom_query: Use this custom query instead default query (kwargs) kwargs: other vector store specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if "custom_query" in kwargs: query = kwargs["custom_query"] else: query = self._create_query(query_embedding, k, **kwargs) try: response = self._vespa_app.query(body=query) except Exception as e: raise RuntimeError( f"Could not retrieve data from Vespa: " f"{e.args[0][0]['summary']}. " f"Error: {e.args[0][0]['message']}" ) if not str(response.status_code).startswith("2"): raise RuntimeError( f"Could not retrieve data from Vespa. " f"Error code: {response.status_code}. 
" f"Message: {response.json['message']}" ) root = response.json["root"] if "errors" in root: import json raise RuntimeError(json.dumps(root["errors"])) if response is None or response.hits is None: return [] docs = [] for child in response.hits: page_content = child["fields"][self._page_content_field] score = child["relevance"] metadata = {"id": child["id"]} if self._metadata_fields is not None: for field in self._metadata_fields: metadata[field] = child["fields"].get(field) doc = Document(page_content=page_content, metadata=metadata) docs.append((doc, score)) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: results = self.similarity_search_by_vector_with_score(embedding, k, **kwargs) return [r[0] for r in results] def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: query_emb = [] if self._embedding_function is not None: query_emb = self._embedding_function.embed_query(query) return self.similarity_search_by_vector_with_score(query_emb, k, **kwargs) def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: results = self.similarity_search_with_score(query, k, **kwargs) return [r[0] for r in results] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: raise NotImplementedError("MMR search not implemented") def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: raise NotImplementedError("MMR search by vector not implemented") @classmethod def from_texts( cls: Type[VespaStore], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> VespaStore: vespa = cls(embedding_function=embedding, **kwargs) vespa.add_texts(texts=texts, metadatas=metadatas, ids=ids) return vespa def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever: return super().as_retriever(**kwargs)
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~agents~test_structured_chat.py
"""Unittests for langchain.agents.chat package.""" from textwrap import dedent from typing import Any, Tuple from langchain_core.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain_core.schema import AgentAction, AgentFinish from langchain_core.tool import Tool from langchain.agents.structured_chat.base import StructuredChatAgent from langchain.agents.structured_chat.output_parser import StructuredChatOutputParser output_parser = StructuredChatOutputParser() def get_action_and_input(text: str) -> Tuple[str, str]: output = output_parser.parse(text) if isinstance(output, AgentAction): return output.tool, str(output.tool_input) elif isinstance(output, AgentFinish): return output.return_values["output"], output.log else: raise ValueError("Unexpected output type") def test_parse_with_language() -> None: llm_output = """I can use the `foo` tool to achieve the goal. Action: ```json { "action": "foo", "action_input": "bar" } ``` """ action, action_input = get_action_and_input(llm_output) assert action == "foo" assert action_input == "bar" def test_parse_without_language() -> None: llm_output = """I can use the `foo` tool to achieve the goal. Action: ``` { "action": "foo", "action_input": "bar" } ``` """ action, action_input = get_action_and_input(llm_output) assert action == "foo" assert action_input == "bar" def test_parse_with_language_and_spaces() -> None: llm_output = """I can use the `foo` tool to achieve the goal. Action: ```json { "action": "foo", "action_input": "bar" } ``` """ action, action_input = get_action_and_input(llm_output) assert action == "foo" assert action_input == "bar" def test_parse_without_language_without_a_new_line() -> None: llm_output = """I can use the `foo` tool to achieve the goal. Action: ```{"action": "foo", "action_input": "bar"}``` """ action, action_input = get_action_and_input(llm_output) assert action == "foo" assert action_input == "bar" def test_parse_with_language_without_a_new_line() -> None: llm_output = """I can use the `foo` tool to achieve the goal. Action: ```json{"action": "foo", "action_input": "bar"}``` """ # TODO: How should this be handled? output, log = get_action_and_input(llm_output) assert output == llm_output assert log == llm_output def test_parse_case_matched_and_final_answer() -> None: llm_output = """I can use the `foo` tool to achieve the goal. Action: ```json { "action": "Final Answer", "action_input": "This is the final answer" } ``` """ output, log = get_action_and_input(llm_output) assert output == "This is the final answer" assert log == llm_output # TODO: add more tests. # Test: StructuredChatAgent.create_prompt() method. class TestCreatePrompt: # Test: Output should be a ChatPromptTemplate with sys and human messages. def test_create_prompt_output(self) -> None: prompt = StructuredChatAgent.create_prompt( [Tool(name="foo", description="Test tool FOO", func=lambda x: x)] ) assert isinstance(prompt, ChatPromptTemplate) assert len(prompt.messages) == 2 assert isinstance(prompt.messages[0], SystemMessagePromptTemplate) assert isinstance(prompt.messages[1], HumanMessagePromptTemplate) # Test: Format with a single tool. def test_system_message_single_tool(self) -> None: prompt: Any = StructuredChatAgent.create_prompt( [Tool(name="foo", description="Test tool FOO", func=lambda x: x)] ) actual = prompt.messages[0].prompt.format() expected = dedent( """ Respond to the human as helpfully and accurately as possible. 
You have access to the following tools: foo: Test tool FOO, args: {'tool_input': {'type': 'string'}} Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). Valid "action" values: "Final Answer" or foo Provide only ONE action per $JSON_BLOB, as shown: ``` { "action": $TOOL_NAME, "action_input": $INPUT } ``` Follow this format: Question: input question to answer Thought: consider previous and subsequent steps Action: ``` $JSON_BLOB ``` Observation: action result ... (repeat Thought/Action/Observation N times) Thought: I know what to respond Action: ``` { "action": "Final Answer", "action_input": "Final response to human" } ``` Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. Thought: """ # noqa: E501 ).strip() assert actual == expected # Test: Format with multiple tools. # # Check: # # You have access to the following tools: # ... # # and # # Valid "action" values: "Final Answer" or ... # def test_system_message_multiple_tools(self) -> None: prompt: Any = StructuredChatAgent.create_prompt( [ Tool(name="foo", description="Test tool FOO", func=lambda x: x), Tool(name="bar", description="Test tool BAR", func=lambda x: x), ] ) actual = prompt.messages[0].prompt.format() expected = dedent( """ Respond to the human as helpfully and accurately as possible. You have access to the following tools: foo: Test tool FOO, args: {'tool_input': {'type': 'string'}} bar: Test tool BAR, args: {'tool_input': {'type': 'string'}} Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). Valid "action" values: "Final Answer" or foo, bar Provide only ONE action per $JSON_BLOB, as shown: ``` { "action": $TOOL_NAME, "action_input": $INPUT } ``` Follow this format: Question: input question to answer Thought: consider previous and subsequent steps Action: ``` $JSON_BLOB ``` Observation: action result ... (repeat Thought/Action/Observation N times) Thought: I know what to respond Action: ``` { "action": "Final Answer", "action_input": "Final response to human" } ``` Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. Thought: """ # noqa: E501 ).strip() assert actual == expected
[ "Test tool FOO", "Test tool BAR" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~fastembed.py
from typing import Any, Dict, List, Literal, Optional import numpy as np from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator from langchain_core.schema.embeddings import Embeddings class FastEmbedEmbeddings(BaseModel, Embeddings): """Qdrant FastEmbedding models. FastEmbed is a lightweight, fast, Python library built for embedding generation. See more documentation at: * https://github.com/qdrant/fastembed/ * https://qdrant.github.io/fastembed/ To use this class, you must install the `fastembed` Python package. `pip install fastembed` Example: from langchain.embeddings import FastEmbedEmbeddings fastembed = FastEmbedEmbeddings() """ model_name: str = "BAAI/bge-small-en-v1.5" """Name of the FastEmbedding model to use Defaults to "BAAI/bge-small-en-v1.5" Find the list of supported models at https://qdrant.github.io/fastembed/examples/Supported_Models/ """ max_length: int = 512 """The maximum number of tokens. Defaults to 512. Unknown behavior for values > 512. """ cache_dir: Optional[str] """The path to the cache directory. Defaults to `local_cache` in the parent directory """ threads: Optional[int] """The number of threads single onnxruntime session can use. Defaults to None """ doc_embed_type: Literal["default", "passage"] = "default" """Type of embedding to use for documents "default": Uses FastEmbed's default embedding method "passage": Prefixes the text with "passage" before embedding. """ _model: Any # : :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that FastEmbed has been installed.""" try: from fastembed.embedding import FlagEmbedding model_name = values.get("model_name") max_length = values.get("max_length") cache_dir = values.get("cache_dir") threads = values.get("threads") values["_model"] = FlagEmbedding( model_name=model_name, max_length=max_length, cache_dir=cache_dir, threads=threads, ) except ImportError as ie: raise ImportError( "Could not import 'fastembed' Python package. " "Please install it with `pip install fastembed`." ) from ie return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Generate embeddings for documents using FastEmbed. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings: List[np.ndarray] if self.doc_embed_type == "passage": embeddings = self._model.passage_embed(texts) else: embeddings = self._model.embed(texts) return [e.tolist() for e in embeddings] def embed_query(self, text: str) -> List[float]: """Generate query embeddings using FastEmbed. Args: text: The text to embed. Returns: Embeddings for the text. """ query_embeddings: np.ndarray = next(self._model.query_embed(text)) return query_embeddings.tolist()
[]
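A short usage sketch for the wrapper above; it assumes `pip install fastembed` and that the default `BAAI/bge-small-en-v1.5` model can be downloaded on first use:

```python
from langchain.embeddings import FastEmbedEmbeddings

embeddings = FastEmbedEmbeddings(
    model_name="BAAI/bge-small-en-v1.5",
    doc_embed_type="passage",  # prefix documents with "passage" before embedding
)

doc_vectors = embeddings.embed_documents(["FastEmbed generates embeddings locally."])
query_vector = embeddings.embed_query("What generates embeddings locally?")
print(len(doc_vectors[0]), len(query_vector))  # embedding dimensionality, e.g. 384 for this model
```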
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~vectorstores~qdrant~test_max_marginal_relevance.py
from typing import Optional import pytest from langchain_core.schema import Document from langchain.vectorstores import Qdrant from tests.integration_tests.vectorstores.fake_embeddings import ( ConsistentFakeEmbeddings, ) @pytest.mark.parametrize("batch_size", [1, 64]) @pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "test_content"]) @pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "test_metadata"]) @pytest.mark.parametrize("vector_name", [None, "my-vector"]) def test_qdrant_max_marginal_relevance_search( batch_size: int, content_payload_key: str, metadata_payload_key: str, vector_name: Optional[str], ) -> None: """Test end to end construction and MRR search.""" from qdrant_client import models filter = models.Filter( must=[ models.FieldCondition( key=f"{metadata_payload_key}.page", match=models.MatchValue( value=2, ), ), ], ) texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = Qdrant.from_texts( texts, ConsistentFakeEmbeddings(), metadatas=metadatas, location=":memory:", content_payload_key=content_payload_key, metadata_payload_key=metadata_payload_key, batch_size=batch_size, vector_name=vector_name, distance_func="EUCLID", # Euclid distance used to avoid normalization ) output = docsearch.max_marginal_relevance_search( "foo", k=2, fetch_k=3, lambda_mult=0.0 ) assert output == [ Document(page_content="foo", metadata={"page": 0}), Document(page_content="baz", metadata={"page": 2}), ] output = docsearch.max_marginal_relevance_search( "foo", k=2, fetch_k=3, lambda_mult=0.0, filter=filter ) assert output == [ Document(page_content="baz", metadata={"page": 2}), ]
[]
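Outside the test suite, the same MMR call can be exercised against an in-memory Qdrant collection; this sketch assumes `pip install qdrant-client` plus any `Embeddings` implementation (FastEmbed is used here purely as an example):

```python
from langchain.embeddings import FastEmbedEmbeddings  # assumption: any Embeddings class works here
from langchain.vectorstores import Qdrant

texts = ["foo", "bar", "baz"]
docsearch = Qdrant.from_texts(
    texts,
    FastEmbedEmbeddings(),
    metadatas=[{"page": i} for i in range(len(texts))],
    location=":memory:",  # in-memory Qdrant collection, no server required
)

# MMR re-ranks fetch_k candidates and returns k results balancing relevance and diversity.
docs = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3, lambda_mult=0.5)
for doc in docs:
    print(doc.page_content, doc.metadata)
```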
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~retrievers~test_bm25.py
import pytest
from langchain_core.schema import Document

from langchain.retrievers.bm25 import BM25Retriever


@pytest.mark.requires("rank_bm25")
def test_from_texts() -> None:
    input_texts = ["I have a pen.", "Do you have a pen?", "I have a bag."]
    bm25_retriever = BM25Retriever.from_texts(texts=input_texts)
    assert len(bm25_retriever.docs) == 3
    assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]


@pytest.mark.requires("rank_bm25")
def test_from_texts_with_bm25_params() -> None:
    input_texts = ["I have a pen.", "Do you have a pen?", "I have a bag."]
    bm25_retriever = BM25Retriever.from_texts(
        texts=input_texts, bm25_params={"epsilon": 10}
    )
    # should count only multiple words (have, pen)
    assert bm25_retriever.vectorizer.epsilon == 10


@pytest.mark.requires("rank_bm25")
def test_from_documents() -> None:
    input_docs = [
        Document(page_content="I have a pen."),
        Document(page_content="Do you have a pen?"),
        Document(page_content="I have a bag."),
    ]
    bm25_retriever = BM25Retriever.from_documents(documents=input_docs)
    assert len(bm25_retriever.docs) == 3
    assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]
[]
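A hedged usage sketch for the retriever the tests above exercise; it assumes `pip install rank_bm25` and uses the same toy corpus:

```python
from langchain.retrievers.bm25 import BM25Retriever

retriever = BM25Retriever.from_texts(
    ["I have a pen.", "Do you have a pen?", "I have a bag."]
)
docs = retriever.get_relevant_documents("Who has a pen?")
for doc in docs:
    print(doc.page_content)
```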
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~kendra.py
import re from abc import ABC, abstractmethod from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Union from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator, validator from langchain_core.schema import BaseRetriever from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.docstore.document import Document def clean_excerpt(excerpt: str) -> str: """Clean an excerpt from Kendra. Args: excerpt: The excerpt to clean. Returns: The cleaned excerpt. """ if not excerpt: return excerpt res = re.sub(r"\s+", " ", excerpt).replace("...", "") return res def combined_text(item: "ResultItem") -> str: """Combine a ResultItem title and excerpt into a single string. Args: item: the ResultItem of a Kendra search. Returns: A combined text of the title and excerpt of the given item. """ text = "" title = item.get_title() if title: text += f"Document Title: {title}\n" excerpt = clean_excerpt(item.get_excerpt()) if excerpt: text += f"Document Excerpt: \n{excerpt}\n" return text DocumentAttributeValueType = Union[str, int, List[str], None] """Possible types of a DocumentAttributeValue. Dates are also represented as str. """ # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class Highlight(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Information that highlights the keywords in the excerpt.""" BeginOffset: int """The zero-based location in the excerpt where the highlight starts.""" EndOffset: int """The zero-based location in the excerpt where the highlight ends.""" TopAnswer: Optional[bool] """Indicates whether the result is the best one.""" Type: Optional[str] """The highlight type: STANDARD or THESAURUS_SYNONYM.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class TextWithHighLights(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Text with highlights.""" Text: str """The text.""" Highlights: Optional[Any] """The highlights.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class AdditionalResultAttributeValue( # type: ignore[call-arg] BaseModel, extra=Extra.allow ): """Value of an additional result attribute.""" TextWithHighlightsValue: TextWithHighLights """The text with highlights value.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class AdditionalResultAttribute(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Additional result attribute.""" Key: str """The key of the attribute.""" ValueType: Literal["TEXT_WITH_HIGHLIGHTS_VALUE"] """The type of the value.""" Value: AdditionalResultAttributeValue """The value of the attribute.""" def get_value_text(self) -> str: return self.Value.TextWithHighlightsValue.Text # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class DocumentAttributeValue(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Value of a document attribute.""" DateValue: Optional[str] """The date expressed as an ISO 8601 string.""" LongValue: Optional[int] """The long value.""" StringListValue: Optional[List[str]] """The string list value.""" StringValue: Optional[str] """The string value.""" @property def value(self) -> DocumentAttributeValueType: """The only defined document attribute value or None. According to Amazon Kendra, you can only provide one value for a document attribute. 
""" if self.DateValue: return self.DateValue if self.LongValue: return self.LongValue if self.StringListValue: return self.StringListValue if self.StringValue: return self.StringValue return None # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class DocumentAttribute(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Document attribute.""" Key: str """The key of the attribute.""" Value: DocumentAttributeValue """The value of the attribute.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class ResultItem(BaseModel, ABC, extra=Extra.allow): # type: ignore[call-arg] """Base class of a result item.""" Id: Optional[str] """The ID of the relevant result item.""" DocumentId: Optional[str] """The document ID.""" DocumentURI: Optional[str] """The document URI.""" DocumentAttributes: Optional[List[DocumentAttribute]] = [] """The document attributes.""" @abstractmethod def get_title(self) -> str: """Document title.""" @abstractmethod def get_excerpt(self) -> str: """Document excerpt or passage original content as retrieved by Kendra.""" def get_additional_metadata(self) -> dict: """Document additional metadata dict. This returns any extra metadata except these: * result_id * document_id * source * title * excerpt * document_attributes """ return {} def get_document_attributes_dict(self) -> Dict[str, DocumentAttributeValueType]: """Document attributes dict.""" return {attr.Key: attr.Value.value for attr in (self.DocumentAttributes or [])} def to_doc( self, page_content_formatter: Callable[["ResultItem"], str] = combined_text ) -> Document: """Converts this item to a Document.""" page_content = page_content_formatter(self) metadata = self.get_additional_metadata() metadata.update( { "result_id": self.Id, "document_id": self.DocumentId, "source": self.DocumentURI, "title": self.get_title(), "excerpt": self.get_excerpt(), "document_attributes": self.get_document_attributes_dict(), } ) return Document(page_content=page_content, metadata=metadata) class QueryResultItem(ResultItem): """Query API result item.""" DocumentTitle: TextWithHighLights """The document title.""" FeedbackToken: Optional[str] """Identifies a particular result from a particular query.""" Format: Optional[str] """ If the Type is ANSWER, then format is either: * TABLE: a table excerpt is returned in TableExcerpt; * TEXT: a text excerpt is returned in DocumentExcerpt. 
""" Type: Optional[str] """Type of result: DOCUMENT or QUESTION_ANSWER or ANSWER""" AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = [] """One or more additional attributes associated with the result.""" DocumentExcerpt: Optional[TextWithHighLights] """Excerpt of the document text.""" def get_title(self) -> str: return self.DocumentTitle.Text def get_attribute_value(self) -> str: if not self.AdditionalAttributes: return "" if not self.AdditionalAttributes[0]: return "" else: return self.AdditionalAttributes[0].get_value_text() def get_excerpt(self) -> str: if ( self.AdditionalAttributes and self.AdditionalAttributes[0].Key == "AnswerText" ): excerpt = self.get_attribute_value() elif self.DocumentExcerpt: excerpt = self.DocumentExcerpt.Text else: excerpt = "" return excerpt def get_additional_metadata(self) -> dict: additional_metadata = {"type": self.Type} return additional_metadata class RetrieveResultItem(ResultItem): """Retrieve API result item.""" DocumentTitle: Optional[str] """The document title.""" Content: Optional[str] """The content of the item.""" def get_title(self) -> str: return self.DocumentTitle or "" def get_excerpt(self) -> str: return self.Content or "" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class QueryResult(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """`Amazon Kendra Query API` search result. It is composed of: * Relevant suggested answers: either a text excerpt or table excerpt. * Matching FAQs or questions-answer from your FAQ file. * Documents including an excerpt of each document with its title. """ ResultItems: List[QueryResultItem] """The result items.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class RetrieveResult(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """`Amazon Kendra Retrieve API` search result. It is composed of: * relevant passages or text excerpts given an input query. """ QueryId: str """The ID of the query.""" ResultItems: List[RetrieveResultItem] """The result items.""" class AmazonKendraRetriever(BaseRetriever): """`Amazon Kendra Index` retriever. Args: index_id: Kendra index id region_name: The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config. credentials_profile_name: The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. top_k: No of results to return attribute_filter: Additional filtering of results based on metadata See: https://docs.aws.amazon.com/kendra/latest/APIReference page_content_formatter: generates the Document page_content allowing access to all result item attributes. By default, it uses the item's title and excerpt. client: boto3 client for Kendra user_context: Provides information about the user context See: https://docs.aws.amazon.com/kendra/latest/APIReference Example: .. 
code-block:: python retriever = AmazonKendraRetriever( index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03" ) """ index_id: str region_name: Optional[str] = None credentials_profile_name: Optional[str] = None top_k: int = 3 attribute_filter: Optional[Dict] = None page_content_formatter: Callable[[ResultItem], str] = combined_text client: Any user_context: Optional[Dict] = None @validator("top_k") def validate_top_k(cls, value: int) -> int: if value < 0: raise ValueError(f"top_k ({value}) cannot be negative.") return value @root_validator(pre=True) def create_client(cls, values: Dict[str, Any]) -> Dict[str, Any]: if values.get("client") is not None: return values try: import boto3 if values.get("credentials_profile_name"): session = boto3.Session(profile_name=values["credentials_profile_name"]) else: # use default credentials session = boto3.Session() client_params = {} if values.get("region_name"): client_params["region_name"] = values["region_name"] values["client"] = session.client("kendra", **client_params) return values except ImportError: raise ModuleNotFoundError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e def _kendra_query(self, query: str) -> Sequence[ResultItem]: kendra_kwargs = { "IndexId": self.index_id, "QueryText": query.strip(), "PageSize": self.top_k, } if self.attribute_filter is not None: kendra_kwargs["AttributeFilter"] = self.attribute_filter if self.user_context is not None: kendra_kwargs["UserContext"] = self.user_context response = self.client.retrieve(**kendra_kwargs) r_result = RetrieveResult.parse_obj(response) if r_result.ResultItems: return r_result.ResultItems # Retrieve API returned 0 results, fall back to Query API response = self.client.query(**kendra_kwargs) q_result = QueryResult.parse_obj(response) return q_result.ResultItems def _get_top_k_docs(self, result_items: Sequence[ResultItem]) -> List[Document]: top_docs = [ item.to_doc(self.page_content_formatter) for item in result_items[: self.top_k] ] return top_docs def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, ) -> List[Document]: """Run search on Kendra index and get top k documents Example: .. code-block:: python docs = retriever.get_relevant_documents('This is my query') """ result_items = self._kendra_query(query) top_k_docs = self._get_top_k_docs(result_items) return top_k_docs
[]
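A minimal sketch of wiring up the retriever above; the index id, region, and attribute filter are placeholders, and `boto3` plus valid AWS credentials are assumed:

```python
from langchain.retrievers import AmazonKendraRetriever

retriever = AmazonKendraRetriever(
    index_id="00000000-0000-0000-0000-000000000000",  # placeholder Kendra index id
    region_name="us-west-2",                          # placeholder region
    top_k=3,
    attribute_filter={                                # illustrative filter on a built-in attribute
        "EqualsTo": {
            "Key": "_language_code",
            "Value": {"StringValue": "en"},
        }
    },
)

docs = retriever.get_relevant_documents("How do I configure an index?")
for doc in docs:
    print(doc.metadata["title"], doc.metadata["source"])
```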
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~chat_message_histories~mongodb.py
import json import logging from typing import List from langchain_core.schema import ( BaseChatMessageHistory, ) from langchain_core.schema.messages import ( BaseMessage, _message_to_dict, messages_from_dict, ) logger = logging.getLogger(__name__) DEFAULT_DBNAME = "chat_history" DEFAULT_COLLECTION_NAME = "message_store" class MongoDBChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in MongoDB. Args: connection_string: connection string to connect to MongoDB session_id: arbitrary key that is used to store the messages of a single chat session. database_name: name of the database to use collection_name: name of the collection to use """ def __init__( self, connection_string: str, session_id: str, database_name: str = DEFAULT_DBNAME, collection_name: str = DEFAULT_COLLECTION_NAME, ): from pymongo import MongoClient, errors self.connection_string = connection_string self.session_id = session_id self.database_name = database_name self.collection_name = collection_name try: self.client: MongoClient = MongoClient(connection_string) except errors.ConnectionFailure as error: logger.error(error) self.db = self.client[database_name] self.collection = self.db[collection_name] self.collection.create_index("SessionId") @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from MongoDB""" from pymongo import errors try: cursor = self.collection.find({"SessionId": self.session_id}) except errors.OperationFailure as error: logger.error(error) if cursor: items = [json.loads(document["History"]) for document in cursor] else: items = [] messages = messages_from_dict(items) return messages def add_message(self, message: BaseMessage) -> None: """Append the message to the record in MongoDB""" from pymongo import errors try: self.collection.insert_one( { "SessionId": self.session_id, "History": json.dumps(_message_to_dict(message)), } ) except errors.WriteError as err: logger.error(err) def clear(self) -> None: """Clear session memory from MongoDB""" from pymongo import errors try: self.collection.delete_many({"SessionId": self.session_id}) except errors.WriteError as err: logger.error(err)
[]
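A short sketch of the chat history class above; the connection string is a placeholder, and a reachable MongoDB plus `pip install pymongo` are assumed:

```python
from langchain.memory.chat_message_histories import MongoDBChatMessageHistory

history = MongoDBChatMessageHistory(
    connection_string="mongodb://localhost:27017/",  # placeholder connection string
    session_id="user-42-session-1",
)

history.add_user_message("Hi, can you remember this conversation?")
history.add_ai_message("Yes, each message is stored in the message_store collection.")

for message in history.messages:
    print(type(message).__name__, message.content)

history.clear()  # delete this session's documents from MongoDB
```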
2024-01-10
axgpt/langchain
libs~langchain~langchain~llms~ctranslate2.py
from typing import Any, Dict, List, Optional, Union from langchain_core.pydantic_v1 import Field, root_validator from langchain_core.schema.output import Generation, LLMResult from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import BaseLLM class CTranslate2(BaseLLM): """CTranslate2 language model.""" model_path: str = "" """Path to the CTranslate2 model directory.""" tokenizer_name: str = "" """Name of the original Hugging Face model needed to load the proper tokenizer.""" device: str = "cpu" """Device to use (possible values are: cpu, cuda, auto).""" device_index: Union[int, List[int]] = 0 """Device IDs where to place this generator on.""" compute_type: Union[str, Dict[str, str]] = "default" """ Model computation type or a dictionary mapping a device name to the computation type (possible values are: default, auto, int8, int8_float32, int8_float16, int8_bfloat16, int16, float16, bfloat16, float32). """ max_length: int = 512 """Maximum generation length.""" sampling_topk: int = 1 """Randomly sample predictions from the top K candidates.""" sampling_topp: float = 1 """Keep the most probable tokens whose cumulative probability exceeds this value.""" sampling_temperature: float = 1 """Sampling temperature to generate more random samples.""" client: Any #: :meta private: tokenizer: Any #: :meta private: ctranslate2_kwargs: Dict[str, Any] = Field(default_factory=dict) """ Holds any model parameters valid for `ctranslate2.Generator` call not explicitly specified. """ @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: import ctranslate2 except ImportError: raise ImportError( "Could not import ctranslate2 python package. " "Please install it with `pip install ctranslate2`." ) try: import transformers except ImportError: raise ImportError( "Could not import transformers python package. " "Please install it with `pip install transformers`." ) values["client"] = ctranslate2.Generator( model_path=values["model_path"], device=values["device"], device_index=values["device_index"], compute_type=values["compute_type"], **values["ctranslate2_kwargs"], ) values["tokenizer"] = transformers.AutoTokenizer.from_pretrained( values["tokenizer_name"] ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters.""" return { "max_length": self.max_length, "sampling_topk": self.sampling_topk, "sampling_topp": self.sampling_topp, "sampling_temperature": self.sampling_temperature, } def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: # build sampling parameters params = {**self._default_params, **kwargs} # call the model encoded_prompts = self.tokenizer(prompts)["input_ids"] tokenized_prompts = [ self.tokenizer.convert_ids_to_tokens(encoded_prompt) for encoded_prompt in encoded_prompts ] results = self.client.generate_batch(tokenized_prompts, **params) sequences = [result.sequences_ids[0] for result in results] decoded_sequences = [self.tokenizer.decode(seq) for seq in sequences] generations = [] for text in decoded_sequences: generations.append([Generation(text=text)]) return LLMResult(generations=generations) @property def _llm_type(self) -> str: """Return type of llm.""" return "ctranslate2"
[ "input_ids" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~callbacks~confident_callback.py
# flake8: noqa import os import warnings from typing import Any, Dict, List, Optional, Union from langchain.callbacks.base import BaseCallbackHandler from langchain_core.schema import AgentAction, AgentFinish, LLMResult class DeepEvalCallbackHandler(BaseCallbackHandler): """Callback Handler that logs into deepeval. Args: implementation_name: name of the `implementation` in deepeval metrics: A list of metrics Raises: ImportError: if the `deepeval` package is not installed. Examples: >>> from langchain.llms import OpenAI >>> from langchain.callbacks import DeepEvalCallbackHandler >>> from deepeval.metrics import AnswerRelevancy >>> metric = AnswerRelevancy(minimum_score=0.3) >>> deepeval_callback = DeepEvalCallbackHandler( ... implementation_name="exampleImplementation", ... metrics=[metric], ... ) >>> llm = OpenAI( ... temperature=0, ... callbacks=[deepeval_callback], ... verbose=True, ... openai_api_key="API_KEY_HERE", ... ) >>> llm.generate([ ... "What is the best evaluation tool out there? (no bias at all)", ... ]) "Deepeval, no doubt about it." """ REPO_URL: str = "https://github.com/confident-ai/deepeval" ISSUES_URL: str = f"{REPO_URL}/issues" BLOG_URL: str = "https://docs.confident-ai.com" # noqa: E501 def __init__( self, metrics: List[Any], implementation_name: Optional[str] = None, ) -> None: """Initializes the `deepevalCallbackHandler`. Args: implementation_name: Name of the implementation you want. metrics: What metrics do you want to track? Raises: ImportError: if the `deepeval` package is not installed. ConnectionError: if the connection to deepeval fails. """ super().__init__() # Import deepeval (not via `import_deepeval` to keep hints in IDEs) try: import deepeval # ignore: F401,I001 except ImportError: raise ImportError( """To use the deepeval callback manager you need to have the `deepeval` Python package installed. Please install it with `pip install deepeval`""" ) if os.path.exists(".deepeval"): warnings.warn( """You are currently not logging anything to the dashboard, we recommend using `deepeval login`.""" ) # Set the deepeval variables self.implementation_name = implementation_name self.metrics = metrics warnings.warn( ( "The `DeepEvalCallbackHandler` is currently in beta and is subject to" " change based on updates to `langchain`. Please report any issues to" f" {self.ISSUES_URL} as an `integration` issue." 
), ) def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Store the prompts""" self.prompts = prompts def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Do nothing when a new token is generated.""" pass def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Log records to deepeval when an LLM ends.""" from deepeval.metrics.answer_relevancy import AnswerRelevancy from deepeval.metrics.bias_classifier import UnBiasedMetric from deepeval.metrics.metric import Metric from deepeval.metrics.toxic_classifier import NonToxicMetric for metric in self.metrics: for i, generation in enumerate(response.generations): # Here, we only measure the first generation's output output = generation[0].text query = self.prompts[i] if isinstance(metric, AnswerRelevancy): result = metric.measure( output=output, query=query, ) print(f"Answer Relevancy: {result}") elif isinstance(metric, UnBiasedMetric): score = metric.measure(output) print(f"Bias Score: {score}") elif isinstance(metric, NonToxicMetric): score = metric.measure(output) print(f"Toxic Score: {score}") else: raise ValueError( f"""Metric {metric.__name__} is not supported by deepeval callbacks.""" ) def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM outputs an error.""" pass def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Do nothing when chain starts""" pass def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Do nothing when chain ends.""" pass def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM chain outputs an error.""" pass def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: """Do nothing when tool starts.""" pass def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Do nothing when agent takes a specific action.""" pass def on_tool_end( self, output: str, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """Do nothing when tool ends.""" pass def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when tool outputs an error.""" pass def on_text(self, text: str, **kwargs: Any) -> None: """Do nothing""" pass def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Do nothing""" pass
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~localai.py
from __future__ import annotations import logging import warnings from typing import ( Any, Callable, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union, ) from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator from langchain_core.schema.embeddings import Embeddings from langchain_core.utils import get_pydantic_field_names from tenacity import ( AsyncRetrying, before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: LocalAIEmbeddings) -> Callable[[Any], Any]: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def _async_retry_decorator(embeddings: LocalAIEmbeddings) -> Any: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards async_retrying = AsyncRetrying( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def wrap(func: Callable) -> Callable: async def wrapped_f(*args: Any, **kwargs: Any) -> Callable: async for _ in async_retrying: return await func(*args, **kwargs) raise AssertionError("this is unreachable") return wrapped_f return wrap # https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings def _check_response(response: dict) -> dict: if any(len(d["embedding"]) == 1 for d in response["data"]): import openai raise openai.error.APIError("LocalAI API returned an empty embedding") return response def embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: response = embeddings.client.create(**kwargs) return _check_response(response) return _embed_with_retry(**kwargs) async def async_embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" @_async_retry_decorator(embeddings) async def _async_embed_with_retry(**kwargs: Any) -> Any: response = await embeddings.client.acreate(**kwargs) return _check_response(response) return await _async_embed_with_retry(**kwargs) class LocalAIEmbeddings(BaseModel, Embeddings): """LocalAI embedding models. Since LocalAI and OpenAI have 1:1 compatibility between APIs, this class uses the ``openai`` Python package's ``openai.Embedding`` as its client. 
Thus, you should have the ``openai`` python package installed, and defeat the environment variable ``OPENAI_API_KEY`` by setting to a random string. You also need to specify ``OPENAI_API_BASE`` to point to your LocalAI service endpoint. Example: .. code-block:: python from langchain.embeddings import LocalAIEmbeddings openai = LocalAIEmbeddings( openai_api_key="random-string", openai_api_base="http://localhost:8080" ) """ client: Any #: :meta private: model: str = "text-embedding-ada-002" deployment: str = model openai_api_version: Optional[str] = None openai_api_base: Optional[str] = None # to support explicit proxy for LocalAI openai_proxy: Optional[str] = None embedding_ctx_length: int = 8191 """The maximum number of tokens to embed at once.""" openai_api_key: Optional[str] = None openai_organization: Optional[str] = None allowed_special: Union[Literal["all"], Set[str]] = set() disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all" chunk_size: int = 1000 """Maximum number of texts to embed in each batch""" max_retries: int = 6 """Maximum number of retries to make when generating.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout in seconds for the LocalAI request.""" headers: Any = None show_progress_bar: bool = False """Whether to show a progress bar when embedding.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: warnings.warn( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) default_api_version = "" values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", default=default_api_version, ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai values["client"] = openai.Embedding except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _invocation_params(self) -> Dict: openai_args = { "model": self.model, "request_timeout": self.request_timeout, "headers": self.headers, "api_key": self.openai_api_key, "organization": self.openai_organization, "api_base": self.openai_api_base, "api_version": self.openai_api_version, **self.model_kwargs, } if self.openai_proxy: import openai openai.proxy = { "http": self.openai_proxy, "https": self.openai_proxy, } # type: ignore[assignment] # noqa: E501 return openai_args def _embedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to LocalAI's embedding endpoint.""" # handle large input text if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return embed_with_retry( self, input=[text], **self._invocation_params, )["data"][0]["embedding"] async def _aembedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to LocalAI's embedding endpoint.""" # handle large input text if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return ( await async_embed_with_retry( self, input=[text], **self._invocation_params, ) )["data"][0]["embedding"] def embed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to LocalAI's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # call _embedding_func for each text return [self._embedding_func(text, engine=self.deployment) for text in texts] async def aembed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to LocalAI's embedding endpoint async for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ embeddings = [] for text in texts: response = await self._aembedding_func(text, engine=self.deployment) embeddings.append(response) return embeddings def embed_query(self, text: str) -> List[float]: """Call out to LocalAI's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = self._embedding_func(text, engine=self.deployment) return embedding async def aembed_query(self, text: str) -> List[float]: """Call out to LocalAI's embedding endpoint async for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = await self._aembedding_func(text, engine=self.deployment) return embedding
[]
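Beyond the constructor example in the docstring, here is a small sketch of actually embedding text; the endpoint URL is a placeholder for a running LocalAI server, and the `openai` package is assumed:

```python
from langchain.embeddings import LocalAIEmbeddings

embeddings = LocalAIEmbeddings(
    openai_api_key="any-non-empty-string",    # per the docstring, the key only needs to be non-empty
    openai_api_base="http://localhost:8080",  # placeholder LocalAI endpoint
    model="text-embedding-ada-002",           # whichever embedding model your LocalAI serves
)

doc_vectors = embeddings.embed_documents(["LocalAI speaks the OpenAI API."])
query_vector = embeddings.embed_query("Which API does LocalAI speak?")
print(len(doc_vectors[0]), len(query_vector))
```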
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~graph_qa~hugegraph.py
"""Question answering over a graph.""" from __future__ import annotations from typing import Any, Dict, List, Optional from langchain_core.pydantic_v1 import Field from langchain_core.schema import BasePromptTemplate from langchain_core.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ( CYPHER_QA_PROMPT, GREMLIN_GENERATION_PROMPT, ) from langchain.chains.llm import LLMChain from langchain.graphs.hugegraph import HugeGraph class HugeGraphQAChain(Chain): """Chain for question-answering against a graph by generating gremlin statements. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ graph: HugeGraph = Field(exclude=True) gremlin_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: """Input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, gremlin_prompt: BasePromptTemplate = GREMLIN_GENERATION_PROMPT, **kwargs: Any, ) -> HugeGraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) gremlin_generation_chain = LLMChain(llm=llm, prompt=gremlin_prompt) return cls( qa_chain=qa_chain, gremlin_generation_chain=gremlin_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Generate gremlin statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] generated_gremlin = self.gremlin_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) _run_manager.on_text("Generated gremlin:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_gremlin, color="green", end="\n", verbose=self.verbose ) context = self.graph.query(generated_gremlin) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) return {self.output_key: result[self.qa_chain.output_key]}
[]
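A hedged sketch of building the chain above; the HugeGraph connection values are placeholders for your own server, and `ChatOpenAI` stands in for any `BaseLanguageModel`:

```python
from langchain.chains import HugeGraphQAChain
from langchain.chat_models import ChatOpenAI
from langchain.graphs import HugeGraph

graph = HugeGraph(
    username="admin",      # placeholder HugeGraph credentials and address
    password="admin",
    address="localhost",
    port=8080,
    graph="hugegraph",
)

chain = HugeGraphQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
print(chain.run("Who starred in The Godfather?"))
```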
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~llms~test_openai.py
"""Test OpenAI API wrapper.""" from pathlib import Path from typing import Generator import pytest from langchain_core.schema import LLMResult from langchain.callbacks.manager import CallbackManager from langchain.chat_models.openai import ChatOpenAI from langchain.llms.loading import load_llm from langchain.llms.openai import OpenAI from tests.unit_tests.callbacks.fake_callback_handler import ( FakeCallbackHandler, ) @pytest.mark.scheduled def test_openai_call() -> None: """Test valid call to openai.""" llm = OpenAI() output = llm("Say something nice:") assert isinstance(output, str) def test_openai_model_param() -> None: llm = OpenAI(model="foo") assert llm.model_name == "foo" llm = OpenAI(model_name="foo") assert llm.model_name == "foo" def test_openai_extra_kwargs() -> None: """Test extra kwargs to openai.""" # Check that foo is saved in extra_kwargs. llm = OpenAI(foo=3, max_tokens=10) assert llm.max_tokens == 10 assert llm.model_kwargs == {"foo": 3} # Test that if extra_kwargs are provided, they are added to it. llm = OpenAI(foo=3, model_kwargs={"bar": 2}) assert llm.model_kwargs == {"foo": 3, "bar": 2} # Test that if provided twice it errors with pytest.raises(ValueError): OpenAI(foo=3, model_kwargs={"foo": 2}) # Test that if explicit param is specified in kwargs it errors with pytest.raises(ValueError): OpenAI(model_kwargs={"temperature": 0.2}) # Test that "model" cannot be specified in kwargs with pytest.raises(ValueError): OpenAI(model_kwargs={"model": "text-davinci-003"}) def test_openai_llm_output_contains_model_name() -> None: """Test llm_output contains model_name.""" llm = OpenAI(max_tokens=10) llm_result = llm.generate(["Hello, how are you?"]) assert llm_result.llm_output is not None assert llm_result.llm_output["model_name"] == llm.model_name def test_openai_stop_valid() -> None: """Test openai stop logic on valid configuration.""" query = "write an ordered list of five items" first_llm = OpenAI(stop="3", temperature=0) first_output = first_llm(query) second_llm = OpenAI(temperature=0) second_output = second_llm(query, stop=["3"]) # Because it stops on new lines, shouldn't return anything assert first_output == second_output def test_openai_stop_error() -> None: """Test openai stop logic on bad configuration.""" llm = OpenAI(stop="3", temperature=0) with pytest.raises(ValueError): llm("write an ordered list of five items", stop=["\n"]) def test_saving_loading_llm(tmp_path: Path) -> None: """Test saving/loading an OpenAI LLM.""" llm = OpenAI(max_tokens=10) llm.save(file_path=tmp_path / "openai.yaml") loaded_llm = load_llm(tmp_path / "openai.yaml") assert loaded_llm == llm @pytest.mark.scheduled def test_openai_streaming() -> None: """Test streaming tokens from OpenAI.""" llm = OpenAI(max_tokens=10) generator = llm.stream("I'm Pickle Rick") assert isinstance(generator, Generator) for token in generator: assert isinstance(token, str) @pytest.mark.scheduled @pytest.mark.asyncio async def test_openai_astream() -> None: """Test streaming tokens from OpenAI.""" llm = OpenAI(max_tokens=10) async for token in llm.astream("I'm Pickle Rick"): assert isinstance(token, str) @pytest.mark.scheduled @pytest.mark.asyncio async def test_openai_abatch() -> None: """Test streaming tokens from OpenAI.""" llm = OpenAI(max_tokens=10) result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token, str) @pytest.mark.asyncio async def test_openai_abatch_tags() -> None: """Test streaming tokens from OpenAI.""" llm = OpenAI(max_tokens=10) result = 
await llm.abatch( ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]} ) for token in result: assert isinstance(token, str) @pytest.mark.scheduled def test_openai_batch() -> None: """Test streaming tokens from OpenAI.""" llm = OpenAI(max_tokens=10) result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token, str) @pytest.mark.scheduled @pytest.mark.asyncio async def test_openai_ainvoke() -> None: """Test streaming tokens from OpenAI.""" llm = OpenAI(max_tokens=10) result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]}) assert isinstance(result, str) @pytest.mark.scheduled def test_openai_invoke() -> None: """Test streaming tokens from OpenAI.""" llm = OpenAI(max_tokens=10) result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"])) assert isinstance(result, str) @pytest.mark.scheduled def test_openai_multiple_prompts() -> None: """Test completion with multiple prompts.""" llm = OpenAI(max_tokens=10) output = llm.generate(["I'm Pickle Rick", "I'm Pickle Rick"]) assert isinstance(output, LLMResult) assert isinstance(output.generations, list) assert len(output.generations) == 2 def test_openai_streaming_best_of_error() -> None: """Test validation for streaming fails if best_of is not 1.""" with pytest.raises(ValueError): OpenAI(best_of=2, streaming=True) def test_openai_streaming_n_error() -> None: """Test validation for streaming fails if n is not 1.""" with pytest.raises(ValueError): OpenAI(n=2, streaming=True) def test_openai_streaming_multiple_prompts_error() -> None: """Test validation for streaming fails if multiple prompts are given.""" with pytest.raises(ValueError): OpenAI(streaming=True).generate(["I'm Pickle Rick", "I'm Pickle Rick"]) @pytest.mark.scheduled def test_openai_streaming_call() -> None: """Test valid call to openai.""" llm = OpenAI(max_tokens=10, streaming=True) output = llm("Say foo:") assert isinstance(output, str) def test_openai_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) llm = OpenAI( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) llm("Write me a sentence with 100 words.") assert callback_handler.llm_streams == 10 @pytest.mark.scheduled @pytest.mark.asyncio async def test_openai_async_generate() -> None: """Test async generation.""" llm = OpenAI(max_tokens=10) output = await llm.agenerate(["Hello, how are you?"]) assert isinstance(output, LLMResult) @pytest.mark.asyncio async def test_openai_async_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) llm = OpenAI( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) result = await llm.agenerate(["Write me a sentence with 100 words."]) assert callback_handler.llm_streams == 10 assert isinstance(result, LLMResult) def test_openai_modelname_to_contextsize_valid() -> None: """Test model name to context size on a valid model.""" assert OpenAI().modelname_to_contextsize("davinci") == 2049 def test_openai_modelname_to_contextsize_invalid() -> None: """Test model name to context size on an invalid model.""" with pytest.raises(ValueError): OpenAI().modelname_to_contextsize("foobar") _EXPECTED_NUM_TOKENS = { "ada": 17, "babbage": 17, "curie": 17, "davinci": 
17, "gpt-4": 12, "gpt-4-32k": 12, "gpt-3.5-turbo": 12, } _MODELS = models = [ "ada", "babbage", "curie", "davinci", ] _CHAT_MODELS = [ "gpt-4", "gpt-4-32k", "gpt-3.5-turbo", ] @pytest.mark.parametrize("model", _MODELS) def test_openai_get_num_tokens(model: str) -> None: """Test get_tokens.""" llm = OpenAI(model=model) assert llm.get_num_tokens("表情符号是\n🦜🔗") == _EXPECTED_NUM_TOKENS[model] @pytest.mark.parametrize("model", _CHAT_MODELS) def test_chat_openai_get_num_tokens(model: str) -> None: """Test get_tokens.""" llm = ChatOpenAI(model=model) assert llm.get_num_tokens("表情符号是\n🦜🔗") == _EXPECTED_NUM_TOKENS[model] @pytest.fixture def mock_completion() -> dict: return { "id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ", "object": "text_completion", "created": 1689989000, "model": "text-davinci-003", "choices": [ {"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"} ], "usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3}, }
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~chat_message_histories~firestore.py
"""Firestore Chat Message History.""" from __future__ import annotations import logging from typing import TYPE_CHECKING, List, Optional from langchain_core.schema import ( BaseChatMessageHistory, ) from langchain_core.schema.messages import ( BaseMessage, messages_from_dict, messages_to_dict, ) logger = logging.getLogger(__name__) if TYPE_CHECKING: from google.cloud.firestore import Client, DocumentReference def _get_firestore_client() -> Client: try: import firebase_admin from firebase_admin import firestore except ImportError: raise ImportError( "Could not import firebase-admin python package. " "Please install it with `pip install firebase-admin`." ) # For multiple instances, only initialize the app once. try: firebase_admin.get_app() except ValueError as e: logger.debug("Initializing Firebase app: %s", e) firebase_admin.initialize_app() return firestore.client() class FirestoreChatMessageHistory(BaseChatMessageHistory): """Chat message history backed by Google Firestore.""" def __init__( self, collection_name: str, session_id: str, user_id: str, firestore_client: Optional[Client] = None, ): """ Initialize a new instance of the FirestoreChatMessageHistory class. :param collection_name: The name of the collection to use. :param session_id: The session ID for the chat.. :param user_id: The user ID for the chat. """ self.collection_name = collection_name self.session_id = session_id self.user_id = user_id self._document: Optional[DocumentReference] = None self.messages: List[BaseMessage] = [] self.firestore_client = firestore_client or _get_firestore_client() self.prepare_firestore() def prepare_firestore(self) -> None: """Prepare the Firestore client. Use this function to make sure your database is ready. """ self._document = self.firestore_client.collection( self.collection_name ).document(self.session_id) self.load_messages() def load_messages(self) -> None: """Retrieve the messages from Firestore""" if not self._document: raise ValueError("Document not initialized") doc = self._document.get() if doc.exists: data = doc.to_dict() if "messages" in data and len(data["messages"]) > 0: self.messages = messages_from_dict(data["messages"]) def add_message(self, message: BaseMessage) -> None: self.messages.append(message) self.upsert_messages() def upsert_messages(self, new_message: Optional[BaseMessage] = None) -> None: """Update the Firestore document.""" if not self._document: raise ValueError("Document not initialized") self._document.set( { "id": self.session_id, "user_id": self.user_id, "messages": messages_to_dict(self.messages), } ) def clear(self) -> None: """Clear session memory from this memory and Firestore.""" self.messages = [] if self._document: self._document.delete()
[]
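A short sketch of the Firestore-backed history above; it assumes `pip install firebase-admin` and Google application-default credentials, and the collection, session, and user ids are placeholders:

```python
from langchain.memory.chat_message_histories import FirestoreChatMessageHistory

history = FirestoreChatMessageHistory(
    collection_name="chat_history",  # placeholder collection
    session_id="session-123",
    user_id="user-456",
)

history.add_user_message("Store this in Firestore, please.")
history.add_ai_message("Done; the whole session lives in a single Firestore document.")
print([message.content for message in history.messages])
```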
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~contextual_compression.py
from typing import Any, List from langchain_core.schema import BaseRetriever, Document from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain.retrievers.document_compressors.base import ( BaseDocumentCompressor, ) class ContextualCompressionRetriever(BaseRetriever): """Retriever that wraps a base retriever and compresses the results.""" base_compressor: BaseDocumentCompressor """Compressor for compressing retrieved documents.""" base_retriever: BaseRetriever """Base Retriever to use for getting relevant documents.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any, ) -> List[Document]: """Get documents relevant for a query. Args: query: string to find relevant documents for Returns: Sequence of relevant documents """ docs = self.base_retriever.get_relevant_documents( query, callbacks=run_manager.get_child(), **kwargs ) if docs: compressed_docs = self.base_compressor.compress_documents( docs, query, callbacks=run_manager.get_child() ) return list(compressed_docs) else: return [] async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, **kwargs: Any, ) -> List[Document]: """Get documents relevant for a query. Args: query: string to find relevant documents for Returns: List of relevant documents """ docs = await self.base_retriever.aget_relevant_documents( query, callbacks=run_manager.get_child(), **kwargs ) if docs: compressed_docs = await self.base_compressor.acompress_documents( docs, query, callbacks=run_manager.get_child() ) return list(compressed_docs) else: return []
[]
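A hedged end-to-end sketch of the retriever above; the FAISS store, OpenAI models, and sample texts are illustrative assumptions (any `BaseRetriever` and `BaseDocumentCompressor` pair works), and `pip install faiss-cpu openai` plus an OpenAI API key are assumed:

```python
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.vectorstores import FAISS

texts = [
    "LangChain provides retrievers and document compressors.",
    "The weather in Paris is usually mild in spring.",
]
base_retriever = FAISS.from_texts(texts, OpenAIEmbeddings()).as_retriever()

compressor = LLMChainExtractor.from_llm(ChatOpenAI(temperature=0))
retriever = ContextualCompressionRetriever(
    base_compressor=compressor,
    base_retriever=base_retriever,
)

docs = retriever.get_relevant_documents("What does LangChain provide?")
print([doc.page_content for doc in docs])
```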
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~nlpcloud.py
from typing import Any, Dict, List from langchain_core.pydantic_v1 import BaseModel, root_validator from langchain_core.schema.embeddings import Embeddings from langchain.utils import get_from_dict_or_env class NLPCloudEmbeddings(BaseModel, Embeddings): """NLP Cloud embedding models. To use, you should have the nlpcloud python package installed Example: .. code-block:: python from langchain.embeddings import NLPCloudEmbeddings embeddings = NLPCloudEmbeddings() """ model_name: str # Define model_name as a class attribute gpu: bool # Define gpu as a class attribute client: Any #: :meta private: def __init__( self, model_name: str = "paraphrase-multilingual-mpnet-base-v2", gpu: bool = False, **kwargs: Any, ) -> None: super().__init__(model_name=model_name, gpu=gpu, **kwargs) @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" nlpcloud_api_key = get_from_dict_or_env( values, "nlpcloud_api_key", "NLPCLOUD_API_KEY" ) try: import nlpcloud values["client"] = nlpcloud.Client( values["model_name"], nlpcloud_api_key, gpu=values["gpu"], lang="en" ) except ImportError: raise ImportError( "Could not import nlpcloud python package. " "Please install it with `pip install nlpcloud`." ) return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed a list of documents using NLP Cloud. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ return self.client.embeddings(texts)["embeddings"] def embed_query(self, text: str) -> List[float]: """Embed a query using NLP Cloud. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.client.embeddings([text])["embeddings"][0]
[]
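A usage sketch for NLPCloudEmbeddings above; the API key is a placeholder and the nlpcloud package must be installed:

.. code-block:: python

    import os

    from langchain.embeddings import NLPCloudEmbeddings

    os.environ["NLPCLOUD_API_KEY"] = "your-api-key"  # placeholder

    # Defaults to the paraphrase-multilingual-mpnet-base-v2 model on CPU.
    embeddings = NLPCloudEmbeddings()
    query_vector = embeddings.embed_query("Hello, world")
    doc_vectors = embeddings.embed_documents(["First document", "Second document"])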
2024-01-10
axgpt/langchain
libs~langchain~langchain~agents~format_scratchpad~log.py
from typing import List, Tuple from langchain_core.schema.agent import AgentAction def format_log_to_str( intermediate_steps: List[Tuple[AgentAction, str]], observation_prefix: str = "Observation: ", llm_prefix: str = "Thought: ", ) -> str: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}" return thoughts
[]
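A small sketch of what format_log_to_str produces for one intermediate step; the tool name, input, and log text are illustrative only:

.. code-block:: python

    from langchain_core.schema.agent import AgentAction

    from langchain.agents.format_scratchpad.log import format_log_to_str

    steps = [
        (
            AgentAction(
                tool="search",
                tool_input="weather in SF",
                log="I should check the weather.\nAction: search\nAction Input: weather in SF",
            ),
            "It is sunny in SF.",
        )
    ]
    # Appends each action's log, then "Observation: ..." and a trailing "Thought: ".
    print(format_log_to_str(steps))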
2024-01-10
axgpt/langchain
libs~langchain~langchain~llms~llamacpp.py
from __future__ import annotations import logging from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union from langchain_core.pydantic_v1 import Field, root_validator from langchain_core.schema.output import GenerationChunk from langchain_core.utils import get_pydantic_field_names from langchain_core.utils.utils import build_extra_kwargs from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM if TYPE_CHECKING: from llama_cpp import LlamaGrammar logger = logging.getLogger(__name__) class LlamaCpp(LLM): """llama.cpp model. To use, you should have the llama-cpp-python library installed, and provide the path to the Llama model as a named parameter to the constructor. Check out: https://github.com/abetlen/llama-cpp-python Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/llama/model") """ client: Any #: :meta private: model_path: str """The path to the Llama model file.""" lora_base: Optional[str] = None """The path to the Llama LoRA base model.""" lora_path: Optional[str] = None """The path to the Llama LoRA. If None, no LoRa is loaded.""" n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(-1, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(True, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" suffix: Optional[str] = Field(None) """A suffix to append to the generated text. If None, no suffix is appended.""" max_tokens: Optional[int] = 256 """The maximum number of tokens to generate.""" temperature: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" logprobs: Optional[int] = Field(None) """The number of logprobs to return. 
If None, no logprobs are returned.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_penalty: Optional[float] = 1.1 """The penalty to apply to repeated tokens.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" last_n_tokens_size: Optional[int] = 64 """The number of tokens to look back when applying the repeat_penalty.""" use_mmap: Optional[bool] = True """Whether to keep the model loaded in RAM""" rope_freq_scale: float = 1.0 """Scale factor for rope sampling.""" rope_freq_base: float = 10000.0 """Base frequency for rope sampling.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Any additional parameters to pass to llama_cpp.Llama.""" streaming: bool = True """Whether to stream the results, token by token.""" grammar_path: Optional[Union[str, Path]] = None """ grammar_path: Path to the .gbnf file that defines formal grammars for constraining model outputs. For instance, the grammar can be used to force the model to generate valid JSON or to speak exclusively in emojis. At most one of grammar_path and grammar should be passed in. """ grammar: Optional[Union[str, LlamaGrammar]] = None """ grammar: formal grammar for constraining model outputs. For instance, the grammar can be used to force the model to generate valid JSON or to speak exclusively in emojis. At most one of grammar_path and grammar should be passed in. """ verbose: bool = True """Print verbose output to stderr.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" try: from llama_cpp import Llama, LlamaGrammar except ImportError: raise ImportError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this embedding model: pip install llama-cpp-python" ) model_path = values["model_path"] model_param_names = [ "rope_freq_scale", "rope_freq_base", "lora_path", "lora_base", "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", "use_mmap", "last_n_tokens_size", "verbose", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] model_params.update(values["model_kwargs"]) try: values["client"] = Llama(model_path, **model_params) except Exception as e: raise ValueError( f"Could not load Llama model from path: {model_path}. " f"Received error {e}" ) if values["grammar"] and values["grammar_path"]: grammar = values["grammar"] grammar_path = values["grammar_path"] raise ValueError( "Can only pass in one of grammar and grammar_path. Received " f"{grammar=} and {grammar_path=}." 
) elif isinstance(values["grammar"], str): values["grammar"] = LlamaGrammar.from_string(values["grammar"]) elif values["grammar_path"]: values["grammar"] = LlamaGrammar.from_file(values["grammar_path"]) else: pass return values @root_validator(pre=True) def build_model_kwargs(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) values["model_kwargs"] = build_extra_kwargs( extra, values, all_required_field_names ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling llama_cpp.""" params = { "suffix": self.suffix, "max_tokens": self.max_tokens, "temperature": self.temperature, "top_p": self.top_p, "logprobs": self.logprobs, "echo": self.echo, "stop_sequences": self.stop, # key here is convention among LLM classes "repeat_penalty": self.repeat_penalty, "top_k": self.top_k, } if self.grammar: params["grammar"] = self.grammar return params @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_path": self.model_path}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "llamacpp" def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """ Performs sanity check, preparing parameters in format needed by llama_cpp. Args: stop (Optional[List[str]]): List of stop sequences for llama_cpp. Returns: Dictionary containing the combined parameters. """ # Raise error if stop sequences are in both input and default params if self.stop and stop is not None: raise ValueError("`stop` found in both the input and default params.") params = self._default_params # llama_cpp expects the "stop" key not this, so we remove it: params.pop("stop_sequences") # then sets it as configured, or default to an empty list: params["stop"] = self.stop or stop or [] return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the Llama model and return the output. Args: prompt: The prompt to use for generation. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/local/llama/model.bin") llm("This is a prompt.") """ if self.streaming: # If streaming is enabled, we use the stream # method that yields as they are generated # and return the combined strings from the first choices's text: combined_text_output = "" for chunk in self._stream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs, ): combined_text_output += chunk.text return combined_text_output else: params = self._get_parameters(stop) params = {**params, **kwargs} result = self.client(prompt=prompt, **params) return result["choices"][0]["text"] def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. 
Yields: A dictionary like objects containing a string token and metadata. See llama-cpp-python docs and below for more. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp( model_path="/path/to/local/model.bin", temperature = 0.5 ) for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'", stop=["'","\n"]): result = chunk["choices"][0] print(result["text"], end='', flush=True) """ params = {**self._get_parameters(stop), **kwargs} result = self.client(prompt=prompt, stream=True, **params) for part in result: logprobs = part["choices"][0].get("logprobs", None) chunk = GenerationChunk( text=part["choices"][0]["text"], generation_info={"logprobs": logprobs}, ) yield chunk if run_manager: run_manager.on_llm_new_token( token=chunk.text, verbose=self.verbose, log_probs=logprobs ) def get_num_tokens(self, text: str) -> int: tokenized_text = self.client.tokenize(text.encode("utf-8")) return len(tokenized_text)
[]
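A minimal sketch of calling and streaming with the LlamaCpp wrapper above; the model path is a placeholder and llama-cpp-python must be installed:

.. code-block:: python

    from langchain.llms import LlamaCpp

    llm = LlamaCpp(
        model_path="/path/to/model.gguf",  # placeholder path
        n_ctx=2048,
        temperature=0.7,
        max_tokens=128,
    )

    # One-shot completion.
    print(llm("Q: Name three colors.\nA:"))

    # Token-by-token streaming.
    for token in llm.stream("Q: Name three planets.\nA:"):
        print(token, end="", flush=True)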
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~output_parsers~test_structured_parser.py
from langchain_core.schema import OutputParserException from langchain.output_parsers import ResponseSchema, StructuredOutputParser def test_parse() -> None: response_schemas = [ ResponseSchema(name="name", description="desc"), ResponseSchema(name="age", description="desc"), ] parser = StructuredOutputParser.from_response_schemas(response_schemas) # Test valid JSON input text = '```json\n{"name": "John", "age": 30}\n```' expected_result = {"name": "John", "age": 30} result = parser.parse(text) assert result == expected_result, f"Expected {expected_result}, but got {result}" # Test invalid JSON input text = '```json\n{"name": "John"}\n```' try: parser.parse(text) except OutputParserException: pass # Test passes if OutputParserException is raised else: assert False, f"Expected OutputParserException, but got {parser.parse(text)}"
[]
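Beyond the unit test above, a sketch of how StructuredOutputParser is typically wired into a prompt; the schema fields are illustrative:

.. code-block:: python

    from langchain.output_parsers import ResponseSchema, StructuredOutputParser

    schemas = [
        ResponseSchema(name="name", description="the person's name"),
        ResponseSchema(name="age", description="the person's age"),
    ]
    parser = StructuredOutputParser.from_response_schemas(schemas)

    # Instructions to embed in the prompt so the model emits the expected JSON block.
    format_instructions = parser.get_format_instructions()

    # Parsing a model response wrapped in a ```json fenced block.
    result = parser.parse('```json\n{"name": "John", "age": 30}\n```')
    assert result == {"name": "John", "age": 30}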
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~zilliz.py
from __future__ import annotations import logging from typing import Any, Dict, List, Optional from langchain_core.schema.embeddings import Embeddings from langchain.vectorstores.milvus import Milvus logger = logging.getLogger(__name__) class Zilliz(Milvus): """`Zilliz` vector store. You need to have `pymilvus` installed and a running Zilliz database. See the following documentation for how to run a Zilliz instance: https://docs.zilliz.com/docs/create-cluster IF USING L2/IP metric IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA. Args: embedding_function (Embeddings): Function used to embed the text. collection_name (str): Which Zilliz collection to use. Defaults to "LangChainCollection". connection_args (Optional[dict[str, any]]): The connection args used for this class comes in the form of a dict. consistency_level (str): The consistency level to use for a collection. Defaults to "Session". index_params (Optional[dict]): Which index params to use. Defaults to HNSW/AUTOINDEX depending on service. search_params (Optional[dict]): Which search params to use. Defaults to default of index. drop_old (Optional[bool]): Whether to drop the current collection. Defaults to False. The connection args used for this class comes in the form of a dict, here are a few of the options: address (str): The actual address of Zilliz instance. Example address: "localhost:19530" uri (str): The uri of Zilliz instance. Example uri: "https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com", host (str): The host of Zilliz instance. Default at "localhost", PyMilvus will fill in the default host if only port is provided. port (str/int): The port of Zilliz instance. Default at 19530, PyMilvus will fill in the default port if only host is provided. user (str): Use which user to connect to Zilliz instance. If user and password are provided, we will add related header in every RPC call. password (str): Required when user is provided. The password corresponding to the user. token (str): API key, for serverless clusters which can be used as replacements for user and password. secure (bool): Default is false. If set to true, tls will be enabled. client_key_path (str): If use tls two-way authentication, need to write the client.key path. client_pem_path (str): If use tls two-way authentication, need to write the client.pem path. ca_pem_path (str): If use tls two-way authentication, need to write the ca.pem path. server_pem_path (str): If use tls one-way authentication, need to write the server.pem path. server_name (str): If use tls, need to write the common name. Example: .. code-block:: python from langchain.vectorstores import Zilliz from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() # Connect to a Zilliz instance milvus_store = Milvus( embedding_function = embedding, collection_name = "LangChainCollection", connection_args = { "uri": "https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com", "user": "temp", "password": "temp", "token": "temp", # API key as replacements for user and password "secure": True } drop_old: True, ) Raises: ValueError: If the pymilvus python package is not installed. 
""" def _create_index(self) -> None: """Create a index on the collection""" from pymilvus import Collection, MilvusException if isinstance(self.col, Collection) and self._get_index() is None: try: # If no index params, use a default AutoIndex based one if self.index_params is None: self.index_params = { "metric_type": "L2", "index_type": "AUTOINDEX", "params": {}, } try: self.col.create_index( self._vector_field, index_params=self.index_params, using=self.alias, ) # If default did not work, most likely Milvus self-hosted except MilvusException: # Use HNSW based index self.index_params = { "metric_type": "L2", "index_type": "HNSW", "params": {"M": 8, "efConstruction": 64}, } self.col.create_index( self._vector_field, index_params=self.index_params, using=self.alias, ) logger.debug( "Successfully created an index on collection: %s", self.collection_name, ) except MilvusException as e: logger.error( "Failed to create an index on collection: %s", self.collection_name ) raise e @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = "LangChainCollection", connection_args: Optional[Dict[str, Any]] = None, consistency_level: str = "Session", index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any, ) -> Zilliz: """Create a Zilliz collection, indexes it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collection_name (str, optional): Collection name to use. Defaults to "LangChainCollection". connection_args (dict[str, Any], optional): Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION. consistency_level (str, optional): Which consistency level to use. Defaults to "Session". index_params (Optional[dict], optional): Which index_params to use. Defaults to None. search_params (Optional[dict], optional): Which search params to use. Defaults to None. drop_old (Optional[bool], optional): Whether to drop the collection with that name if it exists. Defaults to False. Returns: Zilliz: Zilliz Vector Store """ vector_db = cls( embedding_function=embedding, collection_name=collection_name, connection_args=connection_args or {}, consistency_level=consistency_level, index_params=index_params, search_params=search_params, drop_old=drop_old, **kwargs, ) vector_db.add_texts(texts=texts, metadatas=metadatas) return vector_db
[]
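A usage sketch for the Zilliz store above; the cluster URI, API key, and texts are placeholders, and pymilvus plus an OpenAI key are assumed:

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Zilliz

    store = Zilliz.from_texts(
        texts=["Zilliz is a managed vector database.", "LangChain can use it as a store."],
        embedding=OpenAIEmbeddings(),
        collection_name="LangChainCollection",
        connection_args={
            "uri": "https://<cluster-id>.api.<region>.zillizcloud.com",  # placeholder
            "token": "<api-key>",  # placeholder
        },
        drop_old=True,
    )
    docs = store.similarity_search("What is Zilliz?", k=1)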
2024-01-10
axgpt/langchain
libs~langchain~langchain~agents~agent_toolkits~sql~toolkit.py
"""Toolkit for interacting with an SQL database.""" from typing import List from langchain_core.pydantic_v1 import Field from langchain_core.schema.language_model import BaseLanguageModel from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool from langchain.tools.sql_database.tool import ( InfoSQLDatabaseTool, ListSQLDatabaseTool, QuerySQLCheckerTool, QuerySQLDataBaseTool, ) from langchain.utilities.sql_database import SQLDatabase class SQLDatabaseToolkit(BaseToolkit): """Toolkit for interacting with SQL databases.""" db: SQLDatabase = Field(exclude=True) llm: BaseLanguageModel = Field(exclude=True) @property def dialect(self) -> str: """Return string representation of SQL dialect to use.""" return self.db.dialect class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" list_sql_database_tool = ListSQLDatabaseTool(db=self.db) info_sql_database_tool_description = ( "Input to this tool is a comma-separated list of tables, output is the " "schema and sample rows for those tables. " "Be sure that the tables actually exist by calling " f"{list_sql_database_tool.name} first! " "Example Input: table1, table2, table3" ) info_sql_database_tool = InfoSQLDatabaseTool( db=self.db, description=info_sql_database_tool_description ) query_sql_database_tool_description = ( "Input to this tool is a detailed and correct SQL query, output is a " "result from the database. If the query is not correct, an error message " "will be returned. If an error is returned, rewrite the query, check the " "query, and try again. If you encounter an issue with Unknown column " f"'xxxx' in 'field list', use {info_sql_database_tool.name} " "to query the correct table fields." ) query_sql_database_tool = QuerySQLDataBaseTool( db=self.db, description=query_sql_database_tool_description ) query_sql_checker_tool_description = ( "Use this tool to double check if your query is correct before executing " "it. Always use this tool before executing a query with " f"{query_sql_database_tool.name}!" ) query_sql_checker_tool = QuerySQLCheckerTool( db=self.db, llm=self.llm, description=query_sql_checker_tool_description ) return [ query_sql_database_tool, info_sql_database_tool, list_sql_database_tool, query_sql_checker_tool, ]
[]
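A sketch wiring the toolkit above into the standard SQL agent; the SQLite file and question are placeholders and an OpenAI key is assumed:

.. code-block:: python

    from langchain.agents import create_sql_agent
    from langchain.agents.agent_toolkits import SQLDatabaseToolkit
    from langchain.llms import OpenAI
    from langchain.utilities.sql_database import SQLDatabase

    db = SQLDatabase.from_uri("sqlite:///example.db")  # placeholder database
    llm = OpenAI(temperature=0)

    toolkit = SQLDatabaseToolkit(db=db, llm=llm)
    agent = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
    agent.run("How many tables are in the database?")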
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~vespa_retriever.py
from __future__ import annotations import json from typing import Any, Dict, List, Literal, Optional, Sequence, Union from langchain_core.schema import BaseRetriever, Document from langchain.callbacks.manager import CallbackManagerForRetrieverRun class VespaRetriever(BaseRetriever): """`Vespa` retriever.""" app: Any """Vespa application to query.""" body: Dict """Body of the query.""" content_field: str """Name of the content field.""" metadata_fields: Sequence[str] """Names of the metadata fields.""" def _query(self, body: Dict) -> List[Document]: response = self.app.query(body) if not str(response.status_code).startswith("2"): raise RuntimeError( "Could not retrieve data from Vespa. Error code: {}".format( response.status_code ) ) root = response.json["root"] if "errors" in root: raise RuntimeError(json.dumps(root["errors"])) docs = [] for child in response.hits: page_content = child["fields"].pop(self.content_field, "") if self.metadata_fields == "*": metadata = child["fields"] else: metadata = {mf: child["fields"].get(mf) for mf in self.metadata_fields} metadata["id"] = child["id"] docs.append(Document(page_content=page_content, metadata=metadata)) return docs def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: body = self.body.copy() body["query"] = query return self._query(body) def get_relevant_documents_with_filter( self, query: str, *, _filter: Optional[str] = None ) -> List[Document]: body = self.body.copy() _filter = f" and {_filter}" if _filter else "" body["yql"] = body["yql"] + _filter body["query"] = query return self._query(body) @classmethod def from_params( cls, url: str, content_field: str, *, k: Optional[int] = None, metadata_fields: Union[Sequence[str], Literal["*"]] = (), sources: Union[Sequence[str], Literal["*"], None] = None, _filter: Optional[str] = None, yql: Optional[str] = None, **kwargs: Any, ) -> VespaRetriever: """Instantiate retriever from params. Args: url (str): Vespa app URL. content_field (str): Field in results to return as Document page_content. k (Optional[int]): Number of Documents to return. Defaults to None. metadata_fields(Sequence[str] or "*"): Fields in results to include in document metadata. Defaults to empty tuple (). sources (Sequence[str] or "*" or None): Sources to retrieve from. Defaults to None. _filter (Optional[str]): Document filter condition expressed in YQL. Defaults to None. yql (Optional[str]): Full YQL query to be used. Should not be specified if _filter or sources are specified. Defaults to None. kwargs (Any): Keyword arguments added to query body. Returns: VespaRetriever: Instantiated VespaRetriever. """ try: from vespa.application import Vespa except ImportError: raise ImportError( "pyvespa is not installed, please install with `pip install pyvespa`" ) app = Vespa(url) body = kwargs.copy() if yql and (sources or _filter): raise ValueError( "yql should only be specified if both sources and _filter are not " "specified." ) else: if metadata_fields == "*": _fields = "*" body["summary"] = "short" else: _fields = ", ".join([content_field] + list(metadata_fields or [])) _sources = ", ".join(sources) if isinstance(sources, Sequence) else "*" _filter = f" and {_filter}" if _filter else "" yql = f"select {_fields} from sources {_sources} where userQuery(){_filter}" body["yql"] = yql if k: body["hits"] = k return cls( app=app, body=body, content_field=content_field, metadata_fields=metadata_fields, )
[]
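A sketch of building the retriever above with from_params; the endpoint and field names are placeholders for a running Vespa application with pyvespa installed:

.. code-block:: python

    from langchain.retrievers import VespaRetriever

    retriever = VespaRetriever.from_params(
        "http://localhost:8080",   # placeholder Vespa endpoint
        "content",                 # field returned as page_content
        k=5,
        metadata_fields=("title",),
    )
    docs = retriever.get_relevant_documents("what is vespa?")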
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~baiducloud_vector_search.py
import logging import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, ) from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document if TYPE_CHECKING: from elasticsearch import Elasticsearch logger = logging.getLogger(__name__) class BESVectorStore(VectorStore): """`Baidu Elasticsearch` vector store. Example: .. code-block:: python from langchain.vectorstores import BESVectorStore from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = BESVectorStore( embedding=OpenAIEmbeddings(), index_name="langchain-demo", bes_url="http://localhost:9200" ) Args: index_name: Name of the Elasticsearch index to create. bes_url: URL of the Baidu Elasticsearch instance to connect to. user: Username to use when connecting to Elasticsearch. password: Password to use when connecting to Elasticsearch. More information can be obtained from: https://cloud.baidu.com/doc/BES/s/8llyn0hh4 """ def __init__( self, index_name: str, bes_url: str, user: Optional[str] = None, password: Optional[str] = None, embedding: Optional[Embeddings] = None, **kwargs: Optional[dict], ) -> None: self.embedding = embedding self.index_name = index_name self.query_field = kwargs.get("query_field", "text") self.vector_query_field = kwargs.get("vector_query_field", "vector") self.space_type = kwargs.get("space_type", "cosine") self.index_type = kwargs.get("index_type", "linear") self.index_params = kwargs.get("index_params") or {} if bes_url is not None: self.client = BESVectorStore.bes_client( bes_url=bes_url, username=user, password=password ) else: raise ValueError("""Please specified a bes connection url.""") @property def embeddings(self) -> Optional[Embeddings]: return self.embedding @staticmethod def bes_client( *, bes_url: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, ) -> "Elasticsearch": try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) connection_params: Dict[str, Any] = {} connection_params["hosts"] = [bes_url] if username and password: connection_params["basic_auth"] = (username, password) es_client = elasticsearch.Elasticsearch(**connection_params) try: es_client.info() except Exception as e: logger.error(f"Error connecting to Elasticsearch: {e}") raise e return es_client def _create_index_if_not_exists(self, dims_length: Optional[int] = None) -> None: """Create the index if it doesn't already exist. Args: dims_length: Length of the embedding vectors. """ if self.client.indices.exists(index=self.index_name): logger.info(f"Index {self.index_name} already exists. Skipping creation.") else: if dims_length is None: raise ValueError( "Cannot create index without specifying dims_length " + "when the index doesn't already exist. " ) indexMapping = self._index_mapping(dims_length=dims_length) logger.debug( f"Creating index {self.index_name} with mappings {indexMapping}" ) self.client.indices.create( index=self.index_name, body={ "settings": {"index": {"knn": True}}, "mappings": {"properties": indexMapping}, }, ) def _index_mapping(self, dims_length: Union[int, None]) -> Dict: """ Executes when the index is created. Args: dims_length: Numeric length of the embedding vectors, or None if not using vector-based query. index_params: The extra pamameters for creating index. 
Returns: Dict: The Elasticsearch settings and mappings for the strategy. """ if "linear" == self.index_type: return { self.vector_query_field: { "type": "bpack_vector", "dims": dims_length, "build_index": self.index_params.get("build_index", False), } } elif "hnsw" == self.index_type: return { self.vector_query_field: { "type": "bpack_vector", "dims": dims_length, "index_type": "hnsw", "space_type": self.space_type, "parameters": { "ef_construction": self.index_params.get( "hnsw_ef_construction", 200 ), "m": self.index_params.get("hnsw_m", 4), }, } } else: return { self.vector_query_field: { "type": "bpack_vector", "model_id": self.index_params.get("model_id", ""), } } def delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Optional[bool]: """Delete documents from the index. Args: ids: List of ids of documents to delete """ try: from elasticsearch.helpers import BulkIndexError, bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) body = [] if ids is None: raise ValueError("ids must be provided.") for _id in ids: body.append({"_op_type": "delete", "_index": self.index_name, "_id": _id}) if len(body) > 0: try: bulk( self.client, body, refresh=kwargs.get("refresh_indices", True), ignore_status=404, ) logger.debug(f"Deleted {len(body)} texts from index") return True except BulkIndexError as e: logger.error(f"Error deleting texts: {e}") raise e else: logger.info("No documents to delete") return False def _query_body( self, query_vector: Union[List[float], None], filter: Optional[dict] = None, search_params: Dict = {}, ) -> Dict: query_vector_body = {"vector": query_vector, "k": search_params.get("k", 2)} if filter is not None and len(filter) != 0: query_vector_body["filter"] = filter if "linear" == self.index_type: query_vector_body["linear"] = True else: query_vector_body["ef"] = search_params.get("ef", 10) return { "size": search_params.get("size", 4), "query": {"knn": {self.vector_query_field: query_vector_body}}, } def _search( self, query: Optional[str] = None, query_vector: Union[List[float], None] = None, filter: Optional[dict] = None, custom_query: Optional[Callable[[Dict, Union[str, None]], Dict]] = None, search_params: Dict = {}, ) -> List[Tuple[Document, float]]: """Return searched documents result from BES Args: query: Text to look up documents similar to. query_vector: Embedding to look up documents similar to. filter: Array of Baidu ElasticSearch filter clauses to apply to the query. custom_query: Function to modify the query body before it is sent to BES. Returns: List of Documents most similar to the query and score for each """ if self.embedding and query is not None: query_vector = self.embedding.embed_query(query) query_body = self._query_body( query_vector=query_vector, filter=filter, search_params=search_params ) if custom_query is not None: query_body = custom_query(query_body, query) logger.debug(f"Calling custom_query, Query body now: {query_body}") logger.debug(f"Query body: {query_body}") # Perform the kNN search on the BES index and return the results. 
response = self.client.search(index=self.index_name, body=query_body) logger.debug(f"response={response}") hits = [hit for hit in response["hits"]["hits"]] docs_and_scores = [ ( Document( page_content=hit["_source"][self.query_field], metadata=hit["_source"]["metadata"], ), hit["_score"], ) for hit in hits ] return docs_and_scores def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the query, in descending order of similarity. """ results = self.similarity_search_with_score( query=query, k=k, filter=filter, **kwargs ) return [doc for doc, _ in results] def similarity_search_with_score( self, query: str, k: int, filter: Optional[dict] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return documents most similar to query, along with scores. Args: query: Text to look up documents similar to. size: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the query and score for each """ search_params = kwargs.get("search_params") or {} if len(search_params) == 0 or search_params.get("size") is None: search_params["size"] = k return self._search(query=query, filter=filter, **kwargs) @classmethod def from_documents( cls, documents: List[Document], embedding: Optional[Embeddings] = None, **kwargs: Any, ) -> "BESVectorStore": """Construct BESVectorStore wrapper from documents. Args: documents: List of documents to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. Do not provide if using a strategy that doesn't require inference. kwargs: create index key words arguments """ vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **kwargs) # Encode the provided texts and add them to the newly created index. vectorStore.add_documents(documents) return vectorStore @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[Dict[str, Any]]] = None, **kwargs: Any, ) -> "BESVectorStore": """Construct BESVectorStore wrapper from raw documents. Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. kwargs: create index key words arguments """ vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **kwargs) # Encode the provided texts and add them to the newly created index. vectorStore.add_texts(texts, metadatas=metadatas, **kwargs) return vectorStore def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ try: from elasticsearch.helpers import BulkIndexError, bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." 
) embeddings = [] create_index_if_not_exists = kwargs.get("create_index_if_not_exists", True) ids = kwargs.get("ids", [str(uuid.uuid4()) for _ in texts]) refresh_indices = kwargs.get("refresh_indices", True) requests = [] if self.embedding is not None: embeddings = self.embedding.embed_documents(list(texts)) dims_length = len(embeddings[0]) if create_index_if_not_exists: self._create_index_if_not_exists(dims_length=dims_length) for i, (text, vector) in enumerate(zip(texts, embeddings)): metadata = metadatas[i] if metadatas else {} requests.append( { "_op_type": "index", "_index": self.index_name, self.query_field: text, self.vector_query_field: vector, "metadata": metadata, "_id": ids[i], } ) else: if create_index_if_not_exists: self._create_index_if_not_exists() for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} requests.append( { "_op_type": "index", "_index": self.index_name, self.query_field: text, "metadata": metadata, "_id": ids[i], } ) if len(requests) > 0: try: success, failed = bulk( self.client, requests, stats_only=True, refresh=refresh_indices ) logger.debug( f"Added {success} and failed to add {failed} texts to index" ) logger.debug(f"added texts {ids} to index") return ids except BulkIndexError as e: logger.error(f"Error adding texts: {e}") firstError = e.errors[0].get("index", {}).get("error", {}) logger.error(f"First error reason: {firstError.get('reason')}") raise e else: logger.debug("No texts to add to index") return [] @staticmethod def _bes_vector_store( embedding: Optional[Embeddings] = None, **kwargs: Any ) -> "BESVectorStore": index_name = kwargs.get("index_name") if index_name is None: raise ValueError("Please provide an index_name.") bes_url = kwargs.get("bes_url") if bes_url is None: raise ValueError("Please provided a valid bes connection url") return BESVectorStore(embedding=embedding, **kwargs)
[]
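A usage sketch for the BESVectorStore above; the endpoint, index name, and texts are placeholders, and the elasticsearch package plus an OpenAI key are assumed:

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores.baiducloud_vector_search import BESVectorStore

    store = BESVectorStore.from_texts(
        texts=["hello bes", "hello langchain"],
        embedding=OpenAIEmbeddings(),
        index_name="langchain-demo",
        bes_url="http://localhost:9200",  # placeholder Baidu Elasticsearch endpoint
    )
    docs = store.similarity_search("hello", k=1)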
2024-01-10
axgpt/langchain
libs~langchain~langchain~document_loaders~docusaurus.py
"""Load Documents from Docusarus Documentation""" from typing import Any, List, Optional from langchain.document_loaders.sitemap import SitemapLoader class DocusaurusLoader(SitemapLoader): """ Loader that leverages the SitemapLoader to loop through the generated pages of a Docusaurus Documentation website and extracts the content by looking for specific HTML tags. By default, the parser searches for the main content of the Docusaurus page, which is normally the <article>. You also have the option to define your own custom HTML tags by providing them as a list, for example: ["div", ".main", "a"]. """ def __init__( self, url: str, custom_html_tags: Optional[List[str]] = None, **kwargs: Any, ): """ Initialize DocusaurusLoader Args: url: The base URL of the Docusaurus website. custom_html_tags: Optional custom html tags to extract content from pages. kwargs: Additional args to extend the underlying SitemapLoader, for example: filter_urls, blocksize, meta_function, is_local, continue_on_failure """ if not kwargs.get("is_local"): url = f"{url}/sitemap.xml" self.custom_html_tags = custom_html_tags or ["main article"] super().__init__( url, parsing_function=kwargs.get("parsing_function") or self._parsing_function, **kwargs, ) def _parsing_function(self, content: Any) -> str: """Parses specific elements from a Docusarus page.""" relevant_elements = content.select(",".join(self.custom_html_tags)) for element in relevant_elements: if element not in relevant_elements: element.decompose() return str(content.get_text())
[]
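A sketch of loading a Docusaurus site with the loader above; the site and filter URL are illustrative and network access is assumed:

.. code-block:: python

    from langchain.document_loaders import DocusaurusLoader

    loader = DocusaurusLoader(
        "https://python.langchain.com",  # any Docusaurus site that publishes sitemap.xml
        filter_urls=[
            "https://python.langchain.com/docs/integrations/document_loaders/sitemap"
        ],
    )
    docs = loader.load()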
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~rocksetdb.py
from __future__ import annotations import logging from enum import Enum from typing import Any, Iterable, List, Optional, Tuple from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document logger = logging.getLogger(__name__) class Rockset(VectorStore): """`Rockset` vector store. To use, you should have the `rockset` python package installed. Note that to use this, the collection being used must already exist in your Rockset instance. You must also ensure you use a Rockset ingest transformation to apply `VECTOR_ENFORCE` on the column being used to store `embedding_key` in the collection. See: https://rockset.com/blog/introducing-vector-search-on-rockset/ for more details Everything below assumes `commons` Rockset workspace. Example: .. code-block:: python from langchain.vectorstores import Rockset from langchain.embeddings.openai import OpenAIEmbeddings import rockset # Make sure you use the right host (region) for your Rockset instance # and APIKEY has both read-write access to your collection. rs = rockset.RocksetClient(host=rockset.Regions.use1a1, api_key="***") collection_name = "langchain_demo" embeddings = OpenAIEmbeddings() vectorstore = Rockset(rs, collection_name, embeddings, "description", "description_embedding") """ def __init__( self, client: Any, embeddings: Embeddings, collection_name: str, text_key: str, embedding_key: str, workspace: str = "commons", ): """Initialize with Rockset client. Args: client: Rockset client object collection: Rockset collection to insert docs / query embeddings: Langchain Embeddings object to use to generate embedding for given text. text_key: column in Rockset collection to use to store the text embedding_key: column in Rockset collection to use to store the embedding. Note: We must apply `VECTOR_ENFORCE()` on this column via Rockset ingest transformation. """ try: from rockset import RocksetClient except ImportError: raise ImportError( "Could not import rockset client python package. " "Please install it with `pip install rockset`." ) if not isinstance(client, RocksetClient): raise ValueError( f"client should be an instance of rockset.RocksetClient, " f"got {type(client)}" ) # TODO: check that `collection_name` exists in rockset. Create if not. self._client = client self._collection_name = collection_name self._embeddings = embeddings self._text_key = text_key self._embedding_key = embedding_key self._workspace = workspace try: self._client.set_application("langchain") except AttributeError: # ignore pass @property def embeddings(self) -> Embeddings: return self._embeddings def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. batch_size: Send documents in batches to rockset. Returns: List of ids from adding the texts into the vectorstore. 
""" batch: list[dict] = [] stored_ids = [] for i, text in enumerate(texts): if len(batch) == batch_size: stored_ids += self._write_documents_to_rockset(batch) batch = [] doc = {} if metadatas and len(metadatas) > i: doc = metadatas[i] if ids and len(ids) > i: doc["_id"] = ids[i] doc[self._text_key] = text doc[self._embedding_key] = self._embeddings.embed_query(text) batch.append(doc) if len(batch) > 0: stored_ids += self._write_documents_to_rockset(batch) batch = [] return stored_ids @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Any = None, collection_name: str = "", text_key: str = "", embedding_key: str = "", ids: Optional[List[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> Rockset: """Create Rockset wrapper with existing texts. This is intended as a quicker way to get started. """ # Sanitize inputs assert client is not None, "Rockset Client cannot be None" assert collection_name, "Collection name cannot be empty" assert text_key, "Text key name cannot be empty" assert embedding_key, "Embedding key cannot be empty" rockset = cls(client, embedding, collection_name, text_key, embedding_key) rockset.add_texts(texts, metadatas, ids, batch_size) return rockset # Rockset supports these vector distance functions. class DistanceFunction(Enum): COSINE_SIM = "COSINE_SIM" EUCLIDEAN_DIST = "EUCLIDEAN_DIST" DOT_PRODUCT = "DOT_PRODUCT" # how to sort results for "similarity" def order_by(self) -> str: if self.value == "EUCLIDEAN_DIST": return "ASC" return "DESC" def similarity_search_with_relevance_scores( self, query: str, k: int = 4, distance_func: DistanceFunction = DistanceFunction.COSINE_SIM, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Perform a similarity search with Rockset Args: query (str): Text to look up documents similar to. distance_func (DistanceFunction): how to compute distance between two vectors in Rockset. k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): Metadata filters supplied as a SQL `where` condition string. Defaults to None. eg. "price<=70.0 AND brand='Nintendo'" NOTE: Please do not let end-user to fill this and always be aware of SQL injection. Returns: List[Tuple[Document, float]]: List of documents with their relevance score """ return self.similarity_search_by_vector_with_relevance_scores( self._embeddings.embed_query(query), k, distance_func, where_str, **kwargs, ) def similarity_search( self, query: str, k: int = 4, distance_func: DistanceFunction = DistanceFunction.COSINE_SIM, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Same as `similarity_search_with_relevance_scores` but doesn't return the scores. 
""" return self.similarity_search_by_vector( self._embeddings.embed_query(query), k, distance_func, where_str, **kwargs, ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, distance_func: DistanceFunction = DistanceFunction.COSINE_SIM, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Accepts a query_embedding (vector), and returns documents with similar embeddings.""" docs_and_scores = self.similarity_search_by_vector_with_relevance_scores( embedding, k, distance_func, where_str, **kwargs ) return [doc for doc, _ in docs_and_scores] def similarity_search_by_vector_with_relevance_scores( self, embedding: List[float], k: int = 4, distance_func: DistanceFunction = DistanceFunction.COSINE_SIM, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Accepts a query_embedding (vector), and returns documents with similar embeddings along with their relevance scores.""" q_str = self._build_query_sql(embedding, distance_func, k, where_str) try: query_response = self._client.Queries.query(sql={"query": q_str}) except Exception as e: logger.error("Exception when querying Rockset: %s\n", e) return [] finalResult: list[Tuple[Document, float]] = [] for document in query_response.results: metadata = {} assert isinstance( document, dict ), "document should be of type `dict[str,Any]`. But found: `{}`".format( type(document) ) for k, v in document.items(): if k == self._text_key: assert isinstance(v, str), ( "page content stored in column `{}` must be of type `str`. " "But found: `{}`" ).format(self._text_key, type(v)) page_content = v elif k == "dist": assert isinstance(v, float), ( "Computed distance between vectors must of type `float`. " "But found {}" ).format(type(v)) score = v elif k not in ["_id", "_event_time", "_meta"]: # These columns are populated by Rockset when documents are # inserted. No need to return them in metadata dict. metadata[k] = v finalResult.append( (Document(page_content=page_content, metadata=metadata), score) ) return finalResult # Helper functions def _build_query_sql( self, query_embedding: List[float], distance_func: DistanceFunction, k: int = 4, where_str: Optional[str] = None, ) -> str: """Builds Rockset SQL query to query similar vectors to query_vector""" q_embedding_str = ",".join(map(str, query_embedding)) distance_str = f"""{distance_func.value}({self._embedding_key}, \ [{q_embedding_str}]) as dist""" where_str = f"WHERE {where_str}\n" if where_str else "" return f"""\ SELECT * EXCEPT({self._embedding_key}), {distance_str} FROM {self._workspace}.{self._collection_name} {where_str}\ ORDER BY dist {distance_func.order_by()} LIMIT {str(k)} """ def _write_documents_to_rockset(self, batch: List[dict]) -> List[str]: add_doc_res = self._client.Documents.add_documents( collection=self._collection_name, data=batch, workspace=self._workspace ) return [doc_status._id for doc_status in add_doc_res.data] def delete_texts(self, ids: List[str]) -> None: """Delete a list of docs from the Rockset collection""" try: from rockset.models import DeleteDocumentsRequestData except ImportError: raise ImportError( "Could not import rockset client python package. " "Please install it with `pip install rockset`." ) self._client.Documents.delete_documents( collection=self._collection_name, data=[DeleteDocumentsRequestData(id=i) for i in ids], workspace=self._workspace, )
[]
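A usage sketch for the Rockset store above, mirroring its own docstring; region, API key, and collection are placeholders, and the collection must already exist with VECTOR_ENFORCE applied to the embedding column:

.. code-block:: python

    import rockset

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Rockset

    client = rockset.RocksetClient(host=rockset.Regions.use1a1, api_key="<api-key>")
    store = Rockset(
        client=client,
        embeddings=OpenAIEmbeddings(),
        collection_name="langchain_demo",
        text_key="description",
        embedding_key="description_embedding",
    )
    ids = store.add_texts(["A console made by Nintendo.", "A console made by Sony."])
    docs = store.similarity_search("Nintendo hardware", k=1)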
2024-01-10
axgpt/langchain
libs~langchain~langchain~document_loaders~parsers~txt.py
"""Module for parsing text files..""" from typing import Iterator from langchain_core.schema import Document from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob class TextParser(BaseBlobParser): """Parser for text blobs.""" def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
[]
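A quick sketch of the parser above applied to an in-memory blob; the path is a placeholder:

.. code-block:: python

    from langchain.document_loaders.blob_loaders import Blob
    from langchain.document_loaders.parsers.txt import TextParser

    blob = Blob.from_data("hello world", path="example.txt")
    docs = list(TextParser().lazy_parse(blob))
    assert docs[0].page_content == "hello world"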
2024-01-10
axgpt/langchain
libs~langchain~langchain~document_transformers~html2text.py
from typing import Any, Sequence

from langchain_core.schema import BaseDocumentTransformer, Document


class Html2TextTransformer(BaseDocumentTransformer):
    """Convert HTML documents to plain text using the ``html2text`` package.

    Arguments:
        ignore_links: Whether links should be ignored; defaults to True.
        ignore_images: Whether images should be ignored; defaults to True.

    Example:
        .. code-block:: python

            from langchain.document_transformers import Html2TextTransformer
            html2text = Html2TextTransformer()
            docs_transform = html2text.transform_documents(docs)
    """

    def __init__(self, ignore_links: bool = True, ignore_images: bool = True) -> None:
        self.ignore_links = ignore_links
        self.ignore_images = ignore_images

    def transform_documents(
        self,
        documents: Sequence[Document],
        **kwargs: Any,
    ) -> Sequence[Document]:
        try:
            import html2text
        except ImportError:
            raise ImportError(
                "html2text package not found, please "
                "install it with `pip install html2text`"
            )

        # Create a html2text.HTML2Text object and override some properties
        h = html2text.HTML2Text()
        h.ignore_links = self.ignore_links
        h.ignore_images = self.ignore_images

        for d in documents:
            d.page_content = h.handle(d.page_content)

        return documents

    async def atransform_documents(
        self,
        documents: Sequence[Document],
        **kwargs: Any,
    ) -> Sequence[Document]:
        raise NotImplementedError
[]
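A sketch of the transformer above applied to an in-memory HTML document; the html2text package must be installed and the markup is illustrative:

.. code-block:: python

    from langchain_core.schema import Document

    from langchain.document_transformers import Html2TextTransformer

    html_docs = [
        Document(
            page_content="<html><body><h1>Title</h1><p>Some <b>bold</b> text.</p></body></html>"
        )
    ]
    transformed = Html2TextTransformer().transform_documents(html_docs)
    print(transformed[0].page_content)  # plain text rendering of the HTML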
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~arxiv.py
from typing import List

from langchain_core.schema import BaseRetriever, Document

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.utilities.arxiv import ArxivAPIWrapper


class ArxivRetriever(BaseRetriever, ArxivAPIWrapper):
    """`Arxiv` retriever.

    Wraps ``ArxivAPIWrapper.load()`` as ``get_relevant_documents()`` and
    accepts all ``ArxivAPIWrapper`` arguments without any change.
    """

    get_full_documents: bool = False

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        if self.get_full_documents:
            return self.load(query=query)
        else:
            return self.get_summaries_as_docs(query)
[]
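A sketch of the retriever above; the arxiv package must be installed and the query is illustrative:

.. code-block:: python

    from langchain.retrievers import ArxivRetriever

    retriever = ArxivRetriever(load_max_docs=2)
    docs = retriever.get_relevant_documents("attention is all you need")
    for doc in docs:
        print(doc.metadata.get("Title"))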
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~hologres.py
from __future__ import annotations import json import logging import uuid from typing import Any, Dict, Iterable, List, Optional, Tuple, Type from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.utils import get_from_dict_or_env ADA_TOKEN_COUNT = 1536 _LANGCHAIN_DEFAULT_TABLE_NAME = "langchain_pg_embedding" class HologresWrapper: """`Hologres API` wrapper.""" def __init__(self, connection_string: str, ndims: int, table_name: str) -> None: """Initialize the wrapper. Args: connection_string: Hologres connection string. ndims: Number of dimensions of the embedding output. table_name: Name of the table to store embeddings and data. """ import psycopg2 self.table_name = table_name self.conn = psycopg2.connect(connection_string) self.cursor = self.conn.cursor() self.conn.autocommit = False self.ndims = ndims def create_vector_extension(self) -> None: self.cursor.execute("create extension if not exists proxima") self.conn.commit() def create_table(self, drop_if_exist: bool = True) -> None: if drop_if_exist: self.cursor.execute(f"drop table if exists {self.table_name}") self.conn.commit() self.cursor.execute( f"""create table if not exists {self.table_name} ( id text, embedding float4[] check(array_ndims(embedding) = 1 and \ array_length(embedding, 1) = {self.ndims}), metadata json, document text);""" ) self.cursor.execute( f"call set_table_property('{self.table_name}'" + """, 'proxima_vectors', '{"embedding":{"algorithm":"Graph", "distance_method":"SquaredEuclidean", "build_params":{"min_flush_proxima_row_count" : 1, "min_compaction_proxima_row_count" : 1, "max_total_size_to_merge_mb" : 2000}}}');""" ) self.conn.commit() def get_by_id(self, id: str) -> List[Tuple]: statement = ( f"select id, embedding, metadata, " f"document from {self.table_name} where id = %s;" ) self.cursor.execute( statement, (id), ) self.conn.commit() return self.cursor.fetchall() def insert( self, embedding: List[float], metadata: dict, document: str, id: Optional[str] = None, ) -> None: self.cursor.execute( f'insert into "{self.table_name}" ' f"values (%s, array{json.dumps(embedding)}::float4[], %s, %s)", (id if id is not None else "null", json.dumps(metadata), document), ) self.conn.commit() def query_nearest_neighbours( self, embedding: List[float], k: int, filter: Optional[Dict[str, str]] = None ) -> List[Tuple[str, str, float]]: params = [] filter_clause = "" if filter is not None: conjuncts = [] for key, val in filter.items(): conjuncts.append("metadata->>%s=%s") params.append(key) params.append(val) filter_clause = "where " + " and ".join(conjuncts) sql = ( f"select document, metadata::text, " f"pm_approx_squared_euclidean_distance(array{json.dumps(embedding)}" f"::float4[], embedding) as distance from" f" {self.table_name} {filter_clause} order by distance asc limit {k};" ) self.cursor.execute(sql, tuple(params)) self.conn.commit() return self.cursor.fetchall() class Hologres(VectorStore): """`Hologres API` vector store. - `connection_string` is a hologres connection string. - `embedding_function` any embedding function implementing `langchain.embeddings.base.Embeddings` interface. - `ndims` is the number of dimensions of the embedding output. - `table_name` is the name of the table to store embeddings and data. (default: langchain_pg_embedding) - NOTE: The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. 
- `pre_delete_table` if True, will delete the table if it exists. (default: False) - Useful for testing. """ def __init__( self, connection_string: str, embedding_function: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, logger: Optional[logging.Logger] = None, ) -> None: self.connection_string = connection_string self.ndims = ndims self.table_name = table_name self.embedding_function = embedding_function self.pre_delete_table = pre_delete_table self.logger = logger or logging.getLogger(__name__) self.__post_init__() def __post_init__( self, ) -> None: """ Initialize the store. """ self.storage = HologresWrapper( self.connection_string, self.ndims, self.table_name ) self.create_vector_extension() self.create_table() @property def embeddings(self) -> Embeddings: return self.embedding_function def create_vector_extension(self) -> None: try: self.storage.create_vector_extension() except Exception as e: self.logger.exception(e) raise e def create_table(self) -> None: self.storage.create_table(self.pre_delete_table) @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding_function: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, embedding_function=embedding_function, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: List[dict], ids: List[str], **kwargs: Any, ) -> None: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. kwargs: vectorstore specific parameters """ try: for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): self.storage.insert(embedding, metadata, text, id) except Exception as e: self.logger.exception(e) self.storage.conn.commit() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] self.add_embeddings(texts, embeddings, metadatas, ids, **kwargs) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with Hologres with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: results: List[Tuple[str, str, float]] = self.storage.query_nearest_neighbours( embedding, k, filter ) docs = [ ( Document( page_content=result[0], metadata=json.loads(result[1]), ), result[2], ) for result in results ] return docs @classmethod def from_texts( cls: Type[Hologres], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """Construct Hologres wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. Example: .. 
code-block:: python from langchain.vectorstores import Hologres from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = Hologres.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, **kwargs, ) @classmethod def from_existing_index( cls: Type[Hologres], embedding: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """ Get instance of an existing Hologres store.This method will return the instance of the store without inserting any new embeddings """ connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, ndims=ndims, table_name=table_name, embedding_function=embedding, pre_delete_table=pre_delete_table, ) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="HOLOGRES_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the HOLOGRES_CONNECTION_STRING environment variable." ) return connection_string @classmethod def from_documents( cls: Type[Hologres], documents: List[Document], embedding: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> Hologres: """ Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, **kwargs, ) @classmethod def connection_string_from_db_params( cls, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return ( f"dbname={database} user={user} password={password} host={host} port={port}" )
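A minimal usage sketch for the Hologres store defined above, assuming Hologres connectivity and an OpenAI key are available; the host, credentials, and example texts are placeholders, and OpenAIEmbeddings stands in for any Embeddings implementation.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Hologres

# Build a connection string from individual parameters (all placeholders here);
# alternatively, set the HOLOGRES_CONNECTION_STRING environment variable.
connection_string = Hologres.connection_string_from_db_params(
    host="hologres-instance.example.com",
    port=80,
    database="example_db",
    user="example_user",
    password="example_password",
)

# Embed the texts, write them into the table, then run a similarity search.
vectorstore = Hologres.from_texts(
    texts=["foo", "bar", "baz"],
    embedding=OpenAIEmbeddings(),
    connection_string=connection_string,
)
docs = vectorstore.similarity_search("foo", k=2)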
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~chat_message_histories~momento.py
from __future__ import annotations import json from datetime import timedelta from typing import TYPE_CHECKING, Any, Optional from langchain_core.schema import ( BaseChatMessageHistory, ) from langchain_core.schema.messages import ( BaseMessage, _message_to_dict, messages_from_dict, ) from langchain.utils import get_from_env if TYPE_CHECKING: import momento def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None: """Create cache if it doesn't exist. Raises: SdkException: Momento service or network error Exception: Unexpected response """ from momento.responses import CreateCache create_cache_response = cache_client.create_cache(cache_name) if isinstance(create_cache_response, CreateCache.Success) or isinstance( create_cache_response, CreateCache.CacheAlreadyExists ): return None elif isinstance(create_cache_response, CreateCache.Error): raise create_cache_response.inner_exception else: raise Exception(f"Unexpected response cache creation: {create_cache_response}") class MomentoChatMessageHistory(BaseChatMessageHistory): """Chat message history cache that uses Momento as a backend. See https://gomomento.com/""" def __init__( self, session_id: str, cache_client: momento.CacheClient, cache_name: str, *, key_prefix: str = "message_store:", ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True, ): """Instantiate a chat message history cache that uses Momento as a backend. Note: to instantiate the cache client passed to MomentoChatMessageHistory, you must have a Momento account at https://gomomento.com/. Args: session_id (str): The session ID to use for this chat session. cache_client (CacheClient): The Momento cache client. cache_name (str): The name of the cache to use to store the messages. key_prefix (str, optional): The prefix to apply to the cache key. Defaults to "message_store:". ttl (Optional[timedelta], optional): The TTL to use for the messages. Defaults to None, ie the default TTL of the cache will be used. ensure_cache_exists (bool, optional): Create the cache if it doesn't exist. Defaults to True. Raises: ImportError: Momento python package is not installed. TypeError: cache_client is not of type momento.CacheClientObject """ try: from momento import CacheClient from momento.requests import CollectionTtl except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." ) if not isinstance(cache_client, CacheClient): raise TypeError("cache_client must be a momento.CacheClient object.") if ensure_cache_exists: _ensure_cache_exists(cache_client, cache_name) self.key = key_prefix + session_id self.cache_client = cache_client self.cache_name = cache_name if ttl is not None: self.ttl = CollectionTtl.of(ttl) else: self.ttl = CollectionTtl.from_cache_ttl() @classmethod def from_client_params( cls, session_id: str, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, api_key: Optional[str] = None, auth_token: Optional[str] = None, # for backwards compatibility **kwargs: Any, ) -> MomentoChatMessageHistory: """Construct cache from CacheClient parameters.""" try: from momento import CacheClient, Configurations, CredentialProvider except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." 
) if configuration is None: configuration = Configurations.Laptop.v1() # Try checking `MOMENTO_AUTH_TOKEN` first for backwards compatibility try: api_key = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN") except ValueError: api_key = api_key or get_from_env("api_key", "MOMENTO_API_KEY") credentials = CredentialProvider.from_string(api_key) cache_client = CacheClient(configuration, credentials, default_ttl=ttl) return cls(session_id, cache_client, cache_name, ttl=ttl, **kwargs) @property def messages(self) -> list[BaseMessage]: # type: ignore[override] """Retrieve the messages from Momento. Raises: SdkException: Momento service or network error Exception: Unexpected response Returns: list[BaseMessage]: List of cached messages """ from momento.responses import CacheListFetch fetch_response = self.cache_client.list_fetch(self.cache_name, self.key) if isinstance(fetch_response, CacheListFetch.Hit): items = [json.loads(m) for m in fetch_response.value_list_string] return messages_from_dict(items) elif isinstance(fetch_response, CacheListFetch.Miss): return [] elif isinstance(fetch_response, CacheListFetch.Error): raise fetch_response.inner_exception else: raise Exception(f"Unexpected response: {fetch_response}") def add_message(self, message: BaseMessage) -> None: """Store a message in the cache. Args: message (BaseMessage): The message object to store. Raises: SdkException: Momento service or network error. Exception: Unexpected response. """ from momento.responses import CacheListPushBack item = json.dumps(_message_to_dict(message)) push_response = self.cache_client.list_push_back( self.cache_name, self.key, item, ttl=self.ttl ) if isinstance(push_response, CacheListPushBack.Success): return None elif isinstance(push_response, CacheListPushBack.Error): raise push_response.inner_exception else: raise Exception(f"Unexpected response: {push_response}") def clear(self) -> None: """Remove the session's messages from the cache. Raises: SdkException: Momento service or network error. Exception: Unexpected response. """ from momento.responses import CacheDelete delete_response = self.cache_client.delete(self.cache_name, self.key) if isinstance(delete_response, CacheDelete.Success): return None elif isinstance(delete_response, CacheDelete.Error): raise delete_response.inner_exception else: raise Exception(f"Unexpected response: {delete_response}")
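A minimal sketch of using the chat history class above, assuming the momento package is installed and an API key is exposed via MOMENTO_API_KEY (or the legacy MOMENTO_AUTH_TOKEN); the cache and session names are placeholders.

from datetime import timedelta

from langchain.memory.chat_message_histories import MomentoChatMessageHistory

# Build the cache client from environment credentials and a default TTL.
history = MomentoChatMessageHistory.from_client_params(
    session_id="session-123",
    cache_name="langchain-chat",
    ttl=timedelta(days=1),
)

history.add_user_message("Hello!")                 # appended via CacheListPushBack
history.add_ai_message("Hi! How can I help you?")
print(history.messages)                            # read back via CacheListFetch
history.clear()                                    # delete the session's list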
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~self_hosted.py
from typing import Any, Callable, List from langchain_core.pydantic_v1 import Extra from langchain_core.schema.embeddings import Embeddings from langchain.llms.self_hosted import SelfHostedPipeline def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]: """Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch. """ return pipeline(*args, **kwargs) class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings): """Custom embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example using a model load function: .. code-block:: python from langchain.embeddings import SelfHostedEmbeddings from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") def get_pipeline(): model_id = "facebook/bart-large" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) return pipeline("feature-extraction", model=model, tokenizer=tokenizer) embeddings = SelfHostedEmbeddings( model_load_fn=get_pipeline, hardware=gpu model_reqs=["./", "torch", "transformers"], ) Example passing in a pipeline path: .. code-block:: python from langchain.embeddings import SelfHostedHFEmbeddings import runhouse as rh from transformers import pipeline gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") pipeline = pipeline(model="bert-base-uncased", task="feature-extraction") rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(gpu, path="models") embeddings = SelfHostedHFEmbeddings.from_pipeline( pipeline="models/pipeline.pkl", hardware=gpu, model_reqs=["./", "torch", "transformers"], ) """ inference_fn: Callable = _embed_documents """Inference function to extract the embeddings on the remote hardware.""" inference_kwargs: Any = None """Any kwargs to pass to the model's inference function.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed.s Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.client(self.pipeline_ref, texts) if not isinstance(embeddings, list): return embeddings.tolist() return embeddings def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embeddings = self.client(self.pipeline_ref, text) if not isinstance(embeddings, list): return embeddings.tolist() return embeddings
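A short sketch of driving the embeddings class above end to end; it follows the docstring's model-load-function example, and the runhouse cluster name and model id are placeholders.

import runhouse as rh
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

from langchain.embeddings import SelfHostedEmbeddings

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")

def get_pipeline():
    # Any Hugging Face pipeline that emits one vector per input works here.
    model_id = "facebook/bart-large"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    return pipeline("feature-extraction", model=model, tokenizer=tokenizer)

embeddings = SelfHostedEmbeddings(
    model_load_fn=get_pipeline,
    hardware=gpu,
    model_reqs=["./", "torch", "transformers"],
)
doc_vectors = embeddings.embed_documents(["first document", "second document"])
query_vector = embeddings.embed_query("a question about the documents")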
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~scann.py
from __future__ import annotations import operator import pickle import uuid from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import numpy as np from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.base import AddableMixin, Docstore from langchain.docstore.document import Document from langchain.docstore.in_memory import InMemoryDocstore from langchain.vectorstores.utils import DistanceStrategy def normalize(x: np.ndarray) -> np.ndarray: """Normalize vectors to unit length.""" x /= np.clip(np.linalg.norm(x, axis=-1, keepdims=True), 1e-12, None) return x def dependable_scann_import() -> Any: """ Import `scann` if available, otherwise raise error. """ try: import scann except ImportError: raise ImportError( "Could not import scann python package. " "Please install it with `pip install scann` " ) return scann class ScaNN(VectorStore): """`ScaNN` vector store. To use, you should have the ``scann`` python package installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceEmbeddings from langchain.vectorstores import ScaNN db = ScaNN.from_texts( ['foo', 'bar', 'barz', 'qux'], HuggingFaceEmbeddings()) db.similarity_search('foo?', k=1) """ def __init__( self, embedding: Embeddings, index: Any, docstore: Docstore, index_to_docstore_id: Dict[int, str], relevance_score_fn: Optional[Callable[[float], float]] = None, normalize_L2: bool = False, distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE, scann_config: Optional[str] = None, ): """Initialize with necessary components.""" self.embedding = embedding self.index = index self.docstore = docstore self.index_to_docstore_id = index_to_docstore_id self.distance_strategy = distance_strategy self.override_relevance_score_fn = relevance_score_fn self._normalize_L2 = normalize_L2 self._scann_config = scann_config def __add( self, texts: Iterable[str], embeddings: Iterable[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: if not isinstance(self.docstore, AddableMixin): raise ValueError( "If trying to add texts, the underlying docstore should support " f"adding items, which {self.docstore} does not" ) raise NotImplementedError("Updates are not available in ScaNN, yet.") def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore. """ # Embed and create the documents. embeddings = self.embedding.embed_documents(list(texts)) return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs) def add_embeddings( self, text_embeddings: Iterable[Tuple[str, List[float]]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: text_embeddings: Iterable pairs of string and embedding to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore. 
""" if not isinstance(self.docstore, AddableMixin): raise ValueError( "If trying to add texts, the underlying docstore should support " f"adding items, which {self.docstore} does not" ) # Embed and create the documents. texts, embeddings = zip(*text_embeddings) return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs) def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ raise NotImplementedError("Deletions are not available in ScaNN, yet.") def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, fetch_k: int = 20, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: embedding: Embedding vector to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. **kwargs: kwargs to be passed to similarity search. Can include: score_threshold: Optional, a floating point value between 0 to 1 to filter the resulting set of retrieved docs Returns: List of documents most similar to the query text and L2 distance in float for each. Lower score represents more similarity. """ vector = np.array([embedding], dtype=np.float32) if self._normalize_L2: vector = normalize(vector) indices, scores = self.index.search_batched( vector, k if filter is None else fetch_k ) docs = [] for j, i in enumerate(indices[0]): if i == -1: # This happens when not enough docs are returned. continue _id = self.index_to_docstore_id[i] doc = self.docstore.search(_id) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {_id}, got {doc}") if filter is not None: filter = { key: [value] if not isinstance(value, list) else value for key, value in filter.items() } if all(doc.metadata.get(key) in value for key, value in filter.items()): docs.append((doc, scores[0][j])) else: docs.append((doc, scores[0][j])) score_threshold = kwargs.get("score_threshold") if score_threshold is not None: cmp = ( operator.ge if self.distance_strategy in (DistanceStrategy.MAX_INNER_PRODUCT, DistanceStrategy.JACCARD) else operator.le ) docs = [ (doc, similarity) for doc, similarity in docs if cmp(similarity, score_threshold) ] return docs[:k] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Dict[str, Any]] = None, fetch_k: int = 20, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of documents most similar to the query text with L2 distance in float. Lower score represents more similarity. 
""" embedding = self.embedding.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding, k, filter=filter, fetch_k=fetch_k, **kwargs, ) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, fetch_k: int = 20, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding, k, filter=filter, fetch_k=fetch_k, **kwargs, ) return [doc for doc, _ in docs_and_scores] def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, Any]] = None, fetch_k: int = 20, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score( query, k, filter=filter, fetch_k=fetch_k, **kwargs ) return [doc for doc, _ in docs_and_scores] @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, normalize_L2: bool = False, **kwargs: Any, ) -> ScaNN: scann = dependable_scann_import() distance_strategy = kwargs.get( "distance_strategy", DistanceStrategy.EUCLIDEAN_DISTANCE ) scann_config = kwargs.get("scann_config", None) vector = np.array(embeddings, dtype=np.float32) if normalize_L2: vector = normalize(vector) if scann_config is not None: index = scann.scann_ops_pybind.create_searcher(vector, scann_config) else: if distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: index = ( scann.scann_ops_pybind.builder(vector, 1, "dot_product") .score_brute_force() .build() ) else: # Default to L2, currently other metric types not initialized. index = ( scann.scann_ops_pybind.builder(vector, 1, "squared_l2") .score_brute_force() .build() ) documents = [] if ids is None: ids = [str(uuid.uuid4()) for _ in texts] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} documents.append(Document(page_content=text, metadata=metadata)) index_to_id = dict(enumerate(ids)) if len(index_to_id) != len(documents): raise Exception( f"{len(index_to_id)} ids provided for {len(documents)} documents." " Each document should have an id." ) docstore = InMemoryDocstore(dict(zip(index_to_id.values(), documents))) return cls( embedding, index, docstore, index_to_id, normalize_L2=normalize_L2, **kwargs, ) @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> ScaNN: """Construct ScaNN wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Creates an in memory docstore 3. Initializes the ScaNN database This is intended to be a quick way to get started. Example: .. 
code-block:: python from langchain.vectorstores import ScaNN from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() scann = ScaNN.from_texts(texts, embeddings) """ embeddings = embedding.embed_documents(texts) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> ScaNN: """Construct ScaNN wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Creates an in memory docstore 3. Initializes the ScaNN database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import ScaNN from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) scann = ScaNN.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, **kwargs, ) def save_local(self, folder_path: str, index_name: str = "index") -> None: """Save ScaNN index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to save index, docstore, and index_to_docstore_id to. """ path = Path(folder_path) scann_path = path / "{index_name}.scann".format(index_name=index_name) scann_path.mkdir(exist_ok=True, parents=True) # save index separately since it is not picklable self.index.serialize(str(scann_path)) # save docstore and index_to_docstore_id with open(path / "{index_name}.pkl".format(index_name=index_name), "wb") as f: pickle.dump((self.docstore, self.index_to_docstore_id), f) @classmethod def load_local( cls, folder_path: str, embedding: Embeddings, index_name: str = "index", **kwargs: Any, ) -> ScaNN: """Load ScaNN index, docstore, and index_to_docstore_id from disk. Args: folder_path: folder path to load index, docstore, and index_to_docstore_id from. embeddings: Embeddings to use when generating queries index_name: for saving with a specific index file name """ path = Path(folder_path) scann_path = path / "{index_name}.scann".format(index_name=index_name) scann_path.mkdir(exist_ok=True, parents=True) # load index separately since it is not picklable scann = dependable_scann_import() index = scann.scann_ops_pybind.load_searcher(str(scann_path)) # load docstore and index_to_docstore_id with open(path / "{index_name}.pkl".format(index_name=index_name), "rb") as f: docstore, index_to_docstore_id = pickle.load(f) return cls(embedding, index, docstore, index_to_docstore_id, **kwargs) def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. 
""" if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn # Default strategy is to rely on distance strategy provided in # vectorstore constructor if self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: # Default behavior is to use euclidean distance relevancy return self._euclidean_relevance_score_fn else: raise ValueError( "Unknown distance strategy, must be cosine, max_inner_product," " or euclidean" ) def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, filter: Optional[Dict[str, Any]] = None, fetch_k: int = 20, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and their similarity scores on a scale from 0 to 1.""" # Pop score threshold so that only relevancy scores, not raw scores, are # filtered. score_threshold = kwargs.pop("score_threshold", None) relevance_score_fn = self._select_relevance_score_fn() if relevance_score_fn is None: raise ValueError( "normalize_score_fn must be provided to" " ScaNN constructor to normalize scores" ) docs_and_scores = self.similarity_search_with_score( query, k=k, filter=filter, fetch_k=fetch_k, **kwargs, ) docs_and_rel_scores = [ (doc, relevance_score_fn(score)) for doc, score in docs_and_scores ] if score_threshold is not None: docs_and_rel_scores = [ (doc, similarity) for doc, similarity in docs_and_rel_scores if similarity >= score_threshold ] return docs_and_rel_scores
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~document_transformers~doctran_text_extract.py
from typing import Any, List, Optional, Sequence from langchain_core.schema import BaseDocumentTransformer, Document from langchain.utils import get_from_env class DoctranPropertyExtractor(BaseDocumentTransformer): """Extract properties from text documents using doctran. Arguments: properties: A list of the properties to extract. openai_api_key: OpenAI API key. Can also be specified via environment variable ``OPENAI_API_KEY``. Example: .. code-block:: python from langchain.document_transformers import DoctranPropertyExtractor properties = [ { "name": "category", "description": "What type of email this is.", "type": "string", "enum": ["update", "action_item", "customer_feedback", "announcement", "other"], "required": True, }, { "name": "mentions", "description": "A list of all people mentioned in this email.", "type": "array", "items": { "name": "full_name", "description": "The full name of the person mentioned.", "type": "string", }, "required": True, }, { "name": "eli5", "description": "Explain this email to me like I'm 5 years old.", "type": "string", "required": True, }, ] # Pass in openai_api_key or set env var OPENAI_API_KEY property_extractor = DoctranPropertyExtractor(properties) transformed_document = await qa_transformer.atransform_documents(documents) """ # noqa: E501 def __init__( self, properties: List[dict], openai_api_key: Optional[str] = None, openai_api_model: Optional[str] = None, ) -> None: self.properties = properties self.openai_api_key = openai_api_key or get_from_env( "openai_api_key", "OPENAI_API_KEY" ) self.openai_api_model = openai_api_model or get_from_env( "openai_api_model", "OPENAI_API_MODEL" ) def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: raise NotImplementedError async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: """Extracts properties from text documents using doctran.""" try: from doctran import Doctran, ExtractProperty doctran = Doctran( openai_api_key=self.openai_api_key, openai_model=self.openai_api_model ) except ImportError: raise ImportError( "Install doctran to use this parser. (pip install doctran)" ) properties = [ExtractProperty(**property) for property in self.properties] for d in documents: doctran_doc = ( await doctran.parse(content=d.page_content) .extract(properties=properties) .execute() ) d.metadata["extracted_properties"] = doctran_doc.extracted_properties return documents
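A brief sketch of running the extractor above; it assumes the doctran package is installed and OPENAI_API_KEY is set, and the property schema, document text, and model name are placeholders.

import asyncio

from langchain_core.schema import Document
from langchain.document_transformers import DoctranPropertyExtractor

properties = [
    {
        "name": "category",
        "description": "What type of email this is.",
        "type": "string",
        "enum": ["update", "action_item", "other"],
        "required": True,
    }
]
documents = [Document(page_content="Reminder: the Q3 planning meeting moves to Friday.")]

extractor = DoctranPropertyExtractor(
    properties=properties,
    openai_api_model="gpt-3.5-turbo",  # placeholder; may also come from OPENAI_API_MODEL
)

async def main() -> None:
    transformed = await extractor.atransform_documents(documents)
    print(transformed[0].metadata["extracted_properties"])

asyncio.run(main())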
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~openai_functions~tagging.py
from typing import Any, Optional from langchain_core.prompts import ChatPromptTemplate from langchain_core.schema.language_model import BaseLanguageModel from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.openai_functions.utils import _convert_schema, get_llm_kwargs from langchain.output_parsers.openai_functions import ( JsonOutputFunctionsParser, PydanticOutputFunctionsParser, ) def _get_tagging_function(schema: dict) -> dict: return { "name": "information_extraction", "description": "Extracts the relevant information from the passage.", "parameters": _convert_schema(schema), } _TAGGING_TEMPLATE = """Extract the desired information from the following passage. Only extract the properties mentioned in the 'information_extraction' function. Passage: {input} """ def create_tagging_chain( schema: dict, llm: BaseLanguageModel, prompt: Optional[ChatPromptTemplate] = None, **kwargs: Any, ) -> Chain: """Creates a chain that extracts information from a passage based on a schema. Args: schema: The schema of the entities to extract. llm: The language model to use. Returns: Chain (LLMChain) that can be used to extract information from a passage. """ function = _get_tagging_function(schema) prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE) output_parser = JsonOutputFunctionsParser() llm_kwargs = get_llm_kwargs(function) chain = LLMChain( llm=llm, prompt=prompt, llm_kwargs=llm_kwargs, output_parser=output_parser, **kwargs, ) return chain def create_tagging_chain_pydantic( pydantic_schema: Any, llm: BaseLanguageModel, prompt: Optional[ChatPromptTemplate] = None, **kwargs: Any, ) -> Chain: """Creates a chain that extracts information from a passage based on a pydantic schema. Args: pydantic_schema: The pydantic schema of the entities to extract. llm: The language model to use. Returns: Chain (LLMChain) that can be used to extract information from a passage. """ openai_schema = pydantic_schema.schema() function = _get_tagging_function(openai_schema) prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE) output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema) llm_kwargs = get_llm_kwargs(function) chain = LLMChain( llm=llm, prompt=prompt, llm_kwargs=llm_kwargs, output_parser=output_parser, **kwargs, ) return chain
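A short usage sketch for the tagging chains above, assuming an OpenAI-functions-capable chat model and an OPENAI_API_KEY; the schema, model name, and input text are placeholders.

from langchain.chat_models import ChatOpenAI
from langchain.chains.openai_functions.tagging import create_tagging_chain

schema = {
    "properties": {
        "sentiment": {"type": "string", "enum": ["positive", "neutral", "negative"]},
        "language": {"type": "string"},
    },
    "required": ["sentiment", "language"],
}

llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
chain = create_tagging_chain(schema, llm)
print(chain.run("Estoy muy contento con este producto."))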
[ "information_extraction", "Extract the desired information from the following passage.\n\nOnly extract the properties mentioned in the 'information_extraction' function.\n\nPassage:\n{input}\n" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~awa.py
from typing import Any, Dict, List from langchain_core.pydantic_v1 import BaseModel, root_validator from langchain_core.schema.embeddings import Embeddings class AwaEmbeddings(BaseModel, Embeddings): """Embedding documents and queries with Awa DB. Attributes: client: The AwaEmbedding client. model: The name of the model used for embedding. Default is "all-mpnet-base-v2". """ client: Any #: :meta private: model: str = "all-mpnet-base-v2" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that awadb library is installed.""" try: from awadb import AwaEmbedding except ImportError as exc: raise ImportError( "Could not import awadb library. " "Please install it with `pip install awadb`" ) from exc values["client"] = AwaEmbedding() return values def set_model(self, model_name: str) -> None: """Set the model used for embedding. The default model used is all-mpnet-base-v2 Args: model_name: A string which represents the name of model. """ self.model = model_name self.client.model_name = model_name def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed a list of documents using AwaEmbedding. Args: texts: The list of texts need to be embedded Returns: List of embeddings, one for each text. """ return self.client.EmbeddingBatch(texts) def embed_query(self, text: str) -> List[float]: """Compute query embeddings using AwaEmbedding. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.client.Embedding(text)
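A minimal sketch of the AwaEmbeddings class above, assuming the awadb package is installed; the input texts are placeholders.

from langchain.embeddings import AwaEmbeddings

embeddings = AwaEmbeddings()
embeddings.set_model("all-mpnet-base-v2")  # optional; this is already the default

doc_vectors = embeddings.embed_documents(["hello world", "goodbye world"])
query_vector = embeddings.embed_query("hello")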
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~opensearch_vector_search.py
from __future__ import annotations import uuid import warnings from typing import Any, Dict, Iterable, List, Optional, Tuple import numpy as np from langchain_core.schema import Document from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.utils import get_from_dict_or_env from langchain.vectorstores.utils import maximal_marginal_relevance IMPORT_OPENSEARCH_PY_ERROR = ( "Could not import OpenSearch. Please install it with `pip install opensearch-py`." ) SCRIPT_SCORING_SEARCH = "script_scoring" PAINLESS_SCRIPTING_SEARCH = "painless_scripting" MATCH_ALL_QUERY = {"match_all": {}} # type: Dict def _import_opensearch() -> Any: """Import OpenSearch if available, otherwise raise error.""" try: from opensearchpy import OpenSearch except ImportError: raise ImportError(IMPORT_OPENSEARCH_PY_ERROR) return OpenSearch def _import_bulk() -> Any: """Import bulk if available, otherwise raise error.""" try: from opensearchpy.helpers import bulk except ImportError: raise ImportError(IMPORT_OPENSEARCH_PY_ERROR) return bulk def _import_not_found_error() -> Any: """Import not found error if available, otherwise raise error.""" try: from opensearchpy.exceptions import NotFoundError except ImportError: raise ImportError(IMPORT_OPENSEARCH_PY_ERROR) return NotFoundError def _get_opensearch_client(opensearch_url: str, **kwargs: Any) -> Any: """Get OpenSearch client from the opensearch_url, otherwise raise error.""" try: opensearch = _import_opensearch() client = opensearch(opensearch_url, **kwargs) except ValueError as e: raise ImportError( f"OpenSearch client string provided is not in proper format. " f"Got error: {e} " ) return client def _validate_embeddings_and_bulk_size(embeddings_length: int, bulk_size: int) -> None: """Validate Embeddings Length and Bulk Size.""" if embeddings_length == 0: raise RuntimeError("Embeddings size is zero") if bulk_size < embeddings_length: raise RuntimeError( f"The embeddings count, {embeddings_length} is more than the " f"[bulk_size], {bulk_size}. Increase the value of [bulk_size]." 
) def _validate_aoss_with_engines(is_aoss: bool, engine: str) -> None: """Validate AOSS with the engine.""" if is_aoss and engine != "nmslib" and engine != "faiss": raise ValueError( "Amazon OpenSearch Service Serverless only " "supports `nmslib` or `faiss` engines" ) def _is_aoss_enabled(http_auth: Any) -> bool: """Check if the service is http_auth is set as `aoss`.""" if ( http_auth is not None and hasattr(http_auth, "service") and http_auth.service == "aoss" ): return True return False def _bulk_ingest_embeddings( client: Any, index_name: str, embeddings: List[List[float]], texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, vector_field: str = "vector_field", text_field: str = "text", mapping: Optional[Dict] = None, max_chunk_bytes: Optional[int] = 1 * 1024 * 1024, is_aoss: bool = False, ) -> List[str]: """Bulk Ingest Embeddings into given index.""" if not mapping: mapping = dict() bulk = _import_bulk() not_found_error = _import_not_found_error() requests = [] return_ids = [] mapping = mapping try: client.indices.get(index=index_name) except not_found_error: client.indices.create(index=index_name, body=mapping) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} _id = ids[i] if ids else str(uuid.uuid4()) request = { "_op_type": "index", "_index": index_name, vector_field: embeddings[i], text_field: text, "metadata": metadata, } if is_aoss: request["id"] = _id else: request["_id"] = _id requests.append(request) return_ids.append(_id) bulk(client, requests, max_chunk_bytes=max_chunk_bytes) if not is_aoss: client.indices.refresh(index=index_name) return return_ids def _default_scripting_text_mapping( dim: int, vector_field: str = "vector_field", ) -> Dict: """For Painless Scripting or Script Scoring,the default mapping to create index.""" return { "mappings": { "properties": { vector_field: {"type": "knn_vector", "dimension": dim}, } } } def _default_text_mapping( dim: int, engine: str = "nmslib", space_type: str = "l2", ef_search: int = 512, ef_construction: int = 512, m: int = 16, vector_field: str = "vector_field", ) -> Dict: """For Approximate k-NN Search, this is the default mapping to create index.""" return { "settings": {"index": {"knn": True, "knn.algo_param.ef_search": ef_search}}, "mappings": { "properties": { vector_field: { "type": "knn_vector", "dimension": dim, "method": { "name": "hnsw", "space_type": space_type, "engine": engine, "parameters": {"ef_construction": ef_construction, "m": m}, }, } } }, } def _default_approximate_search_query( query_vector: List[float], k: int = 4, vector_field: str = "vector_field", ) -> Dict: """For Approximate k-NN Search, this is the default query.""" return { "size": k, "query": {"knn": {vector_field: {"vector": query_vector, "k": k}}}, } def _approximate_search_query_with_boolean_filter( query_vector: List[float], boolean_filter: Dict, k: int = 4, vector_field: str = "vector_field", subquery_clause: str = "must", ) -> Dict: """For Approximate k-NN Search, with Boolean Filter.""" return { "size": k, "query": { "bool": { "filter": boolean_filter, subquery_clause: [ {"knn": {vector_field: {"vector": query_vector, "k": k}}} ], } }, } def _approximate_search_query_with_efficient_filter( query_vector: List[float], efficient_filter: Dict, k: int = 4, vector_field: str = "vector_field", ) -> Dict: """For Approximate k-NN Search, with Efficient Filter for Lucene and Faiss Engines.""" search_query = _default_approximate_search_query( query_vector, k=k, vector_field=vector_field ) 
search_query["query"]["knn"][vector_field]["filter"] = efficient_filter return search_query def _default_script_query( query_vector: List[float], k: int = 4, space_type: str = "l2", pre_filter: Optional[Dict] = None, vector_field: str = "vector_field", ) -> Dict: """For Script Scoring Search, this is the default query.""" if not pre_filter: pre_filter = MATCH_ALL_QUERY return { "size": k, "query": { "script_score": { "query": pre_filter, "script": { "source": "knn_score", "lang": "knn", "params": { "field": vector_field, "query_value": query_vector, "space_type": space_type, }, }, } }, } def __get_painless_scripting_source( space_type: str, vector_field: str = "vector_field" ) -> str: """For Painless Scripting, it returns the script source based on space type.""" source_value = ( "(1.0 + " + space_type + "(params.query_value, doc['" + vector_field + "']))" ) if space_type == "cosineSimilarity": return source_value else: return "1/" + source_value def _default_painless_scripting_query( query_vector: List[float], k: int = 4, space_type: str = "l2Squared", pre_filter: Optional[Dict] = None, vector_field: str = "vector_field", ) -> Dict: """For Painless Scripting Search, this is the default query.""" if not pre_filter: pre_filter = MATCH_ALL_QUERY source = __get_painless_scripting_source(space_type, vector_field=vector_field) return { "size": k, "query": { "script_score": { "query": pre_filter, "script": { "source": source, "params": { "field": vector_field, "query_value": query_vector, }, }, } }, } class OpenSearchVectorSearch(VectorStore): """`Amazon OpenSearch Vector Engine` vector store. Example: .. code-block:: python from langchain.vectorstores import OpenSearchVectorSearch opensearch_vector_search = OpenSearchVectorSearch( "http://localhost:9200", "embeddings", embedding_function ) """ def __init__( self, opensearch_url: str, index_name: str, embedding_function: Embeddings, **kwargs: Any, ): """Initialize with necessary components.""" self.embedding_function = embedding_function self.index_name = index_name http_auth = kwargs.get("http_auth") self.is_aoss = _is_aoss_enabled(http_auth=http_auth) self.client = _get_opensearch_client(opensearch_url, **kwargs) self.engine = kwargs.get("engine") @property def embeddings(self) -> Embeddings: return self.embedding_function def __add( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, bulk_size: int = 500, **kwargs: Any, ) -> List[str]: _validate_embeddings_and_bulk_size(len(embeddings), bulk_size) index_name = kwargs.get("index_name", self.index_name) text_field = kwargs.get("text_field", "text") dim = len(embeddings[0]) engine = kwargs.get("engine", "nmslib") space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512) m = kwargs.get("m", 16) vector_field = kwargs.get("vector_field", "vector_field") max_chunk_bytes = kwargs.get("max_chunk_bytes", 1 * 1024 * 1024) _validate_aoss_with_engines(self.is_aoss, engine) mapping = _default_text_mapping( dim, engine, space_type, ef_search, ef_construction, m, vector_field ) return _bulk_ingest_embeddings( self.client, index_name, embeddings, texts, metadatas=metadatas, ids=ids, vector_field=vector_field, text_field=text_field, mapping=mapping, max_chunk_bytes=max_chunk_bytes, is_aoss=self.is_aoss, ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, bulk_size: int = 500, **kwargs: 
Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. bulk_size: Bulk API request count; Default: 500 Returns: List of ids from adding the texts into the vectorstore. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". """ embeddings = self.embedding_function.embed_documents(list(texts)) return self.__add( texts, embeddings, metadatas=metadatas, ids=ids, bulk_size=bulk_size, kwargs=kwargs, ) def add_embeddings( self, text_embeddings: Iterable[Tuple[str, List[float]]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, bulk_size: int = 500, **kwargs: Any, ) -> List[str]: """Add the given texts and embeddings to the vectorstore. Args: text_embeddings: Iterable pairs of string and embedding to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. bulk_size: Bulk API request count; Default: 500 Returns: List of ids from adding the texts into the vectorstore. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". """ texts, embeddings = zip(*text_embeddings) return self.__add( list(texts), list(embeddings), metadatas=metadatas, ids=ids, bulk_size=bulk_size, kwargs=kwargs, ) def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. By default, supports Approximate Search. Also supports Script Scoring and Painless Scripting. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". metadata_field: Document field that metadata is stored in. Defaults to "metadata". Can be set to a special value "*" to include the entire document. Optional Args for Approximate Search: search_type: "approximate_search"; default: "approximate_search" boolean_filter: A Boolean filter is a post filter consists of a Boolean query that contains a k-NN query and a filter. subquery_clause: Query clause on the knn vector field; default: "must" lucene_filter: the Lucene algorithm decides whether to perform an exact k-NN search with pre-filtering or an approximate search with modified post-filtering. (deprecated, use `efficient_filter`) efficient_filter: the Lucene Engine or Faiss Engine decides whether to perform an exact k-NN search with pre-filtering or an approximate search with modified post-filtering. 
Optional Args for Script Scoring Search: search_type: "script_scoring"; default: "approximate_search" space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct", "hammingbit"; default: "l2" pre_filter: script_score query to pre-filter documents before identifying nearest neighbors; default: {"match_all": {}} Optional Args for Painless Scripting Search: search_type: "painless_scripting"; default: "approximate_search" space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared" pre_filter: script_score query to pre-filter documents before identifying nearest neighbors; default: {"match_all": {}} """ docs_with_scores = self.similarity_search_with_score(query, k, **kwargs) return [doc[0] for doc in docs_with_scores] def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return docs and it's scores most similar to query. By default, supports Approximate Search. Also supports Script Scoring and Painless Scripting. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents along with its scores most similar to the query. Optional Args: same as `similarity_search` """ text_field = kwargs.get("text_field", "text") metadata_field = kwargs.get("metadata_field", "metadata") hits = self._raw_similarity_search_with_score(query=query, k=k, **kwargs) documents_with_scores = [ ( Document( page_content=hit["_source"][text_field], metadata=hit["_source"] if metadata_field == "*" or metadata_field not in hit["_source"] else hit["_source"][metadata_field], ), hit["_score"], ) for hit in hits ] return documents_with_scores def _raw_similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[dict]: """Return raw opensearch documents (dict) including vectors, scores most similar to query. By default, supports Approximate Search. Also supports Script Scoring and Painless Scripting. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of dict with its scores most similar to the query. Optional Args: same as `similarity_search` """ embedding = self.embedding_function.embed_query(query) search_type = kwargs.get("search_type", "approximate_search") vector_field = kwargs.get("vector_field", "vector_field") index_name = kwargs.get("index_name", self.index_name) filter = kwargs.get("filter", {}) if ( self.is_aoss and search_type != "approximate_search" and search_type != SCRIPT_SCORING_SEARCH ): raise ValueError( "Amazon OpenSearch Service Serverless only " "supports `approximate_search` and `script_scoring`" ) if search_type == "approximate_search": boolean_filter = kwargs.get("boolean_filter", {}) subquery_clause = kwargs.get("subquery_clause", "must") efficient_filter = kwargs.get("efficient_filter", {}) # `lucene_filter` is deprecated, added for Backwards Compatibility lucene_filter = kwargs.get("lucene_filter", {}) if boolean_filter != {} and efficient_filter != {}: raise ValueError( "Both `boolean_filter` and `efficient_filter` are provided which " "is invalid" ) if lucene_filter != {} and efficient_filter != {}: raise ValueError( "Both `lucene_filter` and `efficient_filter` are provided which " "is invalid. `lucene_filter` is deprecated" ) if lucene_filter != {} and boolean_filter != {}: raise ValueError( "Both `lucene_filter` and `boolean_filter` are provided which " "is invalid. 
`lucene_filter` is deprecated" ) if ( efficient_filter == {} and boolean_filter == {} and lucene_filter == {} and filter != {} ): if self.engine in ["faiss", "lucene"]: efficient_filter = filter else: boolean_filter = filter if boolean_filter != {}: search_query = _approximate_search_query_with_boolean_filter( embedding, boolean_filter, k=k, vector_field=vector_field, subquery_clause=subquery_clause, ) elif efficient_filter != {}: search_query = _approximate_search_query_with_efficient_filter( embedding, efficient_filter, k=k, vector_field=vector_field ) elif lucene_filter != {}: warnings.warn( "`lucene_filter` is deprecated. Please use the keyword argument" " `efficient_filter`" ) search_query = _approximate_search_query_with_efficient_filter( embedding, lucene_filter, k=k, vector_field=vector_field ) else: search_query = _default_approximate_search_query( embedding, k=k, vector_field=vector_field ) elif search_type == SCRIPT_SCORING_SEARCH: space_type = kwargs.get("space_type", "l2") pre_filter = kwargs.get("pre_filter", MATCH_ALL_QUERY) search_query = _default_script_query( embedding, k, space_type, pre_filter, vector_field ) elif search_type == PAINLESS_SCRIPTING_SEARCH: space_type = kwargs.get("space_type", "l2Squared") pre_filter = kwargs.get("pre_filter", MATCH_ALL_QUERY) search_query = _default_painless_scripting_query( embedding, k, space_type, pre_filter, vector_field ) else: raise ValueError("Invalid `search_type` provided as an argument") response = self.client.search(index=index_name, body=search_query) return [hit for hit in response["hits"]["hits"]] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> list[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ vector_field = kwargs.get("vector_field", "vector_field") text_field = kwargs.get("text_field", "text") metadata_field = kwargs.get("metadata_field", "metadata") # Get embedding of the user query embedding = self.embedding_function.embed_query(query) # Do ANN/KNN search to get top fetch_k results where fetch_k >= k results = self._raw_similarity_search_with_score(query, fetch_k, **kwargs) embeddings = [result["_source"][vector_field] for result in results] # Rerank top k results using MMR, (mmr_selected is a list of indices) mmr_selected = maximal_marginal_relevance( np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult ) return [ Document( page_content=results[i]["_source"][text_field], metadata=results[i]["_source"][metadata_field], ) for i in mmr_selected ] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, bulk_size: int = 500, ids: Optional[List[str]] = None, **kwargs: Any, ) -> OpenSearchVectorSearch: """Construct OpenSearchVectorSearch wrapper from raw texts. Example: .. 
code-block:: python from langchain.vectorstores import OpenSearchVectorSearch from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() opensearch_vector_search = OpenSearchVectorSearch.from_texts( texts, embeddings, opensearch_url="http://localhost:9200" ) OpenSearch by default supports Approximate Search powered by nmslib, faiss and lucene engines recommended for large datasets. Also supports brute force search through Script Scoring and Painless Scripting. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". Optional Keyword Args for Approximate Search: engine: "nmslib", "faiss", "lucene"; default: "nmslib" space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2" ef_search: Size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches; default: 512 ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to more accurate graph but slower indexing speed; default: 512 m: Number of bidirectional links created for each new element. Large impact on memory consumption. Between 2 and 100; default: 16 Keyword Args for Script Scoring or Painless Scripting: is_appx_search: False """ embeddings = embedding.embed_documents(texts) return cls.from_embeddings( embeddings, texts, embedding, metadatas=metadatas, bulk_size=bulk_size, ids=ids, **kwargs, ) @classmethod def from_embeddings( cls, embeddings: List[List[float]], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, bulk_size: int = 500, ids: Optional[List[str]] = None, **kwargs: Any, ) -> OpenSearchVectorSearch: """Construct OpenSearchVectorSearch wrapper from pre-vectorized embeddings. Example: .. code-block:: python from langchain.vectorstores import OpenSearchVectorSearch from langchain.embeddings import OpenAIEmbeddings embedder = OpenAIEmbeddings() embeddings = embedder.embed_documents(["foo", "bar"]) opensearch_vector_search = OpenSearchVectorSearch.from_embeddings( embeddings, texts, embedder, opensearch_url="http://localhost:9200" ) OpenSearch by default supports Approximate Search powered by nmslib, faiss and lucene engines recommended for large datasets. Also supports brute force search through Script Scoring and Painless Scripting. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". Optional Keyword Args for Approximate Search: engine: "nmslib", "faiss", "lucene"; default: "nmslib" space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2" ef_search: Size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches; default: 512 ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to more accurate graph but slower indexing speed; default: 512 m: Number of bidirectional links created for each new element. Large impact on memory consumption. 
Between 2 and 100; default: 16 Keyword Args for Script Scoring or Painless Scripting: is_appx_search: False """ opensearch_url = get_from_dict_or_env( kwargs, "opensearch_url", "OPENSEARCH_URL" ) # List of arguments that needs to be removed from kwargs # before passing kwargs to get opensearch client keys_list = [ "opensearch_url", "index_name", "is_appx_search", "vector_field", "text_field", "engine", "space_type", "ef_search", "ef_construction", "m", "max_chunk_bytes", "is_aoss", ] _validate_embeddings_and_bulk_size(len(embeddings), bulk_size) dim = len(embeddings[0]) # Get the index name from either from kwargs or ENV Variable # before falling back to random generation index_name = get_from_dict_or_env( kwargs, "index_name", "OPENSEARCH_INDEX_NAME", default=uuid.uuid4().hex ) is_appx_search = kwargs.get("is_appx_search", True) vector_field = kwargs.get("vector_field", "vector_field") text_field = kwargs.get("text_field", "text") max_chunk_bytes = kwargs.get("max_chunk_bytes", 1 * 1024 * 1024) http_auth = kwargs.get("http_auth") is_aoss = _is_aoss_enabled(http_auth=http_auth) engine = None if is_aoss and not is_appx_search: raise ValueError( "Amazon OpenSearch Service Serverless only " "supports `approximate_search`" ) if is_appx_search: engine = kwargs.get("engine", "nmslib") space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512) m = kwargs.get("m", 16) _validate_aoss_with_engines(is_aoss, engine) mapping = _default_text_mapping( dim, engine, space_type, ef_search, ef_construction, m, vector_field ) else: mapping = _default_scripting_text_mapping(dim) [kwargs.pop(key, None) for key in keys_list] client = _get_opensearch_client(opensearch_url, **kwargs) _bulk_ingest_embeddings( client, index_name, embeddings, texts, ids=ids, metadatas=metadatas, vector_field=vector_field, text_field=text_field, mapping=mapping, max_chunk_bytes=max_chunk_bytes, is_aoss=is_aoss, ) kwargs["engine"] = engine return cls(opensearch_url, index_name, embedding, **kwargs)
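A short usage sketch for the OpenSearch wrapper above, assuming a reachable OpenSearch node and an OpenAI key; the URL, index name, metadata, and filter body are placeholders, and OpenAIEmbeddings stands in for any Embeddings implementation.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import OpenSearchVectorSearch

docsearch = OpenSearchVectorSearch.from_texts(
    ["foo", "bar", "baz"],
    OpenAIEmbeddings(),
    metadatas=[{"source": str(i)} for i in range(3)],
    opensearch_url="http://localhost:9200",
    index_name="langchain-demo",
    engine="faiss",  # Faiss/Lucene engines allow `efficient_filter` below
)

# Plain approximate k-NN search.
docs = docsearch.similarity_search("foo", k=2)

# Approximate search with an efficient filter applied to the metadata field.
filtered = docsearch.similarity_search(
    "foo",
    k=2,
    efficient_filter={"term": {"metadata.source": "1"}},
)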
[]
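A minimal usage sketch for the `from_embeddings` constructor above (not part of the source): it assumes a local OpenSearch node at http://localhost:9200, the `opensearch-py` client installed, and an OpenAI key for `OpenAIEmbeddings`; the sample texts are hypothetical.

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import OpenSearchVectorSearch

    texts = ["foo", "bar", "baz"]                     # hypothetical corpus
    embedder = OpenAIEmbeddings()
    vectors = embedder.embed_documents(texts)         # embed once, reuse below

    docsearch = OpenSearchVectorSearch.from_embeddings(
        vectors,
        texts,
        embedder,
        opensearch_url="http://localhost:9200",       # assumed local endpoint
        engine="faiss",                               # approximate k-NN engine
        space_type="l2",
    )
    docs = docsearch.similarity_search("foo", k=2)

Passing pre-computed vectors this way avoids re-embedding the corpus when the same texts are indexed into more than one store.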
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~weaviate_hybrid_search.py
from __future__ import annotations from typing import Any, Dict, List, Optional, cast from uuid import uuid4 from langchain_core.pydantic_v1 import root_validator from langchain_core.schema import BaseRetriever from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.docstore.document import Document class WeaviateHybridSearchRetriever(BaseRetriever): """`Weaviate hybrid search` retriever. See the documentation: https://weaviate.io/blog/hybrid-search-explained """ client: Any """The Weaviate client to use.""" index_name: str """The name of the index to use.""" text_key: str """The name of the text key to use.""" alpha: float = 0.5 """The weight of the text key in the hybrid search.""" k: int = 4 """The number of results to return.""" attributes: List[str] """The attributes to return in the results.""" create_schema_if_missing: bool = True """Whether to create the schema if it doesn't exist.""" @root_validator(pre=True) def validate_client( cls, values: Dict[str, Any], ) -> Dict[str, Any]: try: import weaviate except ImportError: raise ImportError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`." ) if not isinstance(values["client"], weaviate.Client): client = values["client"] raise ValueError( f"client should be an instance of weaviate.Client, got {type(client)}" ) if values.get("attributes") is None: values["attributes"] = [] cast(List, values["attributes"]).append(values["text_key"]) if values.get("create_schema_if_missing", True): class_obj = { "class": values["index_name"], "properties": [{"name": values["text_key"], "dataType": ["text"]}], "vectorizer": "text2vec-openai", } if not values["client"].schema.exists(values["index_name"]): values["client"].schema.create_class(class_obj) return values class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True # added text_key def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]: """Upload documents to Weaviate.""" from weaviate.util import get_valid_uuid with self.client.batch as batch: ids = [] for i, doc in enumerate(docs): metadata = doc.metadata or {} data_properties = {self.text_key: doc.page_content, **metadata} # If the UUID of one of the objects already exists # then the existing object will be replaced by the new object. if "uuids" in kwargs: _id = kwargs["uuids"][i] else: _id = get_valid_uuid(uuid4()) batch.add_data_object(data_properties, self.index_name, _id) ids.append(_id) return ids def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, where_filter: Optional[Dict[str, object]] = None, score: bool = False, hybrid_search_kwargs: Optional[Dict[str, object]] = None, ) -> List[Document]: """Look up similar documents in Weaviate. query: The query to find relevant documents for, using Weaviate hybrid search. where_filter: A filter to apply to the query. https://weaviate.io/developers/weaviate/guides/querying/#filtering score: Whether to include the score and score explanation in the returned Documents' metadata. hybrid_search_kwargs: Used to pass additional arguments to the .with_hybrid() method. The primary use cases for this are: 1) Search specific properties only - specify which properties should be used during the hybrid search portion. Note: this is not the same as the (self.attributes) to be returned. 
Example - hybrid_search_kwargs={"properties": ["question", "answer"]} https://weaviate.io/developers/weaviate/search/hybrid#selected-properties-only 2) Weight boosted searched properties - Boost the weight of certain properties during the hybrid search portion. Example - hybrid_search_kwargs={"properties": ["question^2", "answer"]} https://weaviate.io/developers/weaviate/search/hybrid#weight-boost-searched-properties 3) Search with a custom vector - Define a different vector to be used during the hybrid search portion. Example - hybrid_search_kwargs={"vector": [0.1, 0.2, 0.3, ...]} https://weaviate.io/developers/weaviate/search/hybrid#with-a-custom-vector 4) Use Fusion ranking method Example - from weaviate.gql.get import HybridFusion hybrid_search_kwargs={"fusion": fusion_type=HybridFusion.RELATIVE_SCORE} https://weaviate.io/developers/weaviate/search/hybrid#fusion-ranking-method """ query_obj = self.client.query.get(self.index_name, self.attributes) if where_filter: query_obj = query_obj.with_where(where_filter) if score: query_obj = query_obj.with_additional(["score", "explainScore"]) if hybrid_search_kwargs is None: hybrid_search_kwargs = {} result = ( query_obj.with_hybrid(query, alpha=self.alpha, **hybrid_search_kwargs) .with_limit(self.k) .do() ) if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][self.index_name]: text = res.pop(self.text_key) docs.append(Document(page_content=text, metadata=res)) return docs
[]
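A short usage sketch for the retriever above, assuming a Weaviate instance reachable at http://localhost:8080 (hypothetical endpoint) with the `text2vec-openai` vectorizer module enabled and `weaviate-client` installed; the class name and document text are illustrative.

.. code-block:: python

    import weaviate
    from langchain.docstore.document import Document
    from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever

    client = weaviate.Client("http://localhost:8080")   # hypothetical endpoint

    retriever = WeaviateHybridSearchRetriever(
        client=client,
        index_name="LangChain",        # hypothetical Weaviate class name
        text_key="text",
        attributes=[],                 # extra properties to return, if any
        create_schema_if_missing=True,
    )

    retriever.add_documents([Document(page_content="LangChain supports hybrid search.")])
    docs = retriever.get_relevant_documents("hybrid search")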
2024-01-10
axgpt/langchain
libs~core~langchain_core~chat_model.py
import asyncio import inspect import warnings from abc import ABC, abstractmethod from functools import partial from typing import ( Any, AsyncIterator, Dict, Iterator, List, Optional, Sequence, cast, ) from langchain_core.callbacks.base import BaseCallbackManager from langchain_core.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain_core.globals import get_llm_cache from langchain_core.load.dump import dumpd, dumps from langchain_core.prompts.base import StringPromptValue from langchain_core.prompts.chat import ChatPromptValue from langchain_core.pydantic_v1 import Field, root_validator from langchain_core.runnables import RunnableConfig from langchain_core.schema import ( ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo, ) from langchain_core.schema.language_model import BaseLanguageModel, LanguageModelInput from langchain_core.schema.messages import ( AIMessage, AnyMessage, BaseMessage, BaseMessageChunk, HumanMessage, ) from langchain_core.schema.output import ChatGenerationChunk def _get_verbosity() -> bool: from langchain_core.globals import get_verbose return get_verbose() def _generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult: generation: Optional[ChatGenerationChunk] = None for chunk in stream: if generation is None: generation = chunk else: generation += chunk assert generation is not None return ChatResult(generations=[generation]) async def _agenerate_from_stream( stream: AsyncIterator[ChatGenerationChunk], ) -> ChatResult: generation: Optional[ChatGenerationChunk] = None async for chunk in stream: if generation is None: generation = chunk else: generation += chunk assert generation is not None return ChatResult(generations=[generation]) class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): """Base class for Chat models.""" cache: Optional[bool] = None """Whether to cache the response.""" verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) """Callbacks to add to the run trace.""" callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) """Callback manager to add to the run trace.""" tags: Optional[List[str]] = Field(default=None, exclude=True) """Tags to add to the run trace.""" metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True) """Metadata to add to the run trace.""" @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True # --- Runnable methods --- @property def OutputType(self) -> Any: """Get the output type for this runnable.""" return AnyMessage def _convert_input(self, input: LanguageModelInput) -> PromptValue: if isinstance(input, PromptValue): return input elif isinstance(input, str): return StringPromptValue(text=input) elif isinstance(input, list): return ChatPromptValue(messages=input) else: raise ValueError( f"Invalid input type {type(input)}. " "Must be a PromptValue, str, or list of BaseMessages." 
) def invoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> BaseMessage: config = config or {} return cast( ChatGeneration, self.generate_prompt( [self._convert_input(input)], stop=stop, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), run_name=config.get("run_name"), **kwargs, ).generations[0][0], ).message async def ainvoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> BaseMessage: config = config or {} llm_result = await self.agenerate_prompt( [self._convert_input(input)], stop=stop, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), run_name=config.get("run_name"), **kwargs, ) return cast(ChatGeneration, llm_result.generations[0][0]).message def stream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> Iterator[BaseMessageChunk]: if type(self)._stream == BaseChatModel._stream: # model doesn't implement streaming, so use default implementation yield cast( BaseMessageChunk, self.invoke(input, config=config, stop=stop, **kwargs) ) else: config = config or {} messages = self._convert_input(input).to_messages() params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop, **kwargs} callback_manager = CallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = callback_manager.on_chat_model_start( dumpd(self), [messages], invocation_params=params, options=options, name=config.get("run_name"), ) try: generation: Optional[ChatGenerationChunk] = None for chunk in self._stream( messages, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.message if generation is None: generation = chunk else: generation += chunk assert generation is not None except BaseException as e: run_manager.on_llm_error(e) raise e else: run_manager.on_llm_end( LLMResult(generations=[[generation]]), ) async def astream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> AsyncIterator[BaseMessageChunk]: if type(self)._astream == BaseChatModel._astream: # model doesn't implement streaming, so use default implementation yield cast( BaseMessageChunk, self.invoke(input, config=config, stop=stop, **kwargs) ) else: config = config or {} messages = self._convert_input(input).to_messages() params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop, **kwargs} callback_manager = AsyncCallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = await callback_manager.on_chat_model_start( dumpd(self), [messages], invocation_params=params, options=options, name=config.get("run_name"), ) try: generation: Optional[ChatGenerationChunk] = None async for chunk in self._astream( messages, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.message if generation is None: generation = chunk else: generation += chunk assert generation is not None except BaseException as e: await run_manager.on_llm_error(e) raise e else: await run_manager.on_llm_end( LLMResult(generations=[[generation]]), ) # --- Custom methods --- def _combine_llm_outputs(self, llm_outputs: 
List[Optional[dict]]) -> dict: return {} def _get_invocation_params( self, stop: Optional[List[str]] = None, **kwargs: Any, ) -> dict: params = self.dict() params["stop"] = stop return {**params, **kwargs} def _get_llm_string(self, stop: Optional[List[str]] = None, **kwargs: Any) -> str: if self.is_lc_serializable(): params = {**kwargs, **{"stop": stop}} param_string = str(sorted([(k, v) for k, v in params.items()])) llm_string = dumps(self) return llm_string + "---" + param_string else: params = self._get_invocation_params(stop=stop, **kwargs) params = {**params, **kwargs} return str(sorted([(k, v) for k, v in params.items()])) def generate( self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, run_name: Optional[str] = None, **kwargs: Any, ) -> LLMResult: """Top Level call""" params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop} callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose, tags, self.tags, metadata, self.metadata, ) run_managers = callback_manager.on_chat_model_start( dumpd(self), messages, invocation_params=params, options=options, name=run_name, ) results = [] for i, m in enumerate(messages): try: results.append( self._generate_with_cache( m, stop=stop, run_manager=run_managers[i] if run_managers else None, **kwargs, ) ) except BaseException as e: if run_managers: run_managers[i].on_llm_error(e) raise e flattened_outputs = [ LLMResult(generations=[res.generations], llm_output=res.llm_output) for res in results ] llm_output = self._combine_llm_outputs([res.llm_output for res in results]) generations = [res.generations for res in results] output = LLMResult(generations=generations, llm_output=llm_output) if run_managers: run_infos = [] for manager, flattened_output in zip(run_managers, flattened_outputs): manager.on_llm_end(flattened_output) run_infos.append(RunInfo(run_id=manager.run_id)) output.run = run_infos return output async def agenerate( self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, run_name: Optional[str] = None, **kwargs: Any, ) -> LLMResult: """Top Level call""" params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop} callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose, tags, self.tags, metadata, self.metadata, ) run_managers = await callback_manager.on_chat_model_start( dumpd(self), messages, invocation_params=params, options=options, name=run_name, ) results = await asyncio.gather( *[ self._agenerate_with_cache( m, stop=stop, run_manager=run_managers[i] if run_managers else None, **kwargs, ) for i, m in enumerate(messages) ], return_exceptions=True, ) exceptions = [] for i, res in enumerate(results): if isinstance(res, BaseException): if run_managers: await run_managers[i].on_llm_error(res) exceptions.append(res) if exceptions: if run_managers: await asyncio.gather( *[ run_manager.on_llm_end( LLMResult( generations=[res.generations], llm_output=res.llm_output ) ) for run_manager, res in zip(run_managers, results) if not isinstance(res, Exception) ] ) raise exceptions[0] flattened_outputs = [ LLMResult(generations=[res.generations], llm_output=res.llm_output) for res in results ] llm_output = self._combine_llm_outputs([res.llm_output for res in results]) generations = 
[res.generations for res in results] output = LLMResult(generations=generations, llm_output=llm_output) await asyncio.gather( *[ run_manager.on_llm_end(flattened_output) for run_manager, flattened_output in zip( run_managers, flattened_outputs ) ] ) if run_managers: output.run = [ RunInfo(run_id=run_manager.run_id) for run_manager in run_managers ] return output def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> LLMResult: prompt_messages = [p.to_messages() for p in prompts] return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> LLMResult: prompt_messages = [p.to_messages() for p in prompts] return await self.agenerate( prompt_messages, stop=stop, callbacks=callbacks, **kwargs ) def _generate_with_cache( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) disregard_cache = self.cache is not None and not self.cache llm_cache = get_llm_cache() if llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) if new_arg_supported: return self._generate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: return self._generate(messages, stop=stop, **kwargs) else: llm_string = self._get_llm_string(stop=stop, **kwargs) prompt = dumps(messages) cache_val = llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): return ChatResult(generations=cache_val) else: if new_arg_supported: result = self._generate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: result = self._generate(messages, stop=stop, **kwargs) llm_cache.update(prompt, llm_string, result.generations) return result async def _agenerate_with_cache( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) disregard_cache = self.cache is not None and not self.cache llm_cache = get_llm_cache() if llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) if new_arg_supported: return await self._agenerate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: return await self._agenerate(messages, stop=stop, **kwargs) else: llm_string = self._get_llm_string(stop=stop, **kwargs) prompt = dumps(messages) cache_val = llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): return ChatResult(generations=cache_val) else: if new_arg_supported: result = await self._agenerate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: result = await self._agenerate(messages, stop=stop, **kwargs) llm_cache.update(prompt, llm_string, result.generations) return result @abstractmethod def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Top Level call""" async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Top Level call""" return await asyncio.get_running_loop().run_in_executor( None, partial(self._generate, **kwargs), messages, stop, run_manager ) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: raise NotImplementedError() def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: raise NotImplementedError() def __call__( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> BaseMessage: generation = self.generate( [messages], stop=stop, callbacks=callbacks, **kwargs ).generations[0][0] if isinstance(generation, ChatGeneration): return generation.message else: raise ValueError("Unexpected generation type") async def _call_async( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> BaseMessage: result = await self.agenerate( [messages], stop=stop, callbacks=callbacks, **kwargs ) generation = result.generations[0][0] if isinstance(generation, ChatGeneration): return generation.message else: raise ValueError("Unexpected generation type") def call_as_llm( self, message: str, stop: Optional[List[str]] = None, **kwargs: Any ) -> str: return self.predict(message, stop=stop, **kwargs) def predict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) result = self([HumanMessage(content=text)], stop=_stop, **kwargs) if isinstance(result.content, str): return result.content else: raise ValueError("Cannot use predict when output is not a string.") def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: if stop is None: _stop = None else: _stop = list(stop) return self(messages, stop=_stop, **kwargs) async def apredict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) result = await self._call_async( [HumanMessage(content=text)], stop=_stop, **kwargs ) if isinstance(result.content, str): return result.content else: raise ValueError("Cannot use predict when output is not a string.") async def apredict_messages( self, messages: 
List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(messages, stop=_stop, **kwargs) @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {} @property @abstractmethod def _llm_type(self) -> str: """Return type of chat model.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) starter_dict["_type"] = self._llm_type return starter_dict class SimpleChatModel(BaseChatModel): """Simple Chat Model.""" def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs) message = AIMessage(content=output_str) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @abstractmethod def _call( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Simpler interface.""" async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: func = partial( self._generate, messages, stop=stop, run_manager=run_manager, **kwargs ) return await asyncio.get_event_loop().run_in_executor(None, func)
[]
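A minimal sketch of a custom chat model built on the `SimpleChatModel` helper above; the echo behaviour and class name are purely illustrative, and the import path assumes the module layout shown here (`langchain_core.chat_model`).

.. code-block:: python

    from typing import Any, List, Optional

    from langchain_core.callbacks.manager import CallbackManagerForLLMRun
    from langchain_core.chat_model import SimpleChatModel
    from langchain_core.schema.messages import BaseMessage, HumanMessage


    class EchoChatModel(SimpleChatModel):
        """Toy model that returns the last human message verbatim."""

        @property
        def _llm_type(self) -> str:
            return "echo-chat-model"

        def _call(
            self,
            messages: List[BaseMessage],
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
        ) -> str:
            # SimpleChatModel._generate wraps this string in an AIMessage.
            return str(messages[-1].content)


    chat = EchoChatModel()
    print(chat.invoke([HumanMessage(content="hello")]))  # AIMessage(content='hello')

Because `invoke`, `stream`, `generate`, and the caching logic all live in `BaseChatModel`, a subclass only needs to supply `_call` (or `_generate`) and `_llm_type`.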
2024-01-10
axgpt/langchain
libs~langchain~langchain~output_parsers~datetime.py
import random from datetime import datetime, timedelta from typing import List from langchain_core.schema import BaseOutputParser, OutputParserException from langchain.utils import comma_list def _generate_random_datetime_strings( pattern: str, n: int = 3, start_date: datetime = datetime(1, 1, 1), end_date: datetime = datetime.now() + timedelta(days=3650), ) -> List[str]: """Generates n random datetime strings conforming to the given pattern within the specified date range. Pattern should be a string containing the desired format codes. start_date and end_date should be datetime objects representing the start and end of the date range. """ examples = [] delta = end_date - start_date for i in range(n): random_delta = random.uniform(0, delta.total_seconds()) dt = start_date + timedelta(seconds=random_delta) date_string = dt.strftime(pattern) examples.append(date_string) return examples class DatetimeOutputParser(BaseOutputParser[datetime]): """Parse the output of an LLM call to a datetime.""" format: str = "%Y-%m-%dT%H:%M:%S.%fZ" """The string value that is used as the datetime format.""" def get_format_instructions(self) -> str: examples = comma_list(_generate_random_datetime_strings(self.format)) return f"""Write a datetime string that matches the following pattern: "{self.format}". Examples: {examples}""" def parse(self, response: str) -> datetime: try: return datetime.strptime(response.strip(), self.format) except ValueError as e: raise OutputParserException( f"Could not parse datetime string: {response}" ) from e @property def _type(self) -> str: return "datetime"
[]
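A quick sketch of the parser above; the response string is a hand-written stand-in for an LLM reply.

.. code-block:: python

    from langchain.output_parsers.datetime import DatetimeOutputParser

    parser = DatetimeOutputParser()          # default format "%Y-%m-%dT%H:%M:%S.%fZ"
    print(parser.get_format_instructions())  # includes randomly generated examples

    dt = parser.parse("2023-07-04T14:30:00.000000Z")
    print(dt.year, dt.month, dt.day)         # 2023 7 4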
2024-01-10
axgpt/langchain
libs~langchain~langchain~output_parsers~xml.py
import re import xml.etree.ElementTree as ET from typing import Any, Dict, List, Optional from langchain_core.schema import BaseOutputParser from langchain.output_parsers.format_instructions import XML_FORMAT_INSTRUCTIONS class XMLOutputParser(BaseOutputParser): """Parse an output using xml format.""" tags: Optional[List[str]] = None encoding_matcher: re.Pattern = re.compile( r"<([^>]*encoding[^>]*)>\n(.*)", re.MULTILINE | re.DOTALL ) def get_format_instructions(self) -> str: return XML_FORMAT_INSTRUCTIONS.format(tags=self.tags) def parse(self, text: str) -> Dict[str, List[Any]]: text = text.strip("`").strip("xml") encoding_match = self.encoding_matcher.search(text) if encoding_match: text = encoding_match.group(2) text = text.strip() if (text.startswith("<") or text.startswith("\n<")) and ( text.endswith(">") or text.endswith(">\n") ): root = ET.fromstring(text) return self._root_to_dict(root) else: raise ValueError(f"Could not parse output: {text}") def _root_to_dict(self, root: ET.Element) -> Dict[str, List[Any]]: """Converts xml tree to python dictionary.""" result: Dict[str, List[Any]] = {root.tag: []} for child in root: if len(child) == 0: result[root.tag].append({child.tag: child.text}) else: result[root.tag].append(self._root_to_dict(child)) return result @property def _type(self) -> str: return "xml"
[]
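A small sketch of the XML parser above on a hand-written response; the tag names are illustrative.

.. code-block:: python

    from langchain.output_parsers.xml import XMLOutputParser

    parser = XMLOutputParser(tags=["movies", "movie", "title"])
    text = "<movies><movie><title>Alien</title></movie></movies>"
    print(parser.parse(text))
    # {'movies': [{'movie': [{'title': 'Alien'}]}]}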
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~mapreduce.py
"""Map-reduce chain. Splits up a document, sends the smaller parts to the LLM with one prompt, then combines the results with another one. """ from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain_core.pydantic_v1 import Extra from langchain_core.schema import BasePromptTemplate from langchain_core.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains import ReduceDocumentsChain from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.docstore.document import Document from langchain.text_splitter import TextSplitter class MapReduceChain(Chain): """Map-reduce chain.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" text_splitter: TextSplitter """Text splitter to use.""" input_key: str = "input_text" #: :meta private: output_key: str = "output_text" #: :meta private: @classmethod def from_params( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, text_splitter: TextSplitter, callbacks: Callbacks = None, combine_chain_kwargs: Optional[Mapping[str, Any]] = None, reduce_chain_kwargs: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> MapReduceChain: """Construct a map-reduce chain that uses the chain for map and reduce.""" llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks) stuff_chain = StuffDocumentsChain( llm_chain=llm_chain, callbacks=callbacks, **(reduce_chain_kwargs if reduce_chain_kwargs else {}), ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=stuff_chain ) combine_documents_chain = MapReduceDocumentsChain( llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, callbacks=callbacks, **(combine_chain_kwargs if combine_chain_kwargs else {}), ) return cls( combine_documents_chain=combine_documents_chain, text_splitter=text_splitter, callbacks=callbacks, **kwargs, ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Split the larger text into smaller chunks. doc_text = inputs.pop(self.input_key) texts = self.text_splitter.split_text(doc_text) docs = [Document(page_content=text) for text in texts] _inputs: Dict[str, Any] = { **inputs, self.combine_documents_chain.input_key: docs, } outputs = self.combine_documents_chain.run( _inputs, callbacks=_run_manager.get_child() ) return {self.output_key: outputs}
[]
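A sketch of wiring `MapReduceChain.from_params` above; the OpenAI model, the prompt text, and the splitter settings are assumptions rather than part of the source.

.. code-block:: python

    from langchain.chains.mapreduce import MapReduceChain
    from langchain.llms import OpenAI
    from langchain.text_splitter import CharacterTextSplitter
    from langchain_core.prompts import PromptTemplate

    llm = OpenAI(temperature=0)
    prompt = PromptTemplate.from_template("Summarize the following text:\n\n{text}")
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)

    chain = MapReduceChain.from_params(llm=llm, prompt=prompt, text_splitter=splitter)
    summary = chain.run("... a long document to summarize ...")

Because the prompt has a single input variable, the stuff and map-reduce document chains built by `from_params` can infer their `document_variable_name` automatically.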
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~multi_vector.py
from typing import List from langchain_core.pydantic_v1 import Field from langchain_core.schema import BaseRetriever, BaseStore, Document from langchain_core.schema.vectorstore import VectorStore from langchain.callbacks.manager import CallbackManagerForRetrieverRun class MultiVectorRetriever(BaseRetriever): """Retrieve from a set of multiple embeddings for the same document.""" vectorstore: VectorStore """The underlying vectorstore to use to store small chunks and their embedding vectors""" docstore: BaseStore[str, Document] """The storage layer for the parent documents""" id_key: str = "doc_id" search_kwargs: dict = Field(default_factory=dict) """Keyword arguments to pass to the search function.""" def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """Get documents relevant to a query. Args: query: String to find relevant documents for run_manager: The callbacks handler to use Returns: List of relevant documents """ sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs) # We do this to maintain the order of the ids that are returned ids = [] for d in sub_docs: if d.metadata[self.id_key] not in ids: ids.append(d.metadata[self.id_key]) docs = self.docstore.mget(ids) return [d for d in docs if d is not None]
[]
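A sketch of the multi-vector pattern the retriever above implements: small child chunks are embedded in the vectorstore, full parent documents live in the docstore, and the two are linked through the `doc_id` metadata key. `Chroma`, `OpenAIEmbeddings`, and `InMemoryStore` are assumptions; any `VectorStore` and `BaseStore` pairing would do.

.. code-block:: python

    import uuid

    from langchain.docstore.document import Document
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers.multi_vector import MultiVectorRetriever
    from langchain.storage import InMemoryStore
    from langchain.vectorstores import Chroma

    parent = Document(page_content="A long parent document about vector search ...")
    doc_id = str(uuid.uuid4())

    children = [
        Document(page_content="vector search basics", metadata={"doc_id": doc_id}),
        Document(page_content="approximate nearest neighbours", metadata={"doc_id": doc_id}),
    ]

    retriever = MultiVectorRetriever(
        vectorstore=Chroma(collection_name="children", embedding_function=OpenAIEmbeddings()),
        docstore=InMemoryStore(),
        id_key="doc_id",
    )
    retriever.vectorstore.add_documents(children)
    retriever.docstore.mset([(doc_id, parent)])

    docs = retriever.get_relevant_documents("nearest neighbours")  # returns the parent doc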
2024-01-10
axgpt/langchain
libs~langchain~langchain~embeddings~spacy_embeddings.py
import importlib.util from typing import Any, Dict, List from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator from langchain_core.schema.embeddings import Embeddings class SpacyEmbeddings(BaseModel, Embeddings): """Embeddings by SpaCy models. It only supports the 'en_core_web_sm' model. Attributes: nlp (Any): The Spacy model loaded into memory. Methods: embed_documents(texts: List[str]) -> List[List[float]]: Generates embeddings for a list of documents. embed_query(text: str) -> List[float]: Generates an embedding for a single piece of text. """ nlp: Any # The Spacy model loaded into memory class Config: """Configuration for this pydantic object.""" extra = Extra.forbid # Forbid extra attributes during model initialization @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """ Validates that the Spacy package and the 'en_core_web_sm' model are installed. Args: values (Dict): The values provided to the class constructor. Returns: The validated values. Raises: ValueError: If the Spacy package or the 'en_core_web_sm' model are not installed. """ # Check if the Spacy package is installed if importlib.util.find_spec("spacy") is None: raise ValueError( "Spacy package not found. " "Please install it with `pip install spacy`." ) try: # Try to load the 'en_core_web_sm' Spacy model import spacy values["nlp"] = spacy.load("en_core_web_sm") except OSError: # If the model is not found, raise a ValueError raise ValueError( "Spacy model 'en_core_web_sm' not found. " "Please install it with" " `python -m spacy download en_core_web_sm`." ) return values # Return the validated values def embed_documents(self, texts: List[str]) -> List[List[float]]: """ Generates embeddings for a list of documents. Args: texts (List[str]): The documents to generate embeddings for. Returns: A list of embeddings, one for each document. """ return [self.nlp(text).vector.tolist() for text in texts] def embed_query(self, text: str) -> List[float]: """ Generates an embedding for a single piece of text. Args: text (str): The text to generate an embedding for. Returns: The embedding for the text. """ return self.nlp(text).vector.tolist() async def aembed_documents(self, texts: List[str]) -> List[List[float]]: """ Asynchronously generates embeddings for a list of documents. This method is not implemented and raises a NotImplementedError. Args: texts (List[str]): The documents to generate embeddings for. Raises: NotImplementedError: This method is not implemented. """ raise NotImplementedError("Asynchronous embedding generation is not supported.") async def aembed_query(self, text: str) -> List[float]: """ Asynchronously generates an embedding for a single piece of text. This method is not implemented and raises a NotImplementedError. Args: text (str): The text to generate an embedding for. Raises: NotImplementedError: This method is not implemented. """ raise NotImplementedError("Asynchronous embedding generation is not supported.")
[]
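A quick sketch of the SpaCy embedder above; it assumes `spacy` is installed and the `en_core_web_sm` model has been downloaded (`python -m spacy download en_core_web_sm`).

.. code-block:: python

    from langchain.embeddings.spacy_embeddings import SpacyEmbeddings

    embedder = SpacyEmbeddings()
    doc_vectors = embedder.embed_documents(["The quick brown fox", "jumps over the lazy dog"])
    query_vector = embedder.embed_query("quick fox")

    print(len(doc_vectors), len(doc_vectors[0]))  # two document vectors of equal dimensionality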
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~pgembedding.py
from __future__ import annotations import logging import uuid from typing import Any, Dict, Iterable, List, Optional, Tuple, Type import sqlalchemy from sqlalchemy import func from sqlalchemy.dialects.postgresql import JSON, UUID from sqlalchemy.orm import Session, relationship try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.utils import get_from_dict_or_env Base = declarative_base() # type: Any ADA_TOKEN_COUNT = 1536 _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain" class BaseModel(Base): """Base model for all SQL stores.""" __abstract__ = True uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) class CollectionStore(BaseModel): """Collection store.""" __tablename__ = "langchain_pg_collection" name = sqlalchemy.Column(sqlalchemy.String) cmetadata = sqlalchemy.Column(JSON) embeddings = relationship( "EmbeddingStore", back_populates="collection", passive_deletes=True, ) @classmethod def get_by_name(cls, session: Session, name: str) -> Optional["CollectionStore"]: return session.query(cls).filter(cls.name == name).first() # type: ignore @classmethod def get_or_create( cls, session: Session, name: str, cmetadata: Optional[dict] = None, ) -> Tuple["CollectionStore", bool]: """ Get or create a collection. Returns [Collection, bool] where the bool is True if the collection was created. """ created = False collection = cls.get_by_name(session, name) if collection: return collection, created collection = cls(name=name, cmetadata=cmetadata) session.add(collection) session.commit() created = True return collection, created class EmbeddingStore(BaseModel): """Embedding store.""" __tablename__ = "langchain_pg_embedding" collection_id = sqlalchemy.Column( UUID(as_uuid=True), sqlalchemy.ForeignKey( f"{CollectionStore.__tablename__}.uuid", ondelete="CASCADE", ), ) collection = relationship(CollectionStore, back_populates="embeddings") embedding = sqlalchemy.Column(sqlalchemy.ARRAY(sqlalchemy.REAL)) # type: ignore document = sqlalchemy.Column(sqlalchemy.String, nullable=True) cmetadata = sqlalchemy.Column(JSON, nullable=True) # custom_id : any user defined id custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True) class QueryResult: """Result from a query.""" EmbeddingStore: EmbeddingStore distance: float class PGEmbedding(VectorStore): """`Postgres` with the `pg_embedding` extension as a vector store. pg_embedding uses sequential scan by default. but you can create a HNSW index using the create_hnsw_index method. - `connection_string` is a postgres connection string. - `embedding_function` any embedding function implementing `langchain.embeddings.base.Embeddings` interface. - `collection_name` is the name of the collection to use. (default: langchain) - NOTE: This is not the name of the table, but the name of the collection. The tables will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) - `EUCLIDEAN` is the euclidean distance. - `pre_delete_collection` if True, will delete the collection if it exists. (default: False) - Useful for testing. 
""" def __init__( self, connection_string: str, embedding_function: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, collection_metadata: Optional[dict] = None, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None, ) -> None: self.connection_string = connection_string self.embedding_function = embedding_function self.collection_name = collection_name self.collection_metadata = collection_metadata self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.__post_init__() def __post_init__( self, ) -> None: self._conn = self.connect() self.create_hnsw_extension() self.create_tables_if_not_exists() self.create_collection() @property def embeddings(self) -> Embeddings: return self.embedding_function def connect(self) -> sqlalchemy.engine.Connection: engine = sqlalchemy.create_engine(self.connection_string) conn = engine.connect() return conn def create_hnsw_extension(self) -> None: try: with Session(self._conn) as session: statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS embedding") session.execute(statement) session.commit() except Exception as e: self.logger.exception(e) def create_tables_if_not_exists(self) -> None: with self._conn.begin(): Base.metadata.create_all(self._conn) def drop_tables(self) -> None: with self._conn.begin(): Base.metadata.drop_all(self._conn) def create_collection(self) -> None: if self.pre_delete_collection: self.delete_collection() with Session(self._conn) as session: CollectionStore.get_or_create( session, self.collection_name, cmetadata=self.collection_metadata ) def create_hnsw_index( self, max_elements: int = 10000, dims: int = ADA_TOKEN_COUNT, m: int = 8, ef_construction: int = 16, ef_search: int = 16, ) -> None: create_index_query = sqlalchemy.text( "CREATE INDEX IF NOT EXISTS langchain_pg_embedding_idx " "ON langchain_pg_embedding USING hnsw (embedding) " "WITH (" "maxelements = {}, " "dims = {}, " "m = {}, " "efconstruction = {}, " "efsearch = {}" ");".format(max_elements, dims, m, ef_construction, ef_search) ) # Execute the queries try: with Session(self._conn) as session: # Create the HNSW index session.execute(create_index_query) session.commit() print("HNSW extension and index created successfully.") except Exception as e: print(f"Failed to create HNSW extension or index: {e}") def delete_collection(self) -> None: self.logger.debug("Trying to delete collection") with Session(self._conn) as session: collection = self.get_collection(session) if not collection: self.logger.warning("Collection not found") return session.delete(collection) session.commit() def get_collection(self, session: Session) -> Optional["CollectionStore"]: return CollectionStore.get_by_name(session, self.collection_name) @classmethod def _initialize_from_embeddings( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGEmbedding: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, pre_delete_collection=pre_delete_collection, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def 
add_embeddings( self, texts: List[str], embeddings: List[List[float]], metadatas: List[dict], ids: List[str], **kwargs: Any, ) -> None: with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, ) collection.embeddings.append(embedding_store) session.add(embedding_store) session.commit() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, ) collection.embeddings.append(embedding_store) session.add(embedding_store) session.commit() return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: with Session(self._conn) as session: collection = self.get_collection(session) set_enable_seqscan_stmt = sqlalchemy.text("SET enable_seqscan = off") session.execute(set_enable_seqscan_stmt) if not collection: raise ValueError("Collection not found") filter_by = EmbeddingStore.collection_id == collection.uuid if filter is not None: filter_clauses = [] for key, value in filter.items(): IN = "in" if isinstance(value, dict) and IN in map(str.lower, value): value_case_insensitive = { k.lower(): v for k, v in value.items() } filter_by_metadata = EmbeddingStore.cmetadata[key].astext.in_( value_case_insensitive[IN] ) filter_clauses.append(filter_by_metadata) elif isinstance(value, dict) and "substring" in map( str.lower, value ): filter_by_metadata = EmbeddingStore.cmetadata[key].astext.ilike( f"%{value['substring']}%" ) filter_clauses.append(filter_by_metadata) else: filter_by_metadata = EmbeddingStore.cmetadata[ key ].astext == str(value) filter_clauses.append(filter_by_metadata) filter_by = sqlalchemy.and_(filter_by, *filter_clauses) results: List[QueryResult] = ( session.query( EmbeddingStore, func.abs(EmbeddingStore.embedding.op("<->")(embedding)).label( "distance" ), ) # Specify the columns you need here, e.g., EmbeddingStore.embedding .filter(filter_by) .order_by( func.abs(EmbeddingStore.embedding.op("<->")(embedding)).asc() ) # Using PostgreSQL specific operator with the correct column name .limit(k) .all() ) docs = [ ( Document( page_content=result.EmbeddingStore.document, metadata=result.EmbeddingStore.cmetadata, ), 
result.distance if self.embedding_function is not None else 0.0, ) for result in results ] return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls: Type[PGEmbedding], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGEmbedding: embeddings = embedding.embed_documents(list(texts)) return cls._initialize_from_embeddings( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGEmbedding: texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls._initialize_from_embeddings( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_existing_index( cls: Type[PGEmbedding], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGEmbedding: connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, pre_delete_collection=pre_delete_collection, ) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="POSTGRES_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the POSTGRES_CONNECTION_STRING environment variable." ) return connection_string @classmethod def from_documents( cls: Type[PGEmbedding], documents: List[Document], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGEmbedding: texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, **kwargs, )
[]
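A sketch of standing up the `pg_embedding` store above; the connection string, the `OpenAIEmbeddings` model, and the sample texts are assumptions, and the target Postgres instance must have the `pg_embedding` extension available.

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores.pgembedding import PGEmbedding

    CONNECTION_STRING = "postgresql+psycopg2://user:password@localhost:5432/postgres"  # hypothetical

    store = PGEmbedding.from_texts(
        texts=["the quick brown fox", "lorem ipsum dolor sit amet"],
        embedding=OpenAIEmbeddings(),
        collection_name="demo",
        connection_string=CONNECTION_STRING,   # or set POSTGRES_CONNECTION_STRING
        pre_delete_collection=True,            # start from a clean collection
    )
    store.create_hnsw_index(max_elements=10_000, dims=1536, m=8, ef_construction=16, ef_search=16)

    docs_and_scores = store.similarity_search_with_score("fox", k=1)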
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~milvus.py
from __future__ import annotations import logging from typing import Any, Iterable, List, Optional, Tuple, Union from uuid import uuid4 import numpy as np from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.vectorstores.utils import maximal_marginal_relevance logger = logging.getLogger(__name__) DEFAULT_MILVUS_CONNECTION = { "host": "localhost", "port": "19530", "user": "", "password": "", "secure": False, } class Milvus(VectorStore): """`Milvus` vector store. You need to install `pymilvus` and run Milvus. See the following documentation for how to run a Milvus instance: https://milvus.io/docs/install_standalone-docker.md If looking for a hosted Milvus, take a look at this documentation: https://zilliz.com/cloud and make use of the Zilliz vectorstore found in this project. IF USING L2/IP metric, IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA. Args: embedding_function (Embeddings): Function used to embed the text. collection_name (str): Which Milvus collection to use. Defaults to "LangChainCollection". connection_args (Optional[dict[str, any]]): The connection args used for this class comes in the form of a dict. consistency_level (str): The consistency level to use for a collection. Defaults to "Session". index_params (Optional[dict]): Which index params to use. Defaults to HNSW/AUTOINDEX depending on service. search_params (Optional[dict]): Which search params to use. Defaults to default of index. drop_old (Optional[bool]): Whether to drop the current collection. Defaults to False. primary_field (str): Name of the primary key field. Defaults to "pk". text_field (str): Name of the text field. Defaults to "text". vector_field (str): Name of the vector field. Defaults to "vector". The connection args used for this class comes in the form of a dict, here are a few of the options: address (str): The actual address of Milvus instance. Example address: "localhost:19530" uri (str): The uri of Milvus instance. Example uri: "http://randomwebsite:19530", "tcp:foobarsite:19530", "https://ok.s3.south.com:19530". host (str): The host of Milvus instance. Default at "localhost", PyMilvus will fill in the default host if only port is provided. port (str/int): The port of Milvus instance. Default at 19530, PyMilvus will fill in the default port if only host is provided. user (str): Use which user to connect to Milvus instance. If user and password are provided, we will add related header in every RPC call. password (str): Required when user is provided. The password corresponding to the user. secure (bool): Default is false. If set to true, tls will be enabled. client_key_path (str): If use tls two-way authentication, need to write the client.key path. client_pem_path (str): If use tls two-way authentication, need to write the client.pem path. ca_pem_path (str): If use tls two-way authentication, need to write the ca.pem path. server_pem_path (str): If use tls one-way authentication, need to write the server.pem path. server_name (str): If use tls, need to write the common name. Example: .. code-block:: python from langchain.vectorstores import Milvus from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() # Connect to a milvus instance on localhost milvus_store = Milvus( embedding_function = Embeddings, collection_name = "LangChainCollection", drop_old = True, ) Raises: ValueError: If the pymilvus python package is not installed. 
""" def __init__( self, embedding_function: Embeddings, collection_name: str = "LangChainCollection", connection_args: Optional[dict[str, Any]] = None, consistency_level: str = "Session", index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: Optional[bool] = False, *, primary_field: str = "pk", text_field: str = "text", vector_field: str = "vector", ): """Initialize the Milvus vector store.""" try: from pymilvus import Collection, utility except ImportError: raise ValueError( "Could not import pymilvus python package. " "Please install it with `pip install pymilvus`." ) # Default search params when one is not provided. self.default_search_params = { "IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}}, "IVF_SQ8": {"metric_type": "L2", "params": {"nprobe": 10}}, "IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}}, "HNSW": {"metric_type": "L2", "params": {"ef": 10}}, "RHNSW_FLAT": {"metric_type": "L2", "params": {"ef": 10}}, "RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}}, "RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}}, "IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}}, "ANNOY": {"metric_type": "L2", "params": {"search_k": 10}}, "AUTOINDEX": {"metric_type": "L2", "params": {}}, } self.embedding_func = embedding_function self.collection_name = collection_name self.index_params = index_params self.search_params = search_params self.consistency_level = consistency_level # In order for a collection to be compatible, pk needs to be auto'id and int self._primary_field = primary_field # In order for compatibility, the text field will need to be called "text" self._text_field = text_field # In order for compatibility, the vector field needs to be called "vector" self._vector_field = vector_field self.fields: list[str] = [] # Create the connection to the server if connection_args is None: connection_args = DEFAULT_MILVUS_CONNECTION self.alias = self._create_connection_alias(connection_args) self.col: Optional[Collection] = None # Grab the existing collection if it exists if utility.has_collection(self.collection_name, using=self.alias): self.col = Collection( self.collection_name, using=self.alias, ) # If need to drop old, drop it if drop_old and isinstance(self.col, Collection): self.col.drop() self.col = None # Initialize the vector store self._init() @property def embeddings(self) -> Embeddings: return self.embedding_func def _create_connection_alias(self, connection_args: dict) -> str: """Create the connection to the Milvus server.""" from pymilvus import MilvusException, connections # Grab the connection arguments that are used for checking existing connection host: str = connection_args.get("host", None) port: Union[str, int] = connection_args.get("port", None) address: str = connection_args.get("address", None) uri: str = connection_args.get("uri", None) user = connection_args.get("user", None) # Order of use is host/port, uri, address if host is not None and port is not None: given_address = str(host) + ":" + str(port) elif uri is not None: given_address = uri.split("https://")[1] elif address is not None: given_address = address else: given_address = None logger.debug("Missing standard address type for reuse attempt") # User defaults to empty string when getting connection info if user is not None: tmp_user = user else: tmp_user = "" # If a valid address was given, then check if a connection exists if given_address is not None: for con in connections.list_connections(): addr = 
connections.get_connection_addr(con[0]) if ( con[1] and ("address" in addr) and (addr["address"] == given_address) and ("user" in addr) and (addr["user"] == tmp_user) ): logger.debug("Using previous connection: %s", con[0]) return con[0] # Generate a new connection if one doesn't exist alias = uuid4().hex try: connections.connect(alias=alias, **connection_args) logger.debug("Created new connection using: %s", alias) return alias except MilvusException as e: logger.error("Failed to create new connection using: %s", alias) raise e def _init( self, embeddings: Optional[list] = None, metadatas: Optional[list[dict]] = None ) -> None: if embeddings is not None: self._create_collection(embeddings, metadatas) self._extract_fields() self._create_index() self._create_search_params() self._load() def _create_collection( self, embeddings: list, metadatas: Optional[list[dict]] = None ) -> None: from pymilvus import ( Collection, CollectionSchema, DataType, FieldSchema, MilvusException, ) from pymilvus.orm.types import infer_dtype_bydata # Determine embedding dim dim = len(embeddings[0]) fields = [] # Determine metadata schema if metadatas: # Create FieldSchema for each entry in metadata. for key, value in metadatas[0].items(): # Infer the corresponding datatype of the metadata dtype = infer_dtype_bydata(value) # Datatype isn't compatible if dtype == DataType.UNKNOWN or dtype == DataType.NONE: logger.error( "Failure to create collection, unrecognized dtype for key: %s", key, ) raise ValueError(f"Unrecognized datatype for {key}.") # Dataype is a string/varchar equivalent elif dtype == DataType.VARCHAR: fields.append(FieldSchema(key, DataType.VARCHAR, max_length=65_535)) else: fields.append(FieldSchema(key, dtype)) # Create the text field fields.append( FieldSchema(self._text_field, DataType.VARCHAR, max_length=65_535) ) # Create the primary key field fields.append( FieldSchema( self._primary_field, DataType.INT64, is_primary=True, auto_id=True ) ) # Create the vector field, supports binary or float vectors fields.append( FieldSchema(self._vector_field, infer_dtype_bydata(embeddings[0]), dim=dim) ) # Create the schema for the collection schema = CollectionSchema(fields) # Create the collection try: self.col = Collection( name=self.collection_name, schema=schema, consistency_level=self.consistency_level, using=self.alias, ) except MilvusException as e: logger.error( "Failed to create collection: %s error: %s", self.collection_name, e ) raise e def _extract_fields(self) -> None: """Grab the existing fields from the Collection""" from pymilvus import Collection if isinstance(self.col, Collection): schema = self.col.schema for x in schema.fields: self.fields.append(x.name) # Since primary field is auto-id, no need to track it self.fields.remove(self._primary_field) def _get_index(self) -> Optional[dict[str, Any]]: """Return the vector index information if it exists""" from pymilvus import Collection if isinstance(self.col, Collection): for x in self.col.indexes: if x.field_name == self._vector_field: return x.to_dict() return None def _create_index(self) -> None: """Create a index on the collection""" from pymilvus import Collection, MilvusException if isinstance(self.col, Collection) and self._get_index() is None: try: # If no index params, use a default HNSW based one if self.index_params is None: self.index_params = { "metric_type": "L2", "index_type": "HNSW", "params": {"M": 8, "efConstruction": 64}, } try: self.col.create_index( self._vector_field, index_params=self.index_params, using=self.alias, ) # If 
default did not work, most likely on Zilliz Cloud except MilvusException: # Use AUTOINDEX based index self.index_params = { "metric_type": "L2", "index_type": "AUTOINDEX", "params": {}, } self.col.create_index( self._vector_field, index_params=self.index_params, using=self.alias, ) logger.debug( "Successfully created an index on collection: %s", self.collection_name, ) except MilvusException as e: logger.error( "Failed to create an index on collection: %s", self.collection_name ) raise e def _create_search_params(self) -> None: """Generate search params based on the current index type""" from pymilvus import Collection if isinstance(self.col, Collection) and self.search_params is None: index = self._get_index() if index is not None: index_type: str = index["index_param"]["index_type"] metric_type: str = index["index_param"]["metric_type"] self.search_params = self.default_search_params[index_type] self.search_params["metric_type"] = metric_type def _load(self) -> None: """Load the collection if available.""" from pymilvus import Collection if isinstance(self.col, Collection) and self._get_index() is not None: self.col.load() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, timeout: Optional[int] = None, batch_size: int = 1000, **kwargs: Any, ) -> List[str]: """Insert text data into Milvus. Inserting data when the collection has not be made yet will result in creating a new Collection. The data of the first entity decides the schema of the new collection, the dim is extracted from the first embedding and the columns are decided by the first metadata dict. Metada keys will need to be present for all inserted values. At the moment there is no None equivalent in Milvus. Args: texts (Iterable[str]): The texts to embed, it is assumed that they all fit in memory. metadatas (Optional[List[dict]]): Metadata dicts attached to each of the texts. Defaults to None. timeout (Optional[int]): Timeout for each batch insert. Defaults to None. batch_size (int, optional): Batch size to use for insertion. Defaults to 1000. Raises: MilvusException: Failure to add texts Returns: List[str]: The resulting keys for each inserted element. """ from pymilvus import Collection, MilvusException texts = list(texts) try: embeddings = self.embedding_func.embed_documents(texts) except NotImplementedError: embeddings = [self.embedding_func.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug("Nothing to insert, skipping.") return [] # If the collection hasn't been initialized yet, perform all steps to do so if not isinstance(self.col, Collection): self._init(embeddings, metadatas) # Dict to hold all insert columns insert_dict: dict[str, list] = { self._text_field: texts, self._vector_field: embeddings, } # Collect the metadata into the insert dict. if metadatas is not None: for d in metadatas: for key, value in d.items(): if key in self.fields: insert_dict.setdefault(key, []).append(value) # Total insert count vectors: list = insert_dict[self._vector_field] total_count = len(vectors) pks: list[str] = [] assert isinstance(self.col, Collection) for i in range(0, total_count, batch_size): # Grab end index end = min(i + batch_size, total_count) # Convert dict to list of lists batch for insertion insert_list = [insert_dict[x][i:end] for x in self.fields] # Insert into the collection. 
try: res: Collection res = self.col.insert(insert_list, timeout=timeout, **kwargs) pks.extend(res.primary_keys) except MilvusException as e: logger.error( "Failed to insert batch starting at entity: %s/%s", i, total_count ) raise e return pks def similarity_search( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search against the query string. Args: query (str): The text to search. k (int, optional): How many results to return. Defaults to 4. param (dict, optional): The search params for the index type. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Document]: Document results for search. """ if self.col is None: logger.debug("No existing collection to search.") return [] res = self.similarity_search_with_score( query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return [doc for doc, _ in res] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search against the query string. Args: embedding (List[float]): The embedding vector to search. k (int, optional): How many results to return. Defaults to 4. param (dict, optional): The search params for the index type. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Document]: Document results for search. """ if self.col is None: logger.debug("No existing collection to search.") return [] res = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return [doc for doc, _ in res] def similarity_search_with_score( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Args: query (str): The text being searched. k (int, optional): The amount of results to return. Defaults to 4. param (dict): The search params for the specified index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[float], List[Tuple[Document, any, any]]: """ if self.col is None: logger.debug("No existing collection to search.") return [] # Embed the query text. 
embedding = self.embedding_func.embed_query(query) res = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return res def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Args: embedding (List[float]): The embedding vector being searched. k (int, optional): The amount of results to return. Defaults to 4. param (dict): The search params for the specified index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Tuple[Document, float]]: Result doc and score. """ if self.col is None: logger.debug("No existing collection to search.") return [] if param is None: param = self.search_params # Determine result metadata fields. output_fields = self.fields[:] output_fields.remove(self._vector_field) # Perform the search. res = self.col.search( data=[embedding], anns_field=self._vector_field, param=param, limit=k, expr=expr, output_fields=output_fields, timeout=timeout, **kwargs, ) # Organize results. ret = [] for result in res[0]: meta = {x: result.entity.get(x) for x in output_fields} doc = Document(page_content=meta.pop(self._text_field), metadata=meta) pair = (doc, result.score) ret.append(pair) return ret def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """Perform a search and return results that are reordered by MMR. Args: query (str): The text being searched. k (int, optional): How many results to give. Defaults to 4. fetch_k (int, optional): Total results to select k from. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5 param (dict, optional): The search params for the specified index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Document]: Document results for search. """ if self.col is None: logger.debug("No existing collection to search.") return [] embedding = self.embedding_func.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding=embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, param=param, expr=expr, timeout=timeout, **kwargs, ) def max_marginal_relevance_search_by_vector( self, embedding: list[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """Perform a search and return results that are reordered by MMR. Args: embedding (str): The embedding vector being searched. k (int, optional): How many results to give. Defaults to 4. 
fetch_k (int, optional): Total results to select k from. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5 param (dict, optional): The search params for the specified index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Document]: Document results for search. """ if self.col is None: logger.debug("No existing collection to search.") return [] if param is None: param = self.search_params # Determine result metadata fields. output_fields = self.fields[:] output_fields.remove(self._vector_field) # Perform the search. res = self.col.search( data=[embedding], anns_field=self._vector_field, param=param, limit=fetch_k, expr=expr, output_fields=output_fields, timeout=timeout, **kwargs, ) # Organize results. ids = [] documents = [] scores = [] for result in res[0]: meta = {x: result.entity.get(x) for x in output_fields} doc = Document(page_content=meta.pop(self._text_field), metadata=meta) documents.append(doc) scores.append(result.score) ids.append(result.id) vectors = self.col.query( expr=f"{self._primary_field} in {ids}", output_fields=[self._primary_field, self._vector_field], timeout=timeout, ) # Reorganize the results from query to match search order. vectors = {x[self._primary_field]: x[self._vector_field] for x in vectors} ordered_result_embeddings = [vectors[x] for x in ids] # Get the new order of results. new_ordering = maximal_marginal_relevance( np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult ) # Reorder the values and return. ret = [] for x in new_ordering: # Function can return -1 index if x == -1: break else: ret.append(documents[x]) return ret @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = "LangChainCollection", connection_args: dict[str, Any] = DEFAULT_MILVUS_CONNECTION, consistency_level: str = "Session", index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any, ) -> Milvus: """Create a Milvus collection, indexes it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collection_name (str, optional): Collection name to use. Defaults to "LangChainCollection". connection_args (dict[str, Any], optional): Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION. consistency_level (str, optional): Which consistency level to use. Defaults to "Session". index_params (Optional[dict], optional): Which index_params to use. Defaults to None. search_params (Optional[dict], optional): Which search params to use. Defaults to None. drop_old (Optional[bool], optional): Whether to drop the collection with that name if it exists. Defaults to False. Returns: Milvus: Milvus Vector Store """ vector_db = cls( embedding_function=embedding, collection_name=collection_name, connection_args=connection_args, consistency_level=consistency_level, index_params=index_params, search_params=search_params, drop_old=drop_old, **kwargs, ) vector_db.add_texts(texts=texts, metadatas=metadatas) return vector_db
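# A minimal usage sketch, not part of the original module: it assumes a reachable
# Milvus server at 127.0.0.1:19530 and an embeddings implementation such as
# OpenAIEmbeddings with credentials already configured; the texts and query are
# placeholders.
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings

    store = Milvus.from_texts(
        texts=["harrison worked at kensho", "bears like honey"],
        embedding=OpenAIEmbeddings(),
        connection_args={"host": "127.0.0.1", "port": "19530"},
        drop_old=True,
    )
    docs = store.similarity_search("Where did Harrison work?", k=1)
    print(docs[0].page_content)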
[]
2024-01-10
axgpt/langchain
libs~core~langchain_core~prompts~pipeline.py
from typing import Any, Dict, List, Tuple from langchain_core.prompts.chat import BaseChatPromptTemplate from langchain_core.pydantic_v1 import root_validator from langchain_core.schema import BasePromptTemplate, PromptValue def _get_inputs(inputs: dict, input_variables: List[str]) -> dict: return {k: inputs[k] for k in input_variables} class PipelinePromptTemplate(BasePromptTemplate): """A prompt template for composing multiple prompt templates together. This can be useful when you want to reuse parts of prompts. A PipelinePrompt consists of two main parts: - final_prompt: This is the final prompt that is returned - pipeline_prompts: This is a list of tuples, consisting of a string (`name`) and a Prompt Template. Each PromptTemplate will be formatted and then passed to future prompt templates as a variable with the same name as `name` """ final_prompt: BasePromptTemplate """The final prompt that is returned.""" pipeline_prompts: List[Tuple[str, BasePromptTemplate]] """A list of tuples, consisting of a string (`name`) and a Prompt Template.""" @root_validator(pre=True) def get_input_variables(cls, values: Dict) -> Dict: """Get input variables.""" created_variables = set() all_variables = set() for k, prompt in values["pipeline_prompts"]: created_variables.add(k) all_variables.update(prompt.input_variables) values["input_variables"] = list(all_variables.difference(created_variables)) return values def format_prompt(self, **kwargs: Any) -> PromptValue: for k, prompt in self.pipeline_prompts: _inputs = _get_inputs(kwargs, prompt.input_variables) if isinstance(prompt, BaseChatPromptTemplate): kwargs[k] = prompt.format_messages(**_inputs) else: kwargs[k] = prompt.format(**_inputs) _inputs = _get_inputs(kwargs, self.final_prompt.input_variables) return self.final_prompt.format_prompt(**_inputs) def format(self, **kwargs: Any) -> str: return self.format_prompt(**kwargs).to_string() @property def _prompt_type(self) -> str: raise ValueError
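# A minimal usage sketch, not part of the original module: the template strings and
# variable names below are illustrative assumptions.
if __name__ == "__main__":
    from langchain_core.prompts.prompt import PromptTemplate

    persona_prompt = PromptTemplate.from_template("You are impersonating {person}.")
    final_prompt = PromptTemplate.from_template("{persona}\nQuestion: {question}")

    pipeline = PipelinePromptTemplate(
        final_prompt=final_prompt,
        pipeline_prompts=[("persona", persona_prompt)],
    )
    # Only `person` is exposed as a pipeline input variable; `question` is consumed
    # directly by the final prompt at format time.
    print(pipeline.format(person="Ada Lovelace", question="What do you value most?"))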
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~agents~agent_toolkits~powerbi~toolkit.py
"""Toolkit for interacting with a Power BI dataset.""" from typing import List, Optional, Union from langchain_core.prompts import PromptTemplate from langchain_core.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain_core.pydantic_v1 import Field from langchain_core.schema.language_model import BaseLanguageModel from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.chat_models.base import BaseChatModel from langchain.tools import BaseTool from langchain.tools.powerbi.prompt import ( QUESTION_TO_QUERY_BASE, SINGLE_QUESTION_TO_QUERY, USER_INPUT, ) from langchain.tools.powerbi.tool import ( InfoPowerBITool, ListPowerBITool, QueryPowerBITool, ) from langchain.utilities.powerbi import PowerBIDataset class PowerBIToolkit(BaseToolkit): """Toolkit for interacting with Power BI dataset. *Security Note*: This toolkit interacts with an external service. Control access to who can use this toolkit. Make sure that the capabilities given by this toolkit to the calling code are appropriately scoped to the application. See https://python.langchain.com/docs/security for more information. """ powerbi: PowerBIDataset = Field(exclude=True) llm: Union[BaseLanguageModel, BaseChatModel] = Field(exclude=True) examples: Optional[str] = None max_iterations: int = 5 callback_manager: Optional[BaseCallbackManager] = None output_token_limit: Optional[int] = None tiktoken_model_name: Optional[str] = None class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ QueryPowerBITool( llm_chain=self._get_chain(), powerbi=self.powerbi, examples=self.examples, max_iterations=self.max_iterations, output_token_limit=self.output_token_limit, tiktoken_model_name=self.tiktoken_model_name, ), InfoPowerBITool(powerbi=self.powerbi), ListPowerBITool(powerbi=self.powerbi), ] def _get_chain(self) -> LLMChain: """Construct the chain based on the callback manager and model type.""" if isinstance(self.llm, BaseLanguageModel): return LLMChain( llm=self.llm, callback_manager=self.callback_manager if self.callback_manager else None, prompt=PromptTemplate( template=SINGLE_QUESTION_TO_QUERY, input_variables=["tool_input", "tables", "schemas", "examples"], ), ) system_prompt = SystemMessagePromptTemplate( prompt=PromptTemplate( template=QUESTION_TO_QUERY_BASE, input_variables=["tables", "schemas", "examples"], ) ) human_prompt = HumanMessagePromptTemplate( prompt=PromptTemplate( template=USER_INPUT, input_variables=["tool_input"], ) ) return LLMChain( llm=self.llm, callback_manager=self.callback_manager if self.callback_manager else None, prompt=ChatPromptTemplate.from_messages([system_prompt, human_prompt]), )
[ "[PLACEHOLDER, PLACEHOLDER]", "tool_input" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~retrievers~knn.py
"""KNN Retriever. Largely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb""" from __future__ import annotations import concurrent.futures from typing import Any, List, Optional import numpy as np from langchain_core.schema import BaseRetriever, Document from langchain_core.schema.embeddings import Embeddings from langchain.callbacks.manager import CallbackManagerForRetrieverRun def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray: """ Create an index of embeddings for a list of contexts. Args: contexts: List of contexts to embed. embeddings: Embeddings model to use. Returns: Index of embeddings. """ with concurrent.futures.ThreadPoolExecutor() as executor: return np.array(list(executor.map(embeddings.embed_query, contexts))) class KNNRetriever(BaseRetriever): """`KNN` retriever.""" embeddings: Embeddings """Embeddings model to use.""" index: Any """Index of embeddings.""" texts: List[str] """List of texts to index.""" k: int = 4 """Number of results to return.""" relevancy_threshold: Optional[float] = None """Threshold for relevancy.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @classmethod def from_texts( cls, texts: List[str], embeddings: Embeddings, **kwargs: Any ) -> KNNRetriever: index = create_index(texts, embeddings) return cls(embeddings=embeddings, index=index, texts=texts, **kwargs) def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: query_embeds = np.array(self.embeddings.embed_query(query)) # calc L2 norm index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True)) query_embeds = query_embeds / np.sqrt((query_embeds**2).sum()) similarities = index_embeds.dot(query_embeds) sorted_ix = np.argsort(-similarities) denominator = np.max(similarities) - np.min(similarities) + 1e-6 normalized_similarities = (similarities - np.min(similarities)) / denominator top_k_results = [ Document(page_content=self.texts[row]) for row in sorted_ix[0 : self.k] if ( self.relevancy_threshold is None or normalized_similarities[row] >= self.relevancy_threshold ) ] return top_k_results
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~schema~callbacks~tracers~evaluation.py
from langchain_core.callbacks.tracers.evaluation import ( EvaluatorCallbackHandler, wait_for_all_evaluators, ) __all__ = ["wait_for_all_evaluators", "EvaluatorCallbackHandler"]
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~graph_qa~sparql.py
""" Question answering over an RDF or OWL graph using SPARQL. """ from __future__ import annotations from typing import Any, Dict, List, Optional from langchain_core.prompts.base import BasePromptTemplate from langchain_core.pydantic_v1 import Field from langchain_core.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ( SPARQL_GENERATION_SELECT_PROMPT, SPARQL_GENERATION_UPDATE_PROMPT, SPARQL_INTENT_PROMPT, SPARQL_QA_PROMPT, ) from langchain.chains.llm import LLMChain from langchain.graphs.rdf_graph import RdfGraph class GraphSparqlQAChain(Chain): """Question-answering against an RDF or OWL graph by generating SPARQL statements. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ graph: RdfGraph = Field(exclude=True) sparql_generation_select_chain: LLMChain sparql_generation_update_chain: LLMChain sparql_intent_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: _output_keys = [self.output_key] return _output_keys @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = SPARQL_QA_PROMPT, sparql_select_prompt: BasePromptTemplate = SPARQL_GENERATION_SELECT_PROMPT, sparql_update_prompt: BasePromptTemplate = SPARQL_GENERATION_UPDATE_PROMPT, sparql_intent_prompt: BasePromptTemplate = SPARQL_INTENT_PROMPT, **kwargs: Any, ) -> GraphSparqlQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) sparql_generation_select_chain = LLMChain(llm=llm, prompt=sparql_select_prompt) sparql_generation_update_chain = LLMChain(llm=llm, prompt=sparql_update_prompt) sparql_intent_chain = LLMChain(llm=llm, prompt=sparql_intent_prompt) return cls( qa_chain=qa_chain, sparql_generation_select_chain=sparql_generation_select_chain, sparql_generation_update_chain=sparql_generation_update_chain, sparql_intent_chain=sparql_intent_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """ Generate SPARQL query, use it to retrieve a response from the gdb and answer the question. 
""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() prompt = inputs[self.input_key] _intent = self.sparql_intent_chain.run({"prompt": prompt}, callbacks=callbacks) intent = _intent.strip() if "SELECT" in intent and "UPDATE" not in intent: sparql_generation_chain = self.sparql_generation_select_chain intent = "SELECT" elif "UPDATE" in intent and "SELECT" not in intent: sparql_generation_chain = self.sparql_generation_update_chain intent = "UPDATE" else: raise ValueError( "I am sorry, but this prompt seems to fit none of the currently " "supported SPARQL query types, i.e., SELECT and UPDATE." ) _run_manager.on_text("Identified intent:", end="\n", verbose=self.verbose) _run_manager.on_text(intent, color="green", end="\n", verbose=self.verbose) generated_sparql = sparql_generation_chain.run( {"prompt": prompt, "schema": self.graph.get_schema}, callbacks=callbacks ) _run_manager.on_text("Generated SPARQL:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_sparql, color="green", end="\n", verbose=self.verbose ) if intent == "SELECT": context = self.graph.query(generated_sparql) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) result = self.qa_chain( {"prompt": prompt, "context": context}, callbacks=callbacks, ) res = result[self.qa_chain.output_key] elif intent == "UPDATE": self.graph.update(generated_sparql) res = "Successfully inserted triples into the graph." else: raise ValueError("Unsupported SPARQL query type.") return {self.output_key: res}
[]
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~agents~format_scratchpad~test_openai_functions.py
from langchain_core.schema.agent import AgentActionMessageLog from langchain_core.schema.messages import AIMessage, FunctionMessage from langchain.agents.format_scratchpad.openai_functions import ( format_to_openai_function_messages, ) def test_calls_convert_agent_action_to_messages() -> None: additional_kwargs1 = { "function_call": { "name": "tool1", "arguments": "input1", } } message1 = AIMessage(content="", additional_kwargs=additional_kwargs1) action1 = AgentActionMessageLog( tool="tool1", tool_input="input1", log="log1", message_log=[message1] ) additional_kwargs2 = { "function_call": { "name": "tool2", "arguments": "input2", } } message2 = AIMessage(content="", additional_kwargs=additional_kwargs2) action2 = AgentActionMessageLog( tool="tool2", tool_input="input2", log="log2", message_log=[message2] ) additional_kwargs3 = { "function_call": { "name": "tool3", "arguments": "input3", } } message3 = AIMessage(content="", additional_kwargs=additional_kwargs3) action3 = AgentActionMessageLog( tool="tool3", tool_input="input3", log="log3", message_log=[message3] ) intermediate_steps = [ (action1, "observation1"), (action2, "observation2"), (action3, "observation3"), ] expected_messages = [ message1, FunctionMessage(name="tool1", content="observation1"), message2, FunctionMessage(name="tool2", content="observation2"), message3, FunctionMessage(name="tool3", content="observation3"), ] output = format_to_openai_function_messages(intermediate_steps) assert output == expected_messages def test_handles_empty_input_list() -> None: output = format_to_openai_function_messages([]) assert output == []
[ "observation3", "observation2", "observation1" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~agents~conversational_chat~output_parser.py
from __future__ import annotations from typing import Union from langchain_core.schema import AgentAction, AgentFinish, OutputParserException from langchain.agents import AgentOutputParser from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS from langchain.output_parsers.json import parse_json_markdown # Define a class that parses output for conversational agents class ConvoOutputParser(AgentOutputParser): """Output parser for the conversational agent.""" def get_format_instructions(self) -> str: """Returns formatting instructions for the given output parser.""" return FORMAT_INSTRUCTIONS def parse(self, text: str) -> Union[AgentAction, AgentFinish]: """Attempts to parse the given text into an AgentAction or AgentFinish. Raises: OutputParserException if parsing fails. """ try: # Attempt to parse the text into a structured format (assumed to be JSON # stored as markdown) response = parse_json_markdown(text) # If the response contains an 'action' and 'action_input' if "action" in response and "action_input" in response: action, action_input = response["action"], response["action_input"] # If the action indicates a final answer, return an AgentFinish if action == "Final Answer": return AgentFinish({"output": action_input}, text) else: # Otherwise, return an AgentAction with the specified action and # input return AgentAction(action, action_input, text) else: # If the necessary keys aren't present in the response, raise an # exception raise OutputParserException( f"Missing 'action' or 'action_input' in LLM output: {text}" ) except Exception as e: # If any other exception is raised during parsing, also raise an # OutputParserException raise OutputParserException(f"Could not parse LLM output: {text}") from e @property def _type(self) -> str: return "conversational_chat"
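# A minimal parsing sketch, not part of the original module: the markdown JSON blob
# below mimics the format the conversational chat agent is instructed to produce.
if __name__ == "__main__":
    parser = ConvoOutputParser()
    llm_output = """```json
{
    "action": "Final Answer",
    "action_input": "Paris is the capital of France."
}
```"""
    result = parser.parse(llm_output)
    assert isinstance(result, AgentFinish)
    print(result.return_values["output"])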
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~deeplake.py
from __future__ import annotations import logging from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np try: import deeplake from deeplake import VectorStore as DeepLakeVectorStore from deeplake.core.fast_forwarding import version_compare _DEEPLAKE_INSTALLED = True except ImportError: _DEEPLAKE_INSTALLED = False from langchain_core.schema.embeddings import Embeddings from langchain_core.schema.vectorstore import VectorStore from langchain.docstore.document import Document from langchain.vectorstores.utils import maximal_marginal_relevance logger = logging.getLogger(__name__) class DeepLake(VectorStore): """`Activeloop Deep Lake` vector store. We integrated deeplake's similarity search and filtering for fast prototyping. Now, it supports Tensor Query Language (TQL) for production use cases over billion rows. Why Deep Lake? - Not only stores embeddings, but also the original data with version control. - Serverless, doesn't require another service and can be used with major cloud providers (S3, GCS, etc.) - More than just a multi-modal vector store. You can use the dataset to fine-tune your own LLM models. To use, you should have the ``deeplake`` python package installed. Example: .. code-block:: python from langchain.vectorstores import DeepLake from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = DeepLake("langchain_store", embeddings.embed_query) """ _LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "./deeplake/" def __init__( self, dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH, token: Optional[str] = None, embedding: Optional[Embeddings] = None, embedding_function: Optional[Embeddings] = None, read_only: bool = False, ingestion_batch_size: int = 1000, num_workers: int = 0, verbose: bool = True, exec_option: Optional[str] = None, runtime: Optional[Dict] = None, index_params: Optional[Dict[str, Union[int, str]]] = None, **kwargs: Any, ) -> None: """Creates an empty DeepLakeVectorStore or loads an existing one. The DeepLakeVectorStore is located at the specified ``path``. Examples: >>> # Create a vector store with default tensors >>> deeplake_vectorstore = DeepLake( ... path = <path_for_storing_Data>, ... ) >>> >>> # Create a vector store in the Deep Lake Managed Tensor Database >>> data = DeepLake( ... path = "hub://org_id/dataset_name", ... runtime = {"tensor_db": True}, ... ) Args: dataset_path (str): Path to existing dataset or where to create a new one. Defaults to _LANGCHAIN_DEFAULT_DEEPLAKE_PATH. token (str, optional): Activeloop token, for fetching credentials to the dataset at path if it is a Deep Lake dataset. Tokens are normally autogenerated. Optional. embedding (Embeddings, optional): Function to convert either documents or query. Optional. embedding_function (Embeddings, optional): Function to convert either documents or query. Optional. Deprecated: keeping this parameter for backwards compatibility. read_only (bool): Open dataset in read-only mode. Default is False. ingestion_batch_size (int): During data ingestion, data is divided into batches. Batch size is the size of each batch. Default is 1000. num_workers (int): Number of workers to use during data ingestion. Default is 0. verbose (bool): Print dataset summary after each operation. Default is True. exec_option (str, optional): DeepLakeVectorStore supports 3 ways to perform searching - "python", "compute_engine", "tensor_db" and auto. Default is None. 
- ``auto``- Selects the best execution method based on the storage location of the Vector Store. It is the default option. - ``python`` - Pure-python implementation that runs on the client. WARNING: using this with big datasets can lead to memory issues. Data can be stored anywhere. - ``compute_engine`` - C++ implementation of the Deep Lake Compute Engine that runs on the client. Can be used for any data stored in or connected to Deep Lake. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database that is responsible for storage and query execution. Only for data stored in the Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. runtime (Dict, optional): Parameters for creating the Vector Store in Deep Lake's Managed Tensor Database. Not applicable when loading an existing Vector Store. To create a Vector Store in the Managed Tensor Database, set `runtime = {"tensor_db": True}`. index_params (Optional[Dict[str, Union[int, str]]], optional): Dictionary containing information about vector index that will be created. Defaults to None, which will utilize ``DEFAULT_VECTORSTORE_INDEX_PARAMS`` from ``deeplake.constants``. The specified key-values override the default ones. - threshold: The threshold for the dataset size above which an index will be created for the embedding tensor. When the threshold value is set to -1, index creation is turned off. Defaults to -1, which turns off the index. - distance_metric: This key specifies the method of calculating the distance between vectors when creating the vector database (VDB) index. It can either be a string that corresponds to a member of the DistanceType enumeration, or the string value itself. - If no value is provided, it defaults to "L2". - "L2" corresponds to DistanceType.L2_NORM. - "COS" corresponds to DistanceType.COSINE_SIMILARITY. - additional_params: Additional parameters for fine-tuning the index. **kwargs: Other optional keyword arguments. Raises: ValueError: If some condition is not met. """ self.ingestion_batch_size = ingestion_batch_size self.num_workers = num_workers self.verbose = verbose if _DEEPLAKE_INSTALLED is False: raise ImportError( "Could not import deeplake python package. " "Please install it with `pip install deeplake[enterprise]`." ) if ( runtime == {"tensor_db": True} and version_compare(deeplake.__version__, "3.6.7") == -1 ): raise ImportError( "To use tensor_db option you need to update deeplake to `3.6.7` or " "higher. " f"Currently installed deeplake version is {deeplake.__version__}. " ) self.dataset_path = dataset_path if embedding_function: logger.warning( "Using embedding function is deprecated and will be removed " "in the future. Please use embedding instead." ) self.vectorstore = DeepLakeVectorStore( path=self.dataset_path, embedding_function=embedding_function or embedding, read_only=read_only, token=token, exec_option=exec_option, verbose=verbose, runtime=runtime, index_params=index_params, **kwargs, ) self._embedding_function = embedding_function or embedding self._id_tensor_name = "ids" if "ids" in self.vectorstore.tensors() else "id" @property def embeddings(self) -> Optional[Embeddings]: return self._embedding_function def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Examples: >>> ids = deeplake_vectorstore.add_texts( ... texts = <list_of_texts>, ... 
metadatas = <list_of_metadata_jsons>, ... ids = <list_of_ids>, ... ) Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. embedding_function (Optional[Embeddings], optional): Embedding function to use to convert the text into embeddings. **kwargs (Any): Any additional keyword arguments passed is not supported by this method. Returns: List[str]: List of IDs of the added texts. """ if kwargs: unsupported_items = "`, `".join(set(kwargs.keys())) raise TypeError( f"`{unsupported_items}` is/are not a valid argument to add_text method" ) kwargs = {} if ids: if self._id_tensor_name == "ids": # for backwards compatibility kwargs["ids"] = ids else: kwargs["id"] = ids if metadatas is None: metadatas = [{}] * len(list(texts)) if not isinstance(texts, list): texts = list(texts) if texts is None: raise ValueError("`texts` parameter shouldn't be None.") elif len(texts) == 0: raise ValueError("`texts` parameter shouldn't be empty.") return self.vectorstore.add( text=texts, metadata=metadatas, embedding_data=texts, embedding_tensor="embedding", embedding_function=self._embedding_function.embed_documents, # type: ignore return_ids=True, **kwargs, ) def _search_tql( self, tql: Optional[str], exec_option: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Function for performing tql_search. Args: tql (str): TQL Query string for direct evaluation. Available only for `compute_engine` and `tensor_db`. exec_option (str, optional): Supports 3 ways to search. Could be "python", "compute_engine" or "tensor_db". Default is "python". - ``python`` - Pure-python implementation for the client. WARNING: not recommended for big datasets due to potential memory issues. - ``compute_engine`` - C++ implementation of Deep Lake Compute Engine for the client. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database for storage and query execution. Only for data in Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. return_score (bool): Return score with document. Default is False. Returns: Tuple[List[Document], List[Tuple[Document, float]]] - A tuple of two lists. The first list contains Documents, and the second list contains tuples of Document and float score. Raises: ValueError: If return_score is True but some condition is not met. """ result = self.vectorstore.search( query=tql, exec_option=exec_option, ) metadatas = result["metadata"] texts = result["text"] docs = [ Document( page_content=text, metadata=metadata, ) for text, metadata in zip(texts, metadatas) ] if kwargs: unsupported_argument = next(iter(kwargs)) if kwargs[unsupported_argument] is not False: raise ValueError( f"specifying {unsupported_argument} is " "not supported with tql search." ) return docs def _search( self, query: Optional[str] = None, embedding: Optional[Union[List[float], np.ndarray]] = None, embedding_function: Optional[Callable] = None, k: int = 4, distance_metric: Optional[str] = None, use_maximal_marginal_relevance: bool = False, fetch_k: Optional[int] = 20, filter: Optional[Union[Dict, Callable]] = None, return_score: bool = False, exec_option: Optional[str] = None, deep_memory: bool = False, **kwargs: Any, ) -> Any[List[Document], List[Tuple[Document, float]]]: """ Return docs similar to query. Args: query (str, optional): Text to look up similar docs. embedding (Union[List[float], np.ndarray], optional): Query's embedding. 
embedding_function (Callable, optional): Function to convert `query` into embedding. k (int): Number of Documents to return. distance_metric (Optional[str], optional): `L2` for Euclidean, `L1` for Nuclear, `max` for L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. filter (Union[Dict, Callable], optional): Additional filter prior to the embedding search. - ``Dict`` - Key-value search on tensors of htype json, on an AND basis (a sample must satisfy all key-value filters to be True) Dict = {"tensor_name_1": {"key": value}, "tensor_name_2": {"key": value}} - ``Function`` - Any function compatible with `deeplake.filter`. use_maximal_marginal_relevance (bool): Use maximal marginal relevance. fetch_k (int): Number of Documents for MMR algorithm. return_score (bool): Return the score. exec_option (str, optional): Supports 3 ways to perform searching. Could be "python", "compute_engine" or "tensor_db". - ``python`` - Pure-python implementation for the client. WARNING: not recommended for big datasets. - ``compute_engine`` - C++ implementation of Deep Lake Compute Engine for the client. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database for storage and query execution. Only for data in Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. **kwargs: Additional keyword arguments. Returns: List of Documents by the specified distance metric, if return_score True, return a tuple of (Document, score) Raises: ValueError: if both `embedding` and `embedding_function` are not specified. """ if kwargs.get("tql"): return self._search_tql( tql=kwargs["tql"], exec_option=exec_option, return_score=return_score, embedding=embedding, embedding_function=embedding_function, distance_metric=distance_metric, use_maximal_marginal_relevance=use_maximal_marginal_relevance, filter=filter, ) if embedding_function: if isinstance(embedding_function, Embeddings): _embedding_function = embedding_function.embed_query else: _embedding_function = embedding_function elif self._embedding_function: _embedding_function = self._embedding_function.embed_query else: _embedding_function = None if embedding is None: if _embedding_function is None: raise ValueError( "Either `embedding` or `embedding_function` needs to be" " specified." 
) embedding = _embedding_function(query) if query else None if isinstance(embedding, list): embedding = np.array(embedding, dtype=np.float32) if len(embedding.shape) > 1: embedding = embedding[0] result = self.vectorstore.search( embedding=embedding, k=fetch_k if use_maximal_marginal_relevance else k, distance_metric=distance_metric, filter=filter, exec_option=exec_option, return_tensors=["embedding", "metadata", "text", self._id_tensor_name], deep_memory=deep_memory, ) scores = result["score"] embeddings = result["embedding"] metadatas = result["metadata"] texts = result["text"] if use_maximal_marginal_relevance: lambda_mult = kwargs.get("lambda_mult", 0.5) indices = maximal_marginal_relevance( # type: ignore embedding, # type: ignore embeddings, k=min(k, len(texts)), lambda_mult=lambda_mult, ) scores = [scores[i] for i in indices] texts = [texts[i] for i in indices] metadatas = [metadatas[i] for i in indices] docs = [ Document( page_content=text, metadata=metadata, ) for text, metadata in zip(texts, metadatas) ] if return_score: return [(doc, score) for doc, score in zip(docs, scores)] return docs def similarity_search( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Document]: """ Return docs most similar to query. Examples: >>> # Search using an embedding >>> data = vector_store.similarity_search( ... query=<your_query>, ... k=<num_items>, ... exec_option=<preferred_exec_option>, ... ) >>> # Run tql search: >>> data = vector_store.similarity_search( ... query=None, ... tql="SELECT * WHERE id == <id>", ... exec_option="compute_engine", ... ) Args: k (int): Number of Documents to return. Defaults to 4. query (str): Text to look up similar documents. **kwargs: Additional keyword arguments include: embedding (Callable): Embedding function to use. Defaults to None. distance_metric (str): 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity, 'cos' for cosine, 'dot' for dot product. Defaults to 'L2'. filter (Union[Dict, Callable], optional): Additional filter before embedding search. - Dict: Key-value search on tensors of htype json, (sample must satisfy all key-value filters) Dict = {"tensor_1": {"key": value}, "tensor_2": {"key": value}} - Function: Compatible with `deeplake.filter`. Defaults to None. exec_option (str): Supports 3 ways to perform searching. 'python', 'compute_engine', or 'tensor_db'. Defaults to 'python'. - 'python': Pure-python implementation for the client. WARNING: not recommended for big datasets. - 'compute_engine': C++ implementation of the Compute Engine for the client. Not for in-memory or local datasets. - 'tensor_db': Managed Tensor Database for storage and query. Only for data in Deep Lake Managed Database. Use `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. Returns: List[Document]: List of Documents most similar to the query vector. 
""" return self._search( query=query, k=k, use_maximal_marginal_relevance=False, return_score=False, **kwargs, ) def similarity_search_by_vector( self, embedding: Union[List[float], np.ndarray], k: int = 4, **kwargs: Any, ) -> List[Document]: """ Return docs most similar to embedding vector. Examples: >>> # Search using an embedding >>> data = vector_store.similarity_search_by_vector( ... embedding=<your_embedding>, ... k=<num_items_to_return>, ... exec_option=<preferred_exec_option>, ... ) Args: embedding (Union[List[float], np.ndarray]): Embedding to find similar docs. k (int): Number of Documents to return. Defaults to 4. **kwargs: Additional keyword arguments including: filter (Union[Dict, Callable], optional): Additional filter before embedding search. - ``Dict`` - Key-value search on tensors of htype json. True if all key-value filters are satisfied. Dict = {"tensor_name_1": {"key": value}, "tensor_name_2": {"key": value}} - ``Function`` - Any function compatible with `deeplake.filter`. Defaults to None. exec_option (str): Options for search execution include "python", "compute_engine", or "tensor_db". Defaults to "python". - "python" - Pure-python implementation running on the client. Can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged due to potential memory issues. - "compute_engine" - Performant C++ implementation of the Deep Lake Compute Engine. Runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets. - "tensor_db" - Performant, fully-hosted Managed Tensor Database. Responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. To store datasets in this database, specify `runtime = {"db_engine": True}` during dataset creation. distance_metric (str): `L2` for Euclidean, `L1` for Nuclear, `max` for L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. Defaults to `L2`. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. Returns: List[Document]: List of Documents most similar to the query vector. """ return self._search( embedding=embedding, k=k, use_maximal_marginal_relevance=False, return_score=False, **kwargs, ) def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Run similarity search with Deep Lake with distance returned. Examples: >>> data = vector_store.similarity_search_with_score( ... query=<your_query>, ... embedding=<your_embedding_function> ... k=<number_of_items_to_return>, ... exec_option=<preferred_exec_option>, ... ) Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. **kwargs: Additional keyword arguments. Some of these arguments are: distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. Defaults to `L2`. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. embedding_function (Callable): Embedding function to use. Defaults to None. 
exec_option (str): DeepLakeVectorStore supports 3 ways to perform searching. It could be either "python", "compute_engine" or "tensor_db". Defaults to "python". - "python" - Pure-python implementation running on the client. Can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged due to potential memory issues. - "compute_engine" - Performant C++ implementation of the Deep Lake Compute Engine. Runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets. - "tensor_db" - Performant, fully-hosted Managed Tensor Database. Responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. To store datasets in this database, specify `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text with distance in float.""" return self._search( query=query, k=k, return_score=True, **kwargs, ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, exec_option: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """ Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected docs. Examples: >>> data = vector_store.max_marginal_relevance_search_by_vector( ... embedding=<your_embedding>, ... fetch_k=<elements_to_fetch_before_mmr_search>, ... k=<number_of_items_to_return>, ... exec_option=<preferred_exec_option>, ... ) Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch for MMR algorithm. lambda_mult: Number between 0 and 1 determining the degree of diversity. 0 corresponds to max diversity and 1 to min diversity. Defaults to 0.5. exec_option (str): DeepLakeVectorStore supports 3 ways for searching. Could be "python", "compute_engine" or "tensor_db". Defaults to "python". - "python" - Pure-python implementation running on the client. Can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged due to potential memory issues. - "compute_engine" - Performant C++ implementation of the Deep Lake Compute Engine. Runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets. - "tensor_db" - Performant, fully-hosted Managed Tensor Database. Responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. To store datasets in this database, specify `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. 
The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. **kwargs: Additional keyword arguments. Returns: List[Documents] - A list of documents. """ return self._search( embedding=embedding, k=k, fetch_k=fetch_k, use_maximal_marginal_relevance=True, lambda_mult=lambda_mult, exec_option=exec_option, **kwargs, ) def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, exec_option: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Examples: >>> # Search using an embedding >>> data = vector_store.max_marginal_relevance_search( ... query = <query_to_search>, ... embedding_function = <embedding_function_for_query>, ... k = <number_of_items_to_return>, ... exec_option = <preferred_exec_option>, ... ) Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents for MMR algorithm. lambda_mult: Value between 0 and 1. 0 corresponds to maximum diversity and 1 to minimum. Defaults to 0.5. exec_option (str): Supports 3 ways to perform searching. - "python" - Pure-python implementation running on the client. Can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged due to potential memory issues. - "compute_engine" - Performant C++ implementation of the Deep Lake Compute Engine. Runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets. - "tensor_db" - Performant, fully-hosted Managed Tensor Database. Responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. To store datasets in this database, specify `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. **kwargs: Additional keyword arguments Returns: List of Documents selected by maximal marginal relevance. Raises: ValueError: when MRR search is on but embedding function is not specified. """ embedding_function = kwargs.get("embedding") or self._embedding_function if embedding_function is None: raise ValueError( "For MMR search, you must specify an embedding function on" " `creation` or during add call." ) return self._search( query=query, k=k, fetch_k=fetch_k, use_maximal_marginal_relevance=True, lambda_mult=lambda_mult, exec_option=exec_option, embedding_function=embedding_function, # type: ignore **kwargs, ) @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH, **kwargs: Any, ) -> DeepLake: """Create a Deep Lake dataset from a raw documents. 
If a dataset_path is specified, the dataset will be persisted in that location, otherwise by default at `./deeplake` Examples: >>> # Search using an embedding >>> vector_store = DeepLake.from_texts( ... texts = <the_texts_that_you_want_to_embed>, ... embedding_function = <embedding_function_for_query>, ... k = <number_of_items_to_return>, ... exec_option = <preferred_exec_option>, ... ) Args: dataset_path (str): - The full path to the dataset. Can be: - Deep Lake cloud path of the form ``hub://username/dataset_name``. To write to Deep Lake cloud datasets, ensure that you are logged in to Deep Lake (use 'activeloop login' from command line) - AWS S3 path of the form ``s3://bucketname/path/to/dataset``. Credentials are required in either the environment - Google Cloud Storage path of the form ``gcs://bucketname/path/to/dataset`` Credentials are required in either the environment - Local file system path of the form ``./path/to/dataset`` or ``~/path/to/dataset`` or ``path/to/dataset``. - In-memory path of the form ``mem://path/to/dataset`` which doesn't save the dataset, but keeps it in memory instead. Should be used only for testing as it does not persist. texts (List[Document]): List of documents to add. embedding (Optional[Embeddings]): Embedding function. Defaults to None. Note, in other places, it is called embedding_function. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. ids (Optional[List[str]]): List of document IDs. Defaults to None. **kwargs: Additional keyword arguments. Returns: DeepLake: Deep Lake dataset. """ deeplake_dataset = cls(dataset_path=dataset_path, embedding=embedding, **kwargs) deeplake_dataset.add_texts( texts=texts, metadatas=metadatas, ids=ids, ) return deeplake_dataset def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool: """Delete the entities in the dataset. Args: ids (Optional[List[str]], optional): The document_ids to delete. Defaults to None. **kwargs: Other keyword arguments that subclasses might use. - filter (Optional[Dict[str, str]], optional): The filter to delete by. - delete_all (Optional[bool], optional): Whether to drop the dataset. Returns: bool: Whether the delete operation was successful. """ filter = kwargs.get("filter") delete_all = kwargs.get("delete_all") self.vectorstore.delete(ids=ids, filter=filter, delete_all=delete_all) return True @classmethod def force_delete_by_path(cls, path: str) -> None: """Force delete dataset by path. Args: path (str): path of the dataset to delete. Raises: ValueError: if deeplake is not installed. """ try: import deeplake except ImportError: raise ValueError( "Could not import deeplake python package. " "Please install it with `pip install deeplake`." ) deeplake.delete(path, large_ok=True, force=True) def delete_dataset(self) -> None: """Delete the collection.""" self.delete(delete_all=True) def ds(self) -> Any: logger.warning( "this method is deprecated and will be removed, " "better to use `db.vectorstore.dataset` instead." ) return self.vectorstore.dataset
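# A minimal usage sketch, not part of the original module: stores a couple of texts
# in a local Deep Lake dataset and runs a similarity search. The embeddings
# implementation, texts and query are assumptions.
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings

    db = DeepLake.from_texts(
        ["harrison worked at kensho", "bears like honey"],
        embedding=OpenAIEmbeddings(),
        dataset_path="./deeplake_example/",
    )
    docs = db.similarity_search("Where did Harrison work?", k=1)
    print(docs[0].page_content)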
[]
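The `from_texts` and `max_marginal_relevance_search` docstrings above describe the full option surface; a minimal end-to-end sketch may make the flow easier to follow. The dataset path, embedding model, and query below are illustrative assumptions (an OpenAI key is assumed for the embeddings), not values taken from the source.

```python
# Minimal sketch: build a local Deep Lake store and run an MMR search over it.
from langchain.embeddings import OpenAIEmbeddings  # assumes OPENAI_API_KEY is set
from langchain.vectorstores import DeepLake

texts = [
    "Deep Lake stores text, metadata and embeddings together.",
    "MMR re-ranks candidates to balance relevance and diversity.",
]

# `dataset_path` here is a hypothetical local directory; `./deeplake` is the default.
db = DeepLake.from_texts(
    texts,
    embedding=OpenAIEmbeddings(),
    dataset_path="./example_deeplake",
)

# `fetch_k` candidates are retrieved first, then `k` results are selected for diversity.
docs = db.max_marginal_relevance_search(
    "How are embeddings stored?", k=2, fetch_k=10, lambda_mult=0.5
)
print([d.page_content for d in docs])
```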
2024-01-10
axgpt/langchain
libs~langchain~langchain~chains~graph_qa~arangodb.py
"""Question answering over a graph.""" from __future__ import annotations import re from typing import Any, Dict, List, Optional from langchain_core.pydantic_v1 import Field from langchain_core.schema import BasePromptTemplate from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ( AQL_FIX_PROMPT, AQL_GENERATION_PROMPT, AQL_QA_PROMPT, ) from langchain.chains.llm import LLMChain from langchain.graphs.arangodb_graph import ArangoGraph class ArangoGraphQAChain(Chain): """Chain for question-answering against a graph by generating AQL statements. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ graph: ArangoGraph = Field(exclude=True) aql_generation_chain: LLMChain aql_fix_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: # Specifies the maximum number of AQL Query Results to return top_k: int = 10 # Specifies the set of AQL Query Examples that promote few-shot-learning aql_examples: str = "" # Specify whether to return the AQL Query in the output dictionary return_aql_query: bool = False # Specify whether to return the AQL JSON Result in the output dictionary return_aql_result: bool = False # Specify the maximum amount of AQL Generation attempts that should be made max_aql_generation_attempts: int = 3 @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] @property def _chain_type(self) -> str: return "graph_aql_chain" @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = AQL_QA_PROMPT, aql_generation_prompt: BasePromptTemplate = AQL_GENERATION_PROMPT, aql_fix_prompt: BasePromptTemplate = AQL_FIX_PROMPT, **kwargs: Any, ) -> ArangoGraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) aql_generation_chain = LLMChain(llm=llm, prompt=aql_generation_prompt) aql_fix_chain = LLMChain(llm=llm, prompt=aql_fix_prompt) return cls( qa_chain=qa_chain, aql_generation_chain=aql_generation_chain, aql_fix_chain=aql_fix_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """ Generate an AQL statement from user input, use it retrieve a response from an ArangoDB Database instance, and respond to the user input in natural language. Users can modify the following ArangoGraphQAChain Class Variables: :var top_k: The maximum number of AQL Query Results to return :type top_k: int :var aql_examples: A set of AQL Query Examples that are passed to the AQL Generation Prompt Template to promote few-shot-learning. Defaults to an empty string. :type aql_examples: str :var return_aql_query: Whether to return the AQL Query in the output dictionary. Defaults to False. 
:type return_aql_query: bool :var return_aql_result: Whether to return the AQL Query in the output dictionary. Defaults to False :type return_aql_result: bool :var max_aql_generation_attempts: The maximum amount of AQL Generation attempts to be made prior to raising the last AQL Query Execution Error. Defaults to 3. :type max_aql_generation_attempts: int """ _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() user_input = inputs[self.input_key] ######################### # Generate AQL Query # aql_generation_output = self.aql_generation_chain.run( { "adb_schema": self.graph.schema, "aql_examples": self.aql_examples, "user_input": user_input, }, callbacks=callbacks, ) ######################### aql_query = "" aql_error = "" aql_result = None aql_generation_attempt = 1 while ( aql_result is None and aql_generation_attempt < self.max_aql_generation_attempts + 1 ): ##################### # Extract AQL Query # pattern = r"```(?i:aql)?(.*?)```" matches = re.findall(pattern, aql_generation_output, re.DOTALL) if not matches: _run_manager.on_text( "Invalid Response: ", end="\n", verbose=self.verbose ) _run_manager.on_text( aql_generation_output, color="red", end="\n", verbose=self.verbose ) raise ValueError(f"Response is Invalid: {aql_generation_output}") aql_query = matches[0] ##################### _run_manager.on_text( f"AQL Query ({aql_generation_attempt}):", verbose=self.verbose ) _run_manager.on_text( aql_query, color="green", end="\n", verbose=self.verbose ) ##################### # Execute AQL Query # from arango import AQLQueryExecuteError try: aql_result = self.graph.query(aql_query, self.top_k) except AQLQueryExecuteError as e: aql_error = e.error_message _run_manager.on_text( "AQL Query Execution Error: ", end="\n", verbose=self.verbose ) _run_manager.on_text( aql_error, color="yellow", end="\n\n", verbose=self.verbose ) ######################## # Retry AQL Generation # aql_generation_output = self.aql_fix_chain.run( { "adb_schema": self.graph.schema, "aql_query": aql_query, "aql_error": aql_error, }, callbacks=callbacks, ) ######################## ##################### aql_generation_attempt += 1 if aql_result is None: m = f""" Maximum amount of AQL Query Generation attempts reached. Unable to execute the AQL Query due to the following error: {aql_error} """ raise ValueError(m) _run_manager.on_text("AQL Result:", end="\n", verbose=self.verbose) _run_manager.on_text( str(aql_result), color="green", end="\n", verbose=self.verbose ) ######################## # Interpret AQL Result # result = self.qa_chain( { "adb_schema": self.graph.schema, "user_input": user_input, "aql_query": aql_query, "aql_result": aql_result, }, callbacks=callbacks, ) ######################## # Return results # result = {self.output_key: result[self.qa_chain.output_key]} if self.return_aql_query: result["aql_query"] = aql_query if self.return_aql_result: result["aql_result"] = aql_result return result
[]
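As a usage sketch for the chain above: the ArangoDB connection details, database name, and the choice of `ChatOpenAI` are assumptions for illustration only.

```python
# Minimal sketch: wire ArangoGraphQAChain to a (hypothetical) local ArangoDB instance.
from arango import ArangoClient

from langchain.chains import ArangoGraphQAChain
from langchain.chat_models import ChatOpenAI
from langchain.graphs import ArangoGraph

db = ArangoClient(hosts="http://localhost:8529").db(
    "example_db", username="root", password="password"  # placeholder credentials
)
graph = ArangoGraph(db)

chain = ArangoGraphQAChain.from_llm(
    ChatOpenAI(temperature=0),
    graph=graph,
    verbose=True,
    return_aql_result=True,  # also surface the raw AQL JSON result
)

output = chain({"query": "Which characters appear in more than one episode?"})
print(output["result"])
print(output["aql_result"])
```

Per the security note in the class docstring, the database credentials used in such a setup should be scoped as narrowly as possible.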
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~memory~test_xata.py
"""Test Xata chat memory store functionality.

Before running this test, please create a Xata database.
"""
import json
import os

from langchain_core.schema.messages import _message_to_dict

from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import XataChatMessageHistory


class TestXata:
    @classmethod
    def setup_class(cls) -> None:
        assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
        assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"

    def test_xata_chat_memory(self) -> None:
        message_history = XataChatMessageHistory(
            api_key=os.getenv("XATA_API_KEY", ""),
            db_url=os.getenv("XATA_DB_URL", ""),
            session_id="integration-test-session",
        )

        memory = ConversationBufferMemory(
            memory_key="baz", chat_memory=message_history, return_messages=True
        )

        # add some messages
        memory.chat_memory.add_ai_message("This is me, the AI")
        memory.chat_memory.add_user_message("This is me, the human")

        # get the message history from the memory store and turn it into JSON
        messages = memory.chat_memory.messages
        messages_json = json.dumps([_message_to_dict(msg) for msg in messages])

        assert "This is me, the AI" in messages_json
        assert "This is me, the human" in messages_json

        # remove the record from Xata, so the next test run won't pick it up
        memory.chat_memory.clear()
[]
2024-01-10
axgpt/langchain
libs~langchain~langchain~memory~chat_message_histories~cassandra.py
"""Cassandra-based chat message history, based on cassIO.""" from __future__ import annotations import json import typing from typing import List if typing.TYPE_CHECKING: from cassandra.cluster import Session from langchain_core.schema import ( BaseChatMessageHistory, ) from langchain_core.schema.messages import ( BaseMessage, _message_to_dict, messages_from_dict, ) DEFAULT_TABLE_NAME = "message_store" DEFAULT_TTL_SECONDS = None class CassandraChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in Cassandra. Args: session_id: arbitrary key that is used to store the messages of a single chat session. session: a Cassandra `Session` object (an open DB connection) keyspace: name of the keyspace to use. table_name: name of the table to use. ttl_seconds: time-to-live (seconds) for automatic expiration of stored entries. None (default) for no expiration. """ def __init__( self, session_id: str, session: Session, keyspace: str, table_name: str = DEFAULT_TABLE_NAME, ttl_seconds: typing.Optional[int] = DEFAULT_TTL_SECONDS, ) -> None: try: from cassio.history import StoredBlobHistory except (ImportError, ModuleNotFoundError): raise ImportError( "Could not import cassio python package. " "Please install it with `pip install cassio`." ) self.session_id = session_id self.ttl_seconds = ttl_seconds self.blob_history = StoredBlobHistory(session, keyspace, table_name) @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve all session messages from DB""" message_blobs = self.blob_history.retrieve( self.session_id, ) items = [json.loads(message_blob) for message_blob in message_blobs] messages = messages_from_dict(items) return messages def add_message(self, message: BaseMessage) -> None: """Write a message to the table""" self.blob_history.store( self.session_id, json.dumps(_message_to_dict(message)), self.ttl_seconds ) def clear(self) -> None: """Clear session memory from DB""" self.blob_history.clear_session_id(self.session_id)
[]
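A brief usage sketch for the history class above; the cluster address, keyspace, and TTL are assumptions, and the backing table is handled by cassIO as described in the constructor.

```python
# Minimal sketch: store and read back chat messages via a (hypothetical) local Cassandra.
from cassandra.cluster import Cluster

from langchain.memory.chat_message_histories import CassandraChatMessageHistory

session = Cluster(["127.0.0.1"]).connect()  # assumes a reachable Cassandra node

history = CassandraChatMessageHistory(
    session_id="conversation-42",
    session=session,
    keyspace="example_keyspace",  # assumed to exist
    ttl_seconds=3600,             # expire stored messages after one hour
)

# add_user_message / add_ai_message come from the BaseChatMessageHistory base class
history.add_user_message("Hi there!")
history.add_ai_message("Hello! How can I help?")

print(history.messages)  # -> [HumanMessage(...), AIMessage(...)]
history.clear()
```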
2024-01-10
axgpt/langchain
libs~langchain~langchain~schema~chat_history.py
from langchain_core.schema.chat_history import BaseChatMessageHistory __all__ = ["BaseChatMessageHistory"]
[]
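Several files in this collection implement the `BaseChatMessageHistory` interface that the shim above re-exports. As a point of reference, here is a minimal in-memory sketch of that interface; it is illustrative only and not part of the library.

```python
# Minimal in-memory implementation of the BaseChatMessageHistory interface (sketch).
from typing import List

from langchain_core.schema import BaseChatMessageHistory
from langchain_core.schema.messages import BaseMessage


class InMemoryHistory(BaseChatMessageHistory):
    def __init__(self) -> None:
        self._messages: List[BaseMessage] = []

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
        return self._messages

    def add_message(self, message: BaseMessage) -> None:
        """Append a message to the in-memory store."""
        self._messages.append(message)

    def clear(self) -> None:
        """Drop all stored messages."""
        self._messages = []


history = InMemoryHistory()
history.add_user_message("ping")  # helper provided by the base class
history.add_ai_message("pong")
assert len(history.messages) == 2
```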
2024-01-10
axgpt/langchain
libs~langchain~langchain~smith~evaluation~string_run_evaluator.py
"""Run evaluator wrapper for string evaluators.""" from __future__ import annotations from abc import abstractmethod from typing import Any, Dict, List, Optional from langchain_core.load.dump import dumpd from langchain_core.load.load import load from langchain_core.load.serializable import Serializable from langchain_core.schema import RUN_KEY, messages_from_dict from langchain_core.schema.messages import BaseMessage, get_buffer_string from langsmith import EvaluationResult, RunEvaluator from langsmith.schemas import DataType, Example, Run from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.evaluation.schema import StringEvaluator def _get_messages_from_run_dict(messages: List[dict]) -> List[BaseMessage]: if not messages: return [] first_message = messages[0] if "lc" in first_message: return [load(dumpd(message)) for message in messages] else: return messages_from_dict(messages) class StringRunMapper(Serializable): """Extract items to evaluate from the run object.""" @property def output_keys(self) -> List[str]: """The keys to extract from the run.""" return ["prediction", "input"] @abstractmethod def map(self, run: Run) -> Dict[str, str]: """Maps the Run to a dictionary.""" def __call__(self, run: Run) -> Dict[str, str]: """Maps the Run to a dictionary.""" if not run.outputs: raise ValueError(f"Run {run.id} has no outputs to evaluate.") return self.map(run) class LLMStringRunMapper(StringRunMapper): """Extract items to evaluate from the run object.""" def serialize_chat_messages(self, messages: List[Dict]) -> str: """Extract the input messages from the run.""" if isinstance(messages, list) and messages: if isinstance(messages[0], dict): chat_messages = _get_messages_from_run_dict(messages) elif isinstance(messages[0], list): # Runs from Tracer have messages as a list of lists of dicts chat_messages = _get_messages_from_run_dict(messages[0]) else: raise ValueError(f"Could not extract messages to evaluate {messages}") return get_buffer_string(chat_messages) raise ValueError(f"Could not extract messages to evaluate {messages}") def serialize_inputs(self, inputs: Dict) -> str: if "prompts" in inputs: # Should we even accept this? 
input_ = "\n\n".join(inputs["prompts"]) elif "prompt" in inputs: input_ = inputs["prompt"] elif "messages" in inputs: input_ = self.serialize_chat_messages(inputs["messages"]) else: raise ValueError("LLM Run must have either messages or prompts as inputs.") return input_ def serialize_outputs(self, outputs: Dict) -> str: if not outputs.get("generations"): raise ValueError("Cannot evaluate LLM Run without generations.") generations: List[Dict] = outputs["generations"] if not generations: raise ValueError("Cannot evaluate LLM run with empty generations.") first_generation: Dict = generations[0] if isinstance(first_generation, list): # Runs from Tracer have generations as a list of lists of dicts # Whereas Runs from the API have a list of dicts first_generation = first_generation[0] if "message" in first_generation: output_ = self.serialize_chat_messages([first_generation["message"]]) else: output_ = first_generation["text"] return output_ def map(self, run: Run) -> Dict[str, str]: """Maps the Run to a dictionary.""" if run.run_type != "llm": raise ValueError("LLM RunMapper only supports LLM runs.") elif not run.outputs: if run.error: raise ValueError( f"Cannot evaluate errored LLM run {run.id}: {run.error}" ) else: raise ValueError( f"Run {run.id} has no outputs. Cannot evaluate this run." ) else: try: inputs = self.serialize_inputs(run.inputs) except Exception as e: raise ValueError( f"Could not parse LM input from run inputs {run.inputs}" ) from e try: output_ = self.serialize_outputs(run.outputs) except Exception as e: raise ValueError( f"Could not parse LM prediction from run outputs {run.outputs}" ) from e return {"input": inputs, "prediction": output_} class ChainStringRunMapper(StringRunMapper): """Extract items to evaluate from the run object from a chain.""" input_key: Optional[str] = None """The key from the model Run's inputs to use as the eval input. If not provided, will use the only input key or raise an error if there are multiple.""" prediction_key: Optional[str] = None """The key from the model Run's outputs to use as the eval prediction. If not provided, will use the only output key or raise an error if there are multiple.""" def _get_key(self, source: Dict, key: Optional[str], which: str) -> str: if key is not None: return source[key] elif len(source) == 1: return next(iter(source.values())) else: raise ValueError( f"Could not map run {which} with multiple keys: " f"{source}\nPlease manually specify a {which}_key" ) def map(self, run: Run) -> Dict[str, str]: """Maps the Run to a dictionary.""" if not run.outputs: raise ValueError( f"Run with ID {run.id} lacks outputs required for evaluation." " Ensure the Run has valid outputs." ) if self.input_key is not None and self.input_key not in run.inputs: raise ValueError( f"Run with ID {run.id} is missing the expected input key" f" '{self.input_key}'.\nAvailable input keys in this Run" f" are: {run.inputs.keys()}.\nAdjust the evaluator's" f" input_key or ensure your input data includes key" f" '{self.input_key}'." ) elif self.prediction_key is not None and self.prediction_key not in run.outputs: available_keys = ", ".join(run.outputs.keys()) raise ValueError( f"Run with ID {run.id} doesn't have the expected prediction key" f" '{self.prediction_key}'. Available prediction keys in this Run are:" f" {available_keys}. Adjust the evaluator's prediction_key or" " ensure the Run object's outputs the expected key." 
            )
        else:
            input_ = self._get_key(run.inputs, self.input_key, "input")
            prediction = self._get_key(run.outputs, self.prediction_key, "prediction")
            return {
                "input": input_,
                "prediction": prediction,
            }


class ToolStringRunMapper(StringRunMapper):
    """Map an input to the tool."""

    def map(self, run: Run) -> Dict[str, str]:
        if not run.outputs:
            raise ValueError(f"Run {run.id} has no outputs to evaluate.")
        return {"input": run.inputs["input"], "prediction": run.outputs["output"]}


class StringExampleMapper(Serializable):
    """Map an example, or row in the dataset, to the inputs of an evaluation."""

    reference_key: Optional[str] = None

    @property
    def output_keys(self) -> List[str]:
        """The keys to extract from the run."""
        return ["reference"]

    def serialize_chat_messages(self, messages: List[Dict]) -> str:
        """Extract the input messages from the run."""
        chat_messages = _get_messages_from_run_dict(messages)
        return get_buffer_string(chat_messages)

    def map(self, example: Example) -> Dict[str, str]:
        """Maps the Example, or dataset row to a dictionary."""
        if not example.outputs:
            raise ValueError(
                f"Example {example.id} has no outputs to use as a reference."
            )
        if self.reference_key is None:
            if len(example.outputs) > 1:
                raise ValueError(
                    f"Example {example.id} has multiple outputs, so you must"
                    " specify a reference_key."
                )
            else:
                output = list(example.outputs.values())[0]
        elif self.reference_key not in example.outputs:
            raise ValueError(
                f"Example {example.id} does not have reference key"
                f" {self.reference_key}."
            )
        else:
            output = example.outputs[self.reference_key]
        return {
            "reference": self.serialize_chat_messages([output])
            if isinstance(output, dict) and output.get("type") and output.get("data")
            else output
        }

    def __call__(self, example: Example) -> Dict[str, str]:
        """Maps the Run and Example to a dictionary."""
        if not example.outputs:
            raise ValueError(
                f"Example {example.id} has no outputs to use as a reference label."
            )
        return self.map(example)


class StringRunEvaluatorChain(Chain, RunEvaluator):
    """Evaluate Run and optional examples."""

    run_mapper: StringRunMapper
    """Maps the Run to a dictionary with 'input' and 'prediction' strings."""
    example_mapper: Optional[StringExampleMapper] = None
    """Maps the Example (dataset row) to a dictionary with a 'reference' string."""
    name: str
    """The name of the evaluation metric."""
    string_evaluator: StringEvaluator
    """The evaluation chain."""

    @property
    def input_keys(self) -> List[str]:
        return ["run", "example"]

    @property
    def output_keys(self) -> List[str]:
        return ["feedback"]

    def _prepare_input(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        run: Run = inputs["run"]
        example: Optional[Example] = inputs.get("example")
        evaluate_strings_inputs = self.run_mapper(run)
        if not self.string_evaluator.requires_input:
            # Hide warning about unused input
            evaluate_strings_inputs.pop("input", None)
        if example and self.example_mapper and self.string_evaluator.requires_reference:
            evaluate_strings_inputs.update(self.example_mapper(example))
        elif self.string_evaluator.requires_reference:
            raise ValueError(
                f"Evaluator {self.name} requires a reference"
                " example from the dataset,"
                f" but none was provided for run {run.id}."
            )
        return evaluate_strings_inputs

    def _prepare_output(self, output: Dict[str, Any]) -> Dict[str, Any]:
        evaluation_result = EvaluationResult(
            key=self.name, comment=output.get("reasoning"), **output
        )
        if RUN_KEY in output:
            # TODO: Not currently surfaced.
Update evaluation_result.evaluator_info[RUN_KEY] = output[RUN_KEY] return {"feedback": evaluation_result} def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Call the evaluation chain.""" evaluate_strings_inputs = self._prepare_input(inputs) _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() chain_output = self.string_evaluator.evaluate_strings( **evaluate_strings_inputs, callbacks=callbacks, include_run_info=True, ) return self._prepare_output(chain_output) async def _acall( self, inputs: Dict[str, str], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Call the evaluation chain.""" evaluate_strings_inputs = self._prepare_input(inputs) _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() chain_output = await self.string_evaluator.aevaluate_strings( **evaluate_strings_inputs, callbacks=callbacks, include_run_info=True, ) return self._prepare_output(chain_output) def _prepare_evaluator_output(self, output: Dict[str, Any]) -> EvaluationResult: feedback: EvaluationResult = output["feedback"] if RUN_KEY not in feedback.evaluator_info: feedback.evaluator_info[RUN_KEY] = output[RUN_KEY] return feedback def evaluate_run( self, run: Run, example: Optional[Example] = None ) -> EvaluationResult: """Evaluate an example.""" try: result = self({"run": run, "example": example}, include_run_info=True) return self._prepare_evaluator_output(result) except Exception as e: return EvaluationResult( key=self.string_evaluator.evaluation_name, comment=f"Error evaluating run {run.id}: {e}", # TODO: Add run ID once we can declare it via callbacks ) async def aevaluate_run( self, run: Run, example: Optional[Example] = None ) -> EvaluationResult: """Evaluate an example.""" try: result = await self.acall( {"run": run, "example": example}, include_run_info=True ) return self._prepare_evaluator_output(result) except Exception as e: return EvaluationResult( key=self.string_evaluator.evaluation_name, comment=f"Error evaluating run {run.id}: {e}", ) @classmethod def from_run_and_data_type( cls, evaluator: StringEvaluator, run_type: str, data_type: DataType, input_key: Optional[str] = None, prediction_key: Optional[str] = None, reference_key: Optional[str] = None, tags: Optional[List[str]] = None, ) -> StringRunEvaluatorChain: """ Create a StringRunEvaluatorChain from an evaluator and the run and dataset types. This method provides an easy way to instantiate a StringRunEvaluatorChain, by taking an evaluator and information about the type of run and the data. The method supports LLM and chain runs. Args: evaluator (StringEvaluator): The string evaluator to use. run_type (str): The type of run being evaluated. Supported types are LLM and Chain. data_type (DataType): The type of dataset used in the run. input_key (str, optional): The key used to map the input from the run. prediction_key (str, optional): The key used to map the prediction from the run. reference_key (str, optional): The key used to map the reference from the dataset. tags (List[str], optional): List of tags to attach to the evaluation chain. Returns: StringRunEvaluatorChain: The instantiated evaluation chain. Raises: ValueError: If the run type is not supported, or if the evaluator requires a reference from the dataset but the reference key is not provided. 
""" # noqa: E501 # Configure how run inputs/predictions are passed to the evaluator if run_type == "llm": run_mapper: StringRunMapper = LLMStringRunMapper() elif run_type == "chain": run_mapper = ChainStringRunMapper( input_key=input_key, prediction_key=prediction_key ) else: raise ValueError( f"Unsupported run type {run_type}. Expected one of 'llm' or 'chain'." ) # Configure how example rows are fed as a reference string to the evaluator if ( reference_key is not None or data_type in (DataType.llm, DataType.chat) or evaluator.requires_reference ): example_mapper = StringExampleMapper(reference_key=reference_key) elif evaluator.requires_reference: raise ValueError( f"Evaluator {evaluator.evaluation_name} requires a reference" " example from the dataset. Please specify the reference key from" " amongst the dataset outputs keys." ) else: example_mapper = None return cls( name=evaluator.evaluation_name, run_mapper=run_mapper, example_mapper=example_mapper, string_evaluator=evaluator, tags=tags, )
[]
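The `from_run_and_data_type` factory above is easiest to see with a concrete (assumed) setup: the criteria evaluator, the dataset type, and the `question`/`answer` keys below are illustrative, and `load_evaluator` falls back to a default OpenAI chat model here, so an OpenAI key is assumed.

```python
# Minimal sketch: adapt an off-the-shelf string evaluator so it can grade chain runs.
from langsmith.schemas import DataType

from langchain.evaluation import load_evaluator
from langchain.smith.evaluation.string_run_evaluator import StringRunEvaluatorChain

string_evaluator = load_evaluator("criteria", criteria="helpfulness")

run_evaluator = StringRunEvaluatorChain.from_run_and_data_type(
    string_evaluator,
    run_type="chain",         # runs produced by a Chain rather than a raw LLM
    data_type=DataType.kv,    # dataset rows are plain key/value dicts
    input_key="question",     # hypothetical key in the chain run's inputs
    prediction_key="answer",  # hypothetical key in the chain run's outputs
)

# run_evaluator.evaluate_run(run, example) can now be called on LangSmith Run objects,
# or the chain can be handed to LangSmith's run_on_dataset evaluation machinery.
```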
2024-01-10
axgpt/langchain
libs~langchain~langchain~callbacks~labelstudio_callback.py
import os import warnings from datetime import datetime from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Union from uuid import UUID from langchain_core.schema import ( AgentAction, AgentFinish, BaseMessage, ChatMessage, Generation, LLMResult, ) from langchain.callbacks.base import BaseCallbackHandler class LabelStudioMode(Enum): """Label Studio mode enumerator.""" PROMPT = "prompt" CHAT = "chat" def get_default_label_configs( mode: Union[str, LabelStudioMode] ) -> Tuple[str, LabelStudioMode]: """Get default Label Studio configs for the given mode. Parameters: mode: Label Studio mode ("prompt" or "chat") Returns: Tuple of Label Studio config and mode """ _default_label_configs = { LabelStudioMode.PROMPT.value: """ <View> <Style> .prompt-box { background-color: white; border-radius: 10px; box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1); padding: 20px; } </Style> <View className="root"> <View className="prompt-box"> <Text name="prompt" value="$prompt"/> </View> <TextArea name="response" toName="prompt" maxSubmissions="1" editable="true" required="true"/> </View> <Header value="Rate the response:"/> <Rating name="rating" toName="prompt"/> </View>""", LabelStudioMode.CHAT.value: """ <View> <View className="root"> <Paragraphs name="dialogue" value="$prompt" layout="dialogue" textKey="content" nameKey="role" granularity="sentence"/> <Header value="Final response:"/> <TextArea name="response" toName="dialogue" maxSubmissions="1" editable="true" required="true"/> </View> <Header value="Rate the response:"/> <Rating name="rating" toName="dialogue"/> </View>""", } if isinstance(mode, str): mode = LabelStudioMode(mode) return _default_label_configs[mode.value], mode class LabelStudioCallbackHandler(BaseCallbackHandler): """Label Studio callback handler. Provides the ability to send predictions to Label Studio for human evaluation, feedback and annotation. Parameters: api_key: Label Studio API key url: Label Studio URL project_id: Label Studio project ID project_name: Label Studio project name project_config: Label Studio project config (XML) mode: Label Studio mode ("prompt" or "chat") Examples: >>> from langchain.llms import OpenAI >>> from langchain.callbacks import LabelStudioCallbackHandler >>> handler = LabelStudioCallbackHandler( ... api_key='<your_key_here>', ... url='http://localhost:8080', ... project_name='LangChain-%Y-%m-%d', ... mode='prompt' ... ) >>> llm = OpenAI(callbacks=[handler]) >>> llm.predict('Tell me a story about a dog.') """ DEFAULT_PROJECT_NAME: str = "LangChain-%Y-%m-%d" def __init__( self, api_key: Optional[str] = None, url: Optional[str] = None, project_id: Optional[int] = None, project_name: str = DEFAULT_PROJECT_NAME, project_config: Optional[str] = None, mode: Union[str, LabelStudioMode] = LabelStudioMode.PROMPT, ): super().__init__() # Import LabelStudio SDK try: import label_studio_sdk as ls except ImportError: raise ImportError( f"You're using {self.__class__.__name__} in your code," f" but you don't have the LabelStudio SDK " f"Python package installed or upgraded to the latest version. " f"Please run `pip install -U label-studio-sdk`" f" before using this callback." ) # Check if Label Studio API key is provided if not api_key: if os.getenv("LABEL_STUDIO_API_KEY"): api_key = str(os.getenv("LABEL_STUDIO_API_KEY")) else: raise ValueError( f"You're using {self.__class__.__name__} in your code," f" Label Studio API key is not provided. 
" f"Please provide Label Studio API key: " f"go to the Label Studio instance, navigate to " f"Account & Settings -> Access Token and copy the key. " f"Use the key as a parameter for the callback: " f"{self.__class__.__name__}" f"(label_studio_api_key='<your_key_here>', ...) or " f"set the environment variable LABEL_STUDIO_API_KEY=<your_key_here>" ) self.api_key = api_key if not url: if os.getenv("LABEL_STUDIO_URL"): url = os.getenv("LABEL_STUDIO_URL") else: warnings.warn( f"Label Studio URL is not provided, " f"using default URL: {ls.LABEL_STUDIO_DEFAULT_URL}" f"If you want to provide your own URL, use the parameter: " f"{self.__class__.__name__}" f"(label_studio_url='<your_url_here>', ...) " f"or set the environment variable LABEL_STUDIO_URL=<your_url_here>" ) url = ls.LABEL_STUDIO_DEFAULT_URL self.url = url # Maps run_id to prompts self.payload: Dict[str, Dict] = {} self.ls_client = ls.Client(url=self.url, api_key=self.api_key) self.project_name = project_name if project_config: self.project_config = project_config self.mode = None else: self.project_config, self.mode = get_default_label_configs(mode) self.project_id = project_id or os.getenv("LABEL_STUDIO_PROJECT_ID") if self.project_id is not None: self.ls_project = self.ls_client.get_project(int(self.project_id)) else: project_title = datetime.today().strftime(self.project_name) existing_projects = self.ls_client.get_projects(title=project_title) if existing_projects: self.ls_project = existing_projects[0] self.project_id = self.ls_project.id else: self.ls_project = self.ls_client.create_project( title=project_title, label_config=self.project_config ) self.project_id = self.ls_project.id self.parsed_label_config = self.ls_project.parsed_label_config # Find the first TextArea tag # "from_name", "to_name", "value" will be used to create predictions self.from_name, self.to_name, self.value, self.input_type = ( None, None, None, None, ) for tag_name, tag_info in self.parsed_label_config.items(): if tag_info["type"] == "TextArea": self.from_name = tag_name self.to_name = tag_info["to_name"][0] self.value = tag_info["inputs"][0]["value"] self.input_type = tag_info["inputs"][0]["type"] break if not self.from_name: error_message = ( f'Label Studio project "{self.project_name}" ' f"does not have a TextArea tag. " f"Please add a TextArea tag to the project." ) if self.mode == LabelStudioMode.PROMPT: error_message += ( "\nHINT: go to project Settings -> " "Labeling Interface -> Browse Templates" ' and select "Generative AI -> ' 'Supervised Language Model Fine-tuning" template.' ) else: error_message += ( "\nHINT: go to project Settings -> " "Labeling Interface -> Browse Templates" " and check available templates under " '"Generative AI" section.' 
) raise ValueError(error_message) def add_prompts_generations( self, run_id: str, generations: List[List[Generation]] ) -> None: # Create tasks in Label Studio tasks = [] prompts = self.payload[run_id]["prompts"] model_version = ( self.payload[run_id]["kwargs"] .get("invocation_params", {}) .get("model_name") ) for prompt, generation in zip(prompts, generations): tasks.append( { "data": { self.value: prompt, "run_id": run_id, }, "predictions": [ { "result": [ { "from_name": self.from_name, "to_name": self.to_name, "type": "textarea", "value": {"text": [g.text for g in generation]}, } ], "model_version": model_version, } ], } ) self.ls_project.import_tasks(tasks) def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> None: """Save the prompts in memory when an LLM starts.""" if self.input_type != "Text": raise ValueError( f'\nLabel Studio project "{self.project_name}" ' f"has an input type <{self.input_type}>. " f'To make it work with the mode="chat", ' f"the input type should be <Text>.\n" f"Read more here https://labelstud.io/tags/text" ) run_id = str(kwargs["run_id"]) self.payload[run_id] = {"prompts": prompts, "kwargs": kwargs} def _get_message_role(self, message: BaseMessage) -> str: """Get the role of the message.""" if isinstance(message, ChatMessage): return message.role else: return message.__class__.__name__ def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Save the prompts in memory when an LLM starts.""" if self.input_type != "Paragraphs": raise ValueError( f'\nLabel Studio project "{self.project_name}" ' f"has an input type <{self.input_type}>. 
" f'To make it work with the mode="chat", ' f"the input type should be <Paragraphs>.\n" f"Read more here https://labelstud.io/tags/paragraphs" ) prompts = [] for message_list in messages: dialog = [] for message in message_list: dialog.append( { "role": self._get_message_role(message), "content": message.content, } ) prompts.append(dialog) self.payload[str(run_id)] = { "prompts": prompts, "tags": tags, "metadata": metadata, "run_id": run_id, "parent_run_id": parent_run_id, "kwargs": kwargs, } def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Do nothing when a new token is generated.""" pass def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Create a new Label Studio task for each prompt and generation.""" run_id = str(kwargs["run_id"]) # Submit results to Label Studio self.add_prompts_generations(run_id, response.generations) # Pop current run from `self.runs` self.payload.pop(run_id) def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM outputs an error.""" pass def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: pass def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: pass def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM chain outputs an error.""" pass def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: """Do nothing when tool starts.""" pass def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Do nothing when agent takes a specific action.""" pass def on_tool_end( self, output: str, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """Do nothing when tool ends.""" pass def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when tool outputs an error.""" pass def on_text(self, text: str, **kwargs: Any) -> None: """Do nothing""" pass def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Do nothing""" pass
[ "[]", "prompt" ]
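The class docstring above demonstrates the prompt mode; a sketch of the chat mode follows. The API key, URL, and project name are placeholders, and the target Label Studio project is assumed to use a `<Paragraphs>` input (e.g. a Generative AI chat template), as required by `on_chat_model_start`.

```python
# Minimal sketch: log chat prompts/completions to Label Studio for annotation.
from langchain_core.schema.messages import HumanMessage, SystemMessage

from langchain.callbacks import LabelStudioCallbackHandler
from langchain.chat_models import ChatOpenAI

handler = LabelStudioCallbackHandler(
    api_key="<label_studio_api_key>",  # placeholder; or set LABEL_STUDIO_API_KEY
    url="http://localhost:8080",       # placeholder Label Studio instance
    project_name="LangChain-chat-%Y-%m-%d",
    mode="chat",
)

chat = ChatOpenAI(callbacks=[handler])
chat(
    [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="Tell me a joke about databases."),
    ]
)
# Each prompt/completion pair is imported into Label Studio as a task for review.
```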
2024-01-10
axgpt/langchain
libs~langchain~tests~unit_tests~evaluation~agents~test_eval_chain.py
"""Test agent trajectory evaluation chain.""" from typing import Any, Dict, List, Optional, Tuple import pytest from langchain_core.pydantic_v1 import Field from langchain_core.schema import AgentAction, BaseMessage, OutputParserException from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.evaluation.agents.trajectory_eval_chain import ( TrajectoryEval, TrajectoryEvalChain, TrajectoryOutputParser, ) from langchain.tools.base import tool from tests.unit_tests.llms.fake_chat_model import FakeChatModel @pytest.fixture def intermediate_steps() -> List[Tuple[AgentAction, str]]: return [ ( AgentAction( tool="Foo", tool_input="Bar", log="Star date 2021-06-13: Foo received input: Bar", ), "Baz", ), ] @tool def foo(bar: str) -> str: """Foo.""" return bar class _FakeTrajectoryChatModel(FakeChatModel): queries: Dict = Field(default_factory=dict) sequential_responses: Optional[bool] = False response_index: int = 0 def _call( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: if self.sequential_responses: response = self.queries[list(self.queries.keys())[self.response_index]] self.response_index = self.response_index + 1 return response else: prompt = messages[0].content return self.queries[prompt] def test_trajectory_output_parser_parse() -> None: trajectory_output_parser = TrajectoryOutputParser() text = """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. Score: 2""" got = trajectory_output_parser.parse(text) want = TrajectoryEval( score=0.25, reasoning="""Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.""", ) assert got["score"] == want["score"] assert got["reasoning"] == want["reasoning"] with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.""" ) with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. Score: 9""" ) with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. Score: 10""" ) with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. Score: 0.1""" ) with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. 
Score: One""" ) def test_trajectory_eval_chain( intermediate_steps: List[Tuple[AgentAction, str]] ) -> None: llm = _FakeTrajectoryChatModel( queries={ "a": "Trajectory good\nScore: 5", "b": "Trajectory not good\nScore: 1", }, sequential_responses=True, ) chain = TrajectoryEvalChain.from_llm(llm=llm, agent_tools=[foo]) # type: ignore # Test when ref is not provided res = chain.evaluate_agent_trajectory( input="What is your favorite food?", agent_trajectory=intermediate_steps, prediction="I like pie.", ) assert res["score"] == 1.0 # Test when ref is provided res = chain.evaluate_agent_trajectory( input="What is your favorite food?", agent_trajectory=intermediate_steps, prediction="I like pie.", reference="Paris", ) assert res["score"] == 0.0 def test_trajectory_eval_chain_no_tools( intermediate_steps: List[Tuple[AgentAction, str]] ) -> None: llm = _FakeTrajectoryChatModel( queries={ "a": "Trajectory good\nScore: 5", "b": "Trajectory not good\nScore: 1", }, sequential_responses=True, ) chain = TrajectoryEvalChain.from_llm(llm=llm) # type: ignore res = chain.evaluate_agent_trajectory( input="What is your favorite food?", agent_trajectory=intermediate_steps, prediction="I like pie.", ) assert res["score"] == 1.0 res = chain.evaluate_agent_trajectory( input="What is your favorite food?", agent_trajectory=intermediate_steps, prediction="I like pie.", reference="Paris", ) assert res["score"] == 0.0 def test_old_api_works(intermediate_steps: List[Tuple[AgentAction, str]]) -> None: llm = _FakeTrajectoryChatModel( queries={ "a": "Trajectory good\nScore: 5", "b": "Trajectory not good\nScore: 1", }, sequential_responses=True, ) chain = TrajectoryEvalChain.from_llm(llm=llm) # type: ignore res = chain( { "question": "What is your favorite food?", "agent_trajectory": intermediate_steps, "answer": "I like pie.", } ) assert res["score"] == 1.0 res = chain( { "question": "What is your favorite food?", "agent_trajectory": intermediate_steps, "answer": "I like pie.", "reference": "Paris", } ) assert res["score"] == 0.0
[ "Foo." ]
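The 0.25 and 1.0 expectations in these tests come from the chain normalizing the 1-5 `Score:` value onto a 0-1 scale. A small illustration of the mapping the tests imply is given below; the helper name is made up for the example.

```python
# The tests above are consistent with a (score - 1) / 4 normalization of the 1-5 scale.
def normalize_trajectory_score(raw_score: int) -> float:
    """Map a 1-5 trajectory score onto [0.0, 1.0]."""
    return (raw_score - 1) / 4


assert normalize_trajectory_score(2) == 0.25  # "Score: 2" in the parser test
assert normalize_trajectory_score(5) == 1.0   # "Trajectory good\nScore: 5"
assert normalize_trajectory_score(1) == 0.0   # "Trajectory not good\nScore: 1"
```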
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~chat_models~test_ernie.py
import pytest from langchain_core.schema.messages import AIMessage, HumanMessage from langchain.chat_models.ernie import ErnieBotChat def test_chat_ernie_bot() -> None: chat = ErnieBotChat() message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_chat_ernie_bot_with_model_name() -> None: chat = ErnieBotChat(model_name="ERNIE-Bot") message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_chat_ernie_bot_with_temperature() -> None: chat = ErnieBotChat(model_name="ERNIE-Bot", temperature=1.0) message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_chat_ernie_bot_with_kwargs() -> None: chat = ErnieBotChat() message = HumanMessage(content="Hello") response = chat([message], temperature=0.88, top_p=0.7) assert isinstance(response, AIMessage) assert isinstance(response.content, str) def test_extra_kwargs() -> None: chat = ErnieBotChat(temperature=0.88, top_p=0.7) assert chat.temperature == 0.88 assert chat.top_p == 0.7 def test_wrong_temperature_1() -> None: chat = ErnieBotChat() message = HumanMessage(content="Hello") with pytest.raises(ValueError) as e: chat([message], temperature=1.2) assert "parameter check failed, temperature range is (0, 1.0]" in str(e) def test_wrong_temperature_2() -> None: chat = ErnieBotChat() message = HumanMessage(content="Hello") with pytest.raises(ValueError) as e: chat([message], temperature=0) assert "parameter check failed, temperature range is (0, 1.0]" in str(e)
[ "Hello" ]
2024-01-10
axgpt/langchain
libs~langchain~langchain~document_transformers~doctran_text_qa.py
from typing import Any, Optional, Sequence from langchain_core.schema import BaseDocumentTransformer, Document from langchain.utils import get_from_env class DoctranQATransformer(BaseDocumentTransformer): """Extract QA from text documents using doctran. Arguments: openai_api_key: OpenAI API key. Can also be specified via environment variable ``OPENAI_API_KEY``. Example: .. code-block:: python from langchain.document_transformers import DoctranQATransformer # Pass in openai_api_key or set env var OPENAI_API_KEY qa_transformer = DoctranQATransformer() transformed_document = await qa_transformer.atransform_documents(documents) """ def __init__( self, openai_api_key: Optional[str] = None, openai_api_model: Optional[str] = None, ) -> None: self.openai_api_key = openai_api_key or get_from_env( "openai_api_key", "OPENAI_API_KEY" ) self.openai_api_model = openai_api_model or get_from_env( "openai_api_model", "OPENAI_API_MODEL" ) def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: raise NotImplementedError async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: """Extracts QA from text documents using doctran.""" try: from doctran import Doctran doctran = Doctran( openai_api_key=self.openai_api_key, openai_model=self.openai_api_model ) except ImportError: raise ImportError( "Install doctran to use this parser. (pip install doctran)" ) for d in documents: doctran_doc = ( await doctran.parse(content=d.page_content).interrogate().execute() ) questions_and_answers = doctran_doc.extracted_properties.get( "questions_and_answers" ) d.metadata["questions_and_answers"] = questions_and_answers return documents
[]
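Since `transform_documents` above raises `NotImplementedError`, the transformer has to be driven through its async method. A small driver sketch follows; the document text is made up, and `OPENAI_API_KEY` / `OPENAI_API_MODEL` are assumed to be set (or passed explicitly).

```python
# Minimal sketch: run the async QA extraction over a single document.
import asyncio

from langchain_core.schema import Document

from langchain.document_transformers import DoctranQATransformer


async def main() -> None:
    docs = [Document(page_content="Paris is the capital of France.")]
    transformer = DoctranQATransformer()  # reads OPENAI_API_KEY / OPENAI_API_MODEL
    transformed = await transformer.atransform_documents(docs)
    print(transformed[0].metadata["questions_and_answers"])


asyncio.run(main())
```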
2024-01-10
axgpt/langchain
libs~langchain~langchain~vectorstores~docarray~hnsw.py
from __future__ import annotations from typing import Any, List, Literal, Optional from langchain_core.schema.embeddings import Embeddings from langchain.vectorstores.docarray.base import ( DocArrayIndex, _check_docarray_import, ) class DocArrayHnswSearch(DocArrayIndex): """`HnswLib` storage using `DocArray` package. To use it, you should have the ``docarray`` package with version >=0.32.0 installed. You can install it with `pip install "langchain[docarray]"`. """ @classmethod def from_params( cls, embedding: Embeddings, work_dir: str, n_dim: int, dist_metric: Literal["cosine", "ip", "l2"] = "cosine", max_elements: int = 1024, index: bool = True, ef_construction: int = 200, ef: int = 10, M: int = 16, allow_replace_deleted: bool = True, num_threads: int = 1, **kwargs: Any, ) -> DocArrayHnswSearch: """Initialize DocArrayHnswSearch store. Args: embedding (Embeddings): Embedding function. work_dir (str): path to the location where all the data will be stored. n_dim (int): dimension of an embedding. dist_metric (str): Distance metric for DocArrayHnswSearch can be one of: "cosine", "ip", and "l2". Defaults to "cosine". max_elements (int): Maximum number of vectors that can be stored. Defaults to 1024. index (bool): Whether an index should be built for this field. Defaults to True. ef_construction (int): defines a construction time/accuracy trade-off. Defaults to 200. ef (int): parameter controlling query time/accuracy trade-off. Defaults to 10. M (int): parameter that defines the maximum number of outgoing connections in the graph. Defaults to 16. allow_replace_deleted (bool): Enables replacing of deleted elements with new added ones. Defaults to True. num_threads (int): Sets the number of cpu threads to use. Defaults to 1. **kwargs: Other keyword arguments to be passed to the get_doc_cls method. """ _check_docarray_import() from docarray.index import HnswDocumentIndex doc_cls = cls._get_doc_cls( dim=n_dim, space=dist_metric, max_elements=max_elements, index=index, ef_construction=ef_construction, ef=ef, M=M, allow_replace_deleted=allow_replace_deleted, num_threads=num_threads, **kwargs, ) doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir) # type: ignore return cls(doc_index, embedding) @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, work_dir: Optional[str] = None, n_dim: Optional[int] = None, **kwargs: Any, ) -> DocArrayHnswSearch: """Create an DocArrayHnswSearch store and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. work_dir (str): path to the location where all the data will be stored. n_dim (int): dimension of an embedding. **kwargs: Other keyword arguments to be passed to the __init__ method. Returns: DocArrayHnswSearch Vector Store """ if work_dir is None: raise ValueError("`work_dir` parameter has not been set.") if n_dim is None: raise ValueError("`n_dim` parameter has not been set.") store = cls.from_params(embedding, work_dir, n_dim, **kwargs) store.add_texts(texts=texts, metadatas=metadatas) return store
[]
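A short usage sketch for the store above; the embedding model, `work_dir`, and `n_dim` are illustrative assumptions (the dimension must match whatever embedding model is actually used).

```python
# Minimal sketch: persist an HNSW index on disk and query it.
from langchain.embeddings import OpenAIEmbeddings  # assumes OPENAI_API_KEY is set
from langchain.vectorstores import DocArrayHnswSearch

store = DocArrayHnswSearch.from_texts(
    [
        "hnswlib builds a navigable small-world graph over the vectors.",
        "DocArray persists the index under the given work_dir.",
    ],
    OpenAIEmbeddings(),
    work_dir="./hnswlib_store",  # hypothetical on-disk location
    n_dim=1536,                  # dimensionality of text-embedding-ada-002 vectors
)

docs = store.similarity_search("Where is the index stored?", k=1)
print(docs[0].page_content)
```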
2024-01-10
axgpt/langchain
libs~langchain~langchain~chat_models~baichuan.py
import hashlib import json import logging import time from typing import Any, Dict, Iterator, List, Mapping, Optional, Type import requests from langchain_core.pydantic_v1 import Field, SecretStr, root_validator from langchain_core.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, ) from langchain_core.schema.messages import ( AIMessageChunk, BaseMessageChunk, ChatMessageChunk, HumanMessageChunk, ) from langchain_core.schema.output import ChatGenerationChunk from langchain_core.utils import ( convert_to_secret_str, get_pydantic_field_names, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.chat_models.base import BaseChatModel, _generate_from_stream from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) DEFAULT_API_BASE = "https://api.baichuan-ai.com/v1" def _convert_message_to_dict(message: BaseMessage) -> dict: message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} else: raise TypeError(f"Got unknown type {message}") return message_dict def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: role = _dict["role"] if role == "user": return HumanMessage(content=_dict["content"]) elif role == "assistant": return AIMessage(content=_dict.get("content", "") or "") else: return ChatMessage(content=_dict["content"], role=role) def _convert_delta_to_message_chunk( _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk] ) -> BaseMessageChunk: role = _dict.get("role") content = _dict.get("content") or "" if role == "user" or default_class == HumanMessageChunk: return HumanMessageChunk(content=content) elif role == "assistant" or default_class == AIMessageChunk: return AIMessageChunk(content=content) elif role or default_class == ChatMessageChunk: return ChatMessageChunk(content=content, role=role) else: return default_class(content=content) # signature generation def _signature(secret_key: SecretStr, payload: Dict[str, Any], timestamp: int) -> str: input_str = secret_key.get_secret_value() + json.dumps(payload) + str(timestamp) md5 = hashlib.md5() md5.update(input_str.encode("utf-8")) return md5.hexdigest() class ChatBaichuan(BaseChatModel): """Baichuan chat models API by Baichuan Intelligent Technology. 
For more information, see https://platform.baichuan-ai.com/docs/api """ @property def lc_secrets(self) -> Dict[str, str]: return { "baichuan_api_key": "BAICHUAN_API_KEY", "baichuan_secret_key": "BAICHUAN_SECRET_KEY", } @property def lc_serializable(self) -> bool: return True baichuan_api_base: str = Field(default=DEFAULT_API_BASE) """Baichuan custom endpoints""" baichuan_api_key: Optional[str] = None """Baichuan API Key""" baichuan_secret_key: Optional[SecretStr] = None """Baichuan Secret Key""" streaming: bool = False """Whether to stream the results or not.""" request_timeout: int = 60 """request timeout for chat http requests""" model = "Baichuan2-53B" """model name of Baichuan, default is `Baichuan2-53B`.""" temperature: float = 0.3 """What sampling temperature to use.""" top_k: int = 5 """What search sampling control to use.""" top_p: float = 0.85 """What probability mass to use.""" with_search_enhance: bool = False """Whether to use search enhance, default is False.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for API call not explicitly specified.""" class Config: """Configuration for this pydantic object.""" allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." 
) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: values["baichuan_api_base"] = get_from_dict_or_env( values, "baichuan_api_base", "BAICHUAN_API_BASE", DEFAULT_API_BASE, ) values["baichuan_api_key"] = get_from_dict_or_env( values, "baichuan_api_key", "BAICHUAN_API_KEY", ) values["baichuan_secret_key"] = convert_to_secret_str( get_from_dict_or_env( values, "baichuan_secret_key", "BAICHUAN_SECRET_KEY", ) ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Baichuan API.""" normal_params = { "model": self.model, "temperature": self.temperature, "top_p": self.top_p, "top_k": self.top_k, "with_search_enhance": self.with_search_enhance, } return {**normal_params, **self.model_kwargs} def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: stream_iter = self._stream( messages=messages, stop=stop, run_manager=run_manager, **kwargs ) return _generate_from_stream(stream_iter) res = self._chat(messages, **kwargs) response = res.json() if response.get("code") != 0: raise ValueError(f"Error from Baichuan api response: {response}") return self._create_chat_result(response) def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: res = self._chat(messages, **kwargs) default_chunk_class = AIMessageChunk for chunk in res.iter_lines(): response = json.loads(chunk) if response.get("code") != 0: raise ValueError(f"Error from Baichuan api response: {response}") data = response.get("data") for m in data.get("messages"): chunk = _convert_delta_to_message_chunk(m, default_chunk_class) default_chunk_class = chunk.__class__ yield ChatGenerationChunk(message=chunk) if run_manager: run_manager.on_llm_new_token(chunk.content) def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response: if self.baichuan_secret_key is None: raise ValueError("Baichuan secret key is not set.") parameters = {**self._default_params, **kwargs} model = parameters.pop("model") headers = parameters.pop("headers", {}) payload = { "model": model, "messages": [_convert_message_to_dict(m) for m in messages], "parameters": parameters, } timestamp = int(time.time()) url = self.baichuan_api_base if self.streaming: url = f"{url}/stream" url = f"{url}/chat" res = requests.post( url=url, timeout=self.request_timeout, headers={ "Content-Type": "application/json", "Authorization": f"Bearer {self.baichuan_api_key}", "X-BC-Timestamp": str(timestamp), "X-BC-Signature": _signature( secret_key=self.baichuan_secret_key, payload=payload, timestamp=timestamp, ), "X-BC-Sign-Algo": "MD5", **headers, }, json=payload, stream=self.streaming, ) return res def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: generations = [] for m in response["data"]["messages"]: message = _convert_dict_to_message(m) gen = ChatGeneration(message=message) generations.append(gen) token_usage = response["usage"] llm_output = {"token_usage": token_usage, "model": self.model} return ChatResult(generations=generations, llm_output=llm_output) @property def _llm_type(self) -> str: return "baichuan-chat"
[ "content" ]
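A brief usage sketch for the chat model above; the credentials are placeholders (they can also be supplied via `BAICHUAN_API_KEY` / `BAICHUAN_SECRET_KEY`), and each request is signed with the MD5 scheme implemented in `_signature`.

```python
# Minimal sketch: one-shot call to the Baichuan chat API via ChatBaichuan.
from langchain_core.schema.messages import HumanMessage

from langchain.chat_models import ChatBaichuan

chat = ChatBaichuan(
    baichuan_api_key="<api_key>",        # placeholder
    baichuan_secret_key="<secret_key>",  # placeholder
    temperature=0.3,
)

# Requests are signed with MD5(secret_key + JSON payload + timestamp), per _signature.
response = chat([HumanMessage(content="Introduce yourself in one sentence.")])
print(response.content)
```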
2024-01-10
axgpt/langchain
libs~langchain~tests~integration_tests~memory~test_upstash_redis.py
import json import pytest from langchain_core.schema.messages import _message_to_dict from langchain.memory import ConversationBufferMemory from langchain.memory.chat_message_histories.upstash_redis import ( UpstashRedisChatMessageHistory, ) URL = "<UPSTASH_REDIS_REST_URL>" TOKEN = "<UPSTASH_REDIS_REST_TOKEN>" @pytest.mark.requires("upstash_redis") def test_memory_with_message_store() -> None: """Test the memory with a message store.""" # setup Upstash Redis as a message store message_history = UpstashRedisChatMessageHistory( url=URL, token=TOKEN, ttl=10, session_id="my-test-session" ) memory = ConversationBufferMemory( memory_key="baz", chat_memory=message_history, return_messages=True ) # add some messages memory.chat_memory.add_ai_message("This is me, the AI") memory.chat_memory.add_user_message("This is me, the human") # get the message history from the memory store and turn it into a json messages = memory.chat_memory.messages messages_json = json.dumps([_message_to_dict(msg) for msg in messages]) assert "This is me, the AI" in messages_json assert "This is me, the human" in messages_json # remove the record from Redis, so the next test run won't pick it up memory.chat_memory.clear()
[]