date_collected: stringclasses (1 value)
repo_name: stringlengths (6 to 116)
file_name: stringlengths (2 to 220)
file_contents: stringlengths (13 to 357k)
prompts: sequence
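Each row that follows pairs a snapshot date, a source repository, a file path within that repository, the raw file contents, and a (possibly empty) list of prompt strings extracted from the file. As a rough illustration of how rows with this schema could be consumed, here is a minimal sketch using the Hugging Face `datasets` library; the dataset identifier "your-org/your-dataset" is a placeholder, not the actual published name.

from datasets import load_dataset  # pip install datasets

# Placeholder identifier: substitute the real dataset name or a local data file.
rows = load_dataset("your-org/your-dataset", split="train")

for row in rows:
    # Each row mirrors the schema above.
    print(row["date_collected"], row["repo_name"], row["file_name"])
    print(f'  {len(row["file_contents"])} chars, {len(row["prompts"])} prompts')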
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~vectorstores~vectara.py
from __future__ import annotations import json import logging import os from hashlib import md5 from typing import Any, Iterable, List, Optional, Tuple, Type import requests from langchain.pydantic_v1 import Field from langchain.schema import Document from langchain.schema.embeddings import Embeddings from langchain.schema.vectorstore import VectorStore, VectorStoreRetriever logger = logging.getLogger(__name__) class Vectara(VectorStore): """`Vectara API` vector store. See (https://vectara.com). Example: .. code-block:: python from langchain.vectorstores import Vectara vectorstore = Vectara( vectara_customer_id=vectara_customer_id, vectara_corpus_id=vectara_corpus_id, vectara_api_key=vectara_api_key ) """ def __init__( self, vectara_customer_id: Optional[str] = None, vectara_corpus_id: Optional[str] = None, vectara_api_key: Optional[str] = None, vectara_api_timeout: int = 60, source: str = "langchain", ): """Initialize with Vectara API.""" self._vectara_customer_id = vectara_customer_id or os.environ.get( "VECTARA_CUSTOMER_ID" ) self._vectara_corpus_id = vectara_corpus_id or os.environ.get( "VECTARA_CORPUS_ID" ) self._vectara_api_key = vectara_api_key or os.environ.get("VECTARA_API_KEY") if ( self._vectara_customer_id is None or self._vectara_corpus_id is None or self._vectara_api_key is None ): logger.warning( "Can't find Vectara credentials, customer_id or corpus_id in " "environment." ) else: logger.debug(f"Using corpus id {self._vectara_corpus_id}") self._source = source self._session = requests.Session() # to reuse connections adapter = requests.adapters.HTTPAdapter(max_retries=3) self._session.mount("http://", adapter) self.vectara_api_timeout = vectara_api_timeout @property def embeddings(self) -> Optional[Embeddings]: return None def _get_post_headers(self) -> dict: """Returns headers that should be attached to each post request.""" return { "x-api-key": self._vectara_api_key, "customer-id": self._vectara_customer_id, "Content-Type": "application/json", "X-Source": self._source, } def _delete_doc(self, doc_id: str) -> bool: """ Delete a document from the Vectara corpus. Args: url (str): URL of the page to delete. doc_id (str): ID of the document to delete. Returns: bool: True if deletion was successful, False otherwise. 
""" body = { "customer_id": self._vectara_customer_id, "corpus_id": self._vectara_corpus_id, "document_id": doc_id, } response = self._session.post( "https://api.vectara.io/v1/delete-doc", data=json.dumps(body), verify=True, headers=self._get_post_headers(), timeout=self.vectara_api_timeout, ) if response.status_code != 200: logger.error( f"Delete request failed for doc_id = {doc_id} with status code " f"{response.status_code}, reason {response.reason}, text " f"{response.text}" ) return False return True def _index_doc(self, doc: dict) -> str: request: dict[str, Any] = {} request["customer_id"] = self._vectara_customer_id request["corpus_id"] = self._vectara_corpus_id request["document"] = doc response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/index", data=json.dumps(request), timeout=self.vectara_api_timeout, verify=True, ) status_code = response.status_code result = response.json() status_str = result["status"]["code"] if "status" in result else None if status_code == 409 or status_str and (status_str == "ALREADY_EXISTS"): return "E_ALREADY_EXISTS" elif status_str and (status_str == "FORBIDDEN"): return "E_NO_PERMISSIONS" else: return "E_SUCCEEDED" def add_files( self, files_list: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """ Vectara provides a way to add documents directly via our API where pre-processing and chunking occurs internally in an optimal way This method provides a way to use that API in LangChain Args: files_list: Iterable of strings, each representing a local file path. Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc. see API docs for full list metadatas: Optional list of metadatas associated with each file Returns: List of ids associated with each of the files indexed """ doc_ids = [] for inx, file in enumerate(files_list): if not os.path.exists(file): logger.error(f"File {file} does not exist, skipping") continue md = metadatas[inx] if metadatas else {} files: dict = { "file": (file, open(file, "rb")), "doc_metadata": json.dumps(md), } headers = self._get_post_headers() headers.pop("Content-Type") response = self._session.post( f"https://api.vectara.io/upload?c={self._vectara_customer_id}&o={self._vectara_corpus_id}&d=True", files=files, verify=True, headers=headers, timeout=self.vectara_api_timeout, ) if response.status_code == 409: doc_id = response.json()["document"]["documentId"] logger.info( f"File {file} already exists on Vectara (doc_id={doc_id}), skipping" ) elif response.status_code == 200: doc_id = response.json()["document"]["documentId"] doc_ids.append(doc_id) else: logger.info(f"Error indexing file {file}: {response.json()}") return doc_ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, doc_metadata: Optional[dict] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. doc_metadata: optional metadata for the document This function indexes all the input text strings in the Vectara corpus as a single Vectara document, where each input text is considered a "section" and the metadata are associated with each section. if 'doc_metadata' is provided, it is associated with the Vectara document. 
Returns: document ID of the document added """ doc_hash = md5() for t in texts: doc_hash.update(t.encode()) doc_id = doc_hash.hexdigest() if metadatas is None: metadatas = [{} for _ in texts] if doc_metadata: doc_metadata["source"] = "langchain" else: doc_metadata = {"source": "langchain"} doc = { "document_id": doc_id, "metadataJson": json.dumps(doc_metadata), "section": [ {"text": text, "metadataJson": json.dumps(md)} for text, md in zip(texts, metadatas) ], } success_str = self._index_doc(doc) if success_str == "E_ALREADY_EXISTS": self._delete_doc(doc_id) self._index_doc(doc) elif success_str == "E_NO_PERMISSIONS": print( """No permissions to add document to Vectara. Check your corpus ID, customer ID and API key""" ) return [doc_id] def similarity_search_with_score( self, query: str, k: int = 5, lambda_val: float = 0.025, filter: Optional[str] = None, score_threshold: Optional[float] = None, n_sentence_context: int = 2, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. score_threshold: minimal score threshold for the result. If defined, results with score less than this value will be filtered out. n_sentence_context: number of sentences before/after the matching segment to add, defaults to 2 Returns: List of Documents most similar to the query and score for each. """ data = json.dumps( { "query": [ { "query": query, "start": 0, "num_results": k, "context_config": { "sentences_before": n_sentence_context, "sentences_after": n_sentence_context, }, "corpus_key": [ { "customer_id": self._vectara_customer_id, "corpus_id": self._vectara_corpus_id, "metadataFilter": filter, "lexical_interpolation_config": {"lambda": lambda_val}, } ], } ] } ) response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/query", data=data, timeout=self.vectara_api_timeout, ) if response.status_code != 200: logger.error( "Query failed %s", f"(code {response.status_code}, reason {response.reason}, details " f"{response.text})", ) return [] result = response.json() if score_threshold: responses = [ r for r in result["responseSet"][0]["response"] if r["score"] > score_threshold ] else: responses = result["responseSet"][0]["response"] documents = result["responseSet"][0]["document"] metadatas = [] for x in responses: md = {m["name"]: m["value"] for m in x["metadata"]} doc_num = x["documentIndex"] doc_md = {m["name"]: m["value"] for m in documents[doc_num]["metadata"]} md.update(doc_md) metadatas.append(md) docs_with_score = [ ( Document( page_content=x["text"], metadata=md, ), x["score"], ) for x, md in zip(responses, metadatas) ] return docs_with_score def similarity_search( self, query: str, k: int = 5, lambda_val: float = 0.025, filter: Optional[str] = None, n_sentence_context: int = 2, **kwargs: Any, ) -> List[Document]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. filter: Dictionary of argument(s) to filter on metadata. 
For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. n_sentence_context: number of sentences before/after the matching segment to add, defaults to 2 Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_with_score( query, k=k, lambda_val=lambda_val, filter=filter, score_threshold=None, n_sentence_context=n_sentence_context, **kwargs, ) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls: Type[Vectara], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Vectara vectara = Vectara.from_texts( texts, vectara_customer_id=customer_id, vectara_corpus_id=corpus_id, vectara_api_key=api_key, ) """ # Notes: # * Vectara generates its own embeddings, so we ignore the provided # embeddings (required by interface) # * when metadatas[] are provided they are associated with each "part" # in Vectara. doc_metadata can be used to provide additional metadata # for the document itself (applies to all "texts" in this call) doc_metadata = kwargs.pop("doc_metadata", {}) vectara = cls(**kwargs) vectara.add_texts(texts, metadatas, doc_metadata=doc_metadata, **kwargs) return vectara @classmethod def from_files( cls: Type[Vectara], files: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import Vectara vectara = Vectara.from_files( files_list, vectara_customer_id=customer_id, vectara_corpus_id=corpus_id, vectara_api_key=api_key, ) """ # Note: Vectara generates its own embeddings, so we ignore the provided # embeddings (required by interface) vectara = cls(**kwargs) vectara.add_files(files, metadatas) return vectara def as_retriever(self, **kwargs: Any) -> VectaraRetriever: tags = kwargs.pop("tags", None) or [] tags.extend(self._get_retriever_tags()) return VectaraRetriever(vectorstore=self, **kwargs, tags=tags) class VectaraRetriever(VectorStoreRetriever): """Retriever class for `Vectara`.""" vectorstore: Vectara """Vectara vectorstore.""" search_kwargs: dict = Field( default_factory=lambda: { "lambda_val": 0.025, "k": 5, "filter": "", "n_sentence_context": "2", } ) """Search params. k: Number of Documents to return. Defaults to 5. lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. n_sentence_context: number of sentences before/after the matching segment to add """ def add_texts( self, texts: List[str], metadatas: Optional[List[dict]] = None, doc_metadata: Optional[dict] = None, ) -> None: """Add text to the Vectara vectorstore. Args: texts (List[str]): The text metadatas (List[dict]): Metadata dicts, must line up with existing store """ self.vectorstore.add_texts(texts, metadatas, doc_metadata or {})
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~ctransformers.py
from functools import partial
from typing import Any, Dict, List, Optional, Sequence

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.pydantic_v1 import root_validator


class CTransformers(LLM):
    """C Transformers LLM models.

    To use, you should have the ``ctransformers`` python package installed.
    See https://github.com/marella/ctransformers

    Example:
        .. code-block:: python

            from langchain.llms import CTransformers

            llm = CTransformers(model="/path/to/ggml-gpt-2.bin", model_type="gpt2")
    """

    client: Any  #: :meta private:

    model: str
    """The path to a model file or directory or the name of a Hugging Face Hub
    model repo."""

    model_type: Optional[str] = None
    """The model type."""

    model_file: Optional[str] = None
    """The name of the model file in repo or directory."""

    config: Optional[Dict[str, Any]] = None
    """The config parameters.
    See https://github.com/marella/ctransformers#config"""

    lib: Optional[str] = None
    """The path to a shared library or one of `avx2`, `avx`, `basic`."""

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {
            "model": self.model,
            "model_type": self.model_type,
            "model_file": self.model_file,
            "config": self.config,
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "ctransformers"

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that ``ctransformers`` package is installed."""
        try:
            from ctransformers import AutoModelForCausalLM
        except ImportError:
            raise ImportError(
                "Could not import `ctransformers` package. "
                "Please install it with `pip install ctransformers`"
            )

        config = values["config"] or {}
        values["client"] = AutoModelForCausalLM.from_pretrained(
            values["model"],
            model_type=values["model_type"],
            model_file=values["model_file"],
            lib=values["lib"],
            **config,
        )
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[Sequence[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Generate text from a prompt.

        Args:
            prompt: The prompt to generate text from.
            stop: A list of sequences to stop generation when encountered.

        Returns:
            The generated text.

        Example:
            .. code-block:: python

                response = llm("Tell me a joke.")
        """
        text = []
        _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
        for chunk in self.client(prompt, stop=stop, stream=True):
            text.append(chunk)
            _run_manager.on_llm_new_token(chunk, verbose=self.verbose)
        return "".join(text)

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Asynchronous call out to the CTransformers generate method.
        Very helpful when streaming (like with websockets!).

        Args:
            prompt: The prompt to pass into the model.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = llm("Once upon a time, ")
        """
        text_callback = None
        if run_manager:
            text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
        text = ""
        for token in self.client(prompt, stop=stop, stream=True):
            if text_callback:
                await text_callback(token)
            text += token
        return text
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~obs_file.py
# coding:utf-8
import os
import tempfile
from typing import Any, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


class OBSFileLoader(BaseLoader):
    """Load from the `Huawei OBS file`."""

    def __init__(
        self,
        bucket: str,
        key: str,
        client: Any = None,
        endpoint: str = "",
        config: Optional[dict] = None,
    ) -> None:
        """Initialize the OBSFileLoader with the specified settings.

        Args:
            bucket (str): The name of the OBS bucket to be used.
            key (str): The name of the object in the OBS bucket.
            client (ObsClient, optional): An instance of the ObsClient to connect to OBS.
            endpoint (str, optional): The endpoint URL of your OBS bucket. This parameter
                is mandatory if `client` is not provided.
            config (dict, optional): The parameters for connecting to OBS, provided as a
                dictionary. This parameter is ignored if `client` is provided. The
                dictionary could have the following keys:
                - "ak" (str, optional): Your OBS access key (required if
                  `get_token_from_ecs` is False and bucket policy is not public read).
                - "sk" (str, optional): Your OBS secret key (required if
                  `get_token_from_ecs` is False and bucket policy is not public read).
                - "token" (str, optional): Your security token (required if using
                  temporary credentials).
                - "get_token_from_ecs" (bool, optional): Whether to retrieve the security
                  token from ECS. Defaults to False if not provided. If set to True,
                  `ak`, `sk`, and `token` will be ignored.

        Raises:
            ValueError: If the `esdk-obs-python` package is not installed.
            TypeError: If the provided `client` is not an instance of ObsClient.
            ValueError: If `client` is not provided, but `endpoint` is missing.

        Note:
            Before using this class, make sure you have registered with OBS and have
            the necessary credentials. The `ak`, `sk`, and `endpoint` values are
            mandatory unless `get_token_from_ecs` is True or the bucket policy is
            public read. `token` is required when using temporary credentials.

        Example:
            To create a new OBSFileLoader with a new client:
            ```
            config = {
                "ak": "your-access-key",
                "sk": "your-secret-key"
            }
            obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", config=config)
            ```

            To create a new OBSFileLoader with an existing client:
            ```
            from obs import ObsClient
            # Assuming you have an existing ObsClient object 'obs_client'
            obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", client=obs_client)
            ```

            To create a new OBSFileLoader without an existing client:
            ```
            obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", endpoint="your-endpoint-url")
            ```
        """  # noqa: E501
        try:
            from obs import ObsClient
        except ImportError:
            raise ImportError(
                "Could not import esdk-obs-python python package. "
                "Please install it with `pip install esdk-obs-python`."
            )
        if not client:
            if not endpoint:
                raise ValueError("Either OBSClient or endpoint must be provided.")
            if not config:
                config = dict()
            if config.get("get_token_from_ecs"):
                client = ObsClient(server=endpoint, security_provider_policy="ECS")
            else:
                client = ObsClient(
                    access_key_id=config.get("ak"),
                    secret_access_key=config.get("sk"),
                    security_token=config.get("token"),
                    server=endpoint,
                )
        if not isinstance(client, ObsClient):
            raise TypeError("Client must be ObsClient type")
        self.client = client
        self.bucket = bucket
        self.key = key

    def load(self) -> List[Document]:
        """Load documents."""
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.bucket}/{self.key}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            # Download the file to a destination
            self.client.downloadFile(
                bucketName=self.bucket, objectKey=self.key, downloadFile=file_path
            )
            loader = UnstructuredFileLoader(file_path)
            return loader.load()
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~retrievers~ensemble.py
""" Ensemble retriever that ensemble the results of multiple retrievers by using weighted Reciprocal Rank Fusion """ from typing import Any, Dict, List from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain.pydantic_v1 import root_validator from langchain.schema import BaseRetriever, Document class EnsembleRetriever(BaseRetriever): """Retriever that ensembles the multiple retrievers. It uses a rank fusion. Args: retrievers: A list of retrievers to ensemble. weights: A list of weights corresponding to the retrievers. Defaults to equal weighting for all retrievers. c: A constant added to the rank, controlling the balance between the importance of high-ranked items and the consideration given to lower-ranked items. Default is 60. """ retrievers: List[BaseRetriever] weights: List[float] c: int = 60 @root_validator(pre=True) def set_weights(cls, values: Dict[str, Any]) -> Dict[str, Any]: if not values.get("weights"): n_retrievers = len(values["retrievers"]) values["weights"] = [1 / n_retrievers] * n_retrievers return values def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, ) -> List[Document]: """ Get the relevant documents for a given query. Args: query: The query to search for. Returns: A list of reranked documents. """ # Get fused result of the retrievers. fused_documents = self.rank_fusion(query, run_manager) return fused_documents async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, ) -> List[Document]: """ Asynchronously get the relevant documents for a given query. Args: query: The query to search for. Returns: A list of reranked documents. """ # Get fused result of the retrievers. fused_documents = await self.arank_fusion(query, run_manager) return fused_documents def rank_fusion( self, query: str, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: """ Retrieve the results of the retrievers and use rank_fusion_func to get the final result. Args: query: The query to search for. Returns: A list of reranked documents. """ # Get the results of all retrievers. retriever_docs = [ retriever.get_relevant_documents( query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}") ) for i, retriever in enumerate(self.retrievers) ] # apply rank fusion fused_documents = self.weighted_reciprocal_rank(retriever_docs) return fused_documents async def arank_fusion( self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun ) -> List[Document]: """ Asynchronously retrieve the results of the retrievers and use rank_fusion_func to get the final result. Args: query: The query to search for. Returns: A list of reranked documents. """ # Get the results of all retrievers. retriever_docs = [ await retriever.aget_relevant_documents( query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}") ) for i, retriever in enumerate(self.retrievers) ] # apply rank fusion fused_documents = self.weighted_reciprocal_rank(retriever_docs) return fused_documents def weighted_reciprocal_rank( self, doc_lists: List[List[Document]] ) -> List[Document]: """ Perform weighted Reciprocal Rank Fusion on multiple rank lists. You can find more details about RRF here: https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf Args: doc_lists: A list of rank lists, where each rank list contains unique items. Returns: list: The final aggregated list of items sorted by their weighted RRF scores in descending order. 
""" if len(doc_lists) != len(self.weights): raise ValueError( "Number of rank lists must be equal to the number of weights." ) # Create a union of all unique documents in the input doc_lists all_documents = set() for doc_list in doc_lists: for doc in doc_list: all_documents.add(doc.page_content) # Initialize the RRF score dictionary for each document rrf_score_dic = {doc: 0.0 for doc in all_documents} # Calculate RRF scores for each document for doc_list, weight in zip(doc_lists, self.weights): for rank, doc in enumerate(doc_list, start=1): rrf_score = weight * (1 / (rank + self.c)) rrf_score_dic[doc.page_content] += rrf_score # Sort documents by their RRF scores in descending order sorted_documents = sorted( rrf_score_dic.keys(), key=lambda x: rrf_score_dic[x], reverse=True ) # Map the sorted page_content back to the original document objects page_content_to_doc_map = { doc.page_content: doc for doc_list in doc_lists for doc in doc_list } sorted_docs = [ page_content_to_doc_map[page_content] for page_content in sorted_documents ] return sorted_docs
[]
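The EnsembleRetriever source above combines the ranked lists of several retrievers with weighted Reciprocal Rank Fusion. As a rough, self-contained usage sketch (the FixedRetriever class below is a toy stand-in defined only for this illustration, not part of the library):

from typing import Any, List

from langchain.retrievers import EnsembleRetriever
from langchain.schema import BaseRetriever, Document


class FixedRetriever(BaseRetriever):
    """Toy retriever that returns a fixed, pre-ranked list of documents."""

    docs: List[Document]

    def _get_relevant_documents(self, query: str, *, run_manager: Any) -> List[Document]:
        return self.docs


a = FixedRetriever(docs=[Document(page_content="apple"), Document(page_content="pear")])
b = FixedRetriever(docs=[Document(page_content="pear"), Document(page_content="plum")])

# "pear" is ranked by both retrievers, so its fused RRF score is highest.
ensemble = EnsembleRetriever(retrievers=[a, b], weights=[0.5, 0.5])
print([d.page_content for d in ensemble.get_relevant_documents("fruit")])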
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~utilities~powerbi.py
"""Wrapper around a Power BI endpoint.""" from __future__ import annotations import asyncio import logging import os from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union import aiohttp import requests from aiohttp import ServerTimeoutError from requests.exceptions import Timeout from langchain.pydantic_v1 import BaseModel, Field, root_validator, validator logger = logging.getLogger(__name__) BASE_URL = os.getenv("POWERBI_BASE_URL", "https://api.powerbi.com/v1.0/myorg") if TYPE_CHECKING: from azure.core.credentials import TokenCredential class PowerBIDataset(BaseModel): """Create PowerBI engine from dataset ID and credential or token. Use either the credential or a supplied token to authenticate. If both are supplied the credential is used to generate a token. The impersonated_user_name is the UPN of a user to be impersonated. If the model is not RLS enabled, this will be ignored. """ dataset_id: str table_names: List[str] group_id: Optional[str] = None credential: Optional[TokenCredential] = None token: Optional[str] = None impersonated_user_name: Optional[str] = None sample_rows_in_table_info: int = Field(default=1, gt=0, le=10) schemas: Dict[str, str] = Field(default_factory=dict) aiosession: Optional[aiohttp.ClientSession] = None class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @validator("table_names", allow_reuse=True) def fix_table_names(cls, table_names: List[str]) -> List[str]: """Fix the table names.""" return [fix_table_name(table) for table in table_names] @root_validator(pre=True, allow_reuse=True) def token_or_credential_present(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate that at least one of token and credentials is present.""" if "token" in values or "credential" in values: return values raise ValueError("Please provide either a credential or a token.") @property def request_url(self) -> str: """Get the request url.""" if self.group_id: return f"{BASE_URL}/groups/{self.group_id}/datasets/{self.dataset_id}/executeQueries" # noqa: E501 # pylint: disable=C0301 return f"{BASE_URL}/datasets/{self.dataset_id}/executeQueries" # noqa: E501 # pylint: disable=C0301 @property def headers(self) -> Dict[str, str]: """Get the token.""" if self.token: return { "Content-Type": "application/json", "Authorization": "Bearer " + self.token, } from azure.core.exceptions import ( ClientAuthenticationError, # pylint: disable=import-outside-toplevel ) if self.credential: try: token = self.credential.get_token( "https://analysis.windows.net/powerbi/api/.default" ).token return { "Content-Type": "application/json", "Authorization": "Bearer " + token, } except Exception as exc: # pylint: disable=broad-exception-caught raise ClientAuthenticationError( "Could not get a token from the supplied credentials." ) from exc raise ClientAuthenticationError("No credential or token supplied.") def get_table_names(self) -> Iterable[str]: """Get names of tables available.""" return self.table_names def get_schemas(self) -> str: """Get the available schema's.""" if self.schemas: return ", ".join([f"{key}: {value}" for key, value in self.schemas.items()]) return "No known schema's yet. Use the schema_powerbi tool first." 
@property def table_info(self) -> str: """Information about all tables in the database.""" return self.get_table_info() def _get_tables_to_query( self, table_names: Optional[Union[List[str], str]] = None ) -> Optional[List[str]]: """Get the tables names that need to be queried, after checking they exist.""" if table_names is not None: if ( isinstance(table_names, list) and len(table_names) > 0 and table_names[0] != "" ): fixed_tables = [fix_table_name(table) for table in table_names] non_existing_tables = [ table for table in fixed_tables if table not in self.table_names ] if non_existing_tables: logger.warning( "Table(s) %s not found in dataset.", ", ".join(non_existing_tables), ) tables = [ table for table in fixed_tables if table not in non_existing_tables ] return tables if tables else None if isinstance(table_names, str) and table_names != "": if table_names not in self.table_names: logger.warning("Table %s not found in dataset.", table_names) return None return [fix_table_name(table_names)] return self.table_names def _get_tables_todo(self, tables_todo: List[str]) -> List[str]: """Get the tables that still need to be queried.""" return [table for table in tables_todo if table not in self.schemas] def _get_schema_for_tables(self, table_names: List[str]) -> str: """Create a string of the table schemas for the supplied tables.""" schemas = [ schema for table, schema in self.schemas.items() if table in table_names ] return ", ".join(schemas) def get_table_info( self, table_names: Optional[Union[List[str], str]] = None ) -> str: """Get information about specified tables.""" tables_requested = self._get_tables_to_query(table_names) if tables_requested is None: return "No (valid) tables requested." tables_todo = self._get_tables_todo(tables_requested) for table in tables_todo: self._get_schema(table) return self._get_schema_for_tables(tables_requested) async def aget_table_info( self, table_names: Optional[Union[List[str], str]] = None ) -> str: """Get information about specified tables.""" tables_requested = self._get_tables_to_query(table_names) if tables_requested is None: return "No (valid) tables requested." 
tables_todo = self._get_tables_todo(tables_requested) await asyncio.gather(*[self._aget_schema(table) for table in tables_todo]) return self._get_schema_for_tables(tables_requested) def _get_schema(self, table: str) -> None: """Get the schema for a table.""" try: result = self.run( f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})" ) self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"]) except Timeout: logger.warning("Timeout while getting table info for %s", table) self.schemas[table] = "unknown" except Exception as exc: # pylint: disable=broad-exception-caught logger.warning("Error while getting table info for %s: %s", table, exc) self.schemas[table] = "unknown" async def _aget_schema(self, table: str) -> None: """Get the schema for a table.""" try: result = await self.arun( f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})" ) self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"]) except ServerTimeoutError: logger.warning("Timeout while getting table info for %s", table) self.schemas[table] = "unknown" except Exception as exc: # pylint: disable=broad-exception-caught logger.warning("Error while getting table info for %s: %s", table, exc) self.schemas[table] = "unknown" def _create_json_content(self, command: str) -> dict[str, Any]: """Create the json content for the request.""" return { "queries": [{"query": rf"{command}"}], "impersonatedUserName": self.impersonated_user_name, "serializerSettings": {"includeNulls": True}, } def run(self, command: str) -> Any: """Execute a DAX command and return a json representing the results.""" logger.debug("Running command: %s", command) response = requests.post( self.request_url, json=self._create_json_content(command), headers=self.headers, timeout=10, ) if response.status_code == 403: return ( "TokenError: Could not login to PowerBI, please check your credentials." ) return response.json() async def arun(self, command: str) -> Any: """Execute a DAX command and return the result asynchronously.""" logger.debug("Running command: %s", command) if self.aiosession: async with self.aiosession.post( self.request_url, headers=self.headers, json=self._create_json_content(command), timeout=10, ) as response: if response.status == 403: return "TokenError: Could not login to PowerBI, please check your credentials." # noqa: E501 response_json = await response.json(content_type=response.content_type) return response_json async with aiohttp.ClientSession() as session: async with session.post( self.request_url, headers=self.headers, json=self._create_json_content(command), timeout=10, ) as response: if response.status == 403: return "TokenError: Could not login to PowerBI, please check your credentials." 
# noqa: E501 response_json = await response.json(content_type=response.content_type) return response_json def json_to_md( json_contents: List[Dict[str, Union[str, int, float]]], table_name: Optional[str] = None, ) -> str: """Converts a JSON object to a markdown table.""" if len(json_contents) == 0: return "" output_md = "" headers = json_contents[0].keys() for header in headers: header.replace("[", ".").replace("]", "") if table_name: header.replace(f"{table_name}.", "") output_md += f"| {header} " output_md += "|\n" for row in json_contents: for value in row.values(): output_md += f"| {value} " output_md += "|\n" return output_md def fix_table_name(table: str) -> str: """Add single quotes around table names that contain spaces.""" if " " in table and not table.startswith("'") and not table.endswith("'"): return f"'{table}'" return table
[]
2024-01-10
ai-forever/gigachain
libs~langchain~tests~integration_tests~chat_models~test_jinachat.py
"""Test JinaChat wrapper.""" import pytest from langchain.callbacks.manager import CallbackManager from langchain.chat_models.jinachat import JinaChat from langchain.schema import ( BaseMessage, ChatGeneration, HumanMessage, LLMResult, SystemMessage, ) from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_jinachat() -> None: """Test JinaChat wrapper.""" chat = JinaChat(max_tokens=10) message = HumanMessage(content="Hello") response = chat([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_jinachat_system_message() -> None: """Test JinaChat wrapper with system message.""" chat = JinaChat(max_tokens=10) system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") response = chat([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_jinachat_generate() -> None: """Test JinaChat wrapper with generate.""" chat = JinaChat(max_tokens=10) message = HumanMessage(content="Hello") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content def test_jinachat_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = JinaChat( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) message = HumanMessage(content="Hello") response = chat([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) @pytest.mark.asyncio async def test_async_jinachat() -> None: """Test async generation.""" chat = JinaChat(max_tokens=102) message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content @pytest.mark.asyncio async def test_async_jinachat_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = JinaChat( max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True, ) message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert callback_handler.llm_streams > 0 assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content def test_jinachat_extra_kwargs() -> None: """Test extra kwargs to chat openai.""" # Check that foo is saved in extra_kwargs. 
llm = JinaChat(foo=3, max_tokens=10) assert llm.max_tokens == 10 assert llm.model_kwargs == {"foo": 3} # Test that if extra_kwargs are provided, they are added to it. llm = JinaChat(foo=3, model_kwargs={"bar": 2}) assert llm.model_kwargs == {"foo": 3, "bar": 2} # Test that if provided twice it errors with pytest.raises(ValueError): JinaChat(foo=3, model_kwargs={"foo": 2}) # Test that if explicit param is specified in kwargs it errors with pytest.raises(ValueError): JinaChat(model_kwargs={"temperature": 0.2})
[ "Hello", "You are to chat with the user." ]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~diffbot.py
import logging
from typing import Any, List

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class DiffbotLoader(BaseLoader):
    """Load `Diffbot` json file."""

    def __init__(
        self, api_token: str, urls: List[str], continue_on_failure: bool = True
    ):
        """Initialize with API token and URLs.

        Args:
            api_token: Diffbot API token.
            urls: List of URLs to load.
            continue_on_failure: Whether to continue loading other URLs if one fails.
                Defaults to True.
        """
        self.api_token = api_token
        self.urls = urls
        self.continue_on_failure = continue_on_failure

    def _diffbot_api_url(self, diffbot_api: str) -> str:
        return f"https://api.diffbot.com/v3/{diffbot_api}"

    def _get_diffbot_data(self, url: str) -> Any:
        """Get Diffbot file from Diffbot REST API."""
        # TODO: Add support for other Diffbot APIs
        diffbot_url = self._diffbot_api_url("article")
        params = {
            "token": self.api_token,
            "url": url,
        }
        response = requests.get(diffbot_url, params=params, timeout=10)
        # TODO: handle non-ok errors
        return response.json() if response.ok else {}

    def load(self) -> List[Document]:
        """Extract text from Diffbot on all the URLs and return Documents."""
        docs: List[Document] = list()
        for url in self.urls:
            try:
                data = self._get_diffbot_data(url)
                text = data["objects"][0]["text"] if "objects" in data else ""
                metadata = {"source": url}
                docs.append(Document(page_content=text, metadata=metadata))
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                else:
                    raise e
        return docs
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~loading.py
"""Base interface for loading large language model APIs.""" import json from pathlib import Path from typing import Any, Union import yaml from langchain.llms import get_type_to_cls_dict from langchain.llms.base import BaseLLM def load_llm_from_config(config: dict, **kwargs: Any) -> BaseLLM: """Load LLM from Config Dict.""" if "_type" not in config: raise ValueError("Must specify an LLM Type in config") config_type = config.pop("_type") type_to_cls_dict = get_type_to_cls_dict() if config_type not in type_to_cls_dict: raise ValueError(f"Loading {config_type} LLM not supported") llm_cls = type_to_cls_dict[config_type]() return llm_cls(**config, **kwargs) def load_llm(file: Union[str, Path], **kwargs: Any) -> BaseLLM: """Load LLM from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Load the LLM from the config now. return load_llm_from_config(config, **kwargs)
[]
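For reference, load_llm above reads a serialized LLM config and dispatches on its "_type" key. A minimal hedged sketch of calling it (the file name llm.yaml and its contents are hypothetical):

# llm.yaml (hypothetical contents):
#   _type: openai
#   temperature: 0.0
from langchain.llms.loading import load_llm

llm = load_llm("llm.yaml")  # parses the YAML and instantiates the LLM class registered under "_type"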
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~agents~react~textworld_prompt.py
# flake8: noqa from langchain.prompts.prompt import PromptTemplate EXAMPLES = [ """Настройка: Ты сейчас играешь в быстрый раунд TextWorld! Вот твоя задача на сегодня. Прежде всего, ты можешь, например, попробовать отправиться на восток. После этого, возьми папку из шкафчика. С папкой, положи папку на каминную полку. Хорошо, спасибо! -= Хранилище =- Ты только что вошел в хранилище. Ты начинаешь оценивать, что здесь есть. Здесь есть открытый сейф. Какое разочарование! Сейф пуст! Ты разглядываешь полку. Но на ней ничего нет. Что, ты думаешь, что в TextWorld должны быть вещи? Тебе не нравятся двери? Почему бы тебе не попробовать пойти на восток, этот вход не охраняется. Thought: Мне нужно отправиться на восток Action: Играть[идти на восток] Observation: -= Офис =- Ты прибываешь в офис. Обычный. Ты можешь разглядеть шкафчик. В шкафчике есть папка. Ты видишь чемодан. Чемодан пуст, какой ужасный день! Ты прислоняешься к стене, случайно нажимая на секретную кнопку. Стена открывается, открывая каминную полку. Ты задумчиво спрашиваешься, кто оставил это здесь. Каминная полка стандартная. Каминная полка, похоже, пуста. Если ты еще не заметил, кажется, что у стены есть что-то, это стол. К сожалению, на нем ничего нет. Хм. Ну да ладно Есть выход на запад. Не волнуйся, он не охраняется. Thought: Мне нужно взять папку из шкафчика Action: Играть[взять папку] Observation: Ты берешь папку из шкафчика. Thought: Мне нужно положить папку на каминную полку Action: Играть[положить папку на каминную полку] Observation: Ты кладешь папку на каминную полку. Твой счет только что увеличился на одно очко. *** Конец *** Thought: Произошел Конец Action: Закончить[да] """ ] SUFFIX = """\n\nНастройка: {input} {agent_scratchpad}""" TEXTWORLD_PROMPT = PromptTemplate.from_examples( EXAMPLES, SUFFIX, ["input", "agent_scratchpad"] )
[ "['input', 'agent_scratchpad']", "input", "agent_scratchpad", "\n\nНастройка: {input}\n{agent_scratchpad}" ]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~ifixit.py
from typing import List, Optional import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.web_base import WebBaseLoader IFIXIT_BASE_URL = "https://www.ifixit.com/api/2.0" class IFixitLoader(BaseLoader): """Load `iFixit` repair guides, device wikis and answers. iFixit is the largest, open repair community on the web. The site contains nearly 100k repair manuals, 200k Questions & Answers on 42k devices, and all the data is licensed under CC-BY. This loader will allow you to download the text of a repair guide, text of Q&A's and wikis from devices on iFixit using their open APIs and web scraping. """ def __init__(self, web_path: str): """Initialize with a web path.""" if not web_path.startswith("https://www.ifixit.com"): raise ValueError("web path must start with 'https://www.ifixit.com'") path = web_path.replace("https://www.ifixit.com", "") allowed_paths = ["/Device", "/Guide", "/Answers", "/Teardown"] """ TODO: Add /Wiki """ if not any(path.startswith(allowed_path) for allowed_path in allowed_paths): raise ValueError( "web path must start with /Device, /Guide, /Teardown or /Answers" ) pieces = [x for x in path.split("/") if x] """Teardowns are just guides by a different name""" self.page_type = pieces[0] if pieces[0] != "Teardown" else "Guide" if self.page_type == "Guide" or self.page_type == "Answers": self.id = pieces[2] else: self.id = pieces[1] self.web_path = web_path def load(self) -> List[Document]: if self.page_type == "Device": return self.load_device() elif self.page_type == "Guide" or self.page_type == "Teardown": return self.load_guide() elif self.page_type == "Answers": return self.load_questions_and_answers() else: raise ValueError("Unknown page type: " + self.page_type) @staticmethod def load_suggestions(query: str = "", doc_type: str = "all") -> List[Document]: """Load suggestions. Args: query: A query string doc_type: The type of document to search for. Can be one of "all", "device", "guide", "teardown", "answer", "wiki". Returns: """ res = requests.get( IFIXIT_BASE_URL + "/suggest/" + query + "?doctypes=" + doc_type ) if res.status_code != 200: raise ValueError( 'Could not load suggestions for "' + query + '"\n' + res.json() ) data = res.json() results = data["results"] output = [] for result in results: try: loader = IFixitLoader(result["url"]) if loader.page_type == "Device": output += loader.load_device(include_guides=False) else: output += loader.load() except ValueError: continue return output def load_questions_and_answers( self, url_override: Optional[str] = None ) -> List[Document]: """Load a list of questions and answers. Args: url_override: A URL to override the default URL. 
Returns: List[Document] """ loader = WebBaseLoader(self.web_path if url_override is None else url_override) soup = loader.scrape() output = [] title = soup.find("h1", "post-title").text output.append("# " + title) output.append(soup.select_one(".post-content .post-text").text.strip()) answersHeader = soup.find("div", "post-answers-header") if answersHeader: output.append("\n## " + answersHeader.text.strip()) for answer in soup.select(".js-answers-list .post.post-answer"): if answer.has_attr("itemprop") and "acceptedAnswer" in answer["itemprop"]: output.append("\n### Accepted Answer") elif "post-helpful" in answer["class"]: output.append("\n### Most Helpful Answer") else: output.append("\n### Other Answer") output += [ a.text.strip() for a in answer.select(".post-content .post-text") ] output.append("\n") text = "\n".join(output).strip() metadata = {"source": self.web_path, "title": title} return [Document(page_content=text, metadata=metadata)] def load_device( self, url_override: Optional[str] = None, include_guides: bool = True ) -> List[Document]: """Loads a device Args: url_override: A URL to override the default URL. include_guides: Whether to include guides linked to from the device. Defaults to True. Returns: """ documents = [] if url_override is None: url = IFIXIT_BASE_URL + "/wikis/CATEGORY/" + self.id else: url = url_override res = requests.get(url) data = res.json() text = "\n".join( [ data[key] for key in ["title", "description", "contents_raw"] if key in data ] ).strip() metadata = {"source": self.web_path, "title": data["title"]} documents.append(Document(page_content=text, metadata=metadata)) if include_guides: """Load and return documents for each guide linked to from the device""" guide_urls = [guide["url"] for guide in data["guides"]] for guide_url in guide_urls: documents.append(IFixitLoader(guide_url).load()[0]) return documents def load_guide(self, url_override: Optional[str] = None) -> List[Document]: """Load a guide Args: url_override: A URL to override the default URL. Returns: List[Document] """ if url_override is None: url = IFIXIT_BASE_URL + "/guides/" + self.id else: url = url_override res = requests.get(url) if res.status_code != 200: raise ValueError( "Could not load guide: " + self.web_path + "\n" + res.json() ) data = res.json() doc_parts = ["# " + data["title"], data["introduction_raw"]] doc_parts.append("\n\n###Tools Required:") if len(data["tools"]) == 0: doc_parts.append("\n - None") else: for tool in data["tools"]: doc_parts.append("\n - " + tool["text"]) doc_parts.append("\n\n###Parts Required:") if len(data["parts"]) == 0: doc_parts.append("\n - None") else: for part in data["parts"]: doc_parts.append("\n - " + part["text"]) for row in data["steps"]: doc_parts.append( "\n\n## " + ( row["title"] if row["title"] != "" else "Step {}".format(row["orderby"]) ) ) for line in row["lines"]: doc_parts.append(line["text_raw"]) doc_parts.append(data["conclusion_raw"]) text = "\n".join(doc_parts) metadata = {"source": self.web_path, "title": data["title"]} return [Document(page_content=text, metadata=metadata)]
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~memory~chat_message_histories~dynamodb.py
from __future__ import annotations import logging from typing import TYPE_CHECKING, Dict, List, Optional from langchain.schema import ( BaseChatMessageHistory, ) from langchain.schema.messages import ( BaseMessage, _message_to_dict, messages_from_dict, messages_to_dict, ) if TYPE_CHECKING: from boto3.session import Session logger = logging.getLogger(__name__) class DynamoDBChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in AWS DynamoDB. This class expects that a DynamoDB table exists with name `table_name` Args: table_name: name of the DynamoDB table session_id: arbitrary key that is used to store the messages of a single chat session. endpoint_url: URL of the AWS endpoint to connect to. This argument is optional and useful for test purposes, like using Localstack. If you plan to use AWS cloud service, you normally don't have to worry about setting the endpoint_url. primary_key_name: name of the primary key of the DynamoDB table. This argument is optional, defaulting to "SessionId". key: an optional dictionary with a custom primary and secondary key. This argument is optional, but useful when using composite dynamodb keys, or isolating records based off of application details such as a user id. This may also contain global and local secondary index keys. kms_key_id: an optional AWS KMS Key ID, AWS KMS Key ARN, or AWS KMS Alias for client-side encryption """ def __init__( self, table_name: str, session_id: str, endpoint_url: Optional[str] = None, primary_key_name: str = "SessionId", key: Optional[Dict[str, str]] = None, boto3_session: Optional[Session] = None, kms_key_id: Optional[str] = None, ): if boto3_session: client = boto3_session.resource("dynamodb") else: try: import boto3 except ImportError as e: raise ImportError( "Unable to import boto3, please install with `pip install boto3`." ) from e if endpoint_url: client = boto3.resource("dynamodb", endpoint_url=endpoint_url) else: client = boto3.resource("dynamodb") self.table = client.Table(table_name) self.session_id = session_id self.key: Dict = key or {primary_key_name: session_id} if kms_key_id: try: from dynamodb_encryption_sdk.encrypted.table import EncryptedTable from dynamodb_encryption_sdk.identifiers import CryptoAction from dynamodb_encryption_sdk.material_providers.aws_kms import ( AwsKmsCryptographicMaterialsProvider, ) from dynamodb_encryption_sdk.structures import AttributeActions except ImportError as e: raise ImportError( "Unable to import dynamodb_encryption_sdk, please install with " "`pip install dynamodb-encryption-sdk`." ) from e actions = AttributeActions( default_action=CryptoAction.DO_NOTHING, attribute_actions={"History": CryptoAction.ENCRYPT_AND_SIGN}, ) aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=kms_key_id) self.table = EncryptedTable( table=self.table, materials_provider=aws_kms_cmp, attribute_actions=actions, auto_refresh_table_indexes=False, ) @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from DynamoDB""" try: from botocore.exceptions import ClientError except ImportError as e: raise ImportError( "Unable to import botocore, please install with `pip install botocore`." 
) from e response = None try: response = self.table.get_item(Key=self.key) except ClientError as error: if error.response["Error"]["Code"] == "ResourceNotFoundException": logger.warning("No record found with session id: %s", self.session_id) else: logger.error(error) if response and "Item" in response: items = response["Item"]["History"] else: items = [] messages = messages_from_dict(items) return messages def add_message(self, message: BaseMessage) -> None: """Append the message to the record in DynamoDB""" try: from botocore.exceptions import ClientError except ImportError as e: raise ImportError( "Unable to import botocore, please install with `pip install botocore`." ) from e messages = messages_to_dict(self.messages) _message = _message_to_dict(message) messages.append(_message) try: self.table.put_item(Item={**self.key, "History": messages}) except ClientError as err: logger.error(err) def clear(self) -> None: """Clear session memory from DynamoDB""" try: from botocore.exceptions import ClientError except ImportError as e: raise ImportError( "Unable to import botocore, please install with `pip install botocore`." ) from e try: self.table.delete_item(Key=self.key) except ClientError as err: logger.error(err)
[]
2024-01-10
ai-forever/gigachain
libs~langchain~tests~integration_tests~chat_models~test_qianfan_endpoint.py
"""Test Baidu Qianfan Chat Endpoint.""" from typing import Any from langchain.callbacks.manager import CallbackManager from langchain.chains.openai_functions import ( create_openai_fn_chain, ) from langchain.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, FunctionMessage, HumanMessage, LLMResult, ) from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler _FUNCTIONS: Any = [ { "name": "format_person_info", "description": ( "Output formatter. Should always be used to format your response to the" " user." ), "parameters": { "title": "Person", "description": "Identifying information about a person.", "type": "object", "properties": { "name": { "title": "Name", "description": "The person's name", "type": "string", }, "age": { "title": "Age", "description": "The person's age", "type": "integer", }, "fav_food": { "title": "Fav Food", "description": "The person's favorite food", "type": "string", }, }, "required": ["name", "age"], }, }, { "name": "get_current_temperature", "description": ("Used to get the location's temperature."), "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "city name", }, "unit": { "type": "string", "enum": ["centigrade", "Fahrenheit"], }, }, "required": ["location", "unit"], }, "responses": { "type": "object", "properties": { "temperature": { "type": "integer", "description": "city temperature", }, "unit": { "type": "string", "enum": ["centigrade", "Fahrenheit"], }, }, }, }, ] def test_default_call() -> None: """Test default model(`ERNIE-Bot`) call.""" chat = QianfanChatEndpoint() response = chat(messages=[HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_model() -> None: """Test model kwarg works.""" chat = QianfanChatEndpoint(model="BLOOMZ-7B") response = chat(messages=[HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_model_param() -> None: """Test model params works.""" chat = QianfanChatEndpoint() response = chat(model="BLOOMZ-7B", messages=[HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_endpoint() -> None: """Test user custom model deployments like some open source models.""" chat = QianfanChatEndpoint(endpoint="qianfan_bloomz_7b_compressed") response = chat(messages=[HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_endpoint_param() -> None: """Test user custom model deployments like some open source models.""" chat = QianfanChatEndpoint() response = chat( messages=[ HumanMessage(endpoint="qianfan_bloomz_7b_compressed", content="Hello") ] ) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_multiple_history() -> None: """Tests multiple history works.""" chat = QianfanChatEndpoint() response = chat( messages=[ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="How are you doing?"), ] ) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) def test_stream() -> None: """Test that stream works.""" chat = QianfanChatEndpoint(streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) response = chat( 
messages=[ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="Who are you?"), ], stream=True, callbacks=callback_manager, ) assert callback_handler.llm_streams > 0 assert isinstance(response.content, str) def test_multiple_messages() -> None: """Tests multiple messages works.""" chat = QianfanChatEndpoint() message = HumanMessage(content="Hi, how are you.") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content def test_functions_call_thoughts() -> None: chat = QianfanChatEndpoint(model="ERNIE-Bot") prompt_tmpl = "Use the given functions to answer following question: {input}" prompt_msgs = [ HumanMessagePromptTemplate.from_template(prompt_tmpl), ] prompt = ChatPromptTemplate(messages=prompt_msgs) chain = create_openai_fn_chain( _FUNCTIONS, chat, prompt, output_parser=None, ) message = HumanMessage(content="What's the temperature in Shanghai today?") response = chain.generate([{"input": message}]) assert isinstance(response.generations[0][0], ChatGeneration) assert isinstance(response.generations[0][0].message, AIMessage) assert "function_call" in response.generations[0][0].message.additional_kwargs def test_functions_call() -> None: chat = QianfanChatEndpoint(model="ERNIE-Bot") prompt = ChatPromptTemplate( messages=[ HumanMessage(content="What's the temperature in Shanghai today?"), AIMessage( content="", additional_kwargs={ "function_call": { "name": "get_current_temperature", "thoughts": "i will use get_current_temperature " "to resolve the questions", "arguments": '{"location":"Shanghai","unit":"centigrade"}', } }, ), FunctionMessage( name="get_current_weather", content='{"temperature": "25", \ "unit": "摄氏度", "description": "晴朗"}', ), ] ) llm_chain = create_openai_fn_chain( _FUNCTIONS, chat, prompt, output_parser=None, ) resp = llm_chain.generate([{}]) assert isinstance(resp, LLMResult)
[ "function_call", "Who are you?", "get_current_weather", "{\"temperature\": \"25\", \"unit\": \"摄氏度\", \"description\": \"晴朗\"}", "Use the given functions to answer following question: {input}", "get_current_temperature", "to resolve the questions", "name", "Hello.", "Hello!", "How are you doing?", "Hi, how are you.", "i will use get_current_temperature ", "{\"location\":\"Shanghai\",\"unit\":\"centigrade\"}", "What's the temperature in Shanghai today?", "arguments", "Hello" ]
2024-01-10
ai-forever/gigachain
libs~experimental~langchain_experimental~data_anonymizer~presidio.py
from __future__ import annotations import json from pathlib import Path from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union import yaml from langchain_experimental.data_anonymizer.base import ( DEFAULT_DEANONYMIZER_MATCHING_STRATEGY, AnonymizerBase, ReversibleAnonymizerBase, ) from langchain_experimental.data_anonymizer.deanonymizer_mapping import ( DeanonymizerMapping, MappingDataType, create_anonymizer_mapping, ) from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import ( exact_matching_strategy, ) from langchain_experimental.data_anonymizer.faker_presidio_mapping import ( get_pseudoanonymizer_mapping, ) try: from presidio_analyzer import AnalyzerEngine from presidio_analyzer.nlp_engine import NlpEngineProvider except ImportError as e: raise ImportError( "Could not import presidio_analyzer, please install with " "`pip install presidio-analyzer`. You will also need to download a " "spaCy model to use the analyzer, e.g. " "`python -m spacy download en_core_web_lg`." ) from e try: from presidio_anonymizer import AnonymizerEngine from presidio_anonymizer.entities import OperatorConfig except ImportError as e: raise ImportError( "Could not import presidio_anonymizer, please install with " "`pip install presidio-anonymizer`." ) from e if TYPE_CHECKING: from presidio_analyzer import EntityRecognizer # Configuring Anonymizer for multiple languages # Detailed description and examples can be found here: # langchain/docs/extras/guides/privacy/multi_language_anonymization.ipynb DEFAULT_LANGUAGES_CONFIG = { # You can also use Stanza or transformers library. # See https://microsoft.github.io/presidio/analyzer/customizing_nlp_models/ "nlp_engine_name": "spacy", "models": [ {"lang_code": "en", "model_name": "en_core_web_lg"}, # {"lang_code": "de", "model_name": "de_core_news_md"}, # {"lang_code": "es", "model_name": "es_core_news_md"}, # ... # List of available models: https://spacy.io/usage/models ], } class PresidioAnonymizerBase(AnonymizerBase): def __init__( self, analyzed_fields: Optional[List[str]] = None, operators: Optional[Dict[str, OperatorConfig]] = None, languages_config: Dict = DEFAULT_LANGUAGES_CONFIG, add_default_faker_operators: bool = True, faker_seed: Optional[int] = None, ): """ Args: analyzed_fields: List of fields to detect and then anonymize. Defaults to all entities supported by Microsoft Presidio. operators: Operators to use for anonymization. Operators allow for custom anonymization of detected PII. Learn more: https://microsoft.github.io/presidio/tutorial/10_simple_anonymization/ languages_config: Configuration for the NLP engine. First language in the list will be used as the main language in self.anonymize(...) when no language is specified. Learn more: https://microsoft.github.io/presidio/analyzer/customizing_nlp_models/ faker_seed: Seed used to initialize faker. Defaults to None, in which case faker will be seeded randomly and provide random values. 
""" self.analyzed_fields = ( analyzed_fields if analyzed_fields is not None else list(get_pseudoanonymizer_mapping().keys()) ) if add_default_faker_operators: self.operators = { field: OperatorConfig( operator_name="custom", params={"lambda": faker_function} ) for field, faker_function in get_pseudoanonymizer_mapping( faker_seed ).items() } else: self.operators = {} if operators: self.add_operators(operators) provider = NlpEngineProvider(nlp_configuration=languages_config) nlp_engine = provider.create_engine() self.supported_languages = list(nlp_engine.nlp.keys()) self._analyzer = AnalyzerEngine( supported_languages=self.supported_languages, nlp_engine=nlp_engine ) self._anonymizer = AnonymizerEngine() def add_recognizer(self, recognizer: EntityRecognizer) -> None: """Add a recognizer to the analyzer Args: recognizer: Recognizer to add to the analyzer. """ self._analyzer.registry.add_recognizer(recognizer) self.analyzed_fields.extend(recognizer.supported_entities) def add_operators(self, operators: Dict[str, OperatorConfig]) -> None: """Add operators to the anonymizer Args: operators: Operators to add to the anonymizer. """ self.operators.update(operators) class PresidioAnonymizer(PresidioAnonymizerBase): def _anonymize( self, text: str, language: Optional[str] = None, allow_list: Optional[List[str]] = None, ) -> str: """Anonymize text. Each PII entity is replaced with a fake value. Each time fake values will be different, as they are generated randomly. PresidioAnonymizer has no built-in memory - so it will not remember the effects of anonymizing previous texts. >>> anonymizer = PresidioAnonymizer() >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!") 'My name is Noah Rhodes. Hi Noah Rhodes!' >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!") 'My name is Brett Russell. Hi Brett Russell!' Args: text: text to anonymize language: language to use for analysis of PII If None, the first (main) language in the list of languages specified in the configuration will be used. """ if language is None: language = self.supported_languages[0] if language not in self.supported_languages: raise ValueError( f"Language '{language}' is not supported. " f"Supported languages are: {self.supported_languages}. " "Change your language configuration file to add more languages." ) # Check supported entities for given language # e.g. 
IT_FISCAL_CODE is not supported for English in Presidio by default # If you want to use it, you need to add a recognizer manually supported_entities = [] for recognizer in self._analyzer.get_recognizers(language): recognizer_dict = recognizer.to_dict() supported_entities.extend( [recognizer_dict["supported_entity"]] if "supported_entity" in recognizer_dict else recognizer_dict["supported_entities"] ) entities_to_analyze = list( set(supported_entities).intersection(set(self.analyzed_fields)) ) analyzer_results = self._analyzer.analyze( text, entities=entities_to_analyze, language=language, allow_list=allow_list, ) filtered_analyzer_results = ( self._anonymizer._remove_conflicts_and_get_text_manipulation_data( analyzer_results ) ) anonymizer_results = self._anonymizer.anonymize( text, analyzer_results=analyzer_results, operators=self.operators, ) anonymizer_mapping = create_anonymizer_mapping( text, filtered_analyzer_results, anonymizer_results, ) return exact_matching_strategy(text, anonymizer_mapping) class PresidioReversibleAnonymizer(PresidioAnonymizerBase, ReversibleAnonymizerBase): def __init__( self, analyzed_fields: Optional[List[str]] = None, operators: Optional[Dict[str, OperatorConfig]] = None, languages_config: Dict = DEFAULT_LANGUAGES_CONFIG, add_default_faker_operators: bool = True, faker_seed: Optional[int] = None, ): super().__init__( analyzed_fields, operators, languages_config, add_default_faker_operators, faker_seed, ) self._deanonymizer_mapping = DeanonymizerMapping() @property def deanonymizer_mapping(self) -> MappingDataType: """Return the deanonymizer mapping""" return self._deanonymizer_mapping.data @property def anonymizer_mapping(self) -> MappingDataType: """Return the anonymizer mapping This is just the reverse version of the deanonymizer mapping.""" return { key: {v: k for k, v in inner_dict.items()} for key, inner_dict in self.deanonymizer_mapping.items() } def _anonymize( self, text: str, language: Optional[str] = None, allow_list: Optional[List[str]] = None, ) -> str: """Anonymize text. Each PII entity is replaced with a fake value. Each time fake values will be different, as they are generated randomly. At the same time, we will create a mapping from each anonymized entity back to its original text value. Thanks to the built-in memory, all previously anonymised entities will be remembered and replaced by the same fake values: >>> anonymizer = PresidioReversibleAnonymizer() >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!") 'My name is Noah Rhodes. Hi Noah Rhodes!' >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!") 'My name is Noah Rhodes. Hi Noah Rhodes!' Args: text: text to anonymize language: language to use for analysis of PII If None, the first (main) language in the list of languages specified in the configuration will be used. """ if language is None: language = self.supported_languages[0] if language not in self.supported_languages: raise ValueError( f"Language '{language}' is not supported. " f"Supported languages are: {self.supported_languages}. " "Change your language configuration file to add more languages." ) # Check supported entities for given language # e.g. 
IT_FISCAL_CODE is not supported for English in Presidio by default # If you want to use it, you need to add a recognizer manually supported_entities = [] for recognizer in self._analyzer.get_recognizers(language): recognizer_dict = recognizer.to_dict() supported_entities.extend( [recognizer_dict["supported_entity"]] if "supported_entity" in recognizer_dict else recognizer_dict["supported_entities"] ) entities_to_analyze = list( set(supported_entities).intersection(set(self.analyzed_fields)) ) analyzer_results = self._analyzer.analyze( text, entities=entities_to_analyze, language=language, allow_list=allow_list, ) filtered_analyzer_results = ( self._anonymizer._remove_conflicts_and_get_text_manipulation_data( analyzer_results ) ) anonymizer_results = self._anonymizer.anonymize( text, analyzer_results=analyzer_results, operators=self.operators, ) new_deanonymizer_mapping = create_anonymizer_mapping( text, filtered_analyzer_results, anonymizer_results, is_reversed=True, ) self._deanonymizer_mapping.update(new_deanonymizer_mapping) return exact_matching_strategy(text, self.anonymizer_mapping) def _deanonymize( self, text_to_deanonymize: str, deanonymizer_matching_strategy: Callable[ [str, MappingDataType], str ] = DEFAULT_DEANONYMIZER_MATCHING_STRATEGY, ) -> str: """Deanonymize text. Each anonymized entity is replaced with its original value. This method exploits the mapping created during the anonymization process. Args: text_to_deanonymize: text to deanonymize deanonymizer_matching_strategy: function to use to match anonymized entities with their original values and replace them. """ if not self._deanonymizer_mapping: raise ValueError( "Deanonymizer mapping is empty.", "Please call anonymize() and anonymize some text first.", ) text_to_deanonymize = deanonymizer_matching_strategy( text_to_deanonymize, self.deanonymizer_mapping ) return text_to_deanonymize def reset_deanonymizer_mapping(self) -> None: """Reset the deanonymizer mapping""" self._deanonymizer_mapping = DeanonymizerMapping() def save_deanonymizer_mapping(self, file_path: Union[Path, str]) -> None: """Save the deanonymizer mapping to a JSON or YAML file. Args: file_path: Path to file to save the mapping to. Example: .. code-block:: python anonymizer.save_deanonymizer_mapping(file_path="path/mapping.json") """ save_path = Path(file_path) if save_path.suffix not in [".json", ".yaml"]: raise ValueError(f"{save_path} must have an extension of .json or .yaml") # Make sure parent directories exist save_path.parent.mkdir(parents=True, exist_ok=True) if save_path.suffix == ".json": with open(save_path, "w") as f: json.dump(self.deanonymizer_mapping, f, indent=2) elif save_path.suffix == ".yaml": with open(save_path, "w") as f: yaml.dump(self.deanonymizer_mapping, f, default_flow_style=False) def load_deanonymizer_mapping(self, file_path: Union[Path, str]) -> None: """Load the deanonymizer mapping from a JSON or YAML file. Args: file_path: Path to file to load the mapping from. Example: .. code-block:: python anonymizer.load_deanonymizer_mapping(file_path="path/mapping.json") """ load_path = Path(file_path) if load_path.suffix not in [".json", ".yaml"]: raise ValueError(f"{load_path} must have an extension of .json or .yaml") if load_path.suffix == ".json": with open(load_path, "r") as f: loaded_mapping = json.load(f) elif load_path.suffix == ".yaml": with open(load_path, "r") as f: loaded_mapping = yaml.load(f, Loader=yaml.FullLoader) self._deanonymizer_mapping.update(loaded_mapping)
[]
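A minimal usage sketch for the PresidioReversibleAnonymizer defined in the record above. It assumes the public anonymize()/deanonymize() wrappers exposed by the AnonymizerBase/ReversibleAnonymizerBase base classes and that the spaCy model en_core_web_lg is installed; treat it as an illustration, not the library's documented example.

# Sketch only: assumes `en_core_web_lg` is downloaded and that the base classes
# expose public anonymize()/deanonymize() around the private methods shown above.
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON", "EMAIL_ADDRESS"])

# Replace detected PII with fake values and remember the mapping.
anonymized = anonymizer.anonymize("My name is John Doe, reach me at john@example.com")

# Restore the original values using the stored deanonymizer mapping.
original = anonymizer.deanonymize(anonymized)

# Persist the mapping for later sessions (JSON or YAML, as implemented above).
anonymizer.save_deanonymizer_mapping("mapping.json")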
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~storage~exceptions.py
from langchain.schema import LangChainException


class InvalidKeyException(LangChainException):
    """Raised when a key is invalid; e.g., uses incorrect characters."""
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~indexes~prompts~knowledge_triplet_extraction.py
# flake8: noqa
from langchain.graphs.networkx_graph import KG_TRIPLE_DELIMITER
from langchain.prompts.prompt import PromptTemplate

_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = (
    "Ты сетевой интеллект, помогающий человеку отслеживать тройки знаний"
    " обо всех соответствующих людях, вещах, концепциях и т.д. и интегрировать"
    " их с твоими знаниями, хранящимися в твоих весах,"
    " а также с теми, что хранятся в графе знаний."
    " Извлеки все тройки знаний из текста."
    " Тройка знаний - это предложение, которое содержит субъект, предикат"
    " и объект. Субъект - это описываемая сущность,"
    " предикат - это свойство субъекта, которое описывается,"
    " а объект - это значение свойства.\n\n"
    "ПРИМЕР\n"
    "Это штат в США. Это также номер 1 производитель золота в США.\n\n"
    f"Вывод: (Невада, является, штатом){KG_TRIPLE_DELIMITER}(Невада, находится в, США)"
    f"{KG_TRIPLE_DELIMITER}(Невада, является номером 1 производителем, золота)\n"
    "КОНЕЦ ПРИМЕРА\n\n"
    "ПРИМЕР\n"
    "Я иду в магазин.\n\n"
    "Вывод: НЕТ\n"
    "КОНЕЦ ПРИМЕРА\n\n"
    "ПРИМЕР\n"
    "О, ха. Я знаю, что Декарт любит ездить на антикварных скутерах и играть на мандолине.\n"
    f"Вывод: (Декарт, любит ездить на, антикварных скутерах){KG_TRIPLE_DELIMITER}(Декарт, играет на, мандолине)\n"
    "КОНЕЦ ПРИМЕРА\n\n"
    "ПРИМЕР\n"
    "{text}"
    "Вывод:"
)

KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT = PromptTemplate(
    input_variables=["text"],
    template=_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE,
)
[ "Ты сетевой интеллект, помогающий человеку отслеживать тройки знаний обо всех соответствующих людях, вещах, концепциях и т.д. и интегрировать их с твоими знаниями, хранящимися в твоих весах, а также с теми, что хранятся в графе знаний. Извлеки все тройки знаний из текста. Тройка знаний - это предложение, которое содержит субъект, предикат и объект. Субъект - это описываемая сущность, предикат - это свойство субъекта, которое описывается, а объект - это значение свойства.\n\nПРИМЕР\nЭто штат в США. Это также номер 1 производитель золота в США.\n\nВывод: (Невада, является, штатом)PLACEHOLDER(Невада, находится в, США)PLACEHOLDER(Невада, является номером 1 производителем, золота)\nКОНЕЦ ПРИМЕРА\n\nПРИМЕР\nЯ иду в магазин.\n\nВывод: НЕТ\nКОНЕЦ ПРИМЕРА\n\nПРИМЕР\nО, ха. Я знаю, что Декарт любит ездить на антикварных скутерах и играть на мандолине.\nВывод: (Декарт, любит ездить на, антикварных скутерах)PLACEHOLDER(Декарт, играет на, мандолине)\nКОНЕЦ ПРИМЕРА\n\nПРИМЕР\n{text}Вывод:" ]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~chat_models~yandex.py
"""Wrapper around YandexGPT chat models.""" import logging from typing import Any, Dict, List, Optional, Tuple from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.llms.utils import enforce_stop_tokens from langchain.llms.yandex import _BaseYandexGPT from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage, ) logger = logging.getLogger(__name__) def _parse_message(role: str, text: str) -> Dict: return {"role": role, "text": text} def _parse_chat_history(history: List[BaseMessage]) -> Tuple[List[Dict[str, str]], str]: """Parse a sequence of messages into history. Returns: A tuple of a list of parsed messages and an instruction message for the model. """ chat_history = [] instruction = "" for message in history: if isinstance(message, HumanMessage): chat_history.append(_parse_message("user", message.content)) if isinstance(message, AIMessage): chat_history.append(_parse_message("assistant", message.content)) if isinstance(message, SystemMessage): instruction = message.content return chat_history, instruction class ChatYandexGPT(_BaseYandexGPT, BaseChatModel): """Wrapper around YandexGPT large language models. There are two authentication options for the service account with the ``ai.languageModels.user`` role: - You can specify the token in a constructor parameter `iam_token` or in an environment variable `YC_IAM_TOKEN`. - You can specify the key in a constructor parameter `api_key` or in an environment variable `YC_API_KEY`. Example: .. code-block:: python from langchain.chat_models import ChatYandexGPT chat_model = ChatYandexGPT(iam_token="t1.9eu...") """ def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """ try: import grpc from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value from yandex.cloud.ai.llm.v1alpha.llm_pb2 import GenerationOptions, Message from yandex.cloud.ai.llm.v1alpha.llm_service_pb2 import ChatRequest from yandex.cloud.ai.llm.v1alpha.llm_service_pb2_grpc import ( TextGenerationServiceStub, ) except ImportError as e: raise ImportError( "Please install YandexCloud SDK" " with `pip install yandexcloud`." ) from e if not messages: raise ValueError( "You should provide at least one message to start the chat!" 
) message_history, instruction = _parse_chat_history(messages) channel_credentials = grpc.ssl_channel_credentials() channel = grpc.secure_channel(self.url, channel_credentials) request = ChatRequest( model=self.model_name, generation_options=GenerationOptions( temperature=DoubleValue(value=self.temperature), max_tokens=Int64Value(value=self.max_tokens), ), instruction_text=instruction, messages=[Message(**message) for message in message_history], ) stub = TextGenerationServiceStub(channel) if self.iam_token: metadata = (("authorization", f"Bearer {self.iam_token}"),) else: metadata = (("authorization", f"Api-Key {self.api_key}"),) res = stub.Chat(request, metadata=metadata) text = list(res)[0].message.text text = text if stop is None else enforce_stop_tokens(text, stop) message = AIMessage(content=text) return ChatResult(generations=[ChatGeneration(message=message)]) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: raise NotImplementedError( """YandexGPT doesn't support async requests at the moment.""" )
[]
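A brief usage sketch for ChatYandexGPT from the record above, assuming a valid IAM token; the token value is a placeholder.

from langchain.chat_models import ChatYandexGPT
from langchain.schema import HumanMessage, SystemMessage

# The IAM token below is a placeholder; YC_IAM_TOKEN / YC_API_KEY env vars also work.
chat = ChatYandexGPT(iam_token="t1.9eu...")

# SystemMessage becomes the instruction; Human/AI messages become the chat history.
answer = chat(
    [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="Tell me a joke about databases."),
    ]
)
print(answer.content)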
2024-01-10
ai-forever/gigachain
libs~experimental~langchain_experimental~sql~__init__.py
"""Chain for interacting with SQL Database.""" from langchain_experimental.sql.base import SQLDatabaseChain __all__ = ["SQLDatabaseChain"]
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~sql_database.py
"""Keep here for backwards compatibility.""" from langchain.utilities.sql_database import SQLDatabase __all__ = ["SQLDatabase"]
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~memory~motorhead_memory.py
from typing import Any, Dict, List, Optional

import requests

from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.messages import get_buffer_string

MANAGED_URL = "https://api.getmetal.io/v1/motorhead"
# LOCAL_URL = "http://localhost:8080"


class MotorheadMemory(BaseChatMemory):
    """Chat message memory backed by Motorhead service."""

    url: str = MANAGED_URL
    timeout: int = 3000
    memory_key: str = "history"
    session_id: str
    context: Optional[str] = None

    # Managed Params
    api_key: Optional[str] = None
    client_id: Optional[str] = None

    def __get_headers(self) -> Dict[str, str]:
        is_managed = self.url == MANAGED_URL

        headers = {
            "Content-Type": "application/json",
        }

        if is_managed and not (self.api_key and self.client_id):
            raise ValueError(
                """
                You must provide an API key or a client ID to use the managed
                version of Motorhead. Visit https://getmetal.io for more information.
                """
            )

        if is_managed and self.api_key and self.client_id:
            headers["x-metal-api-key"] = self.api_key
            headers["x-metal-client-id"] = self.client_id

        return headers

    async def init(self) -> None:
        res = requests.get(
            f"{self.url}/sessions/{self.session_id}/memory",
            timeout=self.timeout,
            headers=self.__get_headers(),
        )
        res_data = res.json()
        res_data = res_data.get("data", res_data)  # Handle Managed Version

        messages = res_data.get("messages", [])
        context = res_data.get("context", "NONE")

        for message in reversed(messages):
            if message["role"] == "AI":
                self.chat_memory.add_ai_message(message["content"])
            else:
                self.chat_memory.add_user_message(message["content"])

        if context and context != "NONE":
            self.context = context

    def load_memory_variables(self, values: Dict[str, Any]) -> Dict[str, Any]:
        if self.return_messages:
            return {self.memory_key: self.chat_memory.messages}
        else:
            return {self.memory_key: get_buffer_string(self.chat_memory.messages)}

    @property
    def memory_variables(self) -> List[str]:
        return [self.memory_key]

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        input_str, output_str = self._get_input_output(inputs, outputs)
        requests.post(
            f"{self.url}/sessions/{self.session_id}/memory",
            timeout=self.timeout,
            json={
                "messages": [
                    {"role": "Human", "content": f"{input_str}"},
                    {"role": "AI", "content": f"{output_str}"},
                ]
            },
            headers=self.__get_headers(),
        )
        super().save_context(inputs, outputs)

    def delete_session(self) -> None:
        """Delete a session"""
        requests.delete(f"{self.url}/sessions/{self.session_id}/memory")
[ "PLACEHOLDER" ]
2024-01-10
ai-forever/gigachain
libs~langchain~tests~unit_tests~agents~format_scratchpad~test_xml.py
from langchain.agents.format_scratchpad.xml import format_xml
from langchain.schema.agent import AgentAction


def test_single_agent_action_observation() -> None:
    # Arrange
    agent_action = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
    observation = "Observation1"
    intermediate_steps = [(agent_action, observation)]

    # Act
    result = format_xml(intermediate_steps)
    expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation>"""

    # Assert
    assert result == expected_result


def test_multiple_agent_actions_observations() -> None:
    # Arrange
    agent_action1 = AgentAction(tool="Tool1", tool_input="Input1", log="Log1")
    agent_action2 = AgentAction(tool="Tool2", tool_input="Input2", log="Log2")
    observation1 = "Observation1"
    observation2 = "Observation2"
    intermediate_steps = [(agent_action1, observation1), (agent_action2, observation2)]

    # Act
    result = format_xml(intermediate_steps)

    # Assert
    expected_result = """<tool>Tool1</tool><tool_input>Input1\
</tool_input><observation>Observation1</observation><tool>\
Tool2</tool><tool_input>Input2</tool_input><observation>\
Observation2</observation>"""
    assert result == expected_result


def test_empty_list_agent_actions() -> None:
    result = format_xml([])
    assert result == ""
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~nuclia.py
import json
import uuid
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.tools.nuclia.tool import NucliaUnderstandingAPI


class NucliaLoader(BaseLoader):
    """Load from any file type using `Nuclia Understanding API`."""

    def __init__(self, path: str, nuclia_tool: NucliaUnderstandingAPI):
        self.nua = nuclia_tool
        self.id = str(uuid.uuid4())
        self.nua.run({"action": "push", "id": self.id, "path": path, "text": None})

    def load(self) -> List[Document]:
        """Load documents."""
        data = self.nua.run(
            {"action": "pull", "id": self.id, "path": None, "text": None}
        )
        if not data:
            return []
        obj = json.loads(data)
        text = obj["extracted_text"][0]["body"]["text"]
        print(text)
        metadata = {
            "file": obj["file_extracted_data"][0],
            "metadata": obj["field_metadata"][0],
        }
        return [Document(page_content=text, metadata=metadata)]
[]
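A hedged sketch of driving the NucliaLoader above. The NucliaUnderstandingAPI constructor argument and its reliance on NUCLIA_ZONE/NUA_KEY environment variables are assumptions about that tool, not something stated in this record.

from langchain.document_loaders.nuclia import NucliaLoader
from langchain.tools.nuclia.tool import NucliaUnderstandingAPI

# Assumption: the tool reads NUCLIA_ZONE / NUA_KEY from the environment and
# accepts enable_ml; check the NucliaUnderstandingAPI docs before relying on this.
nua = NucliaUnderstandingAPI(enable_ml=False)

loader = NucliaLoader("./report.pdf", nua)  # pushes the file on construction
docs = loader.load()  # pulls the processed result (may be empty until processing is done)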
2024-01-10
ai-forever/gigachain
libs~langchain~tests~integration_tests~llms~test_writer.py
"""Test Writer API wrapper.""" from langchain.llms.writer import Writer def test_writer_call() -> None: """Test valid call to Writer.""" llm = Writer() output = llm("Say foo:") assert isinstance(output, str)
[]
2024-01-10
ai-forever/gigachain
libs~experimental~langchain_experimental~llms~rellm_decoder.py
"""Experimental implementation of RELLM wrapped LLM.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, List, Optional, cast from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.huggingface_pipeline import HuggingFacePipeline from langchain.llms.utils import enforce_stop_tokens from langchain_experimental.pydantic_v1 import Field, root_validator if TYPE_CHECKING: import rellm from regex import Pattern as RegexPattern else: try: from regex import Pattern as RegexPattern except ImportError: pass def import_rellm() -> rellm: """Lazily import rellm.""" try: import rellm except ImportError: raise ImportError( "Could not import rellm python package. " "Please install it with `pip install rellm`." ) return rellm class RELLM(HuggingFacePipeline): """RELLM wrapped LLM using HuggingFace Pipeline API.""" regex: RegexPattern = Field(..., description="The structured format to complete.") max_new_tokens: int = Field( default=200, description="Maximum number of new tokens to generate." ) # TODO: move away from `root_validator` since it is deprecated in pydantic v2 # and causes mypy type-checking failures (hence the `type: ignore`) @root_validator # type: ignore[call-overload] def check_rellm_installation(cls, values: dict) -> dict: import_rellm() return values def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: rellm = import_rellm() from transformers import Text2TextGenerationPipeline pipeline = cast(Text2TextGenerationPipeline, self.pipeline) text = rellm.complete_re( prompt, self.regex, tokenizer=pipeline.tokenizer, model=pipeline.model, max_new_tokens=self.max_new_tokens, ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~tools~ainetwork~rule.py
import builtins import json from typing import Optional, Type from langchain.callbacks.manager import AsyncCallbackManagerForToolRun from langchain.pydantic_v1 import BaseModel, Field from langchain.tools.ainetwork.base import AINBaseTool, OperationType class RuleSchema(BaseModel): """Schema for owner operations.""" type: OperationType = Field(...) path: str = Field(..., description="Path on the blockchain where the rule applies") eval: Optional[str] = Field(None, description="eval string to determine permission") class AINRuleOps(AINBaseTool): """Tool for owner operations.""" name: str = "AINruleOps" description: str = """ Covers the write `rule` for the AINetwork Blockchain database. The SET type specifies write permissions using the `eval` variable as a JavaScript eval string. In order to AINvalueOps with SET at the path, the execution result of the `eval` string must be true. ## Path Rules 1. Allowed characters for directory: `[a-zA-Z_0-9]` 2. Use `$<key>` for template variables as directory. ## Eval String Special Variables - auth.addr: Address of the writer for the path - newData: New data for the path - data: Current data for the path - currentTime: Time in seconds - lastBlockNumber: Latest processed block number ## Eval String Functions - getValue(<path>) - getRule(<path>) - getOwner(<path>) - getFunction(<path>) - evalRule(<path>, <value to set>, auth, currentTime) - evalOwner(<path>, 'write_owner', auth) ## SET Example - type: SET - path: /apps/langchain_project_1/$from/$to/$img - eval: auth.addr===$from&&!getValue('/apps/image_db/'+$img) ## GET Example - type: GET - path: /apps/langchain_project_1 """ # noqa: E501 args_schema: Type[BaseModel] = RuleSchema async def _arun( self, type: OperationType, path: str, eval: Optional[str] = None, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: from ain.types import ValueOnlyTransactionInput try: if type is OperationType.SET: if eval is None: raise ValueError("'eval' is required for SET operation.") res = await self.interface.db.ref(path).setRule( transactionInput=ValueOnlyTransactionInput( value={".rule": {"write": eval}} ) ) elif type is OperationType.GET: res = await self.interface.db.ref(path).getRule() else: raise ValueError(f"Unsupported 'type': {type}.") return json.dumps(res, ensure_ascii=False) except Exception as e: return f"{builtins.type(e).__name__}: {str(e)}"
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~trello.py
from __future__ import annotations from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env if TYPE_CHECKING: from trello import Board, Card, TrelloClient class TrelloLoader(BaseLoader): """Load cards from a `Trello` board.""" def __init__( self, client: TrelloClient, board_name: str, *, include_card_name: bool = True, include_comments: bool = True, include_checklist: bool = True, card_filter: Literal["closed", "open", "all"] = "all", extra_metadata: Tuple[str, ...] = ("due_date", "labels", "list", "closed"), ): """Initialize Trello loader. Args: client: Trello API client. board_name: The name of the Trello board. include_card_name: Whether to include the name of the card in the document. include_comments: Whether to include the comments on the card in the document. include_checklist: Whether to include the checklist on the card in the document. card_filter: Filter on card status. Valid values are "closed", "open", "all". extra_metadata: List of additional metadata fields to include as document metadata.Valid values are "due_date", "labels", "list", "closed". """ self.client = client self.board_name = board_name self.include_card_name = include_card_name self.include_comments = include_comments self.include_checklist = include_checklist self.extra_metadata = extra_metadata self.card_filter = card_filter @classmethod def from_credentials( cls, board_name: str, *, api_key: Optional[str] = None, token: Optional[str] = None, **kwargs: Any, ) -> TrelloLoader: """Convenience constructor that builds TrelloClient init param for you. Args: board_name: The name of the Trello board. api_key: Trello API key. Can also be specified as environment variable TRELLO_API_KEY. token: Trello token. Can also be specified as environment variable TRELLO_TOKEN. include_card_name: Whether to include the name of the card in the document. include_comments: Whether to include the comments on the card in the document. include_checklist: Whether to include the checklist on the card in the document. card_filter: Filter on card status. Valid values are "closed", "open", "all". extra_metadata: List of additional metadata fields to include as document metadata.Valid values are "due_date", "labels", "list", "closed". """ try: from trello import TrelloClient # type: ignore except ImportError as ex: raise ImportError( "Could not import trello python package. " "Please install it with `pip install py-trello`." ) from ex api_key = api_key or get_from_env("api_key", "TRELLO_API_KEY") token = token or get_from_env("token", "TRELLO_TOKEN") client = TrelloClient(api_key=api_key, token=token) return cls(client, board_name, **kwargs) def load(self) -> List[Document]: """Loads all cards from the specified Trello board. You can filter the cards, metadata and text included by using the optional parameters. Returns: A list of documents, one for each card in the board. 
""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError as ex: raise ImportError( "`beautifulsoup4` package not found, please run" " `pip install beautifulsoup4`" ) from ex board = self._get_board() # Create a dictionary with the list IDs as keys and the list names as values list_dict = {list_item.id: list_item.name for list_item in board.list_lists()} # Get Cards on the board cards = board.get_cards(card_filter=self.card_filter) return [self._card_to_doc(card, list_dict) for card in cards] def _get_board(self) -> Board: # Find the first board with a matching name board = next( (b for b in self.client.list_boards() if b.name == self.board_name), None ) if not board: raise ValueError(f"Board `{self.board_name}` not found.") return board def _card_to_doc(self, card: Card, list_dict: dict) -> Document: from bs4 import BeautifulSoup # type: ignore text_content = "" if self.include_card_name: text_content = card.name + "\n" if card.description.strip(): text_content += BeautifulSoup(card.description, "lxml").get_text() if self.include_checklist: # Get all the checklist items on the card for checklist in card.checklists: if checklist.items: items = [ f"{item['name']}:{item['state']}" for item in checklist.items ] text_content += f"\n{checklist.name}\n" + "\n".join(items) if self.include_comments: # Get all the comments on the card comments = [ BeautifulSoup(comment["data"]["text"], "lxml").get_text() for comment in card.comments ] text_content += "Comments:" + "\n".join(comments) # Default metadata fields metadata = { "title": card.name, "id": card.id, "url": card.url, } # Extra metadata fields. Card object is not subscriptable. if "labels" in self.extra_metadata: metadata["labels"] = [label.name for label in card.labels] if "list" in self.extra_metadata: if card.list_id in list_dict: metadata["list"] = list_dict[card.list_id] if "closed" in self.extra_metadata: metadata["closed"] = card.closed if "due_date" in self.extra_metadata: metadata["due_date"] = card.due_date return Document(page_content=text_content, metadata=metadata)
[]
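A short sketch using the from_credentials constructor shown above; the key, token and board name are placeholders.

from langchain.document_loaders import TrelloLoader

loader = TrelloLoader.from_credentials(
    "Product Roadmap",         # placeholder board name
    api_key="TRELLO_API_KEY",  # or set the TRELLO_API_KEY env var
    token="TRELLO_TOKEN",      # or set the TRELLO_TOKEN env var
    include_comments=False,
    card_filter="open",
)
docs = loader.load()  # one Document per card on the board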
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~symblai_nebula.py
import json import logging from typing import Any, Callable, Dict, List, Mapping, Optional import requests from requests import ConnectTimeout, ReadTimeout, RequestException from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import Extra, root_validator from langchain.utils import get_from_dict_or_env DEFAULT_NEBULA_SERVICE_URL = "https://api-nebula.symbl.ai" DEFAULT_NEBULA_SERVICE_PATH = "/v1/model/generate" logger = logging.getLogger(__name__) class Nebula(LLM): """Nebula Service models. To use, you should have the environment variable ``NEBULA_SERVICE_URL``, ``NEBULA_SERVICE_PATH`` and ``NEBULA_API_KEY`` set with your Nebula Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Nebula nebula = Nebula( nebula_service_url="NEBULA_SERVICE_URL", nebula_service_path="NEBULA_SERVICE_PATH", nebula_api_key="NEBULA_API_KEY", ) """ # noqa: E501 """Key/value arguments to pass to the model. Reserved for future use""" model_kwargs: Optional[dict] = None """Optional""" nebula_service_url: Optional[str] = None nebula_service_path: Optional[str] = None nebula_api_key: Optional[str] = None model: Optional[str] = None max_new_tokens: Optional[int] = 128 temperature: Optional[float] = 0.6 top_p: Optional[float] = 0.95 repetition_penalty: Optional[float] = 1.0 top_k: Optional[int] = 0 penalty_alpha: Optional[float] = 0.0 stop_sequences: Optional[List[str]] = None max_retries: Optional[int] = 10 class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" nebula_service_url = get_from_dict_or_env( values, "nebula_service_url", "NEBULA_SERVICE_URL", DEFAULT_NEBULA_SERVICE_URL, ) nebula_service_path = get_from_dict_or_env( values, "nebula_service_path", "NEBULA_SERVICE_PATH", DEFAULT_NEBULA_SERVICE_PATH, ) nebula_api_key = get_from_dict_or_env( values, "nebula_api_key", "NEBULA_API_KEY", None ) if nebula_service_url.endswith("/"): nebula_service_url = nebula_service_url[:-1] if not nebula_service_path.startswith("/"): nebula_service_path = "/" + nebula_service_path values["nebula_service_url"] = nebula_service_url values["nebula_service_path"] = nebula_service_path values["nebula_api_key"] = nebula_api_key return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_new_tokens": self.max_new_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "repetition_penalty": self.repetition_penalty, "penalty_alpha": self.penalty_alpha, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { "nebula_service_url": self.nebula_service_url, "nebula_service_path": self.nebula_service_path, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "nebula" def _invocation_params( self, stop_sequences: Optional[List[str]], **kwargs: Any ) -> dict: params = self._default_params if self.stop_sequences is not None and stop_sequences is not None: raise ValueError("`stop` found in both the input and default params.") 
elif self.stop_sequences is not None: params["stop_sequences"] = self.stop_sequences else: params["stop_sequences"] = stop_sequences return {**params, **kwargs} @staticmethod def _process_response(response: Any, stop: Optional[List[str]]) -> str: text = response["output"]["text"] if stop: text = enforce_stop_tokens(text, stop) return text def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Nebula Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = nebula("Tell me a joke.") """ params = self._invocation_params(stop, **kwargs) prompt = prompt.strip() if "\n" in prompt: instruction = prompt.split("\n")[0] conversation = "\n".join(prompt.split("\n")[1:]) else: raise ValueError("Prompt must contain instruction and conversation.") response = completion_with_retry( self, instruction=instruction, conversation=conversation, params=params, url=f"{self.nebula_service_url}{self.nebula_service_path}", ) _stop = params.get("stop_sequences") return self._process_response(response, _stop) def make_request( self: Nebula, instruction: str, conversation: str, url: str = f"{DEFAULT_NEBULA_SERVICE_URL}{DEFAULT_NEBULA_SERVICE_PATH}", params: Optional[Dict] = None, ) -> Any: """Generate text from the model.""" params = params or {} headers = { "Content-Type": "application/json", "ApiKey": f"{self.nebula_api_key}", } body = { "prompt": { "instruction": instruction, "conversation": {"text": f"{conversation}"}, } } # add params to body for key, value in params.items(): body[key] = value # make request response = requests.post(url, headers=headers, json=body) if response.status_code != 200: raise Exception( f"Request failed with status code {response.status_code}" f" and message {response.text}" ) return json.loads(response.text) def _create_retry_decorator(llm: Nebula) -> Callable[[Any], Any]: min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterward max_retries = llm.max_retries if llm.max_retries is not None else 3 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type((RequestException, ConnectTimeout, ReadTimeout)) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Nebula, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**_kwargs: Any) -> Any: return make_request(llm, **_kwargs) return _completion_with_retry(**kwargs)
[]
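A usage sketch for the Nebula LLM above. Note that _call splits the prompt at the first newline into an instruction line and the conversation body, so the prompt must contain at least one newline; the transcript text here is invented.

from langchain.llms import Nebula

llm = Nebula(nebula_api_key="NEBULA_API_KEY")  # placeholder key; env vars also work

# First line = instruction, the rest = conversation transcript (required by _call).
prompt = (
    "Identify the action items from this conversation.\n"
    "Alex: Can you send the budget draft by Friday?\n"
    "Sam: Sure, I'll share it Thursday evening."
)
print(llm(prompt))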
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~whatsapp_chat.py
import re
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(date: str, sender: str, text: str) -> str:
    """Combine message information in a readable format ready to be used."""
    return f"{sender} on {date}: {text}\n\n"


class WhatsAppChatLoader(BaseLoader):
    """Load `WhatsApp` messages text file."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.file_path)
        text_content = ""

        with open(p, encoding="utf8") as f:
            lines = f.readlines()

        message_line_regex = r"""
            \[?
            (
                \d{1,4}
                [\/.]
                \d{1,2}
                [\/.]
                \d{1,4}
                ,\s
                \d{1,2}
                :\d{2}
                (?:
                    :\d{2}
                )?
                (?:[\s_](?:AM|PM))?
            )
            \]?
            [\s-]*
            ([~\w\s]+)
            [:]+
            \s
            (.+)
        """
        ignore_lines = ["This message was deleted", "<Media omitted>"]
        for line in lines:
            result = re.match(
                message_line_regex, line.strip(), flags=re.VERBOSE | re.IGNORECASE
            )
            if result:
                date, sender, text = result.groups()
                if text not in ignore_lines:
                    text_content += concatenate_rows(date, sender, text)

        metadata = {"source": str(p)}

        return [Document(page_content=text_content, metadata=metadata)]
[]
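A minimal sketch for the WhatsApp loader above; the chat export path is a placeholder.

from langchain.document_loaders import WhatsAppChatLoader

# Path to a standard WhatsApp "export chat" text file (placeholder).
loader = WhatsAppChatLoader("./whatsapp_chat.txt")
docs = loader.load()  # a single Document with "<sender> on <date>: <text>" rows
print(docs[0].page_content[:200])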
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~recursive_url_loader.py
from __future__ import annotations import asyncio import logging import re from typing import ( TYPE_CHECKING, Callable, Iterator, List, Optional, Sequence, Set, Union, ) import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils.html import extract_sub_links if TYPE_CHECKING: import aiohttp logger = logging.getLogger(__name__) def _metadata_extractor(raw_html: str, url: str) -> dict: """Extract metadata from raw html using BeautifulSoup.""" metadata = {"source": url} try: from bs4 import BeautifulSoup except ImportError: logger.warning( "The bs4 package is required for default metadata extraction. " "Please install it with `pip install bs4`." ) return metadata soup = BeautifulSoup(raw_html, "html.parser") if title := soup.find("title"): metadata["title"] = title.get_text() if description := soup.find("meta", attrs={"name": "description"}): metadata["description"] = description.get("content", None) if html := soup.find("html"): metadata["language"] = html.get("lang", None) return metadata class RecursiveUrlLoader(BaseLoader): """Load all child links from a URL page. **Security Note**: This loader is a crawler that will start crawling at a given URL and then expand to crawl child links recursively. Web crawlers should generally NOT be deployed with network access to any internal servers. Control access to who can submit crawling requests and what network access the crawler has. While crawling, the crawler may encounter malicious URLs that would lead to a server-side request forgery (SSRF) attack. To mitigate risks, the crawler by default will only load URLs from the same domain as the start URL (controlled via prevent_outside named argument). This will mitigate the risk of SSRF attacks, but will not eliminate it. For example, if crawling a host which hosts several sites: https://some_host/alice_site/ https://some_host/bob_site/ A malicious URL on Alice's site could cause the crawler to make a malicious GET request to an endpoint on Bob's site. Both sites are hosted on the same host, so such a request would not be prevented by default. See https://python.langchain.com/docs/security """ def __init__( self, url: str, max_depth: Optional[int] = 2, use_async: Optional[bool] = None, extractor: Optional[Callable[[str], str]] = None, metadata_extractor: Optional[Callable[[str, str], str]] = None, exclude_dirs: Optional[Sequence[str]] = (), timeout: Optional[int] = 10, prevent_outside: bool = True, link_regex: Union[str, re.Pattern, None] = None, headers: Optional[dict] = None, check_response_status: bool = False, ) -> None: """Initialize with URL to crawl and any subdirectories to exclude. Args: url: The URL to crawl. max_depth: The max depth of the recursive loading. use_async: Whether to use asynchronous loading. If True, this function will not be lazy, but it will still work in the expected way, just not lazy. extractor: A function to extract document contents from raw html. When extract function returns an empty string, the document is ignored. metadata_extractor: A function to extract metadata from raw html and the source url (args in that order). Default extractor will attempt to use BeautifulSoup4 to extract the title, description and language of the page. exclude_dirs: A list of subdirectories to exclude. timeout: The timeout for the requests, in the unit of seconds. If None then connection will not timeout. prevent_outside: If True, prevent loading from urls which are not children of the root url. 
link_regex: Regex for extracting sub-links from the raw html of a web page. check_response_status: If True, check HTTP response status and skip URLs with error responses (400-599). """ self.url = url self.max_depth = max_depth if max_depth is not None else 2 self.use_async = use_async if use_async is not None else False self.extractor = extractor if extractor is not None else lambda x: x self.metadata_extractor = ( metadata_extractor if metadata_extractor is not None else _metadata_extractor ) self.exclude_dirs = exclude_dirs if exclude_dirs is not None else () if any(url.startswith(exclude_dir) for exclude_dir in self.exclude_dirs): raise ValueError( f"Base url is included in exclude_dirs. Received base_url: {url} and " f"exclude_dirs: {self.exclude_dirs}" ) self.timeout = timeout self.prevent_outside = prevent_outside if prevent_outside is not None else True self.link_regex = link_regex self._lock = asyncio.Lock() if self.use_async else None self.headers = headers self.check_response_status = check_response_status def _get_child_links_recursive( self, url: str, visited: Set[str], *, depth: int = 0 ) -> Iterator[Document]: """Recursively get all child links starting with the path of the input URL. Args: url: The URL to crawl. visited: A set of visited URLs. depth: Current depth of recursion. Stop when depth >= max_depth. """ if depth >= self.max_depth: return # Get all links that can be accessed from the current URL visited.add(url) try: response = requests.get(url, timeout=self.timeout, headers=self.headers) if self.check_response_status and 400 <= response.status_code <= 599: raise ValueError(f"Received HTTP status {response.status_code}") except Exception as e: logger.warning( f"Unable to load from {url}. Received error {e} of type " f"{e.__class__.__name__}" ) return content = self.extractor(response.text) if content: yield Document( page_content=content, metadata=self.metadata_extractor(response.text, url), ) # Store the visited links and recursively visit the children sub_links = extract_sub_links( response.text, url, base_url=self.url, pattern=self.link_regex, prevent_outside=self.prevent_outside, exclude_prefixes=self.exclude_dirs, ) for link in sub_links: # Check all unvisited links if link not in visited: yield from self._get_child_links_recursive( link, visited, depth=depth + 1 ) async def _async_get_child_links_recursive( self, url: str, visited: Set[str], *, session: Optional[aiohttp.ClientSession] = None, depth: int = 0, ) -> List[Document]: """Recursively get all child links starting with the path of the input URL. Args: url: The URL to crawl. visited: A set of visited URLs. depth: To reach the current url, how many pages have been visited. """ try: import aiohttp except ImportError: raise ImportError( "The aiohttp package is required for the RecursiveUrlLoader. " "Please install it with `pip install aiohttp`." ) if depth >= self.max_depth: return [] # Disable SSL verification because websites may have invalid SSL certificates, # but won't cause any security issues for us. 
close_session = session is None session = ( session if session is not None else aiohttp.ClientSession( connector=aiohttp.TCPConnector(ssl=False), timeout=aiohttp.ClientTimeout(total=self.timeout), headers=self.headers, ) ) async with self._lock: # type: ignore visited.add(url) try: async with session.get(url) as response: text = await response.text() if self.check_response_status and 400 <= response.status <= 599: raise ValueError(f"Received HTTP status {response.status}") except (aiohttp.client_exceptions.InvalidURL, Exception) as e: logger.warning( f"Unable to load {url}. Received error {e} of type " f"{e.__class__.__name__}" ) if close_session: await session.close() return [] results = [] content = self.extractor(text) if content: results.append( Document( page_content=content, metadata=self.metadata_extractor(text, url), ) ) if depth < self.max_depth - 1: sub_links = extract_sub_links( text, url, base_url=self.url, pattern=self.link_regex, prevent_outside=self.prevent_outside, exclude_prefixes=self.exclude_dirs, ) # Recursively call the function to get the children of the children sub_tasks = [] async with self._lock: # type: ignore to_visit = set(sub_links).difference(visited) for link in to_visit: sub_tasks.append( self._async_get_child_links_recursive( link, visited, session=session, depth=depth + 1 ) ) next_results = await asyncio.gather(*sub_tasks) for sub_result in next_results: if isinstance(sub_result, Exception) or sub_result is None: # We don't want to stop the whole process, so just ignore it # Not standard html format or invalid url or 404 may cause this. continue # locking not fully working, temporary hack to ensure deduplication results += [r for r in sub_result if r not in results] if close_session: await session.close() return results def lazy_load(self) -> Iterator[Document]: """Lazy load web pages. When use_async is True, this function will not be lazy, but it will still work in the expected way, just not lazy.""" visited: Set[str] = set() if self.use_async: results = asyncio.run( self._async_get_child_links_recursive(self.url, visited) ) return iter(results or []) else: return self._get_child_links_recursive(self.url, visited) def load(self) -> List[Document]: """Load web pages.""" return list(self.lazy_load())
[]
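A sketch of the RecursiveUrlLoader above with a BeautifulSoup-based extractor; the start URL is illustrative and the crawl depth is kept small.

from bs4 import BeautifulSoup

from langchain.document_loaders import RecursiveUrlLoader

loader = RecursiveUrlLoader(
    url="https://docs.python.org/3/",  # illustrative start URL
    max_depth=2,                       # stay shallow to limit requests
    extractor=lambda html: BeautifulSoup(html, "html.parser").get_text(),
    timeout=10,
    check_response_status=True,
)

# lazy_load keeps memory low; use load() to materialize the full list.
for doc in loader.lazy_load():
    print(doc.metadata.get("title"), "-", doc.metadata["source"])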
2024-01-10
ai-forever/gigachain
libs~experimental~langchain_experimental~autonomous_agents~hugginggpt~repsonse_generator.py
from typing import Any, List, Optional

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import Callbacks
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate


class ResponseGenerationChain(LLMChain):
    """Chain to execute tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        execution_template = (
            "AI-ассистент проанализировал ввод пользователя,"
            " разбил его на несколько задач"
            "и выполнил их. Результаты следующие:\n"
            "{task_execution}"
            "\nПожалуйста, суммируй результаты и сформулируй ответ."
        )
        prompt = PromptTemplate(
            template=execution_template,
            input_variables=["task_execution"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)


class ResponseGenerator:
    def __init__(self, llm_chain: LLMChain, stop: Optional[List] = None):
        self.llm_chain = llm_chain
        self.stop = stop

    def generate(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Given input, decide what to do."""
        llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks)
        return llm_response


def load_response_generator(llm: BaseLanguageModel) -> ResponseGenerator:
    llm_chain = ResponseGenerationChain.from_llm(llm)
    return ResponseGenerator(
        llm_chain=llm_chain,
    )
[ "task_execution", "AI-ассистент проанализировал ввод пользователя, разбил его на несколько задачи выполнил их. Результаты следующие:\n{task_execution}\nПожалуйста, суммируй результаты и сформулируй ответ." ]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~toml.py
import json
from pathlib import Path
from typing import Iterator, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class TomlLoader(BaseLoader):
    """Load `TOML` files.

    It can load a single source file or several files in a single
    directory.
    """

    def __init__(self, source: Union[str, Path]):
        """Initialize the TomlLoader with a source file or directory."""
        self.source = Path(source)

    def load(self) -> List[Document]:
        """Load and return all documents."""
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load the TOML documents from the source file or directory."""
        import tomli

        if self.source.is_file() and self.source.suffix == ".toml":
            files = [self.source]
        elif self.source.is_dir():
            files = list(self.source.glob("**/*.toml"))
        else:
            raise ValueError("Invalid source path or file type")

        for file_path in files:
            with file_path.open("r", encoding="utf-8") as file:
                content = file.read()
                try:
                    data = tomli.loads(content)
                    doc = Document(
                        page_content=json.dumps(data),
                        metadata={"source": str(file_path)},
                    )
                    yield doc
                except tomli.TOMLDecodeError as e:
                    print(f"Error parsing TOML file {file_path}: {e}")
[]
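A minimal sketch for the TOML loader above; the file path is a placeholder.

from langchain.document_loaders import TomlLoader

# Works for a single .toml file or a directory that is scanned recursively.
loader = TomlLoader("./pyproject.toml")
for doc in loader.lazy_load():
    print(doc.metadata["source"], doc.page_content[:80])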
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~assemblyai.py
from __future__ import annotations from enum import Enum from typing import TYPE_CHECKING, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import assemblyai class TranscriptFormat(Enum): """Transcript format to use for the document loader.""" TEXT = "text" """One document with the transcription text""" SENTENCES = "sentences" """Multiple documents, splits the transcription by each sentence""" PARAGRAPHS = "paragraphs" """Multiple documents, splits the transcription by each paragraph""" SUBTITLES_SRT = "subtitles_srt" """One document with the transcript exported in SRT subtitles format""" SUBTITLES_VTT = "subtitles_vtt" """One document with the transcript exported in VTT subtitles format""" class AssemblyAIAudioTranscriptLoader(BaseLoader): """ Loader for AssemblyAI audio transcripts. It uses the AssemblyAI API to transcribe audio files and loads the transcribed text into one or more Documents, depending on the specified format. To use, you should have the ``assemblyai`` python package installed, and the environment variable ``ASSEMBLYAI_API_KEY`` set with your API key. Alternatively, the API key can also be passed as an argument. Audio files can be specified via an URL or a local file path. """ def __init__( self, file_path: str, *, transcript_format: TranscriptFormat = TranscriptFormat.TEXT, config: Optional[assemblyai.TranscriptionConfig] = None, api_key: Optional[str] = None, ): """ Initializes the AssemblyAI AudioTranscriptLoader. Args: file_path: An URL or a local file path. transcript_format: Transcript format to use. See class ``TranscriptFormat`` for more info. config: Transcription options and features. If ``None`` is given, the Transcriber's default configuration will be used. api_key: AssemblyAI API key. """ try: import assemblyai except ImportError: raise ImportError( "Could not import assemblyai python package. " "Please install it with `pip install assemblyai`." ) if api_key is not None: assemblyai.settings.api_key = api_key self.file_path = file_path self.transcript_format = transcript_format self.transcriber = assemblyai.Transcriber(config=config) def load(self) -> List[Document]: """Transcribes the audio file and loads the transcript into documents. It uses the AssemblyAI API to transcribe the audio file and blocks until the transcription is finished. """ transcript = self.transcriber.transcribe(self.file_path) # This will raise a ValueError if no API key is set. if transcript.error: raise ValueError(f"Could not transcribe file: {transcript.error}") if self.transcript_format == TranscriptFormat.TEXT: return [ Document( page_content=transcript.text, metadata=transcript.json_response ) ] elif self.transcript_format == TranscriptFormat.SENTENCES: sentences = transcript.get_sentences() return [ Document(page_content=s.text, metadata=s.dict(exclude={"text"})) for s in sentences ] elif self.transcript_format == TranscriptFormat.PARAGRAPHS: paragraphs = transcript.get_paragraphs() return [ Document(page_content=p.text, metadata=p.dict(exclude={"text"})) for p in paragraphs ] elif self.transcript_format == TranscriptFormat.SUBTITLES_SRT: return [Document(page_content=transcript.export_subtitles_srt())] elif self.transcript_format == TranscriptFormat.SUBTITLES_VTT: return [Document(page_content=transcript.export_subtitles_vtt())] else: raise ValueError("Unknown transcript format.")
[]
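A hedged usage sketch for the AssemblyAIAudioTranscriptLoader above. The audio URL is a placeholder, `pip install assemblyai` is required, and the API key is read from ASSEMBLYAI_API_KEY unless passed explicitly via api_key.

from langchain.document_loaders.assemblyai import (
    AssemblyAIAudioTranscriptLoader,
    TranscriptFormat,
)

loader = AssemblyAIAudioTranscriptLoader(
    file_path="https://example.com/interview.mp3",   # placeholder URL; a local path also works
    transcript_format=TranscriptFormat.PARAGRAPHS,   # one Document per paragraph
)
docs = loader.load()   # blocks until AssemblyAI finishes transcribing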
2024-01-10
ai-forever/gigachain
libs~langchain~tests~integration_tests~vectorstores~test_weaviate.py
"""Test Weaviate functionality.""" import logging import os import uuid from typing import Generator, Union import pytest from langchain.docstore.document import Document from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores.weaviate import Weaviate from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings logging.basicConfig(level=logging.DEBUG) """ cd tests/integration_tests/vectorstores/docker-compose docker compose -f weaviate.yml up """ class TestWeaviate: @classmethod def setup_class(cls) -> None: if not os.getenv("OPENAI_API_KEY"): raise ValueError("OPENAI_API_KEY environment variable is not set") @pytest.fixture(scope="class", autouse=True) def weaviate_url(self) -> Union[str, Generator[str, None, None]]: """Return the weaviate url.""" from weaviate import Client url = "http://localhost:8080" yield url # Clear the test index client = Client(url) client.schema.delete_all() @pytest.mark.vcr(ignore_localhost=True) def test_similarity_search_without_metadata( self, weaviate_url: str, embedding_openai: OpenAIEmbeddings ) -> None: """Test end to end construction and search without metadata.""" texts = ["foo", "bar", "baz"] docsearch = Weaviate.from_texts( texts, embedding_openai, weaviate_url=weaviate_url, ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")] @pytest.mark.vcr(ignore_localhost=True) def test_similarity_search_with_metadata( self, weaviate_url: str, embedding_openai: OpenAIEmbeddings ) -> None: """Test end to end construction and search with metadata.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = Weaviate.from_texts( texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"page": 0})] @pytest.mark.vcr(ignore_localhost=True) def test_similarity_search_with_metadata_and_filter( self, weaviate_url: str, embedding_openai: OpenAIEmbeddings ) -> None: """Test end to end construction and search with metadata.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = Weaviate.from_texts( texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url ) output = docsearch.similarity_search( "foo", k=2, where_filter={"path": ["page"], "operator": "Equal", "valueNumber": 0}, ) assert output == [Document(page_content="foo", metadata={"page": 0})] @pytest.mark.vcr(ignore_localhost=True) def test_similarity_search_with_metadata_and_additional( self, weaviate_url: str, embedding_openai: OpenAIEmbeddings ) -> None: """Test end to end construction and search with metadata and additional.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = Weaviate.from_texts( texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url ) output = docsearch.similarity_search( "foo", k=1, additional=["certainty"], ) assert output == [ Document( page_content="foo", metadata={"page": 0, "_additional": {"certainty": 1}}, ) ] @pytest.mark.vcr(ignore_localhost=True) def test_similarity_search_with_uuids( self, weaviate_url: str, embedding_openai: OpenAIEmbeddings ) -> None: """Test end to end construction and search with uuids.""" texts = ["foo", "bar", "baz"] # Weaviate replaces the object if the UUID already exists uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, "same-name") for text in texts] metadatas = [{"page": i} for i in range(len(texts))] 
docsearch = Weaviate.from_texts( texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url, uuids=uuids, ) output = docsearch.similarity_search("foo", k=2) assert len(output) == 1 @pytest.mark.vcr(ignore_localhost=True) def test_max_marginal_relevance_search( self, weaviate_url: str, embedding_openai: OpenAIEmbeddings ) -> None: """Test end to end construction and MRR search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = Weaviate.from_texts( texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url ) # if lambda=1 the algorithm should be equivalent to standard ranking standard_ranking = docsearch.similarity_search("foo", k=2) output = docsearch.max_marginal_relevance_search( "foo", k=2, fetch_k=3, lambda_mult=1.0 ) assert output == standard_ranking # if lambda=0 the algorithm should favour maximal diversity output = docsearch.max_marginal_relevance_search( "foo", k=2, fetch_k=3, lambda_mult=0.0 ) assert output == [ Document(page_content="foo", metadata={"page": 0}), Document(page_content="bar", metadata={"page": 1}), ] @pytest.mark.vcr(ignore_localhost=True) def test_max_marginal_relevance_search_by_vector( self, weaviate_url: str, embedding_openai: OpenAIEmbeddings ) -> None: """Test end to end construction and MRR search by vector.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = Weaviate.from_texts( texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url ) foo_embedding = embedding_openai.embed_query("foo") # if lambda=1 the algorithm should be equivalent to standard ranking standard_ranking = docsearch.similarity_search("foo", k=2) output = docsearch.max_marginal_relevance_search_by_vector( foo_embedding, k=2, fetch_k=3, lambda_mult=1.0 ) assert output == standard_ranking # if lambda=0 the algorithm should favour maximal diversity output = docsearch.max_marginal_relevance_search_by_vector( foo_embedding, k=2, fetch_k=3, lambda_mult=0.0 ) assert output == [ Document(page_content="foo", metadata={"page": 0}), Document(page_content="bar", metadata={"page": 1}), ] @pytest.mark.vcr(ignore_localhost=True) def test_max_marginal_relevance_search_with_filter( self, weaviate_url: str, embedding_openai: OpenAIEmbeddings ) -> None: """Test end to end construction and MRR search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = Weaviate.from_texts( texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url ) where_filter = {"path": ["page"], "operator": "Equal", "valueNumber": 0} # if lambda=1 the algorithm should be equivalent to standard ranking standard_ranking = docsearch.similarity_search( "foo", k=2, where_filter=where_filter ) output = docsearch.max_marginal_relevance_search( "foo", k=2, fetch_k=3, lambda_mult=1.0, where_filter=where_filter ) assert output == standard_ranking # if lambda=0 the algorithm should favour maximal diversity output = docsearch.max_marginal_relevance_search( "foo", k=2, fetch_k=3, lambda_mult=0.0, where_filter=where_filter ) assert output == [ Document(page_content="foo", metadata={"page": 0}), ] def test_add_texts_with_given_embedding(self, weaviate_url: str) -> None: texts = ["foo", "bar", "baz"] embedding = FakeEmbeddings() docsearch = Weaviate.from_texts( texts, embedding=embedding, weaviate_url=weaviate_url ) docsearch.add_texts(["foo"]) output = docsearch.similarity_search_by_vector( embedding.embed_query("foo"), k=2 ) assert output == [ 
Document(page_content="foo"), Document(page_content="foo"), ] def test_add_texts_with_given_uuids(self, weaviate_url: str) -> None: texts = ["foo", "bar", "baz"] embedding = FakeEmbeddings() uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, text) for text in texts] docsearch = Weaviate.from_texts( texts, embedding=embedding, weaviate_url=weaviate_url, uuids=uuids, ) # Weaviate replaces the object if the UUID already exists docsearch.add_texts(["foo"], uuids=[uuids[0]]) output = docsearch.similarity_search_by_vector( embedding.embed_query("foo"), k=2 ) assert output[0] == Document(page_content="foo") assert output[1] != Document(page_content="foo")
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~databricks.py
import os from abc import ABC, abstractmethod from typing import Any, Callable, Dict, List, Optional import requests from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.pydantic_v1 import ( BaseModel, Extra, Field, PrivateAttr, root_validator, validator, ) __all__ = ["Databricks"] class _DatabricksClientBase(BaseModel, ABC): """A base JSON API client that talks to Databricks.""" api_url: str api_token: str def post_raw(self, request: Any) -> Any: headers = {"Authorization": f"Bearer {self.api_token}"} response = requests.post(self.api_url, headers=headers, json=request) # TODO: error handling and automatic retries if not response.ok: raise ValueError(f"HTTP {response.status_code} error: {response.text}") return response.json() @abstractmethod def post(self, request: Any) -> Any: ... class _DatabricksServingEndpointClient(_DatabricksClientBase): """An API client that talks to a Databricks serving endpoint.""" host: str endpoint_name: str @root_validator(pre=True) def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "api_url" not in values: host = values["host"] endpoint_name = values["endpoint_name"] api_url = f"https://{host}/serving-endpoints/{endpoint_name}/invocations" values["api_url"] = api_url return values def post(self, request: Any) -> Any: # See https://docs.databricks.com/machine-learning/model-serving/score-model-serving-endpoints.html wrapped_request = {"dataframe_records": [request]} response = self.post_raw(wrapped_request)["predictions"] # For a single-record query, the result is not a list. if isinstance(response, list): response = response[0] return response class _DatabricksClusterDriverProxyClient(_DatabricksClientBase): """An API client that talks to a Databricks cluster driver proxy app.""" host: str cluster_id: str cluster_driver_port: str @root_validator(pre=True) def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "api_url" not in values: host = values["host"] cluster_id = values["cluster_id"] port = values["cluster_driver_port"] api_url = f"https://{host}/driver-proxy-api/o/0/{cluster_id}/{port}" values["api_url"] = api_url return values def post(self, request: Any) -> Any: return self.post_raw(request) def get_repl_context() -> Any: """Gets the notebook REPL context if running inside a Databricks notebook. Returns None otherwise. """ try: from dbruntime.databricks_repl_context import get_context return get_context() except ImportError: raise ValueError( "Cannot access dbruntime, not running inside a Databricks notebook." ) def get_default_host() -> str: """Gets the default Databricks workspace hostname. Raises an error if the hostname cannot be automatically determined. """ host = os.getenv("DATABRICKS_HOST") if not host: try: host = get_repl_context().browserHostName if not host: raise ValueError("context doesn't contain browserHostName.") except Exception as e: raise ValueError( "host was not set and cannot be automatically inferred. Set " f"environment variable 'DATABRICKS_HOST'. Received error: {e}" ) # TODO: support Databricks CLI profile host = host.lstrip("https://").lstrip("http://").rstrip("/") return host def get_default_api_token() -> str: """Gets the default Databricks personal access token. Raises an error if the token cannot be automatically determined. 
""" if api_token := os.getenv("DATABRICKS_TOKEN"): return api_token try: api_token = get_repl_context().apiToken if not api_token: raise ValueError("context doesn't contain apiToken.") except Exception as e: raise ValueError( "api_token was not set and cannot be automatically inferred. Set " f"environment variable 'DATABRICKS_TOKEN'. Received error: {e}" ) # TODO: support Databricks CLI profile return api_token class Databricks(LLM): """Databricks serving endpoint or a cluster driver proxy app for LLM. It supports two endpoint types: * **Serving endpoint** (recommended for both production and development). We assume that an LLM was registered and deployed to a serving endpoint. To wrap it as an LLM you must have "Can Query" permission to the endpoint. Set ``endpoint_name`` accordingly and do not set ``cluster_id`` and ``cluster_driver_port``. The expected model signature is: * inputs:: [{"name": "prompt", "type": "string"}, {"name": "stop", "type": "list[string]"}] * outputs: ``[{"type": "string"}]`` * **Cluster driver proxy app** (recommended for interactive development). One can load an LLM on a Databricks interactive cluster and start a local HTTP server on the driver node to serve the model at ``/`` using HTTP POST method with JSON input/output. Please use a port number between ``[3000, 8000]`` and let the server listen to the driver IP address or simply ``0.0.0.0`` instead of localhost only. To wrap it as an LLM you must have "Can Attach To" permission to the cluster. Set ``cluster_id`` and ``cluster_driver_port`` and do not set ``endpoint_name``. The expected server schema (using JSON schema) is: * inputs:: {"type": "object", "properties": { "prompt": {"type": "string"}, "stop": {"type": "array", "items": {"type": "string"}}}, "required": ["prompt"]}` * outputs: ``{"type": "string"}`` If the endpoint model signature is different or you want to set extra params, you can use `transform_input_fn` and `transform_output_fn` to apply necessary transformations before and after the query. """ host: str = Field(default_factory=get_default_host) """Databricks workspace hostname. If not provided, the default value is determined by * the ``DATABRICKS_HOST`` environment variable if present, or * the hostname of the current Databricks workspace if running inside a Databricks notebook attached to an interactive cluster in "single user" or "no isolation shared" mode. """ api_token: str = Field(default_factory=get_default_api_token) """Databricks personal access token. If not provided, the default value is determined by * the ``DATABRICKS_TOKEN`` environment variable if present, or * an automatically generated temporary token if running inside a Databricks notebook attached to an interactive cluster in "single user" or "no isolation shared" mode. """ endpoint_name: Optional[str] = None """Name of the model serving endpoint. You must specify the endpoint name to connect to a model serving endpoint. You must not set both ``endpoint_name`` and ``cluster_id``. """ cluster_id: Optional[str] = None """ID of the cluster if connecting to a cluster driver proxy app. If neither ``endpoint_name`` nor ``cluster_id`` is not provided and the code runs inside a Databricks notebook attached to an interactive cluster in "single user" or "no isolation shared" mode, the current cluster ID is used as default. You must not set both ``endpoint_name`` and ``cluster_id``. """ cluster_driver_port: Optional[str] = None """The port number used by the HTTP server running on the cluster driver node. 
The server should listen on the driver IP address or simply ``0.0.0.0`` to connect. We recommend the server using a port number between ``[3000, 8000]``. """ model_kwargs: Optional[Dict[str, Any]] = None """Extra parameters to pass to the endpoint.""" transform_input_fn: Optional[Callable] = None """A function that transforms ``{prompt, stop, **kwargs}`` into a JSON-compatible request object that the endpoint accepts. For example, you can apply a prompt template to the input prompt. """ transform_output_fn: Optional[Callable[..., str]] = None """A function that transforms the output from the endpoint to the generated text. """ _client: _DatabricksClientBase = PrivateAttr() class Config: extra = Extra.forbid underscore_attrs_are_private = True @validator("cluster_id", always=True) def set_cluster_id(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_id.") elif values["endpoint_name"]: return None elif v: return v else: try: if v := get_repl_context().clusterId: return v raise ValueError("Context doesn't contain clusterId.") except Exception as e: raise ValueError( "Neither endpoint_name nor cluster_id was set. " "And the cluster_id cannot be automatically determined. Received" f" error: {e}" ) @validator("cluster_driver_port", always=True) def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_driver_port.") elif values["endpoint_name"]: return None elif v is None: raise ValueError( "Must set cluster_driver_port to connect to a cluster driver." ) elif int(v) <= 0: raise ValueError(f"Invalid cluster_driver_port: {v}") else: return v @validator("model_kwargs", always=True) def set_model_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: if v: assert "prompt" not in v, "model_kwargs must not contain key 'prompt'" assert "stop" not in v, "model_kwargs must not contain key 'stop'" return v def __init__(self, **data: Any): super().__init__(**data) if self.endpoint_name: self._client = _DatabricksServingEndpointClient( host=self.host, api_token=self.api_token, endpoint_name=self.endpoint_name, ) elif self.cluster_id and self.cluster_driver_port: self._client = _DatabricksClusterDriverProxyClient( host=self.host, api_token=self.api_token, cluster_id=self.cluster_id, cluster_driver_port=self.cluster_driver_port, ) else: raise ValueError( "Must specify either endpoint_name or cluster_id/cluster_driver_port." ) @property def _llm_type(self) -> str: """Return type of llm.""" return "databricks" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Queries the LLM endpoint with the given prompt and stop sequence.""" # TODO: support callbacks request = {"prompt": prompt, "stop": stop} request.update(kwargs) if self.model_kwargs: request.update(self.model_kwargs) if self.transform_input_fn: request = self.transform_input_fn(**request) response = self._client.post(request) if self.transform_output_fn: response = self.transform_output_fn(response) return response
[]
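A hedged sketch of wrapping a Databricks model serving endpoint with the Databricks LLM above. The workspace host, token and endpoint name are placeholders; inside a Databricks notebook both host and api_token can usually be inferred automatically.

from langchain.llms import Databricks

llm = Databricks(
    host="my-workspace.cloud.databricks.com",   # placeholder; or set DATABRICKS_HOST
    api_token="<personal-access-token>",        # placeholder; or set DATABRICKS_TOKEN
    endpoint_name="my-llm-endpoint",            # serving endpoint with "Can Query" permission
    model_kwargs={"temperature": 0.1},          # extra params merged into every request
)
print(llm("What is Apache Spark?"))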
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~stripe.py
import json import urllib.request from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_dict STRIPE_ENDPOINTS = { "balance_transactions": "https://api.stripe.com/v1/balance_transactions", "charges": "https://api.stripe.com/v1/charges", "customers": "https://api.stripe.com/v1/customers", "events": "https://api.stripe.com/v1/events", "refunds": "https://api.stripe.com/v1/refunds", "disputes": "https://api.stripe.com/v1/disputes", } class StripeLoader(BaseLoader): """Load from `Stripe` API.""" def __init__(self, resource: str, access_token: Optional[str] = None) -> None: """Initialize with a resource and an access token. Args: resource: The resource. access_token: The access token. """ self.resource = resource access_token = access_token or get_from_env( "access_token", "STRIPE_ACCESS_TOKEN" ) self.headers = {"Authorization": f"Bearer {access_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = STRIPE_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) def load(self) -> List[Document]: return self._get_resource()
[]
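A minimal sketch for the StripeLoader above. The access token is a placeholder for a Stripe secret key and can also be provided via the STRIPE_ACCESS_TOKEN environment variable.

from langchain.document_loaders.stripe import StripeLoader

loader = StripeLoader("charges", access_token="<stripe-secret-key>")  # any key of STRIPE_ENDPOINTS
docs = loader.load()   # a single Document whose text is the stringified JSON response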
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~gigachat.py
from __future__ import annotations import logging from functools import cached_property from typing import Any, AsyncIterator, Dict, Iterator, List, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import BaseLLM from langchain.load.serializable import Serializable from langchain.pydantic_v1 import root_validator from langchain.schema.output import Generation, GenerationChunk, LLMResult logger = logging.getLogger(__name__) class _BaseGigaChat(Serializable): base_url: Optional[str] = None """ Base API URL """ auth_url: Optional[str] = None """ Auth URL """ credentials: Optional[str] = None """ Auth Token """ scope: Optional[str] = None """ Permission scope for access token """ access_token: Optional[str] = None """ Access token for GigaChat """ model: Optional[str] = None """Model name to use.""" user: Optional[str] = None """ Username for authenticate """ password: Optional[str] = None """ Password for authenticate """ timeout: Optional[float] = None """ Timeout for request """ verify_ssl_certs: Optional[bool] = None """ Check certificates for all requests """ ca_bundle_file: Optional[str] = None cert_file: Optional[str] = None key_file: Optional[str] = None key_file_password: Optional[str] = None # Support for connection to GigaChat through SSL certificates profanity: bool = True """ Check for profanity """ streaming: bool = False """ Whether to stream the results or not. """ temperature: Optional[float] = None """What sampling temperature to use.""" max_tokens: Optional[int] = None """ Maximum number of tokens to generate """ @property def _llm_type(self) -> str: return "giga-chat-model" @property def lc_secrets(self) -> Dict[str, str]: return { "credentials": "GIGACHAT_CREDENTIALS", "access_token": "GIGACHAT_ACCESS_TOKEN", "password": "GIGACHAT_PASSWORD", "key_file_password": "GIGACHAT_KEY_FILE_PASSWORD", } @property def lc_serializable(self) -> bool: return True @cached_property def _client(self) -> Any: """Returns GigaChat API client""" import gigachat return gigachat.GigaChat( base_url=self.base_url, auth_url=self.auth_url, credentials=self.credentials, scope=self.scope, access_token=self.access_token, model=self.model, user=self.user, password=self.password, timeout=self.timeout, verify_ssl_certs=self.verify_ssl_certs, ca_bundle_file=self.ca_bundle_file, cert_file=self.cert_file, key_file=self.key_file, key_file_password=self.key_file_password, ) @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate authenticate data in environment and python package is installed.""" try: import gigachat # noqa: F401 except ImportError: raise ImportError( "Could not import gigachat python package. " "Please install it with `pip install gigachat`." ) return values @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "temperature": self.temperature, "model": self.model, "profanity": self.profanity, "streaming": self.streaming, "max_tokens": self.max_tokens, } class GigaChat(_BaseGigaChat, BaseLLM): """`GigaChat` large language models API. To use, you should pass login and password to access GigaChat API or use token. Example: .. 
code-block:: python from langchain.llms import GigaChat giga = GigaChat(credentials=..., verify_ssl_certs=False) """ def _build_payload(self, messages: List[str]) -> Dict[str, Any]: payload: Dict[str, Any] = { "messages": [{"role": "user", "content": m} for m in messages], "profanity_check": self.profanity, } if self.temperature is not None: payload["temperature"] = self.temperature if self.max_tokens is not None: payload["max_tokens"] = self.max_tokens if self.model: payload["model"] = self.model if self.verbose: logger.info("Giga request: %s", payload) return payload def _create_llm_result(self, response: Any) -> LLMResult: generations = [] for res in response.choices: finish_reason = res.finish_reason gen = Generation( text=res.message.content, generation_info={"finish_reason": finish_reason}, ) generations.append([gen]) if finish_reason != "stop": logger.warning( "Giga generation stopped with reason: %s", finish_reason, ) if self.verbose: logger.info("Giga response: %s", res.message.content) token_usage = response.usage llm_output = {"token_usage": token_usage, "model_name": response.model} return LLMResult(generations=generations, llm_output=llm_output) def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> LLMResult: should_stream = stream if stream is not None else self.streaming if should_stream: generation: Optional[GenerationChunk] = None stream_iter = self._stream( prompts[0], stop=stop, run_manager=run_manager, **kwargs ) for chunk in stream_iter: if generation is None: generation = chunk else: generation += chunk assert generation is not None return LLMResult(generations=[[generation]]) payload = self._build_payload(prompts) response = self._client.chat(payload) return self._create_llm_result(response) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, stream: Optional[bool] = None, **kwargs: Any, ) -> LLMResult: should_stream = stream if stream is not None else self.streaming if should_stream: generation: Optional[GenerationChunk] = None stream_iter = self._astream( prompts[0], stop=stop, run_manager=run_manager, **kwargs ) async for chunk in stream_iter: if generation is None: generation = chunk else: generation += chunk assert generation is not None return LLMResult(generations=[[generation]]) payload = self._build_payload(prompts) response = await self._client.achat(payload) return self._create_llm_result(response) def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: payload = self._build_payload([prompt]) for chunk in self._client.stream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield GenerationChunk(text=content) if run_manager: run_manager.on_llm_new_token(content) async def _astream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: payload = self._build_payload([prompt]) async for chunk in self._client.astream(payload): if chunk.choices: content = chunk.choices[0].delta.content yield GenerationChunk(text=content) if run_manager: await run_manager.on_llm_new_token(content) def get_num_tokens(self, text: str) -> int: """Count approximate number of tokens""" return round(len(text) / 4.6)
[]
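A short usage sketch for the GigaChat LLM above, following its own docstring example; the credentials value is a placeholder authorization token.

from langchain.llms import GigaChat

giga = GigaChat(
    credentials="<authorization-token>",   # placeholder
    verify_ssl_certs=False,                # as in the docstring example
    temperature=0.3,
    max_tokens=200,
)
print(giga("Привет! Кто ты?"))
print(giga.get_num_tokens("Привет! Кто ты?"))   # rough estimate: len(text) / 4.6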
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~psychic.py
from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader class PsychicLoader(BaseLoader): """Load from `Psychic.dev`.""" def __init__( self, api_key: str, account_id: str, connector_id: Optional[str] = None ): """Initialize with API key, connector id, and account id. Args: api_key: The Psychic API key. account_id: The Psychic account id. connector_id: The Psychic connector id. """ try: from psychicapi import ConnectorId, Psychic # noqa: F401 except ImportError: raise ImportError( "`psychicapi` package not found, please run `pip install psychicapi`" ) self.psychic = Psychic(secret_key=api_key) self.connector_id = ConnectorId(connector_id) self.account_id = account_id def load(self) -> List[Document]: """Load documents.""" psychic_docs = self.psychic.get_documents( connector_id=self.connector_id, account_id=self.account_id ) return [ Document( page_content=doc["content"], metadata={"title": doc["title"], "source": doc["uri"]}, ) for doc in psychic_docs.documents ]
[]
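A hypothetical invocation of the PsychicLoader above. All three identifiers are placeholders, `pip install psychicapi` is required, and the connector_id value must correspond to a valid psychicapi ConnectorId.

from langchain.document_loaders.psychic import PsychicLoader

loader = PsychicLoader(
    api_key="<psychic-secret-key>",
    account_id="<account-id>",
    connector_id="notion",   # placeholder; wrapped into psychicapi.ConnectorId internally
)
docs = loader.load()   # one Document per item, with title and source (the document uri) in metadata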
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~modern_treasury.py
import json import urllib.request from base64 import b64encode from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_value MODERN_TREASURY_ENDPOINTS = { "payment_orders": "https://app.moderntreasury.com/api/payment_orders", "expected_payments": "https://app.moderntreasury.com/api/expected_payments", "returns": "https://app.moderntreasury.com/api/returns", "incoming_payment_details": "https://app.moderntreasury.com/api/\ incoming_payment_details", "counterparties": "https://app.moderntreasury.com/api/counterparties", "internal_accounts": "https://app.moderntreasury.com/api/internal_accounts", "external_accounts": "https://app.moderntreasury.com/api/external_accounts", "transactions": "https://app.moderntreasury.com/api/transactions", "ledgers": "https://app.moderntreasury.com/api/ledgers", "ledger_accounts": "https://app.moderntreasury.com/api/ledger_accounts", "ledger_transactions": "https://app.moderntreasury.com/api/ledger_transactions", "events": "https://app.moderntreasury.com/api/events", "invoices": "https://app.moderntreasury.com/api/invoices", } class ModernTreasuryLoader(BaseLoader): """Load from `Modern Treasury`.""" def __init__( self, resource: str, organization_id: Optional[str] = None, api_key: Optional[str] = None, ) -> None: """ Args: resource: The Modern Treasury resource to load. organization_id: The Modern Treasury organization ID. It can also be specified via the environment variable "MODERN_TREASURY_ORGANIZATION_ID". api_key: The Modern Treasury API key. It can also be specified via the environment variable "MODERN_TREASURY_API_KEY". """ self.resource = resource organization_id = organization_id or get_from_env( "organization_id", "MODERN_TREASURY_ORGANIZATION_ID" ) api_key = api_key or get_from_env("api_key", "MODERN_TREASURY_API_KEY") credentials = f"{organization_id}:{api_key}".encode("utf-8") basic_auth_token = b64encode(credentials).decode("utf-8") self.headers = {"Authorization": f"Basic {basic_auth_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_value(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) def load(self) -> List[Document]: return self._get_resource()
[]
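A parallel sketch for the ModernTreasuryLoader above; both credentials are placeholders and can instead come from MODERN_TREASURY_ORGANIZATION_ID and MODERN_TREASURY_API_KEY.

from langchain.document_loaders.modern_treasury import ModernTreasuryLoader

loader = ModernTreasuryLoader(
    "payment_orders",                      # any key of MODERN_TREASURY_ENDPOINTS
    organization_id="<organization-id>",   # placeholder
    api_key="<api-key>",                   # placeholder
)
docs = loader.load()   # a single Document with the stringified JSON response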
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~chains~graph_qa~cypher.py
"""Question answering over a graph.""" from __future__ import annotations import re from typing import Any, Dict, List, Optional from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.graph_store import GraphStore from langchain.pydantic_v1 import Field from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel INTERMEDIATE_STEPS_KEY = "intermediate_steps" def extract_cypher(text: str) -> str: """Extract Cypher code from a text. Args: text: Text to extract Cypher code from. Returns: Cypher code extracted from the text. """ # The pattern to find Cypher code enclosed in triple backticks pattern = r"```(.*?)```" # Find all matches in the input text matches = re.findall(pattern, text, re.DOTALL) return matches[0] if matches else text def construct_schema( structured_schema: Dict[str, Any], include_types: List[str], exclude_types: List[str], ) -> str: """Filter the schema based on included or excluded types""" def filter_func(x: str) -> bool: return x in include_types if include_types else x not in exclude_types filtered_schema = { "node_props": { k: v for k, v in structured_schema.get("node_props", {}).items() if filter_func(k) }, "rel_props": { k: v for k, v in structured_schema.get("rel_props", {}).items() if filter_func(k) }, "relationships": [ r for r in structured_schema.get("relationships", []) if all(filter_func(r[t]) for t in ["start", "end", "type"]) ], } return ( f"Node properties are the following: \n {filtered_schema['node_props']}\n" f"Relationships properties are the following: \n {filtered_schema['rel_props']}" "\nRelationships are: \n" + str( [ f"(:{el['start']})-[:{el['type']}]->(:{el['end']})" for el in filtered_schema["relationships"] ] ) ) class GraphCypherQAChain(Chain): """Chain for question-answering against a graph by generating Cypher statements. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ graph: GraphStore = Field(exclude=True) cypher_generation_chain: LLMChain qa_chain: LLMChain graph_schema: str input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: top_k: int = 10 """Number of results to return from the query""" return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" return_direct: bool = False """Whether or not to return the result of querying the graph directly.""" cypher_query_corrector: Optional[CypherQueryCorrector] = None """Optional cypher validation tool""" @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. 
:meta private: """ _output_keys = [self.output_key] return _output_keys @property def _chain_type(self) -> str: return "graph_cypher_chain" @classmethod def from_llm( cls, llm: Optional[BaseLanguageModel] = None, *, qa_prompt: Optional[BasePromptTemplate] = None, cypher_prompt: Optional[BasePromptTemplate] = None, cypher_llm: Optional[BaseLanguageModel] = None, qa_llm: Optional[BaseLanguageModel] = None, exclude_types: List[str] = [], include_types: List[str] = [], validate_cypher: bool = False, qa_llm_kwargs: Optional[Dict[str, Any]] = None, cypher_llm_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> GraphCypherQAChain: """Initialize from LLM.""" if not cypher_llm and not llm: raise ValueError("Either `llm` or `cypher_llm` parameters must be provided") if not qa_llm and not llm: raise ValueError("Either `llm` or `qa_llm` parameters must be provided") if cypher_llm and qa_llm and llm: raise ValueError( "You can specify up to two of 'cypher_llm', 'qa_llm'" ", and 'llm', but not all three simultaneously." ) if cypher_prompt and cypher_llm_kwargs: raise ValueError( "Specifying cypher_prompt and cypher_llm_kwargs together is" " not allowed. Please pass prompt via cypher_llm_kwargs." ) if qa_prompt and qa_llm_kwargs: raise ValueError( "Specifying qa_prompt and qa_llm_kwargs together is" " not allowed. Please pass prompt via qa_llm_kwargs." ) use_qa_llm_kwargs = qa_llm_kwargs if qa_llm_kwargs is not None else {} use_cypher_llm_kwargs = ( cypher_llm_kwargs if cypher_llm_kwargs is not None else {} ) if "prompt" not in use_qa_llm_kwargs: use_qa_llm_kwargs["prompt"] = ( qa_prompt if qa_prompt is not None else CYPHER_QA_PROMPT ) if "prompt" not in use_cypher_llm_kwargs: use_cypher_llm_kwargs["prompt"] = ( cypher_prompt if cypher_prompt is not None else CYPHER_GENERATION_PROMPT ) qa_chain = LLMChain(llm=qa_llm or llm, **use_qa_llm_kwargs) cypher_generation_chain = LLMChain( llm=cypher_llm or llm, **use_cypher_llm_kwargs ) if exclude_types and include_types: raise ValueError( "Either `exclude_types` or `include_types` " "can be provided, but not both" ) graph_schema = construct_schema( kwargs["graph"].get_structured_schema, include_types, exclude_types ) cypher_query_corrector = None if validate_cypher: corrector_schema = [ Schema(el["start"], el["type"], el["end"]) for el in kwargs["graph"].structured_schema.get("relationships") ] cypher_query_corrector = CypherQueryCorrector(corrector_schema) return cls( graph_schema=graph_schema, qa_chain=qa_chain, cypher_generation_chain=cypher_generation_chain, cypher_query_corrector=cypher_query_corrector, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] intermediate_steps: List = [] generated_cypher = self.cypher_generation_chain.run( {"question": question, "schema": self.graph_schema}, callbacks=callbacks ) # Extract Cypher code if it is wrapped in backticks generated_cypher = extract_cypher(generated_cypher) # Correct Cypher query if enabled if self.cypher_query_corrector: generated_cypher = self.cypher_query_corrector(generated_cypher) _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_cypher, color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"query": 
generated_cypher}) # Retrieve and limit the number of results # Generated Cypher can be null if the query corrector identifies an invalid schema if generated_cypher: context = self.graph.query(generated_cypher)[: self.top_k] else: context = [] if self.return_direct: final_result = context else: _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"context": context}) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) final_result = result[self.qa_chain.output_key] chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result
[]
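A hedged sketch of assembling the GraphCypherQAChain above. The Neo4j connection details are placeholders, ChatOpenAI stands in for any chat model, and validate_cypher=True enables the CypherQueryCorrector pass built in from_llm.

from langchain.chains.graph_qa.cypher import GraphCypherQAChain
from langchain.chat_models import ChatOpenAI
from langchain.graphs import Neo4jGraph

graph = Neo4jGraph(
    url="bolt://localhost:7687",   # placeholder connection details
    username="neo4j",
    password="<password>",
)
chain = GraphCypherQAChain.from_llm(
    llm=ChatOpenAI(temperature=0),
    graph=graph,             # forwarded to the chain via **kwargs
    validate_cypher=True,    # correct relationship directions in the generated Cypher
)
print(chain.run("Which movies did Tom Hanks act in?"))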
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~amazon_api_gateway.py
from typing import Any, Dict, List, Mapping, Optional import requests from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import Extra class ContentHandlerAmazonAPIGateway: """Adapter to prepare the inputs from Langchain to a format that LLM model expects. It also provides helper function to extract the generated text from the model response.""" @classmethod def transform_input( cls, prompt: str, model_kwargs: Dict[str, Any] ) -> Dict[str, Any]: return {"inputs": prompt, "parameters": model_kwargs} @classmethod def transform_output(cls, response: Any) -> str: return response.json()[0]["generated_text"] class AmazonAPIGateway(LLM): """Amazon API Gateway to access LLM models hosted on AWS.""" api_url: str """API Gateway URL""" headers: Optional[Dict] = None """API Gateway HTTP Headers to send, e.g. for authentication""" model_kwargs: Optional[Dict] = None """Keyword arguments to pass to the model.""" content_handler: ContentHandlerAmazonAPIGateway = ContentHandlerAmazonAPIGateway() """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"api_url": self.api_url, "headers": self.headers}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "amazon_api_gateway" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Amazon API Gateway model. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} payload = self.content_handler.transform_input(prompt, _model_kwargs) try: response = requests.post( self.api_url, headers=self.headers, json=payload, ) text = self.content_handler.transform_output(response) except Exception as error: raise ValueError(f"Error raised by the service: {error}") if stop is not None: text = enforce_stop_tokens(text, stop) return text
[]
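A minimal sketch for the AmazonAPIGateway LLM above. The URL is a placeholder for an API Gateway stage fronting a model that accepts the {"inputs": ..., "parameters": ...} payload and returns [{"generated_text": ...}], which is what the default content handler expects.

from langchain.llms.amazon_api_gateway import AmazonAPIGateway

llm = AmazonAPIGateway(
    api_url="https://<api-id>.execute-api.<region>.amazonaws.com/prod",  # placeholder
    model_kwargs={"max_new_tokens": 100, "temperature": 0.1},
)
print(llm("Tell me a joke."))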
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~pai_eas_endpoint.py
import json import logging from typing import Any, Dict, Iterator, List, Mapping, Optional import requests from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import root_validator from langchain.schema.output import GenerationChunk from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class PaiEasEndpoint(LLM): """Langchain LLM class to help to access eass llm service. To use this endpoint, must have a deployed eas chat llm service on PAI AliCloud. One can set the environment variable ``eas_service_url`` and ``eas_service_token``. The environment variables can set with your eas service url and service token. Example: .. code-block:: python from langchain.llms.pai_eas_endpoint import PaiEasEndpoint eas_chat_endpoint = PaiEasChatEndpoint( eas_service_url="your_service_url", eas_service_token="your_service_token" ) """ """PAI-EAS Service URL""" eas_service_url: str """PAI-EAS Service TOKEN""" eas_service_token: str """PAI-EAS Service Infer Params""" max_new_tokens: Optional[int] = 512 temperature: Optional[float] = 0.95 top_p: Optional[float] = 0.1 top_k: Optional[int] = 0 stop_sequences: Optional[List[str]] = None """Enable stream chat mode.""" streaming: bool = False """Key/value arguments to pass to the model. Reserved for future use""" model_kwargs: Optional[dict] = None version: Optional[str] = "2.0" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["eas_service_url"] = get_from_dict_or_env( values, "eas_service_url", "EAS_SERVICE_URL" ) values["eas_service_token"] = get_from_dict_or_env( values, "eas_service_token", "EAS_SERVICE_TOKEN" ) return values @property def _llm_type(self) -> str: """Return type of llm.""" return "pai_eas_endpoint" @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_new_tokens": self.max_new_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "stop_sequences": [], } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { "eas_service_url": self.eas_service_url, "eas_service_token": self.eas_service_token, **_model_kwargs, } def _invocation_params( self, stop_sequences: Optional[List[str]], **kwargs: Any ) -> dict: params = self._default_params if self.stop_sequences is not None and stop_sequences is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop_sequences is not None: params["stop"] = self.stop_sequences else: params["stop"] = stop_sequences if self.model_kwargs: params.update(self.model_kwargs) return {**params, **kwargs} @staticmethod def _process_response( response: Any, stop: Optional[List[str]], version: Optional[str] ) -> str: if version == "1.0": text = response else: text = response["response"] if stop: text = enforce_stop_tokens(text, stop) return "".join(text) def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: params = self._invocation_params(stop, **kwargs) prompt = prompt.strip() response = None try: if self.streaming: completion = "" for chunk in self._stream(prompt, stop, run_manager, **params): completion += chunk.text return completion else: response = 
self._call_eas(prompt, params) _stop = params.get("stop") return self._process_response(response, _stop, self.version) except Exception as error: raise ValueError(f"Error raised by the service: {error}") def _call_eas(self, prompt: str = "", params: Dict = {}) -> Any: """Generate text from the eas service.""" headers = { "Content-Type": "application/json", "Authorization": f"{self.eas_service_token}", } if self.version == "1.0": body = { "input_ids": f"{prompt}", } else: body = { "prompt": f"{prompt}", } # add params to body for key, value in params.items(): body[key] = value # make request response = requests.post(self.eas_service_url, headers=headers, json=body) if response.status_code != 200: raise Exception( f"Request failed with status code {response.status_code}" f" and message {response.text}" ) try: return json.loads(response.text) except Exception as e: if isinstance(e, json.decoder.JSONDecodeError): return response.text raise e def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: invocation_params = self._invocation_params(stop, **kwargs) headers = { "User-Agent": "Test Client", "Authorization": f"{self.eas_service_token}", } if self.version == "1.0": pload = {"input_ids": prompt, **invocation_params} response = requests.post( self.eas_service_url, headers=headers, json=pload, stream=True ) res = GenerationChunk(text=response.text) if run_manager: run_manager.on_llm_new_token(res.text) # yield text, if any yield res else: pload = {"prompt": prompt, "use_stream_chat": "True", **invocation_params} response = requests.post( self.eas_service_url, headers=headers, json=pload, stream=True ) for chunk in response.iter_lines( chunk_size=8192, decode_unicode=False, delimiter=b"\0" ): if chunk: data = json.loads(chunk.decode("utf-8")) output = data["response"] # identify stop sequence in generated text, if any stop_seq_found: Optional[str] = None for stop_seq in invocation_params["stop"]: if stop_seq in output: stop_seq_found = stop_seq # identify text to yield text: Optional[str] = None if stop_seq_found: text = output[: output.index(stop_seq_found)] else: text = output # yield text, if any if text: res = GenerationChunk(text=text) yield res if run_manager: run_manager.on_llm_new_token(res.text) # break if stop sequence found if stop_seq_found: break
[]
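A hedged sketch for the PaiEasEndpoint LLM above, following its docstring; the service URL and token are placeholders and can also be supplied through EAS_SERVICE_URL and EAS_SERVICE_TOKEN.

from langchain.llms.pai_eas_endpoint import PaiEasEndpoint

llm = PaiEasEndpoint(
    eas_service_url="<your-service-url>",      # placeholder
    eas_service_token="<your-service-token>",  # placeholder
    temperature=0.8,
    max_new_tokens=128,
)
print(llm("Say foo:"))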
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~embeddings~mosaicml.py
from typing import Any, Dict, List, Mapping, Optional, Tuple import requests from langchain.pydantic_v1 import BaseModel, Extra, root_validator from langchain.schema.embeddings import Embeddings from langchain.utils import get_from_dict_or_env class MosaicMLInstructorEmbeddings(BaseModel, Embeddings): """MosaicML embedding service. To use, you should have the environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import MosaicMLInstructorEmbeddings endpoint_url = ( "https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict" ) mosaic_llm = MosaicMLInstructorEmbeddings( endpoint_url=endpoint_url, mosaicml_api_token="my-api-key" ) """ endpoint_url: str = ( "https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict" ) """Endpoint URL to use.""" embed_instruction: str = "Represent the document for retrieval: " """Instruction used to embed documents.""" query_instruction: str = ( "Represent the question for retrieving supporting documents: " ) """Instruction used to embed the query.""" retry_sleep: float = 1.0 """How long to try sleeping for if a rate limit is encountered""" mosaicml_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env( values, "mosaicml_api_token", "MOSAICML_API_TOKEN" ) values["mosaicml_api_token"] = mosaicml_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"endpoint_url": self.endpoint_url} def _embed( self, input: List[Tuple[str, str]], is_retry: bool = False ) -> List[List[float]]: payload = {"input_strings": input} # HTTP headers for authorization headers = { "Authorization": f"{self.mosaicml_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post(self.endpoint_url, headers=headers, json=payload) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") try: if response.status_code == 429: if not is_retry: import time time.sleep(self.retry_sleep) return self._embed(input, is_retry=True) raise ValueError( f"Error raised by inference API: rate limit exceeded.\nResponse: " f"{response.text}" ) parsed_response = response.json() # The inference API has changed a couple of times, so we add some handling # to be robust to multiple response formats. if isinstance(parsed_response, dict): output_keys = ["data", "output", "outputs"] for key in output_keys: if key in parsed_response: output_item = parsed_response[key] break else: raise ValueError( f"No key data or output in response: {parsed_response}" ) if isinstance(output_item, list) and isinstance(output_item[0], list): embeddings = output_item else: embeddings = [output_item] else: raise ValueError(f"Unexpected response type: {parsed_response}") except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {response.text}" ) return embeddings def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a MosaicML deployed instructor embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. 
""" instruction_pairs = [(self.embed_instruction, text) for text in texts] embeddings = self._embed(instruction_pairs) return embeddings def embed_query(self, text: str) -> List[float]: """Embed a query using a MosaicML deployed instructor embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = (self.query_instruction, text) embedding = self._embed([instruction_pair])[0] return embedding
[]
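A short sketch for the MosaicMLInstructorEmbeddings above; the token is a placeholder (or set MOSAICML_API_TOKEN) and the default instructor-xl endpoint is used.

from langchain.embeddings.mosaicml import MosaicMLInstructorEmbeddings

embeddings = MosaicMLInstructorEmbeddings(mosaicml_api_token="<api-token>")
doc_vectors = embeddings.embed_documents(["first document", "second document"])
query_vector = embeddings.embed_query("what does the first document say?")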
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~memory~chat_message_histories~in_memory.py
from typing import List from langchain.pydantic_v1 import BaseModel, Field from langchain.schema import ( BaseChatMessageHistory, ) from langchain.schema.messages import BaseMessage class ChatMessageHistory(BaseChatMessageHistory, BaseModel): """In memory implementation of chat message history. Stores messages in an in memory list. """ messages: List[BaseMessage] = Field(default_factory=list) def add_message(self, message: BaseMessage) -> None: """Add a self-created message to the store""" self.messages.append(message) def clear(self) -> None: self.messages = []
[]
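A minimal sketch for the in-memory ChatMessageHistory above; add_user_message and add_ai_message are convenience helpers inherited from BaseChatMessageHistory.

from langchain.memory import ChatMessageHistory

history = ChatMessageHistory()
history.add_user_message("hi!")
history.add_ai_message("Hello! How can I help?")
print(history.messages)   # [HumanMessage(content='hi!'), AIMessage(content='Hello! How can I help?')]
history.clear()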
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~slack_directory.py
import json import zipfile from pathlib import Path from typing import Dict, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader class SlackDirectoryLoader(BaseLoader): """Load from a `Slack` directory dump.""" def __init__(self, zip_path: str, workspace_url: Optional[str] = None): """Initialize the SlackDirectoryLoader. Args: zip_path (str): The path to the Slack directory dump zip file. workspace_url (Optional[str]): The Slack workspace URL. Including the URL will turn sources into links. Defaults to None. """ self.zip_path = Path(zip_path) self.workspace_url = workspace_url self.channel_id_map = self._get_channel_id_map(self.zip_path) @staticmethod def _get_channel_id_map(zip_path: Path) -> Dict[str, str]: """Get a dictionary mapping channel names to their respective IDs.""" with zipfile.ZipFile(zip_path, "r") as zip_file: try: with zip_file.open("channels.json", "r") as f: channels = json.load(f) return {channel["name"]: channel["id"] for channel in channels} except KeyError: return {} def load(self) -> List[Document]: """Load and return documents from the Slack directory dump.""" docs = [] with zipfile.ZipFile(self.zip_path, "r") as zip_file: for channel_path in zip_file.namelist(): channel_name = Path(channel_path).parent.name if not channel_name: continue if channel_path.endswith(".json"): messages = self._read_json(zip_file, channel_path) for message in messages: document = self._convert_message_to_document( message, channel_name ) docs.append(document) return docs def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]: """Read JSON data from a zip subfile.""" with zip_file.open(file_path, "r") as f: data = json.load(f) return data def _convert_message_to_document( self, message: dict, channel_name: str ) -> Document: """ Convert a message to a Document object. Args: message (dict): A message in the form of a dictionary. channel_name (str): The name of the channel the message belongs to. Returns: Document: A Document object representing the message. """ text = message.get("text", "") metadata = self._get_message_metadata(message, channel_name) return Document( page_content=text, metadata=metadata, ) def _get_message_metadata(self, message: dict, channel_name: str) -> dict: """Create and return metadata for a given message and channel.""" timestamp = message.get("ts", "") user = message.get("user", "") source = self._get_message_source(channel_name, user, timestamp) return { "source": source, "channel": channel_name, "timestamp": timestamp, "user": user, } def _get_message_source(self, channel_name: str, user: str, timestamp: str) -> str: """ Get the message source as a string. Args: channel_name (str): The name of the channel the message belongs to. user (str): The user ID who sent the message. timestamp (str): The timestamp of the message. Returns: str: The message source. """ if self.workspace_url: channel_id = self.channel_id_map.get(channel_name, "") return ( f"{self.workspace_url}/archives/{channel_id}" + f"/p{timestamp.replace('.', '')}" ) else: return f"{channel_name} - {user} - {timestamp}"
[]
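A usage sketch for the SlackDirectoryLoader above; the zip path and workspace URL are placeholders for a Slack export archive and its workspace.

from langchain.document_loaders.slack_directory import SlackDirectoryLoader

loader = SlackDirectoryLoader(
    zip_path="./slack_export.zip",                    # placeholder export archive
    workspace_url="https://my-workspace.slack.com",   # optional; turns sources into permalinks
)
docs = loader.load()
print(docs[0].metadata)   # source, channel, timestamp, user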
2024-01-10
ai-forever/gigachain
libs~langchain~tests~integration_tests~llms~test_xinference.py
"""Test Xinference wrapper.""" import time from typing import AsyncGenerator, Tuple import pytest_asyncio from langchain.llms import Xinference @pytest_asyncio.fixture async def setup() -> AsyncGenerator[Tuple[str, str], None]: import xoscar as xo from xinference.deploy.supervisor import start_supervisor_components from xinference.deploy.utils import create_worker_actor_pool from xinference.deploy.worker import start_worker_components pool = await create_worker_actor_pool( f"test://127.0.0.1:{xo.utils.get_next_port()}" ) print(f"Pool running on localhost:{pool.external_address}") endpoint = await start_supervisor_components( pool.external_address, "127.0.0.1", xo.utils.get_next_port() ) await start_worker_components( address=pool.external_address, supervisor_address=pool.external_address ) # wait for the api. time.sleep(3) async with pool: yield endpoint, pool.external_address def test_xinference_llm_(setup: Tuple[str, str]) -> None: from xinference.client import RESTfulClient endpoint, _ = setup client = RESTfulClient(endpoint) model_uid = client.launch_model( model_name="vicuna-v1.3", model_size_in_billions=7, quantization="q4_0" ) llm = Xinference(server_url=endpoint, model_uid=model_uid) answer = llm(prompt="Q: What food can we try in the capital of France? A:") assert isinstance(answer, str) answer = llm( prompt="Q: where can we visit in the capital of France? A:", generate_config={"max_tokens": 1024, "stream": True}, ) assert isinstance(answer, str)
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~chains~qa_with_sources~map_reduce_prompt.py
# flake8: noqa from langchain.prompts import PromptTemplate question_prompt_template = """Используй следующий фрагмент длинного документа, чтобы увидеть, содержит ли текст информацию, относящуюся к ответу на вопрос. Верни любой релевантный текст дословно. {context} Question: {question} Релевантный текст, если таковой имеется:""" QUESTION_PROMPT = PromptTemplate( template=question_prompt_template, input_variables=["context", "question"] ) combine_prompt_template = """Учитывая следующие извлеченные части длинного документа и вопрос, создай окончательный ответ с ссылками ("SOURCES"). Если ты не знаешь ответа, просто скажи, что не знаешь. Не пытайся выдумать ответ. ВСЕГДА возвращай часть "SOURCES" в своем ответе. Question: Какое государственное/страновое законодательство регулирует толкование контракта? ========= Содержание: Это Соглашение регулируется английским законодательством, и стороны подчиняются исключительной юрисдикции английских судов в отношении любого спора (контрактного или внеконтрактного) по данному Соглашению, за исключением того, что любая из сторон может обратиться в любой суд за получением судебного запрета или иного средства защиты своих прав интеллектуальной собственности. Источник: 28-pl Содержание: Неотзыв. Несоблюдение или задержка в осуществлении любого права или средства правовой защиты по данному Соглашению не составляет отказа от такого (или любого другого) права или средства правовой защиты.\n\n11.7 Разделимость. Недействительность, незаконность или неосуществимость любого условия (или его части) данного Соглашения не влияет на продолжение действия остатка условия (если таковой имеется) и данного Соглашения.\n\n11.8 Нет агентства. За исключением случаев, прямо указанных в противном случае, ничто в данном Соглашении не создает агентства, партнерства или совместного предприятия любого рода между сторонами.\n\n11.9 Нет третьих лиц-бенефициаров. Источник: 30-pl Содержание: (b) если Google верит, в доброй вере, что Дистрибьютор нарушил или заставил Google нарушить любые Антикоррупционные законы (как определено в пункте 8.5) или что такое нарушение вполне вероятно, Источник: 4-pl ========= FINAL ANSWER: Это Соглашение регулируется английским законодательством. SOURCES: 28-pl Question: {question} ========= {summaries} ========= FINAL ANSWER:""" COMBINE_PROMPT = PromptTemplate( template=combine_prompt_template, input_variables=["summaries", "question"] ) EXAMPLE_PROMPT = PromptTemplate( template="Содержание: {page_content}\nИсточник: {source}", input_variables=["page_content", "source"], )
[ "question", "Используй следующий фрагмент длинного документа, чтобы увидеть, содержит ли текст информацию, относящуюся к ответу на вопрос.\nВерни любой релевантный текст дословно.\n{context}\nQuestion: {question}\nРелевантный текст, если таковой имеется:", "Учитывая следующие извлеченные части длинного документа и вопрос, создай окончательный ответ с ссылками (\"SOURCES\").\nЕсли ты не знаешь ответа, просто скажи, что не знаешь. Не пытайся выдумать ответ.\nВСЕГДА возвращай часть \"SOURCES\" в своем ответе.\n\nQuestion: Какое государственное/страновое законодательство регулирует толкование контракта?\n=========\nСодержание: Это Соглашение регулируется английским законодательством, и стороны подчиняются исключительной юрисдикции английских судов в отношении любого спора (контрактного или внеконтрактного) по данному Соглашению, за исключением того, что любая из сторон может обратиться в любой суд за получением судебного запрета или иного средства защиты своих прав интеллектуальной собственности.\nИсточник: 28-pl\nСодержание: Неотзыв. Несоблюдение или задержка в осуществлении любого права или средства правовой защиты по данному Соглашению не составляет отказа от такого (или любого другого) права или средства правовой защиты.\n\n11.7 Разделимость. Недействительность, незаконность или неосуществимость любого условия (или его части) данного Соглашения не влияет на продолжение действия остатка условия (если таковой имеется) и данного Соглашения.\n\n11.8 Нет агентства. За исключением случаев, прямо указанных в противном случае, ничто в данном Соглашении не создает агентства, партнерства или совместного предприятия любого рода между сторонами.\n\n11.9 Нет третьих лиц-бенефициаров.\nИсточник: 30-pl\nСодержание: (b) если Google верит, в доброй вере, что Дистрибьютор нарушил или заставил Google нарушить любые Антикоррупционные законы (как определено в пункте 8.5) или что такое нарушение вполне вероятно,\nИсточник: 4-pl\n=========\nFINAL ANSWER: Это Соглашение регулируется английским законодательством.\nSOURCES: 28-pl\n\nQuestion: {question}\n=========\n{summaries}\n=========\nFINAL ANSWER:", "context", "page_content", "Содержание: {page_content}\nИсточник: {source}" ]
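The objects above are plain PromptTemplate instances; a short rendering sketch with made-up context and question strings:

from langchain.chains.qa_with_sources.map_reduce_prompt import (
    EXAMPLE_PROMPT,
    QUESTION_PROMPT,
)

# The sample inputs below are invented purely for illustration.
print(
    QUESTION_PROMPT.format(
        context="Договор регулируется английским правом.",
        question="Каким правом регулируется договор?",
    )
)
print(EXAMPLE_PROMPT.format(page_content="...", source="28-pl"))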
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~agents~output_parsers~xml.py
from typing import Union from langchain.agents import AgentOutputParser from langchain.schema import AgentAction, AgentFinish class XMLAgentOutputParser(AgentOutputParser): """Parses tool invocations and final answers in XML format. Expects output to be in one of two formats. If the output signals that an action should be taken, should be in the below format. This will result in an AgentAction being returned. ``` <tool>search</tool> <tool_input>what is 2 + 2</tool_input> ``` If the output signals that a final answer should be given, should be in the below format. This will result in an AgentFinish being returned. ``` <final_answer>Foo</final_answer> ``` """ def parse(self, text: str) -> Union[AgentAction, AgentFinish]: if "</tool>" in text: tool, tool_input = text.split("</tool>") _tool = tool.split("<tool>")[1] _tool_input = tool_input.split("<tool_input>")[1] if "</tool_input>" in _tool_input: _tool_input = _tool_input.split("</tool_input>")[0] return AgentAction(tool=_tool, tool_input=_tool_input, log=text) elif "<final_answer>" in text: _, answer = text.split("<final_answer>") if "</final_answer>" in answer: answer = answer.split("</final_answer>")[0] return AgentFinish(return_values={"output": answer}, log=text) else: raise ValueError def get_format_instructions(self) -> str: raise NotImplementedError @property def _type(self) -> str: return "xml-agent"
[]
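A small sketch exercising both documented formats of XMLAgentOutputParser; the input strings are illustrative only:

from langchain.agents.output_parsers.xml import XMLAgentOutputParser

parser = XMLAgentOutputParser()

# Tool invocation -> AgentAction
action = parser.parse("<tool>search</tool><tool_input>what is 2 + 2</tool_input>")
print(action.tool, action.tool_input)

# Final answer -> AgentFinish
finish = parser.parse("<final_answer>Foo</final_answer>")
print(finish.return_values["output"])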
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~tools~file_management~list_dir.py
import os from typing import Optional, Type from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.pydantic_v1 import BaseModel, Field from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class DirectoryListingInput(BaseModel): """Input for ListDirectoryTool.""" dir_path: str = Field(default=".", description="Subdirectory to list.") class ListDirectoryTool(BaseFileToolMixin, BaseTool): """Tool that lists files and directories in a specified folder.""" name: str = "list_directory" args_schema: Type[BaseModel] = DirectoryListingInput description: str = "List files and directories in a specified folder" def _run( self, dir_path: str = ".", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: dir_path_ = self.get_relative_path(dir_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path) try: entries = os.listdir(dir_path_) if entries: return "\n".join(entries) else: return f"No files found in directory {dir_path}" except Exception as e: return "Error: " + str(e) # TODO: Add aiofiles method
[ "List files and directories in a specified folder" ]
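A usage sketch for ListDirectoryTool; the root_dir value is a placeholder that sandboxes file access to one directory:

from langchain.tools.file_management.list_dir import ListDirectoryTool

tool = ListDirectoryTool(root_dir="/tmp/sandbox")  # placeholder sandbox path
print(tool.run({"dir_path": "."}))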
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~vectorstores~lancedb.py
from __future__ import annotations import uuid from typing import Any, Iterable, List, Optional from langchain.docstore.document import Document from langchain.schema.embeddings import Embeddings from langchain.schema.vectorstore import VectorStore class LanceDB(VectorStore): """`LanceDB` vector store. To use, you should have ``lancedb`` python package installed. Example: .. code-block:: python db = lancedb.connect('./lancedb') table = db.open_table('my_table') vectorstore = LanceDB(table, embedding_function) vectorstore.add_texts(['text1', 'text2']) result = vectorstore.similarity_search('text1') """ def __init__( self, connection: Any, embedding: Embeddings, vector_key: Optional[str] = "vector", id_key: Optional[str] = "id", text_key: Optional[str] = "text", ): """Initialize with Lance DB connection""" try: import lancedb except ImportError: raise ImportError( "Could not import lancedb python package. " "Please install it with `pip install lancedb`." ) if not isinstance(connection, lancedb.db.LanceTable): raise ValueError( "connection should be an instance of lancedb.db.LanceTable, ", f"got {type(connection)}", ) self._connection = connection self._embedding = embedding self._vector_key = vector_key self._id_key = id_key self._text_key = text_key @property def embeddings(self) -> Embeddings: return self._embedding def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Turn texts into embedding and add it to the database Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Returns: List of ids of the added texts. """ # Embed texts and create documents docs = [] ids = ids or [str(uuid.uuid4()) for _ in texts] embeddings = self._embedding.embed_documents(list(texts)) for idx, text in enumerate(texts): embedding = embeddings[idx] metadata = metadatas[idx] if metadatas else {} docs.append( { self._vector_key: embedding, self._id_key: ids[idx], self._text_key: text, **metadata, } ) self._connection.add(docs) return ids def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return documents most similar to the query Args: query: String to query the vectorstore with. k: Number of documents to return. Returns: List of documents most similar to the query. """ embedding = self._embedding.embed_query(query) docs = self._connection.search(embedding).limit(k).to_df() return [ Document( page_content=row[self._text_key], metadata=row[docs.columns != self._text_key], ) for _, row in docs.iterrows() ] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, connection: Any = None, vector_key: Optional[str] = "vector", id_key: Optional[str] = "id", text_key: Optional[str] = "text", **kwargs: Any, ) -> LanceDB: instance = LanceDB( connection, embedding, vector_key, id_key, text_key, ) instance.add_texts(texts, metadatas=metadatas, **kwargs) return instance
[]
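A minimal sketch for the LanceDB store, assuming the lancedb package is installed; it seeds a table with the expected vector/id/text columns and uses FakeEmbeddings so the example stays self-contained (the path and table name are placeholders):

import lancedb

from langchain.embeddings.fake import FakeEmbeddings
from langchain.vectorstores.lancedb import LanceDB

embeddings = FakeEmbeddings(size=32)
db = lancedb.connect("/tmp/lancedb")  # placeholder location
table = db.create_table(
    "my_table",
    data=[{"vector": embeddings.embed_query("seed"), "id": "0", "text": "seed"}],
    mode="overwrite",
)

store = LanceDB(table, embeddings)
store.add_texts(["hello world", "goodbye world"])
print(store.similarity_search("hello", k=1))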
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~arxiv.py
from typing import Any, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utilities.arxiv import ArxivAPIWrapper class ArxivLoader(BaseLoader): """Load a query result from `Arxiv`. The loader converts the original PDF format into the text. Args: Supports all arguments of `ArxivAPIWrapper`. """ def __init__( self, query: str, doc_content_chars_max: Optional[int] = None, **kwargs: Any ): self.query = query self.client = ArxivAPIWrapper( doc_content_chars_max=doc_content_chars_max, **kwargs ) def load(self) -> List[Document]: return self.client.load(self.query)
[]
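A usage sketch for ArxivLoader; the query string is arbitrary and load_max_docs is forwarded to ArxivAPIWrapper:

from langchain.document_loaders.arxiv import ArxivLoader

docs = ArxivLoader(query="quantum computing", load_max_docs=1).load()
print(docs[0].metadata)
print(docs[0].page_content[:200])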
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~tools~playwright~navigate_back.py
from __future__ import annotations from typing import Optional, Type from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.pydantic_v1 import BaseModel from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) class NavigateBackTool(BaseBrowserTool): """Navigate back to the previous page in the browser history.""" name: str = "previous_webpage" description: str = "Navigate back to the previous page in the browser history" args_schema: Type[BaseModel] = BaseModel def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) response = page.go_back() if response: return ( f"Navigated back to the previous page with URL '{response.url}'." f" Status code {response.status}" ) else: return "Unable to navigate back; no previous page in the history" async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) response = await page.go_back() if response: return ( f"Navigated back to the previous page with URL '{response.url}'." f" Status code {response.status}" ) else: return "Unable to navigate back; no previous page in the history"
[ "Navigate back to the previous page in the browser history" ]
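A sketch for NavigateBackTool, assuming playwright is installed with its browsers downloaded; with a freshly created browser there is no history yet, so the tool simply reports that it cannot go back:

from langchain.tools.playwright.navigate_back import NavigateBackTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

browser = create_sync_playwright_browser()
tool = NavigateBackTool.from_browser(sync_browser=browser)
# Expected: "Unable to navigate back; no previous page in the history"
print(tool.run({}))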
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~aleph_alpha.py
from typing import Any, Dict, List, Optional, Sequence from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import Extra, root_validator from langchain.utils import get_from_dict_or_env class AlephAlpha(LLM): """Aleph Alpha large language models. To use, you should have the ``aleph_alpha_client`` python package installed, and the environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Parameters are explained more in depth here: https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10 Example: .. code-block:: python from langchain.llms import AlephAlpha aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = "luminous-base" """Model name to use.""" maximum_tokens: int = 64 """The maximum number of tokens to be generated.""" temperature: float = 0.0 """A non-negative float that tunes the degree of randomness in generation.""" top_k: int = 0 """Number of most likely tokens to consider at each step.""" top_p: float = 0.0 """Total probability mass of tokens to consider at each step.""" presence_penalty: float = 0.0 """Penalizes repeated tokens.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency.""" repetition_penalties_include_prompt: Optional[bool] = False """Flag deciding whether presence penalty or frequency penalty are updated from the prompt.""" use_multiplicative_presence_penalty: Optional[bool] = False """Flag deciding whether presence penalty is applied multiplicatively (True) or additively (False).""" penalty_bias: Optional[str] = None """Penalty bias for the completion.""" penalty_exceptions: Optional[List[str]] = None """List of strings that may be generated without penalty, regardless of other penalty settings""" penalty_exceptions_include_stop_sequences: Optional[bool] = None """Should stop_sequences be included in penalty_exceptions.""" best_of: Optional[int] = None """returns the one with the "best of" results (highest log probability per token) """ n: int = 1 """How many completions to generate for each prompt.""" logit_bias: Optional[Dict[int, float]] = None """The logit bias allows to influence the likelihood of generating tokens.""" log_probs: Optional[int] = None """Number of top log probabilities to be returned for each generated token.""" tokens: Optional[bool] = False """return tokens of completion.""" disable_optimizations: Optional[bool] = False minimum_tokens: Optional[int] = 0 """Generate at least this number of tokens.""" echo: bool = False """Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False sequence_penalty: float = 0.0 sequence_penalty_min_length: int = 2 use_multiplicative_sequence_penalty: bool = False completion_bias_inclusion: Optional[Sequence[str]] = None completion_bias_inclusion_first_token_only: bool = False completion_bias_exclusion: Optional[Sequence[str]] = None completion_bias_exclusion_first_token_only: bool = False """Only consider the first token for the completion_bias_exclusion.""" contextual_control_threshold: Optional[float] = None """If set to None, attention control parameters only apply to those tokens that have explicitly been set in the request. If set to a non-None value, control parameters are also applied to similar tokens. 
""" control_log_additive: Optional[bool] = True """True: apply control by adding the log(control_factor) to attention scores. False: (attention_scores - - attention_scores.min(-1)) * control_factor """ repetition_penalties_include_completion: bool = True """Flag deciding whether presence penalty or frequency penalty are updated from the completion.""" raw_completion: bool = False """Force the raw completion of the model to be returned.""" stop_sequences: Optional[List[str]] = None """Stop sequences to use.""" # Client params aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" host: str = "https://api.aleph-alpha.com" """The hostname of the API host. The default one is "https://api.aleph-alpha.com")""" hosting: Optional[str] = None """Determines in which datacenters the request may be processed. You can either set the parameter to "aleph-alpha" or omit it (defaulting to None). Not setting this value, or setting it to None, gives us maximal flexibility in processing your request in our own datacenters and on servers hosted with other providers. Choose this option for maximal availability. Setting it to "aleph-alpha" allows us to only process the request in our own datacenters. Choose this option for maximal data privacy.""" request_timeout_seconds: int = 305 """Client timeout that will be set for HTTP requests in the `requests` library's API calls. Server will close all requests after 300 seconds with an internal server error.""" total_retries: int = 8 """The number of retries made in case requests fail with certain retryable status codes. If the last retry fails a corresponding exception is raised. Note, that between retries an exponential backoff is applied, starting with 0.5 s after the first retry and doubling for each retry made. So with the default setting of 8 retries a total wait time of 63.5 s is added between the retries.""" nice: bool = False """Setting this to True, will signal to the API that you intend to be nice to other users by de-prioritizing your request below concurrent ones.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: from aleph_alpha_client import Client values["client"] = Client( token=aleph_alpha_api_key, host=values["host"], hosting=values["hosting"], request_timeout_seconds=values["request_timeout_seconds"], total_retries=values["total_retries"], nice=values["nice"], ) except ImportError: raise ImportError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Aleph Alpha API.""" return { "maximum_tokens": self.maximum_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "n": self.n, "repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501 "use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501 "penalty_bias": self.penalty_bias, "penalty_exceptions": self.penalty_exceptions, "penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501 "best_of": self.best_of, "logit_bias": self.logit_bias, "log_probs": self.log_probs, "tokens": self.tokens, "disable_optimizations": self.disable_optimizations, "minimum_tokens": self.minimum_tokens, "echo": self.echo, "use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501 "sequence_penalty": self.sequence_penalty, "sequence_penalty_min_length": self.sequence_penalty_min_length, "use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501 "completion_bias_inclusion": self.completion_bias_inclusion, "completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501 "completion_bias_exclusion": self.completion_bias_exclusion, "completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501 "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, "repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501 "raw_completion": self.raw_completion, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "aleph_alpha" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Aleph Alpha's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = aleph_alpha("Tell me a joke.") """ from aleph_alpha_client import CompletionRequest, Prompt params = self._default_params if self.stop_sequences is not None and stop is not None: raise ValueError( "stop sequences found in both the input and default params." ) elif self.stop_sequences is not None: params["stop_sequences"] = self.stop_sequences else: params["stop_sequences"] = stop params = {**params, **kwargs} request = CompletionRequest(prompt=Prompt.from_text(prompt), **params) response = self.client.complete(model=self.model, request=request) text = response.completions[0].completion # If stop tokens are provided, Aleph Alpha's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop_sequences is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text if __name__ == "__main__": aa = AlephAlpha() print(aa("How are you?"))
[]
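A usage sketch for the AlephAlpha LLM, assuming the aleph_alpha_client package is installed; the API key is a placeholder:

from langchain.llms import AlephAlpha

llm = AlephAlpha(
    model="luminous-base",
    maximum_tokens=32,
    stop_sequences=["Q:"],
    aleph_alpha_api_key="YOUR_API_KEY",  # placeholder token
)
print(llm("Q: What is the capital of France? A:"))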
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~chat_loaders~whatsapp.py
import logging import os import re import zipfile from typing import Iterator, List, Union from langchain.chat_loaders.base import BaseChatLoader from langchain.schema import AIMessage, HumanMessage from langchain.schema.chat import ChatSession logger = logging.getLogger(__name__) class WhatsAppChatLoader(BaseChatLoader): """Load `WhatsApp` conversations from a dump zip file or directory.""" def __init__(self, path: str): """Initialize the WhatsAppChatLoader. Args: path (str): Path to the exported WhatsApp chat zip directory, folder, or file. To generate the dump, open the chat, click the three dots in the top right corner, and select "More". Then select "Export chat" and choose "Without media". """ self.path = path ignore_lines = [ "This message was deleted", "<Media omitted>", "image omitted", "Messages and calls are end-to-end encrypted. No one outside of this chat," " not even WhatsApp, can read or listen to them.", ] self._ignore_lines = re.compile( r"(" + "|".join([r"\u200E*" + line for line in ignore_lines]) + r")", flags=re.IGNORECASE, ) self._message_line_regex = re.compile( r"\u200E*\[?(\d{1,2}/\d{1,2}/\d{2,4}, \d{1,2}:\d{2}:\d{2} (?:AM|PM))\]?[ \u200E]*([^:]+): (.+)", # noqa flags=re.IGNORECASE, ) def _load_single_chat_session(self, file_path: str) -> ChatSession: """Load a single chat session from a file. Args: file_path (str): Path to the chat file. Returns: ChatSession: The loaded chat session. """ with open(file_path, "r", encoding="utf-8") as file: txt = file.read() # Split messages by newlines, but keep multi-line messages grouped chat_lines: List[str] = [] current_message = "" for line in txt.split("\n"): if self._message_line_regex.match(line): if current_message: chat_lines.append(current_message) current_message = line else: current_message += " " + line.strip() if current_message: chat_lines.append(current_message) results: List[Union[HumanMessage, AIMessage]] = [] for line in chat_lines: result = self._message_line_regex.match(line.strip()) if result: timestamp, sender, text = result.groups() if not self._ignore_lines.match(text.strip()): results.append( HumanMessage( role=sender, content=text, additional_kwargs={ "sender": sender, "events": [{"message_time": timestamp}], }, ) ) else: logger.debug(f"Could not parse line: {line}") return ChatSession(messages=results) def _iterate_files(self, path: str) -> Iterator[str]: """Iterate over the files in a directory or zip file. Args: path (str): Path to the directory or zip file. Yields: str: The path to each file. """ if os.path.isfile(path): yield path elif os.path.isdir(path): for root, _, files in os.walk(path): for file in files: if file.endswith(".txt"): yield os.path.join(root, file) elif zipfile.is_zipfile(path): with zipfile.ZipFile(path) as zip_file: for file in zip_file.namelist(): if file.endswith(".txt"): yield zip_file.extract(file) def lazy_load(self) -> Iterator[ChatSession]: """Lazy load the messages from the chat file and yield them as chat sessions. Yields: Iterator[ChatSession]: The loaded chat sessions. """ yield self._load_single_chat_session(self.path)
[]
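A usage sketch for WhatsAppChatLoader; the path is a placeholder for an exported chat (.txt file, folder, or zip):

from langchain.chat_loaders.whatsapp import WhatsAppChatLoader

loader = WhatsAppChatLoader(path="./whatsapp_chat.txt")  # placeholder path
for session in loader.lazy_load():
    print(len(session["messages"]), "messages parsed")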
2024-01-10
ai-forever/gigachain
libs~langchain~tests~unit_tests~document_loaders~parsers~language~test_javascript.py
import unittest import pytest from langchain.document_loaders.parsers.language.javascript import JavaScriptSegmenter @pytest.mark.requires("esprima") class TestJavaScriptSegmenter(unittest.TestCase): def setUp(self) -> None: self.example_code = """const os = require('os'); function hello(text) { console.log(text); } class Simple { constructor() { this.a = 1; } } hello("Hello!");""" self.expected_simplified_code = """const os = require('os'); // Code for: function hello(text) { // Code for: class Simple { hello("Hello!");""" self.expected_extracted_code = [ "function hello(text) {\n console.log(text);\n}", "class Simple {\n constructor() {\n this.a = 1;\n }\n}", ] def test_extract_functions_classes(self) -> None: segmenter = JavaScriptSegmenter(self.example_code) extracted_code = segmenter.extract_functions_classes() self.assertEqual(extracted_code, self.expected_extracted_code) def test_simplify_code(self) -> None: segmenter = JavaScriptSegmenter(self.example_code) simplified_code = segmenter.simplify_code() self.assertEqual(simplified_code, self.expected_simplified_code)
[]
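The segmenter can also be used directly outside the test, assuming the esprima package is installed:

from langchain.document_loaders.parsers.language.javascript import JavaScriptSegmenter

code = "function hello(text) {\n  console.log(text);\n}\nhello('Hi');"
segmenter = JavaScriptSegmenter(code)
print(segmenter.extract_functions_classes())
print(segmenter.simplify_code())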
2024-01-10
ai-forever/gigachain
libs~langchain~tests~unit_tests~storage~test_upstash_redis.py
"""Light weight unit test that attempts to import UpstashRedisStore. """ import pytest @pytest.mark.requires("upstash_redis") def test_import_storage() -> None: from langchain.storage.upstash_redis import UpstashRedisStore # noqa
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~sharepoint.py
"""Loader that loads data from Sharepoint Document Library""" from __future__ import annotations from typing import Iterator, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base_o365 import ( O365BaseLoader, _FileType, ) from langchain.document_loaders.parsers.registry import get_parser from langchain.pydantic_v1 import Field class SharePointLoader(O365BaseLoader): """Load from `SharePoint`.""" document_library_id: str = Field(...) """ The ID of the SharePoint document library to load data from.""" folder_path: Optional[str] = None """ The path to the folder to load data from.""" object_ids: Optional[List[str]] = None """ The IDs of the objects to load data from.""" @property def _file_types(self) -> Sequence[_FileType]: """Return supported file types.""" return _FileType.DOC, _FileType.DOCX, _FileType.PDF @property def _scopes(self) -> List[str]: """Return required scopes.""" return ["sharepoint", "basic"] def lazy_load(self) -> Iterator[Document]: """Load documents lazily. Use this when working at a large scale.""" try: from O365.drive import Drive, Folder except ImportError: raise ImportError( "O365 package not found, please install it with `pip install o365`" ) drive = self._auth().storage().get_drive(self.document_library_id) if not isinstance(drive, Drive): raise ValueError(f"There isn't a Drive with id {self.document_library_id}.") blob_parser = get_parser("default") if self.folder_path: target_folder = drive.get_item_by_path(self.folder_path) if not isinstance(target_folder, Folder): raise ValueError(f"There isn't a folder with path {self.folder_path}.") for blob in self._load_from_folder(target_folder): yield from blob_parser.lazy_parse(blob) if self.object_ids: for blob in self._load_from_object_ids(drive, self.object_ids): yield from blob_parser.lazy_parse(blob) def load(self) -> List[Document]: """Load all documents.""" return list(self.lazy_load())
[]
2024-01-10
ai-forever/gigachain
libs~langchain~tests~integration_tests~document_loaders~test_nuclia.py
import json import os from typing import Any from unittest import mock from langchain.document_loaders.nuclia import NucliaLoader from langchain.tools.nuclia.tool import NucliaUnderstandingAPI def fakerun(**args: Any) -> Any: def run(self: Any, **args: Any) -> str: data = { "extracted_text": [{"body": {"text": "Hello World"}}], "file_extracted_data": [{"language": "en"}], "field_metadata": [ { "metadata": { "metadata": { "paragraphs": [ {"end": 66, "sentences": [{"start": 1, "end": 67}]} ] } } } ], } return json.dumps(data) return run @mock.patch.dict(os.environ, {"NUCLIA_NUA_KEY": "_a_key_"}) def test_nuclia_loader() -> None: with mock.patch( "langchain.tools.nuclia.tool.NucliaUnderstandingAPI._run", new_callable=fakerun ): nua = NucliaUnderstandingAPI(enable_ml=False) loader = NucliaLoader("/whatever/file.mp3", nua) docs = loader.load() assert len(docs) == 1 assert docs[0].page_content == "Hello World" assert docs[0].metadata["file"]["language"] == "en" assert ( len(docs[0].metadata["metadata"]["metadata"]["metadata"]["paragraphs"]) == 1 )
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~agents~output_parsers~react_single_input.py
import re from typing import Union from langchain.agents.agent import AgentOutputParser from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS from langchain.schema import AgentAction, AgentFinish, OutputParserException FINAL_ANSWER_ACTION = "Final Answer:" MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = ( "Invalid Format: Missing 'Action:' after 'Thought:" ) MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = ( "Invalid Format: Missing 'Action Input:' after 'Action:'" ) FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = ( "Parsing LLM output produced both a final answer and a parse-able action:" ) class ReActSingleInputOutputParser(AgentOutputParser): """Parses ReAct-style LLM calls that have a single tool input. Expects output to be in one of two formats. If the output signals that an action should be taken, should be in the below format. This will result in an AgentAction being returned. ``` Thought: agent thought here Action: search Action Input: what is the temperature in SF? ``` If the output signals that a final answer should be given, should be in the below format. This will result in an AgentFinish being returned. ``` Thought: agent thought here Final Answer: The temperature is 100 degrees ``` """ def get_format_instructions(self) -> str: return FORMAT_INSTRUCTIONS def parse(self, text: str) -> Union[AgentAction, AgentFinish]: includes_answer = FINAL_ANSWER_ACTION in text regex = ( r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" ) action_match = re.search(regex, text, re.DOTALL) if action_match: if includes_answer: raise OutputParserException( f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}" ) action = action_match.group(1).strip() action_input = action_match.group(2) tool_input = action_input.strip(" ") tool_input = tool_input.strip('"') return AgentAction(action, tool_input, text) elif includes_answer: return AgentFinish( {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text ) if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL): raise OutputParserException( f"Could not parse LLM output: `{text}`", observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=text, send_to_llm=True, ) elif not re.search( r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL ): raise OutputParserException( f"Could not parse LLM output: `{text}`", observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text, send_to_llm=True, ) else: raise OutputParserException(f"Could not parse LLM output: `{text}`") @property def _type(self) -> str: return "react-single-input"
[]
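A sketch feeding a ReAct-style completion through the parser; the text is illustrative:

from langchain.agents.output_parsers.react_single_input import (
    ReActSingleInputOutputParser,
)

parser = ReActSingleInputOutputParser()
result = parser.parse(
    "Thought: I need to check the weather\n"
    "Action: search\n"
    "Action Input: what is the temperature in SF?"
)
print(result.tool, result.tool_input)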
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~tools~file_management~delete.py
import os from typing import Optional, Type from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.pydantic_v1 import BaseModel, Field from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileDeleteInput(BaseModel): """Input for DeleteFileTool.""" file_path: str = Field(..., description="Path of the file to delete") class DeleteFileTool(BaseFileToolMixin, BaseTool): """Tool that deletes a file.""" name: str = "file_delete" args_schema: Type[BaseModel] = FileDeleteInput description: str = "Delete a file" def _run( self, file_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: file_path_ = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) if not file_path_.exists(): return f"Error: no such file or directory: {file_path}" try: os.remove(file_path_) return f"File deleted successfully: {file_path}." except Exception as e: return "Error: " + str(e) # TODO: Add aiofiles method
[ "Delete a file" ]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~utilities~wikipedia.py
"""Util that calls Wikipedia.""" import logging from typing import Any, Dict, List, Optional from langchain.pydantic_v1 import BaseModel, root_validator from langchain.schema import Document logger = logging.getLogger(__name__) WIKIPEDIA_MAX_QUERY_LENGTH = 300 class WikipediaAPIWrapper(BaseModel): """Wrapper around WikipediaAPI. To use, you should have the ``wikipedia`` python package installed. This wrapper will use the Wikipedia API to conduct searches and fetch page summaries. By default, it will return the page summaries of the top-k results. It limits the Document content by doc_content_chars_max. """ wiki_client: Any #: :meta private: top_k_results: int = 3 lang: str = "ru" load_all_available_meta: bool = False doc_content_chars_max: int = 4000 @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: import wikipedia wikipedia.set_lang(values["lang"]) values["wiki_client"] = wikipedia except ImportError: raise ImportError( "Could not import wikipedia python package. " "Please install it with `pip install wikipedia`." ) return values def run(self, query: str) -> str: """Run Wikipedia search and get page summaries.""" page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH]) summaries = [] for page_title in page_titles[: self.top_k_results]: if wiki_page := self._fetch_page(page_title): if summary := self._formatted_page_summary(page_title, wiki_page): summaries.append(summary) if not summaries: return "No good Wikipedia Search Result was found" return "\n\n".join(summaries)[: self.doc_content_chars_max] @staticmethod def _formatted_page_summary(page_title: str, wiki_page: Any) -> Optional[str]: return f"Page: {page_title}\nSummary: {wiki_page.summary}" def _page_to_document(self, page_title: str, wiki_page: Any) -> Document: main_meta = { "title": page_title, "summary": wiki_page.summary, "source": wiki_page.url, } add_meta = ( { "categories": wiki_page.categories, "page_url": wiki_page.url, "image_urls": wiki_page.images, "related_titles": wiki_page.links, "parent_id": wiki_page.parent_id, "references": wiki_page.references, "revision_id": wiki_page.revision_id, "sections": wiki_page.sections, } if self.load_all_available_meta else {} ) doc = Document( page_content=wiki_page.content[: self.doc_content_chars_max], metadata={ **main_meta, **add_meta, }, ) return doc def _fetch_page(self, page: str) -> Optional[str]: try: return self.wiki_client.page(title=page, auto_suggest=False) except ( self.wiki_client.exceptions.PageError, self.wiki_client.exceptions.DisambiguationError, ): return None def load(self, query: str) -> List[Document]: """ Run Wikipedia search and get the article text plus the meta information. See Returns: a list of documents. """ page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH]) docs = [] for page_title in page_titles[: self.top_k_results]: if wiki_page := self._fetch_page(page_title): if doc := self._page_to_document(page_title, wiki_page): docs.append(doc) return docs
[]
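A usage sketch for the wrapper, assuming the wikipedia package is installed; note that this fork defaults lang to "ru", so the example sets the language explicitly:

from langchain.utilities.wikipedia import WikipediaAPIWrapper

wiki = WikipediaAPIWrapper(top_k_results=1, lang="en", doc_content_chars_max=500)
print(wiki.run("Alan Turing"))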
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~evaluation~qa~eval_prompt.py
# flake8: noqa from langchain.prompts import PromptTemplate template = """Ты учитель, проверяющий тест. Тебе дан вопрос, ответ ученика и правильный ответ, и тебе нужно оценить ответ ученика как ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ. Пример формата: ВОПРОС: вопрос здесь ОТВЕТ УЧЕНИКА: ответ ученика здесь ПРАВИЛЬНЫЙ ОТВЕТ: правильный ответ здесь ОЦЕНКА: ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ здесь Оценивай ответы учеников ТОЛЬКО на основе их фактической точности. Игнорируй различия в пунктуации и формулировках между ответом ученика и правильным ответом. Это нормально, если ответ ученика содержит больше информации, чем правильный ответ, при условии, что он не содержит противоречивых утверждений. Начни! ВОПРОС: {query} ОТВЕТ УЧЕНИКА: {result} ПРАВИЛЬНЫЙ ОТВЕТ: {answer} ОЦЕНКА:""" PROMPT = PromptTemplate( input_variables=["query", "result", "answer"], template=template ) context_template = """Ты учитель, проверяющий тест. Тебе дан вопрос, контекст, к которому относится вопрос, и ответ ученика. Тебе нужно оценить ответ ученика как ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ, исходя из контекста. Пример формата: ВОПРОС: вопрос здесь КОНТЕКСТ: контекст, к которому относится вопрос, здесь ОТВЕТ УЧЕНИКА: ответ ученика здесь ОЦЕНКА: ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ здесь Оценивай ответы учеников ТОЛЬКО на основе их фактической точности. Игнорируй различия в пунктуации и формулировках между ответом ученика и правильным ответом. Это нормально, если ответ ученика содержит больше информации, чем правильный ответ, при условии, что он не содержит противоречивых утверждений. Начни! ВОПРОС: {query} КОНТЕКСТ: {context} ОТВЕТ УЧЕНИКА: {result} ОЦЕНКА:""" CONTEXT_PROMPT = PromptTemplate( input_variables=["query", "context", "result"], template=context_template ) cot_template = """Ты учитель, проверяющий тест. Тебе дан вопрос, контекст, к которому относится вопрос, и ответ ученика. Тебе нужно оценить ответ ученика как ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ, исходя из контекста. Опиши пошагово свое рассуждение, чтобы убедиться, что твой вывод верен. Избегай простого указания правильного ответа сразу. Пример формата: ВОПРОС: вопрос здесь КОНТЕКСТ: контекст, к которому относится вопрос, здесь ОТВЕТ УЧЕНИКА: ответ ученика здесь ОБЪЯСНЕНИЕ: пошаговое рассуждение здесь ОЦЕНКА: ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ здесь Оценивай ответы учеников ТОЛЬКО на основе их фактической точности. Игнорируй различия в пунктуации и формулировках между ответом ученика и правильным ответом. Это нормально, если ответ ученика содержит больше информации, чем правильный ответ, при условии, что он не содержит противоречивых утверждений. Начни! ВОПРОС: {query} КОНТЕКСТ: {context} ОТВЕТ УЧЕНИКА: {result} ОБЪЯСНЕНИЕ:""" COT_PROMPT = PromptTemplate( input_variables=["query", "context", "result"], template=cot_template ) template = """Ты сравниваешь представленный ответ с ответом эксперта на заданный вопрос по SQL. Вот данные: [НАЧАЛО ДАННЫХ] *** [Вопрос]: {query} *** [Эксперт]: {answer} *** [Ответ]: {result} *** [КОНЕЦ ДАННЫХ] Сравни содержание и правильность представленного SQL с ответом эксперта. Игнорируй любые различия в пробелах, стиле или именах столбцов вывода. Представленный ответ может быть либо правильным, либо неправильным. Определи, какой случай применим. Сначала подробно объясни сходства или различия между ответом эксперта и представленным ответом, игнорируя поверхностные аспекты, такие как пробелы, стиль или имена столбцов вывода. Не указывай окончательный ответ в своем первоначальном объяснении. 
Затем ответь либо "ПРАВИЛЬНЫЙ", либо "НЕПРАВИЛЬНЫЙ" (без кавычек или знаков препинания) на отдельной строке. Это должно соответствовать тому, являются ли представленный SQL и ответ эксперта семантически одинаковыми или различными. Затем повтори свой окончательный ответ на новой строке.""" SQL_PROMPT = PromptTemplate( input_variables=["query", "answer", "result"], template=template )
[ "Ты сравниваешь представленный ответ с ответом эксперта на заданный вопрос по SQL. Вот данные:\n[НАЧАЛО ДАННЫХ]\n***\n[Вопрос]: {query}\n***\n[Эксперт]: {answer}\n***\n[Ответ]: {result}\n***\n[КОНЕЦ ДАННЫХ]\nСравни содержание и правильность представленного SQL с ответом эксперта. Игнорируй любые различия в пробелах, стиле или именах столбцов вывода. Представленный ответ может быть либо правильным, либо неправильным. Определи, какой случай применим. Сначала подробно объясни сходства или различия между ответом эксперта и представленным ответом, игнорируя поверхностные аспекты, такие как пробелы, стиль или имена столбцов вывода. Не указывай окончательный ответ в своем первоначальном объяснении. Затем ответь либо \"ПРАВИЛЬНЫЙ\", либо \"НЕПРАВИЛЬНЫЙ\" (без кавычек или знаков препинания) на отдельной строке. Это должно соответствовать тому, являются ли представленный SQL и ответ эксперта семантически одинаковыми или различными. Затем повтори свой окончательный ответ на новой строке.", "Ты учитель, проверяющий тест.\nТебе дан вопрос, контекст, к которому относится вопрос, и ответ ученика. Тебе нужно оценить ответ ученика как ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ, исходя из контекста.\n\nПример формата:\nВОПРОС: вопрос здесь\nКОНТЕКСТ: контекст, к которому относится вопрос, здесь\nОТВЕТ УЧЕНИКА: ответ ученика здесь\nОЦЕНКА: ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ здесь\n\nОценивай ответы учеников ТОЛЬКО на основе их фактической точности. Игнорируй различия в пунктуации и формулировках между ответом ученика и правильным ответом. Это нормально, если ответ ученика содержит больше информации, чем правильный ответ, при условии, что он не содержит противоречивых утверждений. Начни! \n\nВОПРОС: {query}\nКОНТЕКСТ: {context}\nОТВЕТ УЧЕНИКА: {result}\nОЦЕНКА:", "Ты учитель, проверяющий тест.\nТебе дан вопрос, контекст, к которому относится вопрос, и ответ ученика. Тебе нужно оценить ответ ученика как ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ, исходя из контекста.\nОпиши пошагово свое рассуждение, чтобы убедиться, что твой вывод верен. Избегай простого указания правильного ответа сразу.\n\nПример формата:\nВОПРОС: вопрос здесь\nКОНТЕКСТ: контекст, к которому относится вопрос, здесь\nОТВЕТ УЧЕНИКА: ответ ученика здесь\nОБЪЯСНЕНИЕ: пошаговое рассуждение здесь\nОЦЕНКА: ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ здесь\n\nОценивай ответы учеников ТОЛЬКО на основе их фактической точности. Игнорируй различия в пунктуации и формулировках между ответом ученика и правильным ответом. Это нормально, если ответ ученика содержит больше информации, чем правильный ответ, при условии, что он не содержит противоречивых утверждений. Начни! \n\nВОПРОС: {query}\nКОНТЕКСТ: {context}\nОТВЕТ УЧЕНИКА: {result}\nОБЪЯСНЕНИЕ:", "context", "answer", "Ты учитель, проверяющий тест.\nТебе дан вопрос, ответ ученика и правильный ответ, и тебе нужно оценить ответ ученика как ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ.\n\nПример формата:\nВОПРОС: вопрос здесь\nОТВЕТ УЧЕНИКА: ответ ученика здесь\nПРАВИЛЬНЫЙ ОТВЕТ: правильный ответ здесь\nОЦЕНКА: ПРАВИЛЬНЫЙ или НЕПРАВИЛЬНЫЙ здесь\n\nОценивай ответы учеников ТОЛЬКО на основе их фактической точности. Игнорируй различия в пунктуации и формулировках между ответом ученика и правильным ответом. Это нормально, если ответ ученика содержит больше информации, чем правильный ответ, при условии, что он не содержит противоречивых утверждений. Начни! \n\nВОПРОС: {query}\nОТВЕТ УЧЕНИКА: {result}\nПРАВИЛЬНЫЙ ОТВЕТ: {answer}\nОЦЕНКА:" ]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~retrievers~remote_retriever.py
from typing import List, Optional import aiohttp import requests from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain.schema import BaseRetriever, Document class RemoteLangChainRetriever(BaseRetriever): """`LangChain API` retriever.""" url: str """URL of the remote LangChain API.""" headers: Optional[dict] = None """Headers to use for the request.""" input_key: str = "message" """Key to use for the input in the request.""" response_key: str = "response" """Key to use for the response in the request.""" page_content_key: str = "page_content" """Key to use for the page content in the response.""" metadata_key: str = "metadata" """Key to use for the metadata in the response.""" def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: response = requests.post( self.url, json={self.input_key: query}, headers=self.headers ) result = response.json() return [ Document( page_content=r[self.page_content_key], metadata=r[self.metadata_key] ) for r in result[self.response_key] ] async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun ) -> List[Document]: async with aiohttp.ClientSession() as session: async with session.request( "POST", self.url, headers=self.headers, json={self.input_key: query} ) as response: result = await response.json() return [ Document( page_content=r[self.page_content_key], metadata=r[self.metadata_key] ) for r in result[self.response_key] ]
[]
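A usage sketch for RemoteLangChainRetriever; the URL is a placeholder for a service that accepts {"message": ...} and returns {"response": [{"page_content": ..., "metadata": ...}, ...]}:

from langchain.retrievers.remote_retriever import RemoteLangChainRetriever

retriever = RemoteLangChainRetriever(url="http://localhost:8000/retrieve")  # placeholder URL
docs = retriever.get_relevant_documents("What is LangChain?")
print(docs)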
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~tools~edenai~ocr_identityparser.py
from __future__ import annotations import logging from typing import Optional from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools.edenai.edenai_base_tool import EdenaiTool logger = logging.getLogger(__name__) class EdenAiParsingIDTool(EdenaiTool): """Tool that queries the Eden AI Identity parsing API. for api reference check edenai documentation: https://docs.edenai.co/reference/ocr_identity_parser_create. To use, you should have the environment variable ``EDENAI_API_KEY`` set with your API token. You can find your token here: https://app.edenai.run/admin/account/settings """ name = "edenai_identity_parsing" description = ( "A wrapper around edenai Services Identity parsing. " "Useful for when you have to extract information from an ID Document " "Input should be the string url of the document to parse." ) feature = "ocr" subfeature = "identity_parser" language: Optional[str] = None """ language of the text passed to the model. """ def _parse_response(self, response: list) -> str: formatted_list: list = [] if len(response) == 1: self._parse_json_multilevel( response[0]["extracted_data"][0], formatted_list ) else: for entry in response: if entry.get("provider") == "eden-ai": self._parse_json_multilevel( entry["extracted_data"][0], formatted_list ) return "\n".join(formatted_list) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" query_params = { "file_url": query, "language": self.language, "attributes_as_list": False, } return self._call_eden_ai(query_params)
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~chat_models~ernie.py
import logging import threading from typing import Any, Dict, List, Mapping, Optional import requests from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.chat_models.base import BaseChatModel from langchain.pydantic_v1 import root_validator from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, ) from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} else: raise ValueError(f"Got unknown type {message}") return message_dict class ErnieBotChat(BaseChatModel): """`ERNIE-Bot` large language model. ERNIE-Bot is a large language model developed by Baidu, covering a huge amount of Chinese data. To use, you should have the `ernie_client_id` and `ernie_client_secret` set, or set the environment variable `ERNIE_CLIENT_ID` and `ERNIE_CLIENT_SECRET`. Note: access_token will be automatically generated based on client_id and client_secret, and will be regenerated after expiration (30 days). Default model is `ERNIE-Bot-turbo`, currently supported models are `ERNIE-Bot-turbo`, `ERNIE-Bot` Example: .. code-block:: python from langchain.chat_models import ErnieBotChat chat = ErnieBotChat(model_name='ERNIE-Bot') """ ernie_api_base: Optional[str] = None """Baidu application custom endpoints""" ernie_client_id: Optional[str] = None """Baidu application client id""" ernie_client_secret: Optional[str] = None """Baidu application client secret""" access_token: Optional[str] = None """access token is generated by client id and client secret, setting this value directly will cause an error""" model_name: str = "ERNIE-Bot-turbo" """model name of ernie, default is `ERNIE-Bot-turbo`. Currently supported `ERNIE-Bot-turbo`, `ERNIE-Bot`""" request_timeout: Optional[int] = 60 """request timeout for chat http requests""" streaming: Optional[bool] = False """streaming mode. 
not supported yet.""" top_p: Optional[float] = 0.8 temperature: Optional[float] = 0.95 penalty_score: Optional[float] = 1 _lock = threading.Lock() @root_validator() def validate_environment(cls, values: Dict) -> Dict: values["ernie_api_base"] = get_from_dict_or_env( values, "ernie_api_base", "ERNIE_API_BASE", "https://aip.baidubce.com" ) values["ernie_client_id"] = get_from_dict_or_env( values, "ernie_client_id", "ERNIE_CLIENT_ID", ) values["ernie_client_secret"] = get_from_dict_or_env( values, "ernie_client_secret", "ERNIE_CLIENT_SECRET", ) return values def _chat(self, payload: object) -> dict: base_url = f"{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/chat" model_paths = { "ERNIE-Bot-turbo": "eb-instant", "ERNIE-Bot": "completions", "ERNIE-Bot-4": "completions_pro", "BLOOMZ-7B": "bloomz_7b1", "Llama-2-7b-chat": "llama_2_7b", "Llama-2-13b-chat": "llama_2_13b", "Llama-2-70b-chat": "llama_2_70b", } if self.model_name in model_paths: url = f"{base_url}/{model_paths[self.model_name]}" else: raise ValueError(f"Got unknown model_name {self.model_name}") resp = requests.post( url, timeout=self.request_timeout, headers={ "Content-Type": "application/json", }, params={"access_token": self.access_token}, json=payload, ) return resp.json() def _refresh_access_token_with_lock(self) -> None: with self._lock: logger.debug("Refreshing access token") base_url: str = f"{self.ernie_api_base}/oauth/2.0/token" resp = requests.post( base_url, timeout=10, headers={ "Content-Type": "application/json", "Accept": "application/json", }, params={ "grant_type": "client_credentials", "client_id": self.ernie_client_id, "client_secret": self.ernie_client_secret, }, ) self.access_token = str(resp.json().get("access_token")) def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: if self.streaming: raise ValueError("`streaming` option currently unsupported.") if not self.access_token: self._refresh_access_token_with_lock() payload = { "messages": [_convert_message_to_dict(m) for m in messages], "top_p": self.top_p, "temperature": self.temperature, "penalty_score": self.penalty_score, **kwargs, } logger.debug(f"Payload for ernie api is {payload}") resp = self._chat(payload) if resp.get("error_code"): if resp.get("error_code") == 111: logger.debug("access_token expired, refresh it") self._refresh_access_token_with_lock() resp = self._chat(payload) else: raise ValueError(f"Error from ErnieChat api response: {resp}") return self._create_chat_result(resp) def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: generations = [ ChatGeneration(message=AIMessage(content=response.get("result"))) ] token_usage = response.get("usage", {}) llm_output = {"token_usage": token_usage, "model_name": self.model_name} return ChatResult(generations=generations, llm_output=llm_output) @property def _llm_type(self) -> str: return "ernie-bot-chat"
[]
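A usage sketch for ErnieBotChat; the client id and secret are placeholders for real Baidu credentials:

from langchain.chat_models import ErnieBotChat
from langchain.schema import HumanMessage

chat = ErnieBotChat(
    model_name="ERNIE-Bot-turbo",
    ernie_client_id="YOUR_CLIENT_ID",          # placeholder
    ernie_client_secret="YOUR_CLIENT_SECRET",  # placeholder
)
print(chat([HumanMessage(content="Hello!")]))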
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~graphs~neo4j_graph.py
from typing import Any, Dict, List, Optional from langchain.graphs.graph_document import GraphDocument from langchain.graphs.graph_store import GraphStore from langchain.utils import get_from_env node_properties_query = """ CALL apoc.meta.data() YIELD label, other, elementType, type, property WHERE NOT type = "RELATIONSHIP" AND elementType = "node" WITH label AS nodeLabels, collect({property:property, type:type}) AS properties RETURN {labels: nodeLabels, properties: properties} AS output """ rel_properties_query = """ CALL apoc.meta.data() YIELD label, other, elementType, type, property WHERE NOT type = "RELATIONSHIP" AND elementType = "relationship" WITH label AS nodeLabels, collect({property:property, type:type}) AS properties RETURN {type: nodeLabels, properties: properties} AS output """ rel_query = """ CALL apoc.meta.data() YIELD label, other, elementType, type, property WHERE type = "RELATIONSHIP" AND elementType = "node" UNWIND other AS other_node RETURN {start: label, type: property, end: toString(other_node)} AS output """ class Neo4jGraph(GraphStore): """Neo4j wrapper for graph operations. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ def __init__( self, url: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, database: str = "neo4j", ) -> None: """Create a new Neo4j graph wrapper instance.""" try: import neo4j except ImportError: raise ValueError( "Could not import neo4j python package. " "Please install it with `pip install neo4j`." ) url = get_from_env("url", "NEO4J_URI", url) username = get_from_env("username", "NEO4J_USERNAME", username) password = get_from_env("password", "NEO4J_PASSWORD", password) database = get_from_env("database", "NEO4J_DATABASE", database) self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password)) self._database = database self.schema: str = "" self.structured_schema: Dict[str, Any] = {} # Verify connection try: self._driver.verify_connectivity() except neo4j.exceptions.ServiceUnavailable: raise ValueError( "Could not connect to Neo4j database. " "Please ensure that the url is correct" ) except neo4j.exceptions.AuthError: raise ValueError( "Could not connect to Neo4j database. " "Please ensure that the username and password are correct" ) # Set schema try: self.refresh_schema() except neo4j.exceptions.ClientError: raise ValueError( "Could not use APOC procedures. 
" "Please ensure the APOC plugin is installed in Neo4j and that " "'apoc.meta.data()' is allowed in Neo4j configuration " ) @property def get_schema(self) -> str: """Returns the schema of the Graph""" return self.schema @property def get_structured_schema(self) -> Dict[str, Any]: """Returns the structured schema of the Graph""" return self.structured_schema def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]: """Query Neo4j database.""" from neo4j.exceptions import CypherSyntaxError with self._driver.session(database=self._database) as session: try: data = session.run(query, params) return [r.data() for r in data] except CypherSyntaxError as e: raise ValueError(f"Generated Cypher Statement is not valid\n{e}") def refresh_schema(self) -> None: """ Refreshes the Neo4j graph schema information. """ node_properties = [el["output"] for el in self.query(node_properties_query)] rel_properties = [el["output"] for el in self.query(rel_properties_query)] relationships = [el["output"] for el in self.query(rel_query)] self.structured_schema = { "node_props": {el["labels"]: el["properties"] for el in node_properties}, "rel_props": {el["type"]: el["properties"] for el in rel_properties}, "relationships": relationships, } self.schema = f""" Node properties are the following: {node_properties} Relationship properties are the following: {rel_properties} The relationships are the following: {[f"(:{el['start']})-[:{el['type']}]->(:{el['end']})" for el in relationships]} """ def add_graph_documents( self, graph_documents: List[GraphDocument], include_source: bool = False ) -> None: """ Take GraphDocument as input as uses it to construct a graph. """ for document in graph_documents: include_docs_query = ( "CREATE (d:Document) " "SET d.text = $document.page_content " "SET d += $document.metadata " "WITH d " ) # Import nodes self.query( ( f"{include_docs_query if include_source else ''}" "UNWIND $data AS row " "CALL apoc.merge.node([row.type], {id: row.id}, " "row.properties, {}) YIELD node " f"{'MERGE (d)-[:MENTIONS]->(node) ' if include_source else ''}" "RETURN distinct 'done' AS result" ), { "data": [el.__dict__ for el in document.nodes], "document": document.source.__dict__, }, ) # Import relationships self.query( "UNWIND $data AS row " "CALL apoc.merge.node([row.source_label], {id: row.source}," "{}, {}) YIELD node as source " "CALL apoc.merge.node([row.target_label], {id: row.target}," "{}, {}) YIELD node as target " "CALL apoc.merge.relationship(source, row.type, " "{}, row.properties, target) YIELD rel " "RETURN distinct 'done'", { "data": [ { "source": el.source.id, "source_label": el.source.type, "target": el.target.id, "target_label": el.target.type, "type": el.type.replace(" ", "_").upper(), "properties": el.properties, } for el in document.relationships ] }, )
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~vectorstores~vespa.py
from __future__ import annotations from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union from langchain.docstore.document import Document from langchain.schema.embeddings import Embeddings from langchain.vectorstores.base import VectorStore, VectorStoreRetriever class VespaStore(VectorStore): """ `Vespa` vector store. To use, you should have the python client library ``pyvespa`` installed. Example: .. code-block:: python from langchain.vectorstores import VespaStore from langchain.embeddings.openai import OpenAIEmbeddings from vespa.application import Vespa # Create a vespa client dependent upon your application, # e.g. either connecting to Vespa Cloud or a local deployment # such as Docker. Please refer to the PyVespa documentation on # how to initialize the client. vespa_app = Vespa(url="...", port=..., application_package=...) # You need to instruct LangChain on which fields to use for embeddings vespa_config = dict( page_content_field="text", embedding_field="embedding", input_field="query_embedding", metadata_fields=["date", "rating", "author"] ) embedding_function = OpenAIEmbeddings() vectorstore = VespaStore(vespa_app, embedding_function, **vespa_config) """ def __init__( self, app: Any, embedding_function: Optional[Embeddings] = None, page_content_field: Optional[str] = None, embedding_field: Optional[str] = None, input_field: Optional[str] = None, metadata_fields: Optional[List[str]] = None, ) -> None: """ Initialize with a PyVespa client. """ try: from vespa.application import Vespa except ImportError: raise ImportError( "Could not import Vespa python package. " "Please install it with `pip install pyvespa`." ) if not isinstance(app, Vespa): raise ValueError( f"app should be an instance of vespa.application.Vespa, got {type(app)}" ) self._vespa_app = app self._embedding_function = embedding_function self._page_content_field = page_content_field self._embedding_field = embedding_field self._input_field = input_field self._metadata_fields = metadata_fields def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """ Add texts to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = None if self._embedding_function is not None: embeddings = self._embedding_function.embed_documents(list(texts)) if ids is None: ids = [str(f"{i+1}") for i, _ in enumerate(texts)] batch = [] for i, text in enumerate(texts): fields: Dict[str, Union[str, List[float]]] = {} if self._page_content_field is not None: fields[self._page_content_field] = text if self._embedding_field is not None and embeddings is not None: fields[self._embedding_field] = embeddings[i] if metadatas is not None and self._metadata_fields is not None: for metadata_field in self._metadata_fields: if metadata_field in metadatas[i]: fields[metadata_field] = metadatas[i][metadata_field] batch.append({"id": ids[i], "fields": fields}) results = self._vespa_app.feed_batch(batch) for result in results: if not (str(result.status_code).startswith("2")): raise RuntimeError( f"Could not add document to Vespa. " f"Error code: {result.status_code}. 
" f"Message: {result.json['message']}" ) return ids def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: if ids is None: return False batch = [{"id": id} for id in ids] result = self._vespa_app.delete_batch(batch) return sum([0 if r.status_code == 200 else 1 for r in result]) == 0 def _create_query( self, query_embedding: List[float], k: int = 4, **kwargs: Any ) -> Dict: hits = k doc_embedding_field = self._embedding_field input_embedding_field = self._input_field ranking_function = kwargs["ranking"] if "ranking" in kwargs else "default" filter = kwargs["filter"] if "filter" in kwargs else None approximate = kwargs["approximate"] if "approximate" in kwargs else False approximate = "true" if approximate else "false" yql = "select * from sources * where " yql += f"{{targetHits: {hits}, approximate: {approximate}}}" yql += f"nearestNeighbor({doc_embedding_field}, {input_embedding_field})" if filter is not None: yql += f" and {filter}" query = { "yql": yql, f"input.query({input_embedding_field})": query_embedding, "ranking": ranking_function, "hits": hits, } return query def similarity_search_by_vector_with_score( self, query_embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """ Performs similarity search from a embeddings vector. Args: query_embedding: Embeddings vector to search for. k: Number of results to return. custom_query: Use this custom query instead default query (kwargs) kwargs: other vector store specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if "custom_query" in kwargs: query = kwargs["custom_query"] else: query = self._create_query(query_embedding, k, **kwargs) try: response = self._vespa_app.query(body=query) except Exception as e: raise RuntimeError( f"Could not retrieve data from Vespa: " f"{e.args[0][0]['summary']}. " f"Error: {e.args[0][0]['message']}" ) if not str(response.status_code).startswith("2"): raise RuntimeError( f"Could not retrieve data from Vespa. " f"Error code: {response.status_code}. 
" f"Message: {response.json['message']}" ) root = response.json["root"] if "errors" in root: import json raise RuntimeError(json.dumps(root["errors"])) if response is None or response.hits is None: return [] docs = [] for child in response.hits: page_content = child["fields"][self._page_content_field] score = child["relevance"] metadata = {"id": child["id"]} if self._metadata_fields is not None: for field in self._metadata_fields: metadata[field] = child["fields"].get(field) doc = Document(page_content=page_content, metadata=metadata) docs.append((doc, score)) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: results = self.similarity_search_by_vector_with_score(embedding, k, **kwargs) return [r[0] for r in results] def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: query_emb = [] if self._embedding_function is not None: query_emb = self._embedding_function.embed_query(query) return self.similarity_search_by_vector_with_score(query_emb, k, **kwargs) def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: results = self.similarity_search_with_score(query, k, **kwargs) return [r[0] for r in results] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: raise NotImplementedError("MMR search not implemented") def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: raise NotImplementedError("MMR search by vector not implemented") @classmethod def from_texts( cls: Type[VespaStore], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> VespaStore: vespa = cls(embedding_function=embedding, **kwargs) vespa.add_texts(texts=texts, metadatas=metadatas, ids=ids) return vespa def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever: return super().as_retriever(**kwargs)
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~reddit.py
from __future__ import annotations from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import praw def _dependable_praw_import() -> praw: try: import praw except ImportError: raise ImportError( "praw package not found, please install it with `pip install praw`" ) return praw class RedditPostsLoader(BaseLoader): """Load `Reddit` posts. Read posts on a subreddit. First, you need to go to https://www.reddit.com/prefs/apps/ and create your application """ def __init__( self, client_id: str, client_secret: str, user_agent: str, search_queries: Sequence[str], mode: str, categories: Sequence[str] = ["new"], number_posts: Optional[int] = 10, ): """ Initialize with client_id, client_secret, user_agent, search_queries, mode, categories, number_posts. Example: https://www.reddit.com/r/learnpython/ Args: client_id: Reddit client id. client_secret: Reddit client secret. user_agent: Reddit user agent. search_queries: The search queries. mode: The mode. categories: The categories. Default: ["new"] number_posts: The number of posts. Default: 10 """ self.client_id = client_id self.client_secret = client_secret self.user_agent = user_agent self.search_queries = search_queries self.mode = mode self.categories = categories self.number_posts = number_posts def load(self) -> List[Document]: """Load reddits.""" praw = _dependable_praw_import() reddit = praw.Reddit( client_id=self.client_id, client_secret=self.client_secret, user_agent=self.user_agent, ) results: List[Document] = [] if self.mode == "subreddit": for search_query in self.search_queries: for category in self.categories: docs = self._subreddit_posts_loader( search_query=search_query, category=category, reddit=reddit ) results.extend(docs) elif self.mode == "username": for search_query in self.search_queries: for category in self.categories: docs = self._user_posts_loader( search_query=search_query, category=category, reddit=reddit ) results.extend(docs) else: raise ValueError( "mode not correct, please enter 'username' or 'subreddit' as mode" ) return results def _subreddit_posts_loader( self, search_query: str, category: str, reddit: praw.reddit.Reddit ) -> Iterable[Document]: subreddit = reddit.subreddit(search_query) method = getattr(subreddit, category) cat_posts = method(limit=self.number_posts) """Format reddit posts into a string.""" for post in cat_posts: metadata = { "post_subreddit": post.subreddit_name_prefixed, "post_category": category, "post_title": post.title, "post_score": post.score, "post_id": post.id, "post_url": post.url, "post_author": post.author, } yield Document( page_content=post.selftext, metadata=metadata, ) def _user_posts_loader( self, search_query: str, category: str, reddit: praw.reddit.Reddit ) -> Iterable[Document]: user = reddit.redditor(search_query) method = getattr(user.submissions, category) cat_posts = method(limit=self.number_posts) """Format reddit posts into a string.""" for post in cat_posts: metadata = { "post_subreddit": post.subreddit_name_prefixed, "post_category": category, "post_title": post.title, "post_score": post.score, "post_id": post.id, "post_url": post.url, "post_author": post.author, } yield Document( page_content=post.selftext, metadata=metadata, )
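A minimal usage sketch for the RedditPostsLoader above; the credentials and user agent are placeholders obtained by registering an app at https://www.reddit.com/prefs/apps/.

.. code-block:: python

    from langchain.document_loaders import RedditPostsLoader

    # Credentials below are placeholders.
    loader = RedditPostsLoader(
        client_id="YOUR_CLIENT_ID",
        client_secret="YOUR_CLIENT_SECRET",
        user_agent="extractor by u/your_username",
        search_queries=["learnpython"],
        mode="subreddit",            # or "username"
        categories=["new", "hot"],
        number_posts=10,
    )
    docs = loader.load()
    print(len(docs))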
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~obs_directory.py
# coding:utf-8 from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.obs_file import OBSFileLoader class OBSDirectoryLoader(BaseLoader): """Load from `Huawei OBS directory`.""" def __init__( self, bucket: str, endpoint: str, config: Optional[dict] = None, prefix: str = "", ): """Initialize the OBSDirectoryLoader with the specified settings. Args: bucket (str): The name of the OBS bucket to be used. endpoint (str): The endpoint URL of your OBS bucket. config (dict): The parameters for connecting to OBS, provided as a dictionary. The dictionary could have the following keys: - "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read). - "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read). - "token" (str, optional): Your security token (required if using temporary credentials). - "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored. prefix (str, optional): The prefix to be added to the OBS key. Defaults to "". Note: Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials. Example: To create a new OBSDirectoryLoader: ``` config = { "ak": "your-access-key", "sk": "your-secret-key" } ``` directory_loader = OBSDirectoryLoader("your-bucket-name", "your-end-endpoint", config, "your-prefix") """ # noqa: E501 try: from obs import ObsClient except ImportError: raise ImportError( "Could not import esdk-obs-python python package. " "Please install it with `pip install esdk-obs-python`." ) if not config: config = dict() if config.get("get_token_from_ecs"): self.client = ObsClient(server=endpoint, security_provider_policy="ECS") else: self.client = ObsClient( access_key_id=config.get("ak"), secret_access_key=config.get("sk"), security_token=config.get("token"), server=endpoint, ) self.bucket = bucket self.prefix = prefix def load(self) -> List[Document]: """Load documents.""" max_num = 1000 mark = None docs = [] while True: resp = self.client.listObjects( self.bucket, prefix=self.prefix, marker=mark, max_keys=max_num ) if resp.status < 300: for content in resp.body.contents: loader = OBSFileLoader(self.bucket, content.key, client=self.client) docs.extend(loader.load()) if resp.body.is_truncated is True: mark = resp.body.next_marker else: break return docs
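A minimal usage sketch for the OBSDirectoryLoader above; the bucket name, endpoint, access keys, and prefix are placeholders, and the `esdk-obs-python` package must be installed.

.. code-block:: python

    from langchain.document_loaders import OBSDirectoryLoader

    # All values below are placeholders.
    config = {"ak": "your-access-key", "sk": "your-secret-key"}
    loader = OBSDirectoryLoader(
        bucket="your-bucket-name",
        endpoint="obs.your-region.myhuaweicloud.com",
        config=config,
        prefix="docs/",
    )
    documents = loader.load()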
[]
2024-01-10
ai-forever/gigachain
libs~langchain~tests~unit_tests~graphs~test_neptune_graph.py
def test_import() -> None: from langchain.graphs import NeptuneGraph # noqa: F401
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~retrievers~kendra.py
import re from abc import ABC, abstractmethod from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Union from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.docstore.document import Document from langchain.pydantic_v1 import BaseModel, Extra, root_validator, validator from langchain.schema import BaseRetriever def clean_excerpt(excerpt: str) -> str: """Clean an excerpt from Kendra. Args: excerpt: The excerpt to clean. Returns: The cleaned excerpt. """ if not excerpt: return excerpt res = re.sub(r"\s+", " ", excerpt).replace("...", "") return res def combined_text(item: "ResultItem") -> str: """Combine a ResultItem title and excerpt into a single string. Args: item: the ResultItem of a Kendra search. Returns: A combined text of the title and excerpt of the given item. """ text = "" title = item.get_title() if title: text += f"Document Title: {title}\n" excerpt = clean_excerpt(item.get_excerpt()) if excerpt: text += f"Document Excerpt: \n{excerpt}\n" return text DocumentAttributeValueType = Union[str, int, List[str], None] """Possible types of a DocumentAttributeValue. Dates are also represented as str. """ # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class Highlight(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Information that highlights the keywords in the excerpt.""" BeginOffset: int """The zero-based location in the excerpt where the highlight starts.""" EndOffset: int """The zero-based location in the excerpt where the highlight ends.""" TopAnswer: Optional[bool] """Indicates whether the result is the best one.""" Type: Optional[str] """The highlight type: STANDARD or THESAURUS_SYNONYM.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class TextWithHighLights(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Text with highlights.""" Text: str """The text.""" Highlights: Optional[Any] """The highlights.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class AdditionalResultAttributeValue( # type: ignore[call-arg] BaseModel, extra=Extra.allow ): """Value of an additional result attribute.""" TextWithHighlightsValue: TextWithHighLights """The text with highlights value.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class AdditionalResultAttribute(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Additional result attribute.""" Key: str """The key of the attribute.""" ValueType: Literal["TEXT_WITH_HIGHLIGHTS_VALUE"] """The type of the value.""" Value: AdditionalResultAttributeValue """The value of the attribute.""" def get_value_text(self) -> str: return self.Value.TextWithHighlightsValue.Text # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class DocumentAttributeValue(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Value of a document attribute.""" DateValue: Optional[str] """The date expressed as an ISO 8601 string.""" LongValue: Optional[int] """The long value.""" StringListValue: Optional[List[str]] """The string list value.""" StringValue: Optional[str] """The string value.""" @property def value(self) -> DocumentAttributeValueType: """The only defined document attribute value or None. According to Amazon Kendra, you can only provide one value for a document attribute. 
""" if self.DateValue: return self.DateValue if self.LongValue: return self.LongValue if self.StringListValue: return self.StringListValue if self.StringValue: return self.StringValue return None # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class DocumentAttribute(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """Document attribute.""" Key: str """The key of the attribute.""" Value: DocumentAttributeValue """The value of the attribute.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class ResultItem(BaseModel, ABC, extra=Extra.allow): # type: ignore[call-arg] """Base class of a result item.""" Id: Optional[str] """The ID of the relevant result item.""" DocumentId: Optional[str] """The document ID.""" DocumentURI: Optional[str] """The document URI.""" DocumentAttributes: Optional[List[DocumentAttribute]] = [] """The document attributes.""" @abstractmethod def get_title(self) -> str: """Document title.""" @abstractmethod def get_excerpt(self) -> str: """Document excerpt or passage original content as retrieved by Kendra.""" def get_additional_metadata(self) -> dict: """Document additional metadata dict. This returns any extra metadata except these: * result_id * document_id * source * title * excerpt * document_attributes """ return {} def get_document_attributes_dict(self) -> Dict[str, DocumentAttributeValueType]: """Document attributes dict.""" return {attr.Key: attr.Value.value for attr in (self.DocumentAttributes or [])} def to_doc( self, page_content_formatter: Callable[["ResultItem"], str] = combined_text ) -> Document: """Converts this item to a Document.""" page_content = page_content_formatter(self) metadata = self.get_additional_metadata() metadata.update( { "result_id": self.Id, "document_id": self.DocumentId, "source": self.DocumentURI, "title": self.get_title(), "excerpt": self.get_excerpt(), "document_attributes": self.get_document_attributes_dict(), } ) return Document(page_content=page_content, metadata=metadata) class QueryResultItem(ResultItem): """Query API result item.""" DocumentTitle: TextWithHighLights """The document title.""" FeedbackToken: Optional[str] """Identifies a particular result from a particular query.""" Format: Optional[str] """ If the Type is ANSWER, then format is either: * TABLE: a table excerpt is returned in TableExcerpt; * TEXT: a text excerpt is returned in DocumentExcerpt. 
""" Type: Optional[str] """Type of result: DOCUMENT or QUESTION_ANSWER or ANSWER""" AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = [] """One or more additional attributes associated with the result.""" DocumentExcerpt: Optional[TextWithHighLights] """Excerpt of the document text.""" def get_title(self) -> str: return self.DocumentTitle.Text def get_attribute_value(self) -> str: if not self.AdditionalAttributes: return "" if not self.AdditionalAttributes[0]: return "" else: return self.AdditionalAttributes[0].get_value_text() def get_excerpt(self) -> str: if ( self.AdditionalAttributes and self.AdditionalAttributes[0].Key == "AnswerText" ): excerpt = self.get_attribute_value() elif self.DocumentExcerpt: excerpt = self.DocumentExcerpt.Text else: excerpt = "" return excerpt def get_additional_metadata(self) -> dict: additional_metadata = {"type": self.Type} return additional_metadata class RetrieveResultItem(ResultItem): """Retrieve API result item.""" DocumentTitle: Optional[str] """The document title.""" Content: Optional[str] """The content of the item.""" def get_title(self) -> str: return self.DocumentTitle or "" def get_excerpt(self) -> str: return self.Content or "" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class QueryResult(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """`Amazon Kendra Query API` search result. It is composed of: * Relevant suggested answers: either a text excerpt or table excerpt. * Matching FAQs or questions-answer from your FAQ file. * Documents including an excerpt of each document with its title. """ ResultItems: List[QueryResultItem] """The result items.""" # Unexpected keyword argument "extra" for "__init_subclass__" of "object" class RetrieveResult(BaseModel, extra=Extra.allow): # type: ignore[call-arg] """`Amazon Kendra Retrieve API` search result. It is composed of: * relevant passages or text excerpts given an input query. """ QueryId: str """The ID of the query.""" ResultItems: List[RetrieveResultItem] """The result items.""" class AmazonKendraRetriever(BaseRetriever): """`Amazon Kendra Index` retriever. Args: index_id: Kendra index id region_name: The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config. credentials_profile_name: The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. top_k: No of results to return attribute_filter: Additional filtering of results based on metadata See: https://docs.aws.amazon.com/kendra/latest/APIReference page_content_formatter: generates the Document page_content allowing access to all result item attributes. By default, it uses the item's title and excerpt. client: boto3 client for Kendra user_context: Provides information about the user context See: https://docs.aws.amazon.com/kendra/latest/APIReference Example: .. 
code-block:: python retriever = AmazonKendraRetriever( index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03" ) """ index_id: str region_name: Optional[str] = None credentials_profile_name: Optional[str] = None top_k: int = 3 attribute_filter: Optional[Dict] = None page_content_formatter: Callable[[ResultItem], str] = combined_text client: Any user_context: Optional[Dict] = None @validator("top_k") def validate_top_k(cls, value: int) -> int: if value < 0: raise ValueError(f"top_k ({value}) cannot be negative.") return value @root_validator(pre=True) def create_client(cls, values: Dict[str, Any]) -> Dict[str, Any]: if values.get("client") is not None: return values try: import boto3 if values.get("credentials_profile_name"): session = boto3.Session(profile_name=values["credentials_profile_name"]) else: # use default credentials session = boto3.Session() client_params = {} if values.get("region_name"): client_params["region_name"] = values["region_name"] values["client"] = session.client("kendra", **client_params) return values except ImportError: raise ModuleNotFoundError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e def _kendra_query(self, query: str) -> Sequence[ResultItem]: kendra_kwargs = { "IndexId": self.index_id, "QueryText": query.strip(), "PageSize": self.top_k, } if self.attribute_filter is not None: kendra_kwargs["AttributeFilter"] = self.attribute_filter if self.user_context is not None: kendra_kwargs["UserContext"] = self.user_context response = self.client.retrieve(**kendra_kwargs) r_result = RetrieveResult.parse_obj(response) if r_result.ResultItems: return r_result.ResultItems # Retrieve API returned 0 results, fall back to Query API response = self.client.query(**kendra_kwargs) q_result = QueryResult.parse_obj(response) return q_result.ResultItems def _get_top_k_docs(self, result_items: Sequence[ResultItem]) -> List[Document]: top_docs = [ item.to_doc(self.page_content_formatter) for item in result_items[: self.top_k] ] return top_docs def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, ) -> List[Document]: """Run search on Kendra index and get top k documents Example: .. code-block:: python docs = retriever.get_relevant_documents('This is my query') """ result_items = self._kendra_query(query) top_k_docs = self._get_top_k_docs(result_items) return top_k_docs
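A usage sketch for the AmazonKendraRetriever above that adds an attribute filter; the index id is a placeholder and AWS credentials are assumed to be available via the default boto3 credential chain.

.. code-block:: python

    from langchain.retrievers import AmazonKendraRetriever

    # Placeholder index id; the filter restricts results to English documents.
    retriever = AmazonKendraRetriever(
        index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03",
        top_k=5,
        attribute_filter={
            "EqualsTo": {
                "Key": "_language_code",
                "Value": {"StringValue": "en"},
            }
        },
    )
    docs = retriever.get_relevant_documents("What is Amazon Kendra?")
    for doc in docs:
        print(doc.metadata["title"], doc.metadata["source"])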
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~memory~chat_message_histories~mongodb.py
import json import logging from typing import List from langchain.schema import ( BaseChatMessageHistory, ) from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict logger = logging.getLogger(__name__) DEFAULT_DBNAME = "chat_history" DEFAULT_COLLECTION_NAME = "message_store" class MongoDBChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in MongoDB. Args: connection_string: connection string to connect to MongoDB session_id: arbitrary key that is used to store the messages of a single chat session. database_name: name of the database to use collection_name: name of the collection to use """ def __init__( self, connection_string: str, session_id: str, database_name: str = DEFAULT_DBNAME, collection_name: str = DEFAULT_COLLECTION_NAME, ): from pymongo import MongoClient, errors self.connection_string = connection_string self.session_id = session_id self.database_name = database_name self.collection_name = collection_name try: self.client: MongoClient = MongoClient(connection_string) except errors.ConnectionFailure as error: logger.error(error) self.db = self.client[database_name] self.collection = self.db[collection_name] self.collection.create_index("SessionId") @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve the messages from MongoDB""" from pymongo import errors try: cursor = self.collection.find({"SessionId": self.session_id}) except errors.OperationFailure as error: logger.error(error) if cursor: items = [json.loads(document["History"]) for document in cursor] else: items = [] messages = messages_from_dict(items) return messages def add_message(self, message: BaseMessage) -> None: """Append the message to the record in MongoDB""" from pymongo import errors try: self.collection.insert_one( { "SessionId": self.session_id, "History": json.dumps( _message_to_dict(message), ensure_ascii=False ), } ) except errors.WriteError as err: logger.error(err) def clear(self) -> None: """Clear session memory from MongoDB""" from pymongo import errors try: self.collection.delete_many({"SessionId": self.session_id}) except errors.WriteError as err: logger.error(err)
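A minimal usage sketch for the MongoDBChatMessageHistory above; the connection string and session id are placeholders, and a reachable MongoDB instance is assumed.

.. code-block:: python

    from langchain.memory import MongoDBChatMessageHistory

    # Placeholder connection string and session id.
    history = MongoDBChatMessageHistory(
        connection_string="mongodb://localhost:27017/",
        session_id="test-session",
    )
    history.add_user_message("hi!")
    history.add_ai_message("hello, how can I help you?")
    print(history.messages)
    history.clear()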
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~ctranslate2.py
from typing import Any, Dict, List, Optional, Union from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import BaseLLM from langchain.pydantic_v1 import Field, root_validator from langchain.schema.output import Generation, LLMResult class CTranslate2(BaseLLM): """CTranslate2 language model.""" model_path: str = "" """Path to the CTranslate2 model directory.""" tokenizer_name: str = "" """Name of the original Hugging Face model needed to load the proper tokenizer.""" device: str = "cpu" """Device to use (possible values are: cpu, cuda, auto).""" device_index: Union[int, List[int]] = 0 """Device IDs where to place this generator on.""" compute_type: Union[str, Dict[str, str]] = "default" """ Model computation type or a dictionary mapping a device name to the computation type (possible values are: default, auto, int8, int8_float32, int8_float16, int8_bfloat16, int16, float16, bfloat16, float32). """ max_length: int = 512 """Maximum generation length.""" sampling_topk: int = 1 """Randomly sample predictions from the top K candidates.""" sampling_topp: float = 1 """Keep the most probable tokens whose cumulative probability exceeds this value.""" sampling_temperature: float = 1 """Sampling temperature to generate more random samples.""" client: Any #: :meta private: tokenizer: Any #: :meta private: ctranslate2_kwargs: Dict[str, Any] = Field(default_factory=dict) """ Holds any model parameters valid for `ctranslate2.Generator` call not explicitly specified. """ @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: import ctranslate2 except ImportError: raise ImportError( "Could not import ctranslate2 python package. " "Please install it with `pip install ctranslate2`." ) try: import transformers except ImportError: raise ImportError( "Could not import transformers python package. " "Please install it with `pip install transformers`." ) values["client"] = ctranslate2.Generator( model_path=values["model_path"], device=values["device"], device_index=values["device_index"], compute_type=values["compute_type"], **values["ctranslate2_kwargs"], ) values["tokenizer"] = transformers.AutoTokenizer.from_pretrained( values["tokenizer_name"] ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters.""" return { "max_length": self.max_length, "sampling_topk": self.sampling_topk, "sampling_topp": self.sampling_topp, "sampling_temperature": self.sampling_temperature, } def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: # build sampling parameters params = {**self._default_params, **kwargs} # call the model encoded_prompts = self.tokenizer(prompts)["input_ids"] tokenized_prompts = [ self.tokenizer.convert_ids_to_tokens(encoded_prompt) for encoded_prompt in encoded_prompts ] results = self.client.generate_batch(tokenized_prompts, **params) sequences = [result.sequences_ids[0] for result in results] decoded_sequences = [self.tokenizer.decode(seq) for seq in sequences] generations = [] for text in decoded_sequences: generations.append([Generation(text=text)]) return LLMResult(generations=generations) @property def _llm_type(self) -> str: """Return type of llm.""" return "ctranslate2"
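A minimal usage sketch for the CTranslate2 LLM above; the model directory and tokenizer name are placeholders, and the directory is assumed to hold a model converted with the `ct2-transformers-converter` tool.

.. code-block:: python

    from langchain.llms import CTranslate2

    # Placeholder paths/names; convert the original HF model to CTranslate2 first.
    llm = CTranslate2(
        model_path="./ct2-model-dir",
        tokenizer_name="meta-llama/Llama-2-7b-hf",
        device="cpu",
        max_length=128,
        sampling_temperature=0.7,
    )
    print(llm("What is the capital of France?"))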
[ "input_ids" ]
2024-01-10
ai-forever/gigachain
libs~experimental~langchain_experimental~tot~controller.py
from typing import Tuple from langchain_experimental.tot.memory import ToTDFSMemory from langchain_experimental.tot.thought import ThoughtValidity class ToTController: """ Tree of Thought (ToT) controller. This is a version of a ToT controller, dubbed in the paper as a "Simple Controller". It has one parameter `c` which is the number of children to explore for each thought. """ def __init__(self, c: int = 3): """ Initialize the controller. Args: c: The number of children to explore at each node. """ self.c = c def __call__(self, memory: ToTDFSMemory) -> Tuple[str, ...]: next_thought = memory.top() parent_thought = memory.top_parent() validity = ( ThoughtValidity.VALID_INTERMEDIATE if next_thought is None else next_thought.validity ) # 1 if the current partial solution is invalid, backtrack to the parent # thought. if validity == ThoughtValidity.INVALID: memory.pop() next_thought = memory.top() if next_thought and len(next_thought.children) >= self.c: memory.pop() # 2 if the current partial solution is valid but C children were # explored and yet failed to find a final solution, backtrack to the # parent thought. elif ( validity == ThoughtValidity.VALID_INTERMEDIATE and parent_thought and len(parent_thought.children) >= self.c ): memory.pop(2) return tuple(thought.text for thought in memory.current_path())
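A rough, hand-driven sketch of the ToTController above (not part of the source file), assuming the `Thought` model and a `ToTDFSMemory.store()` method from the accompanying `tot` package; if the memory API differs, adapt the pushes accordingly.

.. code-block:: python

    from langchain_experimental.tot.controller import ToTController
    from langchain_experimental.tot.memory import ToTDFSMemory
    from langchain_experimental.tot.thought import Thought, ThoughtValidity

    controller = ToTController(c=2)
    memory = ToTDFSMemory()

    # Push a valid partial solution and then an invalid child thought.
    memory.store(Thought(text="x = 2", validity=ThoughtValidity.VALID_INTERMEDIATE))
    memory.store(Thought(text="x = 2, y = 7", validity=ThoughtValidity.INVALID))

    # The controller backtracks past the invalid leaf and returns the surviving
    # path, which becomes the context for generating the next thought.
    print(controller(memory))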
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~tools~playwright~click.py
from __future__ import annotations from typing import Optional, Type from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.pydantic_v1 import BaseModel, Field from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) class ClickToolInput(BaseModel): """Input for ClickTool.""" selector: str = Field(..., description="CSS selector for the element to click") class ClickTool(BaseBrowserTool): """Tool for clicking on an element with the given CSS selector.""" name: str = "click_element" description: str = "Click on an element with the given CSS selector" args_schema: Type[BaseModel] = ClickToolInput visible_only: bool = True """Whether to consider only visible elements.""" playwright_strict: bool = False """Whether to employ Playwright's strict mode when clicking on elements.""" playwright_timeout: float = 1_000 """Timeout (in ms) for Playwright to wait for element to be ready.""" def _selector_effective(self, selector: str) -> str: if not self.visible_only: return selector return f"{selector} >> visible=1" def _run( self, selector: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) # Navigate to the desired webpage before using this tool selector_effective = self._selector_effective(selector=selector) from playwright.sync_api import TimeoutError as PlaywrightTimeoutError try: page.click( selector_effective, strict=self.playwright_strict, timeout=self.playwright_timeout, ) except PlaywrightTimeoutError: return f"Unable to click on element '{selector}'" return f"Clicked element '{selector}'" async def _arun( self, selector: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) # Navigate to the desired webpage before using this tool selector_effective = self._selector_effective(selector=selector) from playwright.async_api import TimeoutError as PlaywrightTimeoutError try: await page.click( selector_effective, strict=self.playwright_strict, timeout=self.playwright_timeout, ) except PlaywrightTimeoutError: return f"Unable to click on element '{selector}'" return f"Clicked element '{selector}'"
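A minimal usage sketch for the ClickTool above; the CSS selector is a placeholder, and the browser is expected to already be on the target page (for example via the companion NavigateTool).

.. code-block:: python

    from langchain.tools.playwright import ClickTool
    from langchain.tools.playwright.utils import create_sync_playwright_browser

    sync_browser = create_sync_playwright_browser()
    tool = ClickTool(sync_browser=sync_browser)

    # Placeholder selector; navigate the current page to the target site first.
    print(tool.run({"selector": "#submit-button"}))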
[ "Click on an element with the given CSS selector" ]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~callbacks~confident_callback.py
# flake8: noqa import os import warnings from typing import Any, Dict, List, Optional, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import AgentAction, AgentFinish, LLMResult class DeepEvalCallbackHandler(BaseCallbackHandler): """Callback Handler that logs into deepeval. Args: implementation_name: name of the `implementation` in deepeval metrics: A list of metrics Raises: ImportError: if the `deepeval` package is not installed. Examples: >>> from langchain.llms import OpenAI >>> from langchain.callbacks import DeepEvalCallbackHandler >>> from deepeval.metrics import AnswerRelevancy >>> metric = AnswerRelevancy(minimum_score=0.3) >>> deepeval_callback = DeepEvalCallbackHandler( ... implementation_name="exampleImplementation", ... metrics=[metric], ... ) >>> llm = OpenAI( ... temperature=0, ... callbacks=[deepeval_callback], ... verbose=True, ... openai_api_key="API_KEY_HERE", ... ) >>> llm.generate([ ... "What is the best evaluation tool out there? (no bias at all)", ... ]) "Deepeval, no doubt about it." """ REPO_URL: str = "https://github.com/confident-ai/deepeval" ISSUES_URL: str = f"{REPO_URL}/issues" BLOG_URL: str = "https://docs.confident-ai.com" # noqa: E501 def __init__( self, metrics: List[Any], implementation_name: Optional[str] = None, ) -> None: """Initializes the `deepevalCallbackHandler`. Args: implementation_name: Name of the implementation you want. metrics: What metrics do you want to track? Raises: ImportError: if the `deepeval` package is not installed. ConnectionError: if the connection to deepeval fails. """ super().__init__() # Import deepeval (not via `import_deepeval` to keep hints in IDEs) try: import deepeval # ignore: F401,I001 except ImportError: raise ImportError( """To use the deepeval callback manager you need to have the `deepeval` Python package installed. Please install it with `pip install deepeval`""" ) if os.path.exists(".deepeval"): warnings.warn( """You are currently not logging anything to the dashboard, we recommend using `deepeval login`.""" ) # Set the deepeval variables self.implementation_name = implementation_name self.metrics = metrics warnings.warn( ( "The `DeepEvalCallbackHandler` is currently in beta and is subject to" " change based on updates to `langchain`. Please report any issues to" f" {self.ISSUES_URL} as an `integration` issue." 
), ) def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Store the prompts""" self.prompts = prompts def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Do nothing when a new token is generated.""" pass def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Log records to deepeval when an LLM ends.""" from deepeval.metrics.answer_relevancy import AnswerRelevancy from deepeval.metrics.bias_classifier import UnBiasedMetric from deepeval.metrics.metric import Metric from deepeval.metrics.toxic_classifier import NonToxicMetric for metric in self.metrics: for i, generation in enumerate(response.generations): # Here, we only measure the first generation's output output = generation[0].text query = self.prompts[i] if isinstance(metric, AnswerRelevancy): result = metric.measure( output=output, query=query, ) print(f"Answer Relevancy: {result}") elif isinstance(metric, UnBiasedMetric): score = metric.measure(output) print(f"Bias Score: {score}") elif isinstance(metric, NonToxicMetric): score = metric.measure(output) print(f"Toxic Score: {score}") else: raise ValueError( f"""Metric {metric.__name__} is not supported by deepeval callbacks.""" ) def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM outputs an error.""" pass def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Do nothing when chain starts""" pass def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Do nothing when chain ends.""" pass def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when LLM chain outputs an error.""" pass def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: """Do nothing when tool starts.""" pass def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Do nothing when agent takes a specific action.""" pass def on_tool_end( self, output: str, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """Do nothing when tool ends.""" pass def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing when tool outputs an error.""" pass def on_text(self, text: str, **kwargs: Any) -> None: """Do nothing""" pass def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Do nothing""" pass
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~embeddings~localai.py
from __future__ import annotations import logging import warnings from typing import ( Any, Callable, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union, ) from tenacity import ( AsyncRetrying, before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator from langchain.schema.embeddings import Embeddings from langchain.utils import get_from_dict_or_env, get_pydantic_field_names logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: LocalAIEmbeddings) -> Callable[[Any], Any]: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def _async_retry_decorator(embeddings: LocalAIEmbeddings) -> Any: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards async_retrying = AsyncRetrying( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def wrap(func: Callable) -> Callable: async def wrapped_f(*args: Any, **kwargs: Any) -> Callable: async for _ in async_retrying: return await func(*args, **kwargs) raise AssertionError("this is unreachable") return wrapped_f return wrap # https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings def _check_response(response: dict) -> dict: if any(len(d["embedding"]) == 1 for d in response["data"]): import openai raise openai.error.APIError("LocalAI API returned an empty embedding") return response def embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: response = embeddings.client.create(**kwargs) return _check_response(response) return _embed_with_retry(**kwargs) async def async_embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" @_async_retry_decorator(embeddings) async def _async_embed_with_retry(**kwargs: Any) -> Any: response = await embeddings.client.acreate(**kwargs) return _check_response(response) return await _async_embed_with_retry(**kwargs) class LocalAIEmbeddings(BaseModel, Embeddings): """LocalAI embedding models. Since LocalAI and OpenAI have 1:1 compatibility between APIs, this class uses the ``openai`` Python package's ``openai.Embedding`` as its client. 
Thus, you should have the ``openai`` python package installed, and defeat the environment variable ``OPENAI_API_KEY`` by setting to a random string. You also need to specify ``OPENAI_API_BASE`` to point to your LocalAI service endpoint. Example: .. code-block:: python from langchain.embeddings import LocalAIEmbeddings openai = LocalAIEmbeddings( openai_api_key="random-string", openai_api_base="http://localhost:8080" ) """ client: Any #: :meta private: model: str = "text-embedding-ada-002" deployment: str = model openai_api_version: Optional[str] = None openai_api_base: Optional[str] = None # to support explicit proxy for LocalAI openai_proxy: Optional[str] = None embedding_ctx_length: int = 8191 """The maximum number of tokens to embed at once.""" openai_api_key: Optional[str] = None openai_organization: Optional[str] = None allowed_special: Union[Literal["all"], Set[str]] = set() disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all" chunk_size: int = 1000 """Maximum number of texts to embed in each batch""" max_retries: int = 6 """Maximum number of retries to make when generating.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout in seconds for the LocalAI request.""" headers: Any = None show_progress_bar: bool = False """Whether to show a progress bar when embedding.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: warnings.warn( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) default_api_version = "" values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", default=default_api_version, ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai values["client"] = openai.Embedding except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _invocation_params(self) -> Dict: openai_args = { "model": self.model, "request_timeout": self.request_timeout, "headers": self.headers, "api_key": self.openai_api_key, "organization": self.openai_organization, "api_base": self.openai_api_base, "api_version": self.openai_api_version, **self.model_kwargs, } if self.openai_proxy: import openai openai.proxy = { "http": self.openai_proxy, "https": self.openai_proxy, } # type: ignore[assignment] # noqa: E501 return openai_args def _embedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to LocalAI's embedding endpoint.""" # handle large input text if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return embed_with_retry( self, input=[text], **self._invocation_params, )["data"][ 0 ]["embedding"] async def _aembedding_func(self, text: str, *, engine: str) -> List[float]: """Call out to LocalAI's embedding endpoint.""" # handle large input text if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") return ( await async_embed_with_retry( self, input=[text], **self._invocation_params, ) )["data"][0]["embedding"] def embed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to LocalAI's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # call _embedding_func for each text return [self._embedding_func(text, engine=self.deployment) for text in texts] async def aembed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to LocalAI's embedding endpoint async for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ embeddings = [] for text in texts: response = await self._aembedding_func(text, engine=self.deployment) embeddings.append(response) return embeddings def embed_query(self, text: str) -> List[float]: """Call out to LocalAI's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = self._embedding_func(text, engine=self.deployment) return embedding async def aembed_query(self, text: str) -> List[float]: """Call out to LocalAI's embedding endpoint async for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = await self._aembedding_func(text, engine=self.deployment) return embedding
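A minimal usage sketch for the LocalAIEmbeddings above; a LocalAI server with an embedding-capable model is assumed at the placeholder URL, and the API key can be any string because LocalAI does not validate it.

.. code-block:: python

    from langchain.embeddings import LocalAIEmbeddings

    # Placeholder endpoint; the key is an arbitrary string.
    embeddings = LocalAIEmbeddings(
        openai_api_key="random-string",
        openai_api_base="http://localhost:8080",
        model="text-embedding-ada-002",
    )
    doc_vectors = embeddings.embed_documents(["hello world", "goodbye world"])
    query_vector = embeddings.embed_query("hello world")
    print(len(doc_vectors), len(query_vector))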
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~output_parsers~list.py
from __future__ import annotations import re from abc import abstractmethod from typing import List from langchain.schema import BaseOutputParser class ListOutputParser(BaseOutputParser[List[str]]): """Parse the output of an LLM call to a list.""" @property def _type(self) -> str: return "list" @abstractmethod def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" class CommaSeparatedListOutputParser(ListOutputParser): """Parse the output of an LLM call to a comma-separated list.""" @classmethod def is_lc_serializable(cls) -> bool: return True def get_format_instructions(self) -> str: return " ответ напиши через запятую одной строкой!" def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" if ", " not in text and "\n" in text: text = text.replace("\n", ", ") return text.strip().split(", ") @property def _type(self) -> str: return "comma-separated-list" class NumberedListOutputParser(ListOutputParser): """Parse a numbered list.""" def get_format_instructions(self) -> str: return ( "Your response should be a numbered list with each item on a new line. " "For example: \n\n1. foo\n\n2. bar\n\n3. baz" ) def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" pattern = r"\d+\.\s([^\n]+)" # Extract the text of each item matches = re.findall(pattern, text) return matches @property def _type(self) -> str: return "numbered-list" class MarkdownListOutputParser(ListOutputParser): """Parse a markdown list.""" def get_format_instructions(self) -> str: return "Your response should be a markdown list, " "eg: `- foo\n- bar\n- baz`" def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" pattern = r"-\s([^\n]+)" return re.findall(pattern, text) @property def _type(self) -> str: return "markdown-list"
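A short sketch showing what each list parser above returns for a typical LLM completion; inputs and expected outputs follow directly from the parse() implementations.

.. code-block:: python

    from langchain.output_parsers.list import (
        CommaSeparatedListOutputParser,
        MarkdownListOutputParser,
        NumberedListOutputParser,
    )

    print(CommaSeparatedListOutputParser().parse("red, green, blue"))
    # -> ['red', 'green', 'blue']
    print(NumberedListOutputParser().parse("1. foo\n2. bar\n3. baz"))
    # -> ['foo', 'bar', 'baz']
    print(MarkdownListOutputParser().parse("- foo\n- bar\n- baz"))
    # -> ['foo', 'bar', 'baz']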
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~chains~graph_qa~hugegraph.py
"""Question answering over a graph.""" from __future__ import annotations from typing import Any, Dict, List, Optional from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ( CYPHER_QA_PROMPT, GREMLIN_GENERATION_PROMPT, ) from langchain.chains.llm import LLMChain from langchain.graphs.hugegraph import HugeGraph from langchain.pydantic_v1 import Field from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel class HugeGraphQAChain(Chain): """Chain for question-answering against a graph by generating gremlin statements. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ graph: HugeGraph = Field(exclude=True) gremlin_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: """Input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, gremlin_prompt: BasePromptTemplate = GREMLIN_GENERATION_PROMPT, **kwargs: Any, ) -> HugeGraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) gremlin_generation_chain = LLMChain(llm=llm, prompt=gremlin_prompt) return cls( qa_chain=qa_chain, gremlin_generation_chain=gremlin_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Generate gremlin statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] generated_gremlin = self.gremlin_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) _run_manager.on_text("Generated gremlin:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_gremlin, color="green", end="\n", verbose=self.verbose ) context = self.graph.query(generated_gremlin) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) return {self.output_key: result[self.qa_chain.output_key]}
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~memory~chat_message_histories~firestore.py
"""Firestore Chat Message History.""" from __future__ import annotations import logging from typing import TYPE_CHECKING, List, Optional from langchain.schema import ( BaseChatMessageHistory, ) from langchain.schema.messages import BaseMessage, messages_from_dict, messages_to_dict logger = logging.getLogger(__name__) if TYPE_CHECKING: from google.cloud.firestore import Client, DocumentReference def _get_firestore_client() -> Client: try: import firebase_admin from firebase_admin import firestore except ImportError: raise ImportError( "Could not import firebase-admin python package. " "Please install it with `pip install firebase-admin`." ) # For multiple instances, only initialize the app once. try: firebase_admin.get_app() except ValueError as e: logger.debug("Initializing Firebase app: %s", e) firebase_admin.initialize_app() return firestore.client() class FirestoreChatMessageHistory(BaseChatMessageHistory): """Chat message history backed by Google Firestore.""" def __init__( self, collection_name: str, session_id: str, user_id: str, firestore_client: Optional[Client] = None, ): """ Initialize a new instance of the FirestoreChatMessageHistory class. :param collection_name: The name of the collection to use. :param session_id: The session ID for the chat.. :param user_id: The user ID for the chat. """ self.collection_name = collection_name self.session_id = session_id self.user_id = user_id self._document: Optional[DocumentReference] = None self.messages: List[BaseMessage] = [] self.firestore_client = firestore_client or _get_firestore_client() self.prepare_firestore() def prepare_firestore(self) -> None: """Prepare the Firestore client. Use this function to make sure your database is ready. """ self._document = self.firestore_client.collection( self.collection_name ).document(self.session_id) self.load_messages() def load_messages(self) -> None: """Retrieve the messages from Firestore""" if not self._document: raise ValueError("Document not initialized") doc = self._document.get() if doc.exists: data = doc.to_dict() if "messages" in data and len(data["messages"]) > 0: self.messages = messages_from_dict(data["messages"]) def add_message(self, message: BaseMessage) -> None: self.messages.append(message) self.upsert_messages() def upsert_messages(self, new_message: Optional[BaseMessage] = None) -> None: """Update the Firestore document.""" if not self._document: raise ValueError("Document not initialized") self._document.set( { "id": self.session_id, "user_id": self.user_id, "messages": messages_to_dict(self.messages), } ) def clear(self) -> None: """Clear session memory from this memory and Firestore.""" self.messages = [] if self._document: self._document.delete()
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~retrievers~contextual_compression.py
from typing import Any, List from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain.retrievers.document_compressors.base import ( BaseDocumentCompressor, ) from langchain.schema import BaseRetriever, Document class ContextualCompressionRetriever(BaseRetriever): """Retriever that wraps a base retriever and compresses the results.""" base_compressor: BaseDocumentCompressor """Compressor for compressing retrieved documents.""" base_retriever: BaseRetriever """Base Retriever to use for getting relevant documents.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any, ) -> List[Document]: """Get documents relevant for a query. Args: query: string to find relevant documents for Returns: Sequence of relevant documents """ docs = self.base_retriever.get_relevant_documents( query, callbacks=run_manager.get_child(), **kwargs ) if docs: compressed_docs = self.base_compressor.compress_documents( docs, query, callbacks=run_manager.get_child() ) return list(compressed_docs) else: return [] async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, **kwargs: Any, ) -> List[Document]: """Get documents relevant for a query. Args: query: string to find relevant documents for Returns: List of relevant documents """ docs = await self.base_retriever.aget_relevant_documents( query, callbacks=run_manager.get_child(), **kwargs ) if docs: compressed_docs = await self.base_compressor.acompress_documents( docs, query, callbacks=run_manager.get_child() ) return list(compressed_docs) else: return []
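# Illustrative usage sketch: wrapping a plain vector-store retriever with
# ContextualCompressionRetriever (defined above). FAISS and OpenAI are used here
# purely as examples and assume an OpenAI API key plus `faiss-cpu`; any other
# retriever/compressor pair would be wired up the same way.
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(
    ["Document compressors trim retrieved text down to the relevant parts."],
    OpenAIEmbeddings(),
)
compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor,
    base_retriever=vectorstore.as_retriever(),
)
docs = compression_retriever.get_relevant_documents("What do document compressors do?")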
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~embeddings~nlpcloud.py
from typing import Any, Dict, List from langchain.pydantic_v1 import BaseModel, root_validator from langchain.schema.embeddings import Embeddings from langchain.utils import get_from_dict_or_env class NLPCloudEmbeddings(BaseModel, Embeddings): """NLP Cloud embedding models. To use, you should have the nlpcloud python package installed Example: .. code-block:: python from langchain.embeddings import NLPCloudEmbeddings embeddings = NLPCloudEmbeddings() """ model_name: str # Define model_name as a class attribute gpu: bool # Define gpu as a class attribute client: Any #: :meta private: def __init__( self, model_name: str = "paraphrase-multilingual-mpnet-base-v2", gpu: bool = False, **kwargs: Any ) -> None: super().__init__(model_name=model_name, gpu=gpu, **kwargs) @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" nlpcloud_api_key = get_from_dict_or_env( values, "nlpcloud_api_key", "NLPCLOUD_API_KEY" ) try: import nlpcloud values["client"] = nlpcloud.Client( values["model_name"], nlpcloud_api_key, gpu=values["gpu"], lang="en" ) except ImportError: raise ImportError( "Could not import nlpcloud python package. " "Please install it with `pip install nlpcloud`." ) return values def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed a list of documents using NLP Cloud. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ return self.client.embeddings(texts)["embeddings"] def embed_query(self, text: str) -> List[float]: """Embed a query using NLP Cloud. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.client.embeddings([text])["embeddings"][0]
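# Illustrative usage sketch: embedding a query and a few documents with
# NLPCloudEmbeddings (defined above). Assumes the `nlpcloud` package is
# installed and NLPCLOUD_API_KEY is set in the environment.
embeddings = NLPCloudEmbeddings()
query_vector = embeddings.embed_query("What is semantic search?")
doc_vectors = embeddings.embed_documents(
    ["Semantic search ranks results by meaning.", "Keyword search matches exact terms."]
)
print(len(query_vector), len(doc_vectors))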
[]
2024-01-10
ai-forever/gigachain
libs~langchain~tests~integration_tests~document_loaders~test_xml.py
import os
from pathlib import Path

from langchain.document_loaders import UnstructuredXMLLoader

EXAMPLE_DIRECTORY = Path(__file__).parent.parent / "examples"


def test_unstructured_xml_loader() -> None:
    """Test unstructured loader."""
    file_path = os.path.join(EXAMPLE_DIRECTORY, "factbook.xml")
    loader = UnstructuredXMLLoader(str(file_path))
    docs = loader.load()

    assert len(docs) == 1
[]
2024-01-10
ai-forever/gigachain
libs~experimental~langchain_experimental~graph_transformers~diffbot.py
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union import requests from langchain.graphs.graph_document import GraphDocument, Node, Relationship from langchain.schema import Document from langchain.utils import get_from_env def format_property_key(s: str) -> str: words = s.split() if not words: return s first_word = words[0].lower() capitalized_words = [word.capitalize() for word in words[1:]] return "".join([first_word] + capitalized_words) class NodesList: """ Manages a list of nodes with associated properties. Attributes: nodes (Dict[Tuple, Any]): Stores nodes as keys and their properties as values. Each key is a tuple where the first element is the node ID and the second is the node type. """ def __init__(self) -> None: self.nodes: Dict[Tuple[Union[str, int], str], Any] = dict() def add_node_property( self, node: Tuple[Union[str, int], str], properties: Dict[str, Any] ) -> None: """ Adds or updates node properties. If the node does not exist in the list, it's added along with its properties. If the node already exists, its properties are updated with the new values. Args: node (Tuple): A tuple containing the node ID and node type. properties (Dict): A dictionary of properties to add or update for the node. """ if node not in self.nodes: self.nodes[node] = properties else: self.nodes[node].update(properties) def return_node_list(self) -> List[Node]: """ Returns the nodes as a list of Node objects. Each Node object will have its ID, type, and properties populated. Returns: List[Node]: A list of Node objects. """ nodes = [ Node(id=key[0], type=key[1], properties=self.nodes[key]) for key in self.nodes ] return nodes # Properties that should be treated as node properties instead of relationships FACT_TO_PROPERTY_TYPE = [ "Date", "Number", "Job title", "Cause of death", "Organization type", "Academic title", ] schema_mapping = [ ("HEADQUARTERS", "ORGANIZATION_LOCATIONS"), ("RESIDENCE", "PERSON_LOCATION"), ("ALL_PERSON_LOCATIONS", "PERSON_LOCATION"), ("CHILD", "HAS_CHILD"), ("PARENT", "HAS_PARENT"), ("CUSTOMERS", "HAS_CUSTOMER"), ("SKILLED_AT", "INTERESTED_IN"), ] class SimplifiedSchema: """ Provides functionality for working with a simplified schema mapping. Attributes: schema (Dict): A dictionary containing the mapping to simplified schema types. """ def __init__(self) -> None: """Initializes the schema dictionary based on the predefined list.""" self.schema = dict() for row in schema_mapping: self.schema[row[0]] = row[1] def get_type(self, type: str) -> str: """ Retrieves the simplified schema type for a given original type. Args: type (str): The original schema type to find the simplified type for. Returns: str: The simplified schema type if it exists; otherwise, returns the original type. """ try: return self.schema[type] except KeyError: return type class DiffbotGraphTransformer: """Transforms documents into graph documents using Diffbot's NLP API. A graph document transformation system takes a sequence of Documents and returns a sequence of Graph Documents. Example: .. 
code-block:: python class DiffbotGraphTransformer(BaseGraphDocumentTransformer): def transform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[GraphDocument]: results = [] for document in documents: raw_results = self.nlp_request(document.page_content) graph_document = self.process_response(raw_results, document) results.append(graph_document) return results async def atransform_documents( self, documents: Sequence[Document], **kwargs: Any ) -> Sequence[Document]: raise NotImplementedError """ def __init__( self, diffbot_api_key: Optional[str] = None, fact_confidence_threshold: float = 0.7, include_qualifiers: bool = True, include_evidence: bool = True, simplified_schema: bool = True, ) -> None: """ Initialize the graph transformer with various options. Args: diffbot_api_key (str): The API key for Diffbot's NLP services. fact_confidence_threshold (float): Minimum confidence level for facts to be included. include_qualifiers (bool): Whether to include qualifiers in the relationships. include_evidence (bool): Whether to include evidence for the relationships. simplified_schema (bool): Whether to use a simplified schema for relationships. """ self.diffbot_api_key = diffbot_api_key or get_from_env( "diffbot_api_key", "DIFFBOT_API_KEY" ) self.fact_threshold_confidence = fact_confidence_threshold self.include_qualifiers = include_qualifiers self.include_evidence = include_evidence self.simplified_schema = None if simplified_schema: self.simplified_schema = SimplifiedSchema() def nlp_request(self, text: str) -> Dict[str, Any]: """ Make an API request to the Diffbot NLP endpoint. Args: text (str): The text to be processed. Returns: Dict[str, Any]: The JSON response from the API. """ # Relationship extraction only works for English payload = { "content": text, "lang": "en", } FIELDS = "facts" HOST = "nl.diffbot.com" url = ( f"https://{HOST}/v1/?fields={FIELDS}&" f"token={self.diffbot_api_key}&language=en" ) result = requests.post(url, data=payload) return result.json() def process_response( self, payload: Dict[str, Any], document: Document ) -> GraphDocument: """ Transform the Diffbot NLP response into a GraphDocument. Args: payload (Dict[str, Any]): The JSON response from Diffbot's NLP API. document (Document): The original document. Returns: GraphDocument: The transformed document as a graph. 
""" # Return empty result if there are no facts if "facts" not in payload or not payload["facts"]: return GraphDocument(nodes=[], relationships=[], source=document) # Nodes are a custom class because we need to deduplicate nodes_list = NodesList() # Relationships are a list because we don't deduplicate nor anything else relationships = list() for record in payload["facts"]: # Skip if the fact is below the threshold confidence if record["confidence"] < self.fact_threshold_confidence: continue # TODO: It should probably be treated as a node property if not record["value"]["allTypes"]: continue # Define source node source_id = ( record["entity"]["allUris"][0] if record["entity"]["allUris"] else record["entity"]["name"] ) source_label = record["entity"]["allTypes"][0]["name"].capitalize() source_name = record["entity"]["name"] source_node = Node(id=source_id, type=source_label) nodes_list.add_node_property( (source_id, source_label), {"name": source_name} ) # Define target node target_id = ( record["value"]["allUris"][0] if record["value"]["allUris"] else record["value"]["name"] ) target_label = record["value"]["allTypes"][0]["name"].capitalize() target_name = record["value"]["name"] # Some facts are better suited as node properties if target_label in FACT_TO_PROPERTY_TYPE: nodes_list.add_node_property( (source_id, source_label), {format_property_key(record["property"]["name"]): target_name}, ) else: # Define relationship # Define target node object target_node = Node(id=target_id, type=target_label) nodes_list.add_node_property( (target_id, target_label), {"name": target_name} ) # Define relationship type rel_type = record["property"]["name"].replace(" ", "_").upper() if self.simplified_schema: rel_type = self.simplified_schema.get_type(rel_type) # Relationship qualifiers/properties rel_properties = dict() relationship_evidence = [el["passage"] for el in record["evidence"]][0] if self.include_evidence: rel_properties.update({"evidence": relationship_evidence}) if self.include_qualifiers and record.get("qualifiers"): for property in record["qualifiers"]: prop_key = format_property_key(property["property"]["name"]) rel_properties[prop_key] = property["value"]["name"] relationship = Relationship( source=source_node, target=target_node, type=rel_type, properties=rel_properties, ) relationships.append(relationship) return GraphDocument( nodes=nodes_list.return_node_list(), relationships=relationships, source=document, ) def convert_to_graph_documents( self, documents: Sequence[Document] ) -> List[GraphDocument]: """Convert a sequence of documents into graph documents. Args: documents (Sequence[Document]): The original documents. **kwargs: Additional keyword arguments. Returns: Sequence[GraphDocument]: The transformed documents as graphs. """ results = [] for document in documents: raw_results = self.nlp_request(document.page_content) graph_document = self.process_response(raw_results, document) results.append(graph_document) return results
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~agents~format_scratchpad~log.py
from typing import List, Tuple from langchain.schema.agent import AgentAction def format_log_to_str( intermediate_steps: List[Tuple[AgentAction, str]], observation_prefix: str = "Observation: ", llm_prefix: str = "Thought: ", ) -> str: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}" return thoughts
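# Illustrative usage sketch: how format_log_to_str (defined above) turns
# intermediate agent steps into a ReAct-style scratchpad string. The tool name,
# input and observation below are made-up sample values.
intermediate_steps = [
    (
        AgentAction(
            tool="search",
            tool_input="weather in Moscow",
            log="I should look up the current weather.\nAction: search\nAction Input: weather in Moscow",
        ),
        "It is -5C and snowing in Moscow.",
    )
]
print(format_log_to_str(intermediate_steps))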
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~parsers~language~javascript.py
from typing import Any, List from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter class JavaScriptSegmenter(CodeSegmenter): """Code segmenter for JavaScript.""" def __init__(self, code: str): super().__init__(code) self.source_lines = self.code.splitlines() try: import esprima # noqa: F401 except ImportError: raise ImportError( "Could not import esprima Python package. " "Please install it with `pip install esprima`." ) def is_valid(self) -> bool: import esprima try: esprima.parseScript(self.code) return True except esprima.Error: return False def _extract_code(self, node: Any) -> str: start = node.loc.start.line - 1 end = node.loc.end.line return "\n".join(self.source_lines[start:end]) def extract_functions_classes(self) -> List[str]: import esprima tree = esprima.parseScript(self.code, loc=True) functions_classes = [] for node in tree.body: if isinstance( node, (esprima.nodes.FunctionDeclaration, esprima.nodes.ClassDeclaration), ): functions_classes.append(self._extract_code(node)) return functions_classes def simplify_code(self) -> str: import esprima tree = esprima.parseScript(self.code, loc=True) simplified_lines = self.source_lines[:] for node in tree.body: if isinstance( node, (esprima.nodes.FunctionDeclaration, esprima.nodes.ClassDeclaration), ): start = node.loc.start.line - 1 simplified_lines[start] = f"// Code for: {simplified_lines[start]}" for line_num in range(start + 1, node.loc.end.line): simplified_lines[line_num] = None # type: ignore return "\n".join(line for line in simplified_lines if line is not None)
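# Illustrative usage sketch: segmenting a small JavaScript snippet with
# JavaScriptSegmenter (defined above). Requires `pip install esprima`; the
# JavaScript source below is an arbitrary example.
js_code = """
function greet(name) {
  return "Hello, " + name;
}

const answer = 42;
"""

segmenter = JavaScriptSegmenter(js_code)
if segmenter.is_valid():
    print(segmenter.extract_functions_classes())  # the greet() declaration
    print(segmenter.simplify_code())              # greet() collapsed to a "// Code for:" comment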
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~requests.py
"""DEPRECATED: Kept for backwards compatibility.""" from langchain.utilities import Requests, RequestsWrapper, TextRequestsWrapper __all__ = [ "Requests", "RequestsWrapper", "TextRequestsWrapper", ]
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~llamacpp.py
from __future__ import annotations import logging from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.pydantic_v1 import Field, root_validator from langchain.schema.output import GenerationChunk from langchain.utils import get_pydantic_field_names from langchain.utils.utils import build_extra_kwargs if TYPE_CHECKING: from llama_cpp import LlamaGrammar logger = logging.getLogger(__name__) class LlamaCpp(LLM): """llama.cpp model. To use, you should have the llama-cpp-python library installed, and provide the path to the Llama model as a named parameter to the constructor. Check out: https://github.com/abetlen/llama-cpp-python Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/llama/model") """ client: Any #: :meta private: model_path: str """The path to the Llama model file.""" lora_base: Optional[str] = None """The path to the Llama LoRA base model.""" lora_path: Optional[str] = None """The path to the Llama LoRA. If None, no LoRa is loaded.""" n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(-1, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(True, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" suffix: Optional[str] = Field(None) """A suffix to append to the generated text. If None, no suffix is appended.""" max_tokens: Optional[int] = 256 """The maximum number of tokens to generate.""" temperature: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" logprobs: Optional[int] = Field(None) """The number of logprobs to return. 
If None, no logprobs are returned.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = [] """A list of strings to stop generation when encountered.""" repeat_penalty: Optional[float] = 1.1 """The penalty to apply to repeated tokens.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" last_n_tokens_size: Optional[int] = 64 """The number of tokens to look back when applying the repeat_penalty.""" use_mmap: Optional[bool] = True """Whether to keep the model loaded in RAM""" rope_freq_scale: float = 1.0 """Scale factor for rope sampling.""" rope_freq_base: float = 10000.0 """Base frequency for rope sampling.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Any additional parameters to pass to llama_cpp.Llama.""" streaming: bool = True """Whether to stream the results, token by token.""" grammar_path: Optional[Union[str, Path]] = None """ grammar_path: Path to the .gbnf file that defines formal grammars for constraining model outputs. For instance, the grammar can be used to force the model to generate valid JSON or to speak exclusively in emojis. At most one of grammar_path and grammar should be passed in. """ grammar: Optional[Union[str, LlamaGrammar]] = None """ grammar: formal grammar for constraining model outputs. For instance, the grammar can be used to force the model to generate valid JSON or to speak exclusively in emojis. At most one of grammar_path and grammar should be passed in. """ verbose: bool = True """Print verbose output to stderr.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" try: from llama_cpp import Llama, LlamaGrammar except ImportError: raise ImportError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this embedding model: pip install llama-cpp-python" ) model_path = values["model_path"] model_param_names = [ "rope_freq_scale", "rope_freq_base", "lora_path", "lora_base", "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", "use_mmap", "last_n_tokens_size", "verbose", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] model_params.update(values["model_kwargs"]) try: values["client"] = Llama(model_path, **model_params) except Exception as e: raise ValueError( f"Could not load Llama model from path: {model_path}. " f"Received error {e}" ) if values["grammar"] and values["grammar_path"]: grammar = values["grammar"] grammar_path = values["grammar_path"] raise ValueError( "Can only pass in one of grammar and grammar_path. Received " f"{grammar=} and {grammar_path=}." 
) elif isinstance(values["grammar"], str): values["grammar"] = LlamaGrammar.from_string(values["grammar"]) elif values["grammar_path"]: values["grammar"] = LlamaGrammar.from_file(values["grammar_path"]) else: pass return values @root_validator(pre=True) def build_model_kwargs(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) values["model_kwargs"] = build_extra_kwargs( extra, values, all_required_field_names ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling llama_cpp.""" params = { "suffix": self.suffix, "max_tokens": self.max_tokens, "temperature": self.temperature, "top_p": self.top_p, "logprobs": self.logprobs, "echo": self.echo, "stop_sequences": self.stop, # key here is convention among LLM classes "repeat_penalty": self.repeat_penalty, "top_k": self.top_k, } if self.grammar: params["grammar"] = self.grammar return params @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_path": self.model_path}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "llamacpp" def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """ Performs sanity check, preparing parameters in format needed by llama_cpp. Args: stop (Optional[List[str]]): List of stop sequences for llama_cpp. Returns: Dictionary containing the combined parameters. """ # Raise error if stop sequences are in both input and default params if self.stop and stop is not None: raise ValueError("`stop` found in both the input and default params.") params = self._default_params # llama_cpp expects the "stop" key not this, so we remove it: params.pop("stop_sequences") # then sets it as configured, or default to an empty list: params["stop"] = self.stop or stop or [] return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the Llama model and return the output. Args: prompt: The prompt to use for generation. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/local/llama/model.bin") llm("This is a prompt.") """ if self.streaming: # If streaming is enabled, we use the stream # method that yields as they are generated # and return the combined strings from the first choices's text: combined_text_output = "" for chunk in self._stream( prompt=prompt, stop=stop, run_manager=run_manager, **kwargs, ): combined_text_output += chunk.text return combined_text_output else: params = self._get_parameters(stop) params = {**params, **kwargs} result = self.client(prompt=prompt, **params) return result["choices"][0]["text"] def _stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: """Yields results objects as they are generated in real time. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. 
Yields: A dictionary like objects containing a string token and metadata. See llama-cpp-python docs and below for more. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp( model_path="/path/to/local/model.bin", temperature = 0.5 ) for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'", stop=["'","\n"]): result = chunk["choices"][0] print(result["text"], end='', flush=True) """ params = {**self._get_parameters(stop), **kwargs} result = self.client(prompt=prompt, stream=True, **params) for part in result: logprobs = part["choices"][0].get("logprobs", None) chunk = GenerationChunk( text=part["choices"][0]["text"], generation_info={"logprobs": logprobs}, ) yield chunk if run_manager: run_manager.on_llm_new_token( token=chunk.text, verbose=self.verbose, log_probs=logprobs ) def get_num_tokens(self, text: str) -> int: tokenized_text = self.client.tokenize(text.encode("utf-8")) return len(tokenized_text)
[]
2024-01-10
ai-forever/gigachain
libs~langchain~tests~unit_tests~chat_models~test_gigachat.py
# flake8: noqa: I001 from typing import Any, AsyncGenerator, Iterable, List import pytest from pytest_mock import MockerFixture from gigachat.models import ( ChatCompletion, ChatCompletionChunk, Choices, ChoicesChunk, Messages, MessagesChunk, Usage, ) from langchain.chat_models.gigachat import ( GigaChat, _convert_dict_to_message, _convert_message_to_dict, ) from langchain.schema.messages import ( AIMessage, AIMessageChunk, ChatMessage, FunctionMessage, HumanMessage, SystemMessage, ) from ..callbacks.fake_callback_handler import ( FakeAsyncCallbackHandler, FakeCallbackHandler, ) def test__convert_dict_to_message_system() -> None: message = Messages(role="system", content="foo") expected = SystemMessage(content="foo") actual = _convert_dict_to_message(message) assert actual == expected def test__convert_dict_to_message_human() -> None: message = Messages(role="user", content="foo") expected = HumanMessage(content="foo") actual = _convert_dict_to_message(message) assert actual == expected def test__convert_dict_to_message_ai() -> None: message = Messages(role="assistant", content="foo") expected = AIMessage(content="foo") actual = _convert_dict_to_message(message) assert actual == expected def test__convert_dict_to_message_type_error() -> None: message = Messages(role="user", content="foo") message.role = "bar" with pytest.raises(TypeError): _convert_dict_to_message(message) def test__convert_message_to_dict_system() -> None: message = SystemMessage(content="foo") expected = Messages(role="system", content="foo") actual = _convert_message_to_dict(message) assert actual == expected def test__convert_message_to_dict_human() -> None: message = HumanMessage(content="foo") expected = Messages(role="user", content="foo") actual = _convert_message_to_dict(message) assert actual == expected def test__convert_message_to_dict_ai() -> None: message = AIMessage(content="foo") expected = Messages(role="assistant", content="foo") actual = _convert_message_to_dict(message) assert actual == expected @pytest.mark.parametrize("role", ("system", "user", "assistant")) def test__convert_message_to_dict_chat(role: str) -> None: message = ChatMessage(role=role, content="foo") expected = Messages(role=role, content="foo") actual = _convert_message_to_dict(message) assert actual == expected def test__convert_message_to_dict_type_error() -> None: message = FunctionMessage(name="bar", content="foo") with pytest.raises(TypeError): _convert_message_to_dict(message) @pytest.fixture def chat_completion() -> ChatCompletion: return ChatCompletion( choices=[ Choices( message=Messages( role="assistant", content="Bar Baz", ), index=0, finish_reason="stop", ), ], created=1678878333, model="GigaChat:v1.2.19.2", usage=Usage( prompt_tokens=18, completion_tokens=68, total_tokens=86, ), object="chat.completion", ) @pytest.fixture def chat_completion_stream() -> List[ChatCompletionChunk]: return [ ChatCompletionChunk( choices=[ ChoicesChunk( delta=MessagesChunk(content="Bar Baz"), index=0, ), ], created=1695802242, model="GigaChat:v1.2.19.2", object="chat.completion", ), ChatCompletionChunk( choices=[ ChoicesChunk( delta=MessagesChunk(content=" Stream"), index=0, finish_reason="stop", ), ], created=1695802242, model="GigaChat:v1.2.19.2", object="chat.completion", ), ] @pytest.fixture def patch_gigachat( mocker: MockerFixture, chat_completion: ChatCompletion, chat_completion_stream: List[ChatCompletionChunk], ) -> None: print(type(mocker)) mock = mocker.Mock() mock.chat.return_value = chat_completion mock.stream.return_value = 
chat_completion_stream mocker.patch("gigachat.GigaChat", return_value=mock) @pytest.fixture def patch_gigachat_achat( mocker: MockerFixture, chat_completion: ChatCompletion ) -> None: async def return_value_coroutine(value: Any) -> Any: return value mock = mocker.Mock() mock.achat.return_value = return_value_coroutine(chat_completion) mocker.patch("gigachat.GigaChat", return_value=mock) @pytest.fixture def patch_gigachat_astream( mocker: MockerFixture, chat_completion_stream: List[ChatCompletionChunk] ) -> None: async def return_value_async_generator(value: Iterable) -> AsyncGenerator: for chunk in value: yield chunk mock = mocker.Mock() mock.astream.return_value = return_value_async_generator(chat_completion_stream) mocker.patch("gigachat.GigaChat", return_value=mock) def test_gigachat_predict(patch_gigachat: None) -> None: expected = "Bar Baz" llm = GigaChat() actual = llm.predict("bar") assert actual == expected def test_gigachat_predict_stream(patch_gigachat: None) -> None: expected = "Bar Baz Stream" llm = GigaChat() callback_handler = FakeCallbackHandler() actual = llm.predict("bar", stream=True, callbacks=[callback_handler]) assert actual == expected assert callback_handler.llm_streams == 2 @pytest.mark.asyncio() async def test_gigachat_apredict(patch_gigachat_achat: None) -> None: expected = "Bar Baz" llm = GigaChat() actual = await llm.apredict("bar") assert actual == expected @pytest.mark.asyncio() async def test_gigachat_apredict_stream(patch_gigachat_astream: None) -> None: expected = "Bar Baz Stream" llm = GigaChat() callback_handler = FakeAsyncCallbackHandler() actual = await llm.apredict("bar", stream=True, callbacks=[callback_handler]) assert actual == expected assert callback_handler.llm_streams == 2 def test_gigachat_stream(patch_gigachat: None) -> None: expected = [AIMessageChunk(content="Bar Baz"), AIMessageChunk(content=" Stream")] llm = GigaChat() actual = [chunk for chunk in llm.stream("bar")] assert actual == expected @pytest.mark.asyncio() async def test_gigachat_astream(patch_gigachat_astream: None) -> None: expected = [AIMessageChunk(content="Bar Baz"), AIMessageChunk(content=" Stream")] llm = GigaChat() actual = [chunk async for chunk in llm.astream("bar")] assert actual == expected
[ "foo" ]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~snowflake_loader.py
from __future__ import annotations from typing import Any, Dict, Iterator, List, Optional, Tuple from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader class SnowflakeLoader(BaseLoader): """Load from `Snowflake` API. Each document represents one row of the result. The `page_content_columns` are written into the `page_content` of the document. The `metadata_columns` are written into the `metadata` of the document. By default, all columns are written into the `page_content` and none into the `metadata`. """ def __init__( self, query: str, user: str, password: str, account: str, warehouse: str, role: str, database: str, schema: str, parameters: Optional[Dict[str, Any]] = None, page_content_columns: Optional[List[str]] = None, metadata_columns: Optional[List[str]] = None, ): """Initialize Snowflake document loader. Args: query: The query to run in Snowflake. user: Snowflake user. password: Snowflake password. account: Snowflake account. warehouse: Snowflake warehouse. role: Snowflake role. database: Snowflake database schema: Snowflake schema parameters: Optional. Parameters to pass to the query. page_content_columns: Optional. Columns written to Document `page_content`. metadata_columns: Optional. Columns written to Document `metadata`. """ self.query = query self.user = user self.password = password self.account = account self.warehouse = warehouse self.role = role self.database = database self.schema = schema self.parameters = parameters self.page_content_columns = ( page_content_columns if page_content_columns is not None else ["*"] ) self.metadata_columns = metadata_columns if metadata_columns is not None else [] def _execute_query(self) -> List[Dict[str, Any]]: try: import snowflake.connector except ImportError as ex: raise ImportError( "Could not import snowflake-connector-python package. " "Please install it with `pip install snowflake-connector-python`." 
) from ex conn = snowflake.connector.connect( user=self.user, password=self.password, account=self.account, warehouse=self.warehouse, role=self.role, database=self.database, schema=self.schema, parameters=self.parameters, ) try: cur = conn.cursor() cur.execute("USE DATABASE " + self.database) cur.execute("USE SCHEMA " + self.schema) cur.execute(self.query, self.parameters) query_result = cur.fetchall() column_names = [column[0] for column in cur.description] query_result = [dict(zip(column_names, row)) for row in query_result] except Exception as e: print(f"An error occurred: {e}") query_result = [] finally: cur.close() return query_result def _get_columns( self, query_result: List[Dict[str, Any]] ) -> Tuple[List[str], List[str]]: page_content_columns = ( self.page_content_columns if self.page_content_columns else [] ) metadata_columns = self.metadata_columns if self.metadata_columns else [] if page_content_columns is None and query_result: page_content_columns = list(query_result[0].keys()) if metadata_columns is None: metadata_columns = [] return page_content_columns or [], metadata_columns def lazy_load(self) -> Iterator[Document]: query_result = self._execute_query() if isinstance(query_result, Exception): print(f"An error occurred during the query: {query_result}") return [] page_content_columns, metadata_columns = self._get_columns(query_result) if "*" in page_content_columns: page_content_columns = list(query_result[0].keys()) for row in query_result: page_content = "\n".join( f"{k}: {v}" for k, v in row.items() if k in page_content_columns ) metadata = {k: v for k, v in row.items() if k in metadata_columns} doc = Document(page_content=page_content, metadata=metadata) yield doc def load(self) -> List[Document]: """Load data into document objects.""" return list(self.lazy_load())
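# Illustrative usage sketch: loading rows from Snowflake as documents with
# SnowflakeLoader (defined above). Every connection value below is a placeholder
# for your own Snowflake account; requires `pip install snowflake-connector-python`.
loader = SnowflakeLoader(
    query="SELECT question, answer, survey_id FROM my_schema.responses LIMIT 10",
    user="<user>",
    password="<password>",
    account="<account>",
    warehouse="<warehouse>",
    role="<role>",
    database="<database>",
    schema="<schema>",
    page_content_columns=["question", "answer"],
    metadata_columns=["survey_id"],
)
docs = loader.load()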
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~news.py
"""Loader that uses unstructured to load HTML files.""" import logging from typing import Any, Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) class NewsURLLoader(BaseLoader): """Load news articles from URLs using `Unstructured`. Args: urls: URLs to load. Each is loaded into its own document. text_mode: If True, extract text from URL and use that for page content. Otherwise, extract raw HTML. nlp: If True, perform NLP on the extracted contents, like providing a summary and extracting keywords. continue_on_failure: If True, continue loading documents even if loading fails for a particular URL. show_progress_bar: If True, use tqdm to show a loading progress bar. Requires tqdm to be installed, ``pip install tqdm``. **newspaper_kwargs: Any additional named arguments to pass to newspaper.Article(). Example: .. code-block:: python from langchain.document_loaders import NewsURLLoader loader = NewsURLLoader( urls=["<url-1>", "<url-2>"], ) docs = loader.load() Newspaper reference: https://newspaper.readthedocs.io/en/latest/ """ def __init__( self, urls: List[str], text_mode: bool = True, nlp: bool = False, continue_on_failure: bool = True, show_progress_bar: bool = False, **newspaper_kwargs: Any, ) -> None: """Initialize with file path.""" try: import newspaper # noqa:F401 self.__version = newspaper.__version__ except ImportError: raise ImportError( "newspaper package not found, please install it with " "`pip install newspaper3k`" ) self.urls = urls self.text_mode = text_mode self.nlp = nlp self.continue_on_failure = continue_on_failure self.newspaper_kwargs = newspaper_kwargs self.show_progress_bar = show_progress_bar def load(self) -> List[Document]: iter = self.lazy_load() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. " "Please install with 'pip install tqdm' or set " "show_progress_bar=False." ) from e iter = tqdm(iter) return list(iter) def lazy_load(self) -> Iterator[Document]: try: from newspaper import Article except ImportError as e: raise ImportError( "Cannot import newspaper, please install with `pip install newspaper3k`" ) from e for url in self.urls: try: article = Article(url, **self.newspaper_kwargs) article.download() article.parse() if self.nlp: article.nlp() except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching or processing {url}, exception: {e}") continue else: raise e metadata = { "title": getattr(article, "title", ""), "link": getattr(article, "url", getattr(article, "canonical_link", "")), "authors": getattr(article, "authors", []), "language": getattr(article, "meta_lang", ""), "description": getattr(article, "meta_description", ""), "publish_date": getattr(article, "publish_date", ""), } if self.text_mode: content = article.text else: content = article.html if self.nlp: metadata["keywords"] = getattr(article, "keywords", []) metadata["summary"] = getattr(article, "summary", "") yield Document(page_content=content, metadata=metadata)
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~vectorstores~zilliz.py
from __future__ import annotations import logging from typing import Any, Dict, List, Optional from langchain.schema.embeddings import Embeddings from langchain.vectorstores.milvus import Milvus logger = logging.getLogger(__name__) class Zilliz(Milvus): """`Zilliz` vector store. You need to have `pymilvus` installed and a running Zilliz database. See the following documentation for how to run a Zilliz instance: https://docs.zilliz.com/docs/create-cluster IF USING L2/IP metric IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA. Args: embedding_function (Embeddings): Function used to embed the text. collection_name (str): Which Zilliz collection to use. Defaults to "LangChainCollection". connection_args (Optional[dict[str, any]]): The connection args used for this class comes in the form of a dict. consistency_level (str): The consistency level to use for a collection. Defaults to "Session". index_params (Optional[dict]): Which index params to use. Defaults to HNSW/AUTOINDEX depending on service. search_params (Optional[dict]): Which search params to use. Defaults to default of index. drop_old (Optional[bool]): Whether to drop the current collection. Defaults to False. The connection args used for this class comes in the form of a dict, here are a few of the options: address (str): The actual address of Zilliz instance. Example address: "localhost:19530" uri (str): The uri of Zilliz instance. Example uri: "https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com", host (str): The host of Zilliz instance. Default at "localhost", PyMilvus will fill in the default host if only port is provided. port (str/int): The port of Zilliz instance. Default at 19530, PyMilvus will fill in the default port if only host is provided. user (str): Use which user to connect to Zilliz instance. If user and password are provided, we will add related header in every RPC call. password (str): Required when user is provided. The password corresponding to the user. token (str): API key, for serverless clusters which can be used as replacements for user and password. secure (bool): Default is false. If set to true, tls will be enabled. client_key_path (str): If use tls two-way authentication, need to write the client.key path. client_pem_path (str): If use tls two-way authentication, need to write the client.pem path. ca_pem_path (str): If use tls two-way authentication, need to write the ca.pem path. server_pem_path (str): If use tls one-way authentication, need to write the server.pem path. server_name (str): If use tls, need to write the common name. Example: .. code-block:: python from langchain.vectorstores import Zilliz from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() # Connect to a Zilliz instance milvus_store = Milvus( embedding_function = embedding, collection_name = "LangChainCollection", connection_args = { "uri": "https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com", "user": "temp", "password": "temp", "token": "temp", # API key as replacements for user and password "secure": True } drop_old: True, ) Raises: ValueError: If the pymilvus python package is not installed. 
""" def _create_index(self) -> None: """Create a index on the collection""" from pymilvus import Collection, MilvusException if isinstance(self.col, Collection) and self._get_index() is None: try: # If no index params, use a default AutoIndex based one if self.index_params is None: self.index_params = { "metric_type": "L2", "index_type": "AUTOINDEX", "params": {}, } try: self.col.create_index( self._vector_field, index_params=self.index_params, using=self.alias, ) # If default did not work, most likely Milvus self-hosted except MilvusException: # Use HNSW based index self.index_params = { "metric_type": "L2", "index_type": "HNSW", "params": {"M": 8, "efConstruction": 64}, } self.col.create_index( self._vector_field, index_params=self.index_params, using=self.alias, ) logger.debug( "Successfully created an index on collection: %s", self.collection_name, ) except MilvusException as e: logger.error( "Failed to create an index on collection: %s", self.collection_name ) raise e @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = "LangChainCollection", connection_args: Optional[Dict[str, Any]] = None, consistency_level: str = "Session", index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any, ) -> Zilliz: """Create a Zilliz collection, indexes it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collection_name (str, optional): Collection name to use. Defaults to "LangChainCollection". connection_args (dict[str, Any], optional): Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION. consistency_level (str, optional): Which consistency level to use. Defaults to "Session". index_params (Optional[dict], optional): Which index_params to use. Defaults to None. search_params (Optional[dict], optional): Which search params to use. Defaults to None. drop_old (Optional[bool], optional): Whether to drop the collection with that name if it exists. Defaults to False. Returns: Zilliz: Zilliz Vector Store """ vector_db = cls( embedding_function=embedding, collection_name=collection_name, connection_args=connection_args or {}, consistency_level=consistency_level, index_params=index_params, search_params=search_params, drop_old=drop_old, **kwargs, ) vector_db.add_texts(texts=texts, metadatas=metadatas) return vector_db
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~html_bs.py
import logging from typing import Dict, List, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) class BSHTMLLoader(BaseLoader): """Load `HTML` files and parse them with `beautiful soup`.""" def __init__( self, file_path: str, open_encoding: Union[str, None] = None, bs_kwargs: Union[dict, None] = None, get_text_separator: str = "", ) -> None: """Initialise with path, and optionally, file encoding to use, and any kwargs to pass to the BeautifulSoup object. Args: file_path: The path to the file to load. open_encoding: The encoding to use when opening the file. bs_kwargs: Any kwargs to pass to the BeautifulSoup object. get_text_separator: The separator to use when calling get_text on the soup. """ try: import bs4 # noqa:F401 except ImportError: raise ImportError( "beautifulsoup4 package not found, please install it with " "`pip install beautifulsoup4`" ) self.file_path = file_path self.open_encoding = open_encoding if bs_kwargs is None: bs_kwargs = {"features": "lxml"} self.bs_kwargs = bs_kwargs self.get_text_separator = get_text_separator def load(self) -> List[Document]: """Load HTML document into document objects.""" from bs4 import BeautifulSoup with open(self.file_path, "r", encoding=self.open_encoding) as f: soup = BeautifulSoup(f, **self.bs_kwargs) text = soup.get_text(self.get_text_separator) if soup.title: title = str(soup.title.string) else: title = "" metadata: Dict[str, Union[str, None]] = { "source": self.file_path, "title": title, } return [Document(page_content=text, metadata=metadata)]
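# Illustrative usage sketch: parsing a local HTML file with BSHTMLLoader
# (defined above). "example.html" is a placeholder path; requires
# `beautifulsoup4` (and `lxml`, the default parser backend).
loader = BSHTMLLoader("example.html", get_text_separator="\n")
docs = loader.load()
print(docs[0].metadata["title"])
print(docs[0].page_content[:200])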
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~github.py
from abc import ABC from datetime import datetime from typing import Dict, Iterator, List, Literal, Optional, Union import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.pydantic_v1 import BaseModel, root_validator, validator from langchain.utils import get_from_dict_or_env class BaseGitHubLoader(BaseLoader, BaseModel, ABC): """Load `GitHub` repository Issues.""" repo: str """Name of repository""" access_token: str """Personal access token - see https://github.com/settings/tokens?type=beta""" github_api_url: str = "https://api.github.com" """URL of GitHub API""" @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that access token exists in environment.""" values["access_token"] = get_from_dict_or_env( values, "access_token", "GITHUB_PERSONAL_ACCESS_TOKEN" ) return values @property def headers(self) -> Dict[str, str]: return { "Accept": "application/vnd.github+json", "Authorization": f"Bearer {self.access_token}", } class GitHubIssuesLoader(BaseGitHubLoader): """Load issues of a GitHub repository.""" include_prs: bool = True """If True include Pull Requests in results, otherwise ignore them.""" milestone: Union[int, Literal["*", "none"], None] = None """If integer is passed, it should be a milestone's number field. If the string '*' is passed, issues with any milestone are accepted. If the string 'none' is passed, issues without milestones are returned. """ state: Optional[Literal["open", "closed", "all"]] = None """Filter on issue state. Can be one of: 'open', 'closed', 'all'.""" assignee: Optional[str] = None """Filter on assigned user. Pass 'none' for no user and '*' for any user.""" creator: Optional[str] = None """Filter on the user that created the issue.""" mentioned: Optional[str] = None """Filter on a user that's mentioned in the issue.""" labels: Optional[List[str]] = None """Label names to filter one. Example: bug,ui,@high.""" sort: Optional[Literal["created", "updated", "comments"]] = None """What to sort results by. Can be one of: 'created', 'updated', 'comments'. Default is 'created'.""" direction: Optional[Literal["asc", "desc"]] = None """The direction to sort the results by. Can be one of: 'asc', 'desc'.""" since: Optional[str] = None """Only show notifications updated after the given time. This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ.""" @validator("since") def validate_since(cls, v: Optional[str]) -> Optional[str]: if v: try: datetime.strptime(v, "%Y-%m-%dT%H:%M:%SZ") except ValueError: raise ValueError( "Invalid value for 'since'. Expected a date string in " f"YYYY-MM-DDTHH:MM:SSZ format. Received: {v}" ) return v def lazy_load(self) -> Iterator[Document]: """ Get issues of a GitHub repository. Returns: A list of Documents with attributes: - page_content - metadata - url - title - creator - created_at - last_update_time - closed_time - number of comments - state - labels - assignee - assignees - milestone - locked - number - is_pull_request """ url: Optional[str] = self.url while url: response = requests.get(url, headers=self.headers) response.raise_for_status() issues = response.json() for issue in issues: doc = self.parse_issue(issue) if not self.include_prs and doc.metadata["is_pull_request"]: continue yield doc if response.links and response.links.get("next"): url = response.links["next"]["url"] else: url = None def load(self) -> List[Document]: """ Get issues of a GitHub repository. 
Returns: A list of Documents with attributes: - page_content - metadata - url - title - creator - created_at - last_update_time - closed_time - number of comments - state - labels - assignee - assignees - milestone - locked - number - is_pull_request """ return list(self.lazy_load()) def parse_issue(self, issue: dict) -> Document: """Create Document objects from a list of GitHub issues.""" metadata = { "url": issue["html_url"], "title": issue["title"], "creator": issue["user"]["login"], "created_at": issue["created_at"], "comments": issue["comments"], "state": issue["state"], "labels": [label["name"] for label in issue["labels"]], "assignee": issue["assignee"]["login"] if issue["assignee"] else None, "milestone": issue["milestone"]["title"] if issue["milestone"] else None, "locked": issue["locked"], "number": issue["number"], "is_pull_request": "pull_request" in issue, } content = issue["body"] if issue["body"] is not None else "" return Document(page_content=content, metadata=metadata) @property def query_params(self) -> str: """Create query parameters for GitHub API.""" labels = ",".join(self.labels) if self.labels else self.labels query_params_dict = { "milestone": self.milestone, "state": self.state, "assignee": self.assignee, "creator": self.creator, "mentioned": self.mentioned, "labels": labels, "sort": self.sort, "direction": self.direction, "since": self.since, } query_params_list = [ f"{k}={v}" for k, v in query_params_dict.items() if v is not None ] query_params = "&".join(query_params_list) return query_params @property def url(self) -> str: """Create URL for GitHub API.""" return f"{self.github_api_url}/repos/{self.repo}/issues?{self.query_params}"
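# Illustrative usage sketch: loading open bug issues (excluding pull requests)
# with GitHubIssuesLoader (defined above). Assumes GITHUB_PERSONAL_ACCESS_TOKEN
# is set (or pass access_token=...); the repository name and label are examples.
loader = GitHubIssuesLoader(
    repo="ai-forever/gigachain",
    state="open",
    include_prs=False,
    labels=["bug"],
)
docs = loader.load()
for doc in docs:
    print(doc.metadata["number"], doc.metadata["title"], doc.metadata["url"])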
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~tools~playwright~current_page.py
from __future__ import annotations from typing import Optional, Type from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.pydantic_v1 import BaseModel from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page class CurrentWebPageTool(BaseBrowserTool): """Tool for getting the URL of the current webpage.""" name: str = "current_webpage" description: str = "Returns the URL of the current page" args_schema: Type[BaseModel] = BaseModel def _run( self, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) return str(page.url) async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) return str(page.url)
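# Illustrative usage sketch: reading the current page URL with
# CurrentWebPageTool (defined above). Assumes Playwright and its browsers are
# installed and that the sync-browser helper from
# langchain.tools.playwright.utils is available; the target URL is arbitrary.
from langchain.tools.playwright.utils import create_sync_playwright_browser

sync_browser = create_sync_playwright_browser()
tool = CurrentWebPageTool.from_browser(sync_browser=sync_browser)
page = get_current_page(sync_browser)
page.goto("https://python.langchain.com")
print(tool.run({}))  # prints the URL the browser is currently on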
[ "Returns the URL of the current page" ]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~retrievers~vespa_retriever.py
from __future__ import annotations import json from typing import Any, Dict, List, Literal, Optional, Sequence, Union from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document class VespaRetriever(BaseRetriever): """`Vespa` retriever.""" app: Any """Vespa application to query.""" body: Dict """Body of the query.""" content_field: str """Name of the content field.""" metadata_fields: Sequence[str] """Names of the metadata fields.""" def _query(self, body: Dict) -> List[Document]: response = self.app.query(body) if not str(response.status_code).startswith("2"): raise RuntimeError( "Could not retrieve data from Vespa. Error code: {}".format( response.status_code ) ) root = response.json["root"] if "errors" in root: raise RuntimeError(json.dumps(root["errors"])) docs = [] for child in response.hits: page_content = child["fields"].pop(self.content_field, "") if self.metadata_fields == "*": metadata = child["fields"] else: metadata = {mf: child["fields"].get(mf) for mf in self.metadata_fields} metadata["id"] = child["id"] docs.append(Document(page_content=page_content, metadata=metadata)) return docs def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: body = self.body.copy() body["query"] = query return self._query(body) def get_relevant_documents_with_filter( self, query: str, *, _filter: Optional[str] = None ) -> List[Document]: body = self.body.copy() _filter = f" and {_filter}" if _filter else "" body["yql"] = body["yql"] + _filter body["query"] = query return self._query(body) @classmethod def from_params( cls, url: str, content_field: str, *, k: Optional[int] = None, metadata_fields: Union[Sequence[str], Literal["*"]] = (), sources: Union[Sequence[str], Literal["*"], None] = None, _filter: Optional[str] = None, yql: Optional[str] = None, **kwargs: Any, ) -> VespaRetriever: """Instantiate retriever from params. Args: url (str): Vespa app URL. content_field (str): Field in results to return as Document page_content. k (Optional[int]): Number of Documents to return. Defaults to None. metadata_fields(Sequence[str] or "*"): Fields in results to include in document metadata. Defaults to empty tuple (). sources (Sequence[str] or "*" or None): Sources to retrieve from. Defaults to None. _filter (Optional[str]): Document filter condition expressed in YQL. Defaults to None. yql (Optional[str]): Full YQL query to be used. Should not be specified if _filter or sources are specified. Defaults to None. kwargs (Any): Keyword arguments added to query body. Returns: VespaRetriever: Instantiated VespaRetriever. """ try: from vespa.application import Vespa except ImportError: raise ImportError( "pyvespa is not installed, please install with `pip install pyvespa`" ) app = Vespa(url) body = kwargs.copy() if yql and (sources or _filter): raise ValueError( "yql should only be specified if both sources and _filter are not " "specified." ) else: if metadata_fields == "*": _fields = "*" body["summary"] = "short" else: _fields = ", ".join([content_field] + list(metadata_fields or [])) _sources = ", ".join(sources) if isinstance(sources, Sequence) else "*" _filter = f" and {_filter}" if _filter else "" yql = f"select {_fields} from sources {_sources} where userQuery(){_filter}" body["yql"] = yql if k: body["hits"] = k return cls( app=app, body=body, content_field=content_field, metadata_fields=metadata_fields, )
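# Illustrative usage sketch: building a VespaRetriever (defined above) with
# from_params. The URL and field names are placeholders for an existing Vespa
# application; requires `pip install pyvespa`.
retriever = VespaRetriever.from_params(
    "http://localhost:8080",
    "content",
    k=5,
    metadata_fields=["title"],
    sources="*",
)
docs = retriever.get_relevant_documents("what is vespa?")
for doc in docs:
    print(doc.metadata.get("title"), doc.page_content[:100])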
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~llms~minimax.py
"""Wrapper around Minimax APIs.""" from __future__ import annotations import logging from typing import ( Any, Dict, List, Optional, ) import requests from langchain.callbacks.manager import ( CallbackManagerForLLMRun, ) from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import BaseModel, Field, root_validator from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class _MinimaxEndpointClient(BaseModel): """An API client that talks to a Minimax llm endpoint.""" host: str group_id: str api_key: str api_url: str @root_validator(pre=True, allow_reuse=True) def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "api_url" not in values: host = values["host"] group_id = values["group_id"] api_url = f"{host}/v1/text/chatcompletion?GroupId={group_id}" values["api_url"] = api_url return values def post(self, request: Any) -> Any: headers = {"Authorization": f"Bearer {self.api_key}"} response = requests.post(self.api_url, headers=headers, json=request) # TODO: error handling and automatic retries if not response.ok: raise ValueError(f"HTTP {response.status_code} error: {response.text}") if response.json()["base_resp"]["status_code"] > 0: raise ValueError( f"API {response.json()['base_resp']['status_code']}" f" error: {response.json()['base_resp']['status_msg']}" ) return response.json()["reply"] class MinimaxCommon(BaseModel): """Common parameters for Minimax large language models.""" _client: _MinimaxEndpointClient model: str = "abab5.5-chat" """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.7 """A non-negative float that tunes the degree of randomness in generation.""" top_p: float = 0.95 """Total probability mass of tokens to consider at each step.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" minimax_api_host: Optional[str] = None minimax_group_id: Optional[str] = None minimax_api_key: Optional[str] = None @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["minimax_api_key"] = get_from_dict_or_env( values, "minimax_api_key", "MINIMAX_API_KEY" ) values["minimax_group_id"] = get_from_dict_or_env( values, "minimax_group_id", "MINIMAX_GROUP_ID" ) # Get custom api url from environment. values["minimax_api_host"] = get_from_dict_or_env( values, "minimax_api_host", "MINIMAX_API_HOST", default="https://api.minimax.chat", ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return { "model": self.model, "tokens_to_generate": self.max_tokens, "temperature": self.temperature, "top_p": self.top_p, **self.model_kwargs, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "minimax" def __init__(self, **data: Any): super().__init__(**data) self._client = _MinimaxEndpointClient( host=self.minimax_api_host, api_key=self.minimax_api_key, group_id=self.minimax_group_id, ) class Minimax(MinimaxCommon, LLM): """Wrapper around Minimax large language models. 
To use, you should have the environment variable ``MINIMAX_API_KEY`` and ``MINIMAX_GROUP_ID`` set with your API key, or pass them as a named parameter to the constructor. Example: . code-block:: python from langchain.llms.minimax import Minimax minimax = Minimax(model="<model_name>", minimax_api_key="my-api-key", minimax_group_id="my-group-id") """ def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: r"""Call out to Minimax's completion endpoint to chat Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example: .. code-block:: python response = minimax("Tell me a joke.") """ request = self._default_params request["messages"] = [{"sender_type": "USER", "text": prompt}] request.update(kwargs) text = self._client.post(request) if stop is not None: # This is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~vectorstores~rocksetdb.py
from __future__ import annotations

import logging
from enum import Enum
from typing import Any, Iterable, List, Optional, Tuple

from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore

logger = logging.getLogger(__name__)


class Rockset(VectorStore):
    """`Rockset` vector store.

    To use, you should have the `rockset` python package installed. Note that to
    use this, the collection being used must already exist in your Rockset
    instance. You must also ensure you use a Rockset ingest transformation to
    apply `VECTOR_ENFORCE` on the column being used to store `embedding_key` in
    the collection.
    See: https://rockset.com/blog/introducing-vector-search-on-rockset/ for more
    details.

    Everything below assumes `commons` Rockset workspace.

    Example:
        .. code-block:: python

            from langchain.vectorstores import Rockset
            from langchain.embeddings.openai import OpenAIEmbeddings
            import rockset

            # Make sure you use the right host (region) for your Rockset instance
            # and APIKEY has both read-write access to your collection.
            rs = rockset.RocksetClient(host=rockset.Regions.use1a1, api_key="***")
            collection_name = "langchain_demo"
            embeddings = OpenAIEmbeddings()
            vectorstore = Rockset(rs, collection_name, embeddings,
                                  "description", "description_embedding")
    """

    def __init__(
        self,
        client: Any,
        embeddings: Embeddings,
        collection_name: str,
        text_key: str,
        embedding_key: str,
        workspace: str = "commons",
    ):
        """Initialize with Rockset client.

        Args:
            client: Rockset client object.
            collection_name: Rockset collection to insert docs / query against.
            embeddings: Langchain Embeddings object to use to generate
                embedding for given text.
            text_key: column in Rockset collection to use to store the text.
            embedding_key: column in Rockset collection to use to store the
                embedding. Note: We must apply `VECTOR_ENFORCE()` on this column
                via Rockset ingest transformation.
        """
        try:
            from rockset import RocksetClient
        except ImportError:
            raise ImportError(
                "Could not import rockset client python package. "
                "Please install it with `pip install rockset`."
            )

        if not isinstance(client, RocksetClient):
            raise ValueError(
                f"client should be an instance of rockset.RocksetClient, "
                f"got {type(client)}"
            )

        # TODO: check that `collection_name` exists in rockset. Create if not.
        self._client = client
        self._collection_name = collection_name
        self._embeddings = embeddings
        self._text_key = text_key
        self._embedding_key = embedding_key
        self._workspace = workspace

        try:
            self._client.set_application("langchain")
        except AttributeError:
            # ignore
            pass

    @property
    def embeddings(self) -> Embeddings:
        return self._embeddings

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        batch_size: int = 32,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids to associate with the texts.
            batch_size: Send documents in batches to rockset.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        batch: list[dict] = []
        stored_ids = []
        for i, text in enumerate(texts):
            if len(batch) == batch_size:
                stored_ids += self._write_documents_to_rockset(batch)
                batch = []
            doc = {}
            if metadatas and len(metadatas) > i:
                doc = metadatas[i]
            if ids and len(ids) > i:
                doc["_id"] = ids[i]
            doc[self._text_key] = text
            doc[self._embedding_key] = self._embeddings.embed_query(text)
            batch.append(doc)
        if len(batch) > 0:
            stored_ids += self._write_documents_to_rockset(batch)
            batch = []
        return stored_ids

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        client: Any = None,
        collection_name: str = "",
        text_key: str = "",
        embedding_key: str = "",
        ids: Optional[List[str]] = None,
        batch_size: int = 32,
        **kwargs: Any,
    ) -> Rockset:
        """Create Rockset wrapper with existing texts.

        This is intended as a quicker way to get started.
        """
        # Sanitize inputs
        assert client is not None, "Rockset Client cannot be None"
        assert collection_name, "Collection name cannot be empty"
        assert text_key, "Text key name cannot be empty"
        assert embedding_key, "Embedding key cannot be empty"

        rockset = cls(client, embedding, collection_name, text_key, embedding_key)
        rockset.add_texts(texts, metadatas, ids, batch_size)
        return rockset

    # Rockset supports these vector distance functions.
    class DistanceFunction(Enum):
        COSINE_SIM = "COSINE_SIM"
        EUCLIDEAN_DIST = "EUCLIDEAN_DIST"
        DOT_PRODUCT = "DOT_PRODUCT"

        # how to sort results for "similarity"
        def order_by(self) -> str:
            if self.value == "EUCLIDEAN_DIST":
                return "ASC"
            return "DESC"

    def similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
        where_str: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Perform a similarity search with Rockset.

        Args:
            query (str): Text to look up documents similar to.
            distance_func (DistanceFunction): how to compute
                distance between two vectors in Rockset.
            k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            where_str (Optional[str], optional): Metadata filters supplied as a
                SQL `where` condition string. Defaults to None.
                eg. "price<=70.0 AND brand='Nintendo'"
                NOTE: Please do not let end-users fill this in directly, and
                always be aware of SQL injection.

        Returns:
            List[Tuple[Document, float]]: List of documents with their relevance score
        """
        return self.similarity_search_by_vector_with_relevance_scores(
            self._embeddings.embed_query(query),
            k,
            distance_func,
            where_str,
            **kwargs,
        )

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
        where_str: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Same as `similarity_search_with_relevance_scores` but
        doesn't return the scores.
        """
        return self.similarity_search_by_vector(
            self._embeddings.embed_query(query),
            k,
            distance_func,
            where_str,
            **kwargs,
        )

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
        where_str: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Accepts a query_embedding (vector), and returns documents with
        similar embeddings."""
        docs_and_scores = self.similarity_search_by_vector_with_relevance_scores(
            embedding, k, distance_func, where_str, **kwargs
        )
        return [doc for doc, _ in docs_and_scores]

    def similarity_search_by_vector_with_relevance_scores(
        self,
        embedding: List[float],
        k: int = 4,
        distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
        where_str: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Accepts a query_embedding (vector), and returns documents with
        similar embeddings along with their relevance scores."""
        q_str = self._build_query_sql(embedding, distance_func, k, where_str)
        try:
            query_response = self._client.Queries.query(sql={"query": q_str})
        except Exception as e:
            logger.error("Exception when querying Rockset: %s\n", e)
            return []
        finalResult: list[Tuple[Document, float]] = []
        for document in query_response.results:
            metadata = {}
            assert isinstance(
                document, dict
            ), "document should be of type `dict[str,Any]`. But found: `{}`".format(
                type(document)
            )
            for k, v in document.items():
                if k == self._text_key:
                    assert isinstance(v, str), (
                        "page content stored in column `{}` must be of type `str`. "
                        "But found: `{}`".format(self._text_key, type(v))
                    )
                    page_content = v
                elif k == "dist":
                    assert isinstance(v, float), (
                        "Computed distance between vectors must be of type `float`. "
                        "But found {}".format(type(v))
                    )
                    score = v
                elif k not in ["_id", "_event_time", "_meta"]:
                    # These columns are populated by Rockset when documents are
                    # inserted. No need to return them in metadata dict.
                    metadata[k] = v
            finalResult.append(
                (Document(page_content=page_content, metadata=metadata), score)
            )
        return finalResult

    # Helper functions

    def _build_query_sql(
        self,
        query_embedding: List[float],
        distance_func: DistanceFunction,
        k: int = 4,
        where_str: Optional[str] = None,
    ) -> str:
        """Builds Rockset SQL query to query similar vectors to query_vector."""
        q_embedding_str = ",".join(map(str, query_embedding))
        distance_str = f"""{distance_func.value}({self._embedding_key}, \
[{q_embedding_str}]) as dist"""
        where_str = f"WHERE {where_str}\n" if where_str else ""
        return f"""\
SELECT * EXCEPT({self._embedding_key}), {distance_str}
FROM {self._workspace}.{self._collection_name}
{where_str}\
ORDER BY dist {distance_func.order_by()}
LIMIT {str(k)}
"""

    def _write_documents_to_rockset(self, batch: List[dict]) -> List[str]:
        add_doc_res = self._client.Documents.add_documents(
            collection=self._collection_name, data=batch, workspace=self._workspace
        )
        return [doc_status._id for doc_status in add_doc_res.data]

    def delete_texts(self, ids: List[str]) -> None:
        """Delete a list of docs from the Rockset collection."""
        try:
            from rockset.models import DeleteDocumentsRequestData
        except ImportError:
            raise ImportError(
                "Could not import rockset client python package. "
                "Please install it with `pip install rockset`."
            )

        self._client.Documents.delete_documents(
            collection=self._collection_name,
            data=[DeleteDocumentsRequestData(id=i) for i in ids],
            workspace=self._workspace,
        )
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~parsers~txt.py
"""Module for parsing text files..""" from typing import Iterator from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob from langchain.schema import Document class TextParser(BaseBlobParser): """Parser for text blobs.""" def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~document_loaders~tencent_cos_directory.py
from typing import Any, Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.tencent_cos_file import TencentCOSFileLoader


class TencentCOSDirectoryLoader(BaseLoader):
    """Load from `Tencent Cloud COS` directory."""

    def __init__(self, conf: Any, bucket: str, prefix: str = ""):
        """Initialize with COS config, bucket and prefix.

        :param conf(CosConfig): COS config.
        :param bucket(str): COS bucket.
        :param prefix(str): prefix.
        """
        self.conf = conf
        self.bucket = bucket
        self.prefix = prefix

    def load(self) -> List[Document]:
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Load documents."""
        try:
            from qcloud_cos import CosS3Client
        except ImportError:
            raise ImportError(
                "Could not import cos-python-sdk-v5 python package. "
                "Please install it with `pip install cos-python-sdk-v5`."
            )
        client = CosS3Client(self.conf)
        contents = []
        marker = ""
        while True:
            response = client.list_objects(
                Bucket=self.bucket, Prefix=self.prefix, Marker=marker, MaxKeys=1000
            )
            if "Contents" in response:
                contents.extend(response["Contents"])
            if response["IsTruncated"] == "false":
                break
            marker = response["NextMarker"]
        for content in contents:
            if content["Key"].endswith("/"):
                continue
            loader = TencentCOSFileLoader(self.conf, self.bucket, content["Key"])
            yield loader.load()[0]
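A usage sketch (not part of the source file): the region, credentials, bucket and prefix are placeholders, and the CosConfig constructor arguments follow the usual cos-python-sdk-v5 pattern, which should be checked against that SDK's documentation.

# Illustrative sketch only: all identifiers below are hypothetical values.
from qcloud_cos import CosConfig

from langchain.document_loaders import TencentCOSDirectoryLoader

conf = CosConfig(
    Region="ap-guangzhou",
    SecretId="<YOUR_SECRET_ID>",
    SecretKey="<YOUR_SECRET_KEY>",
)
loader = TencentCOSDirectoryLoader(
    conf=conf, bucket="examplebucket-1250000000", prefix="docs/"
)
docs = loader.load()  # or iterate lazily with loader.lazy_load()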
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~agents~types.py
from typing import Dict, Type, Union

from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.agent_types import AgentType
from langchain.agents.chat.base import ChatAgent
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.openai_functions_multi_agent.base import (
    OpenAIMultiFunctionsAgent,
)
from langchain.agents.react.base import ReActDocstoreAgent
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent
from langchain.agents.structured_chat.base import StructuredChatAgent

AGENT_TYPE = Union[Type[BaseSingleActionAgent], Type[OpenAIMultiFunctionsAgent]]

AGENT_TO_CLASS: Dict[AgentType, AGENT_TYPE] = {
    AgentType.ZERO_SHOT_REACT_DESCRIPTION: ZeroShotAgent,
    AgentType.REACT_DOCSTORE: ReActDocstoreAgent,
    AgentType.SELF_ASK_WITH_SEARCH: SelfAskWithSearchAgent,
    AgentType.CONVERSATIONAL_REACT_DESCRIPTION: ConversationalAgent,
    AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION: ChatAgent,
    AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION: ConversationalChatAgent,
    AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION: StructuredChatAgent,
    AgentType.OPENAI_FUNCTIONS: OpenAIFunctionsAgent,
    AgentType.OPENAI_MULTI_FUNCTIONS: OpenAIMultiFunctionsAgent,
}
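A small sketch (not part of the source file) showing how the registry above resolves an AgentType enum value to its implementing class, which is how helpers such as initialize_agent pick an agent implementation.

# Illustrative sketch only: looks up an agent class from the AGENT_TO_CLASS registry.
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.types import AGENT_TO_CLASS

agent_cls = AGENT_TO_CLASS[AgentType.ZERO_SHOT_REACT_DESCRIPTION]
print(agent_cls is ZeroShotAgent)  # True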
[]
2024-01-10
ai-forever/gigachain
libs~langchain~langchain~retrievers~arxiv.py
from typing import List

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from langchain.utilities.arxiv import ArxivAPIWrapper


class ArxivRetriever(BaseRetriever, ArxivAPIWrapper):
    """`Arxiv` retriever.

    It wraps load() to get_relevant_documents().
    It uses all ArxivAPIWrapper arguments without any change.
    """

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return self.load(query=query)
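A brief usage sketch (not part of the source file): load_max_docs is one of the ArxivAPIWrapper fields the retriever inherits unchanged, and the "Title" metadata key is the one populated by the wrapper's load().

# Illustrative sketch only: the query string is a placeholder.
from langchain.retrievers import ArxivRetriever

retriever = ArxivRetriever(load_max_docs=2)
docs = retriever.get_relevant_documents("quantum error correction")
for doc in docs:
    print(doc.metadata.get("Title"), "->", len(doc.page_content), "chars")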
[]