date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~playwright~get_elements.py | from __future__ import annotations
import json
from typing import TYPE_CHECKING, List, Optional, Sequence, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import aget_current_page, get_current_page
if TYPE_CHECKING:
from playwright.async_api import Page as AsyncPage
from playwright.sync_api import Page as SyncPage
class GetElementsToolInput(BaseModel):
"""Input for GetElementsTool."""
selector: str = Field(
...,
description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname",
)
attributes: List[str] = Field(
default_factory=lambda: ["innerText"],
description="Set of attributes to retrieve for each element",
)
async def _aget_elements(
page: AsyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = await page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = await element.inner_text()
else:
val = await element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
def _get_elements(
page: SyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = element.inner_text()
else:
val = element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
class GetElementsTool(BaseBrowserTool):
"""Tool for getting elements in the current web page matching a CSS selector."""
name: str = "get_elements"
description: str = (
"Retrieve elements in the current web page matching the given CSS selector"
)
args_schema: Type[BaseModel] = GetElementsToolInput
def _run(
self,
selector: str,
attributes: Sequence[str] = ["innerText"],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
results = _get_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False)
async def _arun(
self,
selector: str,
attributes: Sequence[str] = ["innerText"],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
results = await _aget_elements(page, selector, attributes)
return json.dumps(results, ensure_ascii=False)
| [] |
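A minimal usage sketch for the `GetElementsTool` defined in the row above (not part of the dataset row itself). It assumes `playwright` is installed and its browsers are provisioned via `playwright install`; the URL is illustrative.

```python
# Hedged sketch: pair GetElementsTool with a synchronous Playwright browser.
from langchain.tools.playwright.get_elements import GetElementsTool
from langchain.tools.playwright.navigate import NavigateTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

browser = create_sync_playwright_browser()
navigate = NavigateTool.from_browser(sync_browser=browser)
get_elements = GetElementsTool.from_browser(sync_browser=browser)

# Navigate to the desired page first, then extract matching elements as JSON.
navigate.run({"url": "https://example.com"})
print(get_elements.run({"selector": "h1", "attributes": ["innerText"]}))
# e.g. '[{"innerText": "Example Domain"}]'
```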
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~graphql.py | import json
from typing import Any, Callable, Dict, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
class GraphQLAPIWrapper(BaseModel):
"""Wrapper around GraphQL API.
To use, you should have the ``gql`` python package installed.
This wrapper will use the GraphQL API to conduct queries.
"""
custom_headers: Optional[Dict[str, str]] = None
graphql_endpoint: str
gql_client: Any #: :meta private:
gql_function: Callable[[str], Any] #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
except ImportError as e:
raise ImportError(
"Could not import gql python package. "
f"Try installing it with `pip install gql`. Received error: {e}"
)
headers = values.get("custom_headers")
transport = RequestsHTTPTransport(
url=values["graphql_endpoint"],
headers=headers,
)
client = Client(transport=transport, fetch_schema_from_transport=True)
values["gql_client"] = client
values["gql_function"] = gql
return values
def run(self, query: str) -> str:
"""Run a GraphQL query and get the results."""
result = self._execute_query(query)
return json.dumps(result, indent=2)
def _execute_query(self, query: str) -> Dict[str, Any]:
"""Execute a GraphQL query and return the results."""
document_node = self.gql_function(query)
result = self.gql_client.execute(document_node)
return result
| [] |
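A brief usage sketch for the `GraphQLAPIWrapper` above. The endpoint URL is illustrative, and the `gql` package with its requests transport (`pip install "gql[requests]"`) is assumed to be installed.

```python
# Hedged sketch: query a public GraphQL endpoint and print the JSON result.
from langchain.utilities.graphql import GraphQLAPIWrapper

wrapper = GraphQLAPIWrapper(
    graphql_endpoint="https://swapi-graphql.netlify.app/.netlify/functions/index",
)
# run() executes the query and returns a pretty-printed JSON string.
print(wrapper.run("{ allFilms { films { title } } }"))
```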
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~bageldb.py | from __future__ import annotations
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
)
if TYPE_CHECKING:
import bagel
import bagel.config
from bagel.api.types import ID, OneOrMany, Where, WhereDocument
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import xor_args
DEFAULT_K = 5
def _results_to_docs(results: Any) -> List[Document]:
return [doc for doc, _ in _results_to_docs_and_scores(results)]
def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
class Bagel(VectorStore):
"""``BagelDB.ai`` vector store.
To use, you should have the ``betabageldb`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Bagel
vectorstore = Bagel(cluster_name="langchain_store")
"""
_LANGCHAIN_DEFAULT_CLUSTER_NAME = "langchain"
def __init__(
self,
cluster_name: str = _LANGCHAIN_DEFAULT_CLUSTER_NAME,
client_settings: Optional[bagel.config.Settings] = None,
embedding_function: Optional[Embeddings] = None,
cluster_metadata: Optional[Dict] = None,
client: Optional[bagel.Client] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
) -> None:
"""Initialize with bagel client"""
try:
import bagel
import bagel.config
except ImportError:
raise ImportError("Please install bagel `pip install betabageldb`.")
if client is not None:
self._client_settings = client_settings
self._client = client
else:
if client_settings:
_client_settings = client_settings
else:
_client_settings = bagel.config.Settings(
bagel_api_impl="rest",
bagel_server_host="api.bageldb.ai",
)
self._client_settings = _client_settings
self._client = bagel.Client(_client_settings)
self._cluster = self._client.get_or_create_cluster(
name=cluster_name,
metadata=cluster_metadata,
)
self.override_relevance_score_fn = relevance_score_fn
self._embedding_function = embedding_function
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding_function
@xor_args(("query_texts", "query_embeddings"))
def __query_cluster(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Query the BagelDB cluster based on the provided parameters."""
try:
import bagel # noqa: F401
except ImportError:
raise ImportError("Please install bagel `pip install betabageldb`.")
return self._cluster.find(
query_texts=query_texts,
query_embeddings=query_embeddings,
n_results=n_results,
where=where,
**kwargs,
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
embeddings: Optional[List[List[float]]] = None,
**kwargs: Any,
) -> List[str]:
"""
Add texts along with their corresponding embeddings and optional
metadata to the BagelDB cluster.
Args:
texts (Iterable[str]): Texts to be added.
embeddings (Optional[List[List[float]]]): List of embedding vectors.
metadatas (Optional[List[dict]]): Optional list of metadatas.
ids (Optional[List[str]]): List of unique IDs for the texts.
Returns:
List[str]: List of unique IDs representing the added texts.
"""
# creating unique ids if None
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
texts = list(texts)
if self._embedding_function and embeddings is None and texts:
embeddings = self._embedding_function.embed_documents(texts)
if metadatas:
length_diff = len(texts) - len(metadatas)
if length_diff:
metadatas = metadatas + [{}] * length_diff
empty_ids = []
non_empty_ids = []
for idx, metadata in enumerate(metadatas):
if metadata:
non_empty_ids.append(idx)
else:
empty_ids.append(idx)
if non_empty_ids:
metadatas = [metadatas[idx] for idx in non_empty_ids]
texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
embeddings_with_metadatas = (
[embeddings[idx] for idx in non_empty_ids] if embeddings else None
)
ids_with_metadata = [ids[idx] for idx in non_empty_ids]
self._cluster.upsert(
embeddings=embeddings_with_metadatas,
metadatas=metadatas,
documents=texts_with_metadatas,
ids=ids_with_metadata,
)
if empty_ids:
texts_without_metadatas = [texts[j] for j in empty_ids]
embeddings_without_metadatas = (
[embeddings[j] for j in empty_ids] if embeddings else None
)
ids_without_metadatas = [ids[j] for j in empty_ids]
self._cluster.upsert(
embeddings=embeddings_without_metadatas,
documents=texts_without_metadatas,
ids=ids_without_metadatas,
)
else:
metadatas = [{}] * len(texts)
self._cluster.upsert(
embeddings=embeddings,
documents=texts,
metadatas=metadatas,
ids=ids,
)
return ids
def similarity_search(
self,
query: str,
k: int = DEFAULT_K,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""
Run a similarity search with BagelDB.
Args:
query (str): The query text to search for similar documents/texts.
k (int): The number of results to return.
where (Optional[Dict[str, str]]): Metadata filters to narrow down.
Returns:
List[Document]: List of documents objects representing
the documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, where=where)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_K,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Run a similarity search with BagelDB and return documents with their
corresponding similarity scores.
Args:
query (str): The query text to search for similar documents.
k (int): The number of results to return.
where (Optional[Dict[str, str]]): Filter using metadata.
Returns:
List[Tuple[Document, float]]: List of tuples, each containing a
Document object representing a similar document and its
corresponding similarity score.
"""
results = self.__query_cluster(query_texts=[query], n_results=k, where=where)
return _results_to_docs_and_scores(results)
@classmethod
def from_texts(
cls: Type[Bagel],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
cluster_name: str = _LANGCHAIN_DEFAULT_CLUSTER_NAME,
client_settings: Optional[bagel.config.Settings] = None,
cluster_metadata: Optional[Dict] = None,
client: Optional[bagel.Client] = None,
text_embeddings: Optional[List[List[float]]] = None,
**kwargs: Any,
) -> Bagel:
"""
Create and initialize a Bagel instance from list of texts.
Args:
texts (List[str]): List of text content to be added.
cluster_name (str): The name of the BagelDB cluster.
client_settings (Optional[bagel.config.Settings]): Client settings.
cluster_metadata (Optional[Dict]): Metadata of the cluster.
embedding (Optional[Embeddings]): Embedding function to use.
metadatas (Optional[List[dict]]): List of metadata.
ids (Optional[List[str]]): List of unique IDs. Defaults to None.
client (Optional[bagel.Client]): Bagel client instance.
Returns:
Bagel: Bagel vectorstore.
"""
bagel_cluster = cls(
cluster_name=cluster_name,
embedding_function=embedding,
client_settings=client_settings,
client=client,
cluster_metadata=cluster_metadata,
**kwargs,
)
_ = bagel_cluster.add_texts(
texts=texts, embeddings=text_embeddings, metadatas=metadatas, ids=ids
)
return bagel_cluster
def delete_cluster(self) -> None:
"""Delete the cluster."""
self._client.delete_cluster(self._cluster.name)
def similarity_search_by_vector_with_relevance_scores(
self,
query_embeddings: List[float],
k: int = DEFAULT_K,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Return docs most similar to embedding vector and similarity score.
"""
results = self.__query_cluster(
query_embeddings=query_embeddings, n_results=k, where=where
)
return _results_to_docs_and_scores(results)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
where: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector."""
results = self.__query_cluster(
query_embeddings=embedding, n_results=k, where=where
)
return _results_to_docs(results)
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
Select and return the appropriate relevance score function based
on the distance metric used in the BagelDB cluster.
"""
if self.override_relevance_score_fn:
return self.override_relevance_score_fn
distance = "l2"
distance_key = "hnsw:space"
metadata = self._cluster.metadata
if metadata and distance_key in metadata:
distance = metadata[distance_key]
if distance == "cosine":
return self._cosine_relevance_score_fn
elif distance == "l2":
return self._euclidean_relevance_score_fn
elif distance == "ip":
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function for distance"
f" metric of type: {distance}. Consider providing"
" relevance_score_fn to Bagel constructor."
)
@classmethod
def from_documents(
cls: Type[Bagel],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
cluster_name: str = _LANGCHAIN_DEFAULT_CLUSTER_NAME,
client_settings: Optional[bagel.config.Settings] = None,
client: Optional[bagel.Client] = None,
cluster_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Bagel:
"""
Create a Bagel vectorstore from a list of documents.
Args:
documents (List[Document]): List of Document objects to add to the
Bagel vectorstore.
embedding (Optional[Embeddings]): Embedding function to use.
ids (Optional[List[str]]): List of IDs. Defaults to None.
cluster_name (str): The name of the BagelDB cluster.
client_settings (Optional[bagel.config.Settings]): Client settings.
client (Optional[bagel.Client]): Bagel client instance.
cluster_metadata (Optional[Dict]): Metadata associated with the
Bagel cluster. Defaults to None.
Returns:
Bagel: Bagel vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
cluster_name=cluster_name,
client_settings=client_settings,
client=client,
cluster_metadata=cluster_metadata,
**kwargs,
)
def update_document(self, document_id: str, document: Document) -> None:
"""Update a document in the cluster.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
text = document.page_content
metadata = document.metadata
self._cluster.update(
ids=[document_id],
documents=[text],
metadatas=[metadata],
)
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""Gets the collection."""
kwargs = {
"ids": ids,
"where": where,
"limit": limit,
"offset": offset,
"where_document": where_document,
}
if include is not None:
kwargs["include"] = include
return self._cluster.get(**kwargs)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""
Delete by IDs.
Args:
ids: List of ids to delete.
"""
self._cluster.delete(ids=ids)
| [] |
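A short usage sketch for the `Bagel` vector store above. It assumes `pip install betabageldb` and network access to the hosted BagelDB API; when no embedding function is supplied, server-side embedding is assumed to be performed by the service.

```python
# Hedged sketch: create a cluster from texts, search it, then clean up.
from langchain.vectorstores import Bagel

cluster = Bagel.from_texts(
    cluster_name="testing",
    texts=["hello bagel", "hello langchain", "I love salad"],
)

# Plain similarity search returns Document objects.
docs = cluster.similarity_search("bagel", k=2)
print([doc.page_content for doc in docs])

# similarity_search_with_score also returns the distance for each hit.
for doc, score in cluster.similarity_search_with_score("bagel", k=2):
    print(doc.page_content, score)

cluster.delete_cluster()
```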
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~cache.py | """
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by reducing the number of API calls you make
to the LLM provider.
Cache directly competes with Memory. See documentation for Pros and Cons.
**Class hierarchy:**
.. code-block::
BaseCache --> <name>Cache # Examples: InMemoryCache, RedisCache, GPTCache
"""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
import uuid
import warnings
from datetime import timedelta
from functools import lru_cache
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
from sqlalchemy import Column, Integer, Row, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain.llms.base import LLM, get_prompts
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.schema import ChatGeneration, Generation
from langchain.schema.cache import RETURN_VAL_TYPE, BaseCache
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.redis import Redis as RedisVectorstore
logger = logging.getLogger(__file__)
if TYPE_CHECKING:
import momento
from cassandra.cluster import Session as CassandraSession
def _hash(_input: str) -> str:
"""Use a deterministic hashing approach."""
return hashlib.md5(_input.encode()).hexdigest()
def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str:
"""Dump generations to json.
Args:
generations (RETURN_VAL_TYPE): A list of language model generations.
Returns:
str: Json representing a list of generations.
Warning: would not work well with arbitrary subclasses of `Generation`
"""
return json.dumps([generation.dict() for generation in generations])
def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE:
"""Load generations from json.
Args:
generations_json (str): A string of json representing a list of generations.
Raises:
ValueError: Could not decode json string to list of generations.
Returns:
RETURN_VAL_TYPE: A list of generations.
Warning: would not work well with arbitrary subclasses of `Generation`
"""
try:
results = json.loads(generations_json)
return [Generation(**generation_dict) for generation_dict in results]
except json.JSONDecodeError:
raise ValueError(
f"Could not decode json to list of generations: {generations_json}"
)
def _dumps_generations(generations: RETURN_VAL_TYPE) -> str:
"""
Serialization for generic RETURN_VAL_TYPE, i.e. sequence of `Generation`
Args:
generations (RETURN_VAL_TYPE): A list of language model generations.
Returns:
str: a single string representing a list of generations.
This function (together with its counterpart `_loads_generations`) relies on
the dumps/loads pair with Reviver, so it is able to deal
with all subclasses of Generation.
Each item in the list is `dumps`ed to a string,
and the whole list of strings is then json-dumped.
"""
return json.dumps([dumps(_item) for _item in generations])
def _loads_generations(generations_str: str) -> Union[RETURN_VAL_TYPE, None]:
"""
Deserialization of a string into a generic RETURN_VAL_TYPE
(i.e. a sequence of `Generation`).
See `_dumps_generations`, the inverse of this function.
Args:
generations_str (str): A string representing a list of generations.
Compatible with the legacy cache-blob format
Does not raise exceptions for malformed entries, just logs a warning
and returns None: the caller should be prepared for such a cache miss.
Returns:
RETURN_VAL_TYPE: A list of generations.
"""
try:
generations = [loads(_item_str) for _item_str in json.loads(generations_str)]
return generations
except (json.JSONDecodeError, TypeError):
# deferring the (soft) handling to after the legacy-format attempt
pass
try:
gen_dicts = json.loads(generations_str)
# not relying on `_load_generations_from_json` (which could disappear):
generations = [Generation(**generation_dict) for generation_dict in gen_dicts]
logger.warning(
f"Legacy 'Generation' cached blob encountered: '{generations_str}'"
)
return generations
except (json.JSONDecodeError, TypeError):
logger.warning(
f"Malformed/unparsable cached blob encountered: '{generations_str}'"
)
return None
class InMemoryCache(BaseCache):
"""Cache that stores things in memory."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
self._cache[(prompt, llm_string)] = return_val
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
Base = declarative_base()
class FullLLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache (all generations)."""
__tablename__ = "full_llm_cache"
prompt = Column(String, primary_key=True)
llm = Column(String, primary_key=True)
idx = Column(Integer, primary_key=True)
response = Column(String)
class SQLAlchemyCache(BaseCache):
"""Cache that uses SQAlchemy as a backend."""
def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt == prompt) # type: ignore
.where(self.cache_schema.llm == llm_string)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session:
rows = session.execute(stmt).fetchall()
if rows:
try:
return [loads(row[0]) for row in rows]
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
return [Generation(text=row[0]) for row in rows]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update based on prompt and llm_string."""
items = [
self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i)
for i, gen in enumerate(return_val)
]
with Session(self.engine) as session, session.begin():
for item in items:
session.merge(item)
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
with Session(self.engine) as session:
session.query(self.cache_schema).delete()
session.commit()
class SQLiteCache(SQLAlchemyCache):
"""Cache that uses SQLite as a backend."""
def __init__(self, database_path: str = ".langchain.db"):
"""Initialize by creating the engine and all tables."""
engine = create_engine(f"sqlite:///{database_path}")
super().__init__(engine)
class UpstashRedisCache(BaseCache):
"""Cache that uses Upstash Redis as a backend."""
def __init__(self, redis_: Any, *, ttl: Optional[int] = None):
"""
Initialize an instance of UpstashRedisCache.
This method initializes an object with Upstash Redis caching capabilities.
It takes a `redis_` parameter, which should be an instance of an Upstash Redis
client class, allowing the object to interact with Upstash Redis
server for caching purposes.
Parameters:
redis_: An instance of Upstash Redis client class
(e.g., Redis) used for caching.
This allows the object to communicate with
Redis server for caching operations.
ttl (int, optional): Time-to-live (TTL) for cached items in seconds.
If provided, it sets the time duration for how long cached
items will remain valid. If not provided, cached items will not
have an automatic expiration.
"""
try:
from upstash_redis import Redis
except ImportError:
raise ValueError(
"Could not import upstash_redis python package. "
"Please install it with `pip install upstash_redis`."
)
if not isinstance(redis_, Redis):
raise ValueError("Please pass in Upstash Redis object.")
self.redis = redis_
self.ttl = ttl
def _key(self, prompt: str, llm_string: str) -> str:
"""Compute key from prompt and llm_string"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
generations = []
# Read from a HASH
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"UpstashRedisCache supports caching of normal LLM generations, "
f"got {type(gen)}"
)
if isinstance(gen, ChatGeneration):
warnings.warn(
"NOTE: Generation has not been cached. UpstashRedisCache does not"
" support caching ChatModel outputs."
)
return
# Write to a HASH
key = self._key(prompt, llm_string)
mapping = {
str(idx): generation.text for idx, generation in enumerate(return_val)
}
self.redis.hset(key=key, values=mapping)
if self.ttl is not None:
self.redis.expire(key, self.ttl)
def clear(self, **kwargs: Any) -> None:
"""
Clear cache. If `asynchronous` is True, flush asynchronously.
This flushes the *whole* db.
"""
asynchronous = kwargs.get("asynchronous", False)
if asynchronous:
asynchronous = "ASYNC"
else:
asynchronous = "SYNC"
self.redis.flushdb(flush_type=asynchronous)
class RedisCache(BaseCache):
"""Cache that uses Redis as a backend."""
def __init__(self, redis_: Any, *, ttl: Optional[int] = None):
"""
Initialize an instance of RedisCache.
This method initializes an object with Redis caching capabilities.
It takes a `redis_` parameter, which should be an instance of a Redis
client class, allowing the object to interact with a Redis
server for caching purposes.
Parameters:
redis_ (Any): An instance of a Redis client class
(e.g., redis.Redis) used for caching.
This allows the object to communicate with a
Redis server for caching operations.
ttl (int, optional): Time-to-live (TTL) for cached items in seconds.
If provided, it sets the time duration for how long cached
items will remain valid. If not provided, cached items will not
have an automatic expiration.
"""
try:
from redis import Redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
if not isinstance(redis_, Redis):
raise ValueError("Please pass in Redis object.")
self.redis = redis_
self.ttl = ttl
def _key(self, prompt: str, llm_string: str) -> str:
"""Compute key from prompt and llm_string"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
generations = []
# Read from a Redis HASH
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
try:
generations.append(loads(text))
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
# Write to a Redis HASH
key = self._key(prompt, llm_string)
with self.redis.pipeline() as pipe:
pipe.hset(
key,
mapping={
str(idx): dumps(generation)
for idx, generation in enumerate(return_val)
},
)
if self.ttl is not None:
pipe.expire(key, self.ttl)
pipe.execute()
def clear(self, **kwargs: Any) -> None:
"""Clear cache. If `asynchronous` is True, flush asynchronously."""
asynchronous = kwargs.get("asynchronous", False)
self.redis.flushdb(asynchronous=asynchronous, **kwargs)
class RedisSemanticCache(BaseCache):
"""Cache that uses Redis as a vector-store backend."""
# TODO - implement a TTL policy in Redis
DEFAULT_SCHEMA = {
"content_key": "prompt",
"text": [
{"name": "prompt"},
],
"extra": [{"name": "return_val"}, {"name": "llm_string"}],
}
def __init__(
self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2
):
"""Initialize by passing in the `init` GPTCache func
Args:
redis_url (str): URL to connect to Redis.
embedding (Embedding): Embedding provider for semantic encoding and search.
score_threshold (float, 0.2): similarity score threshold for treating a cached prompt as a hit.
Example:
.. code-block:: python
from langchain.globals import set_llm_cache
from langchain.cache import RedisSemanticCache
from langchain.embeddings import OpenAIEmbeddings
set_llm_cache(RedisSemanticCache(
redis_url="redis://localhost:6379",
embedding=OpenAIEmbeddings()
))
"""
self._cache_dict: Dict[str, RedisVectorstore] = {}
self.redis_url = redis_url
self.embedding = embedding
self.score_threshold = score_threshold
def _index_name(self, llm_string: str) -> str:
hashed_index = _hash(llm_string)
return f"cache:{hashed_index}"
def _get_llm_cache(self, llm_string: str) -> RedisVectorstore:
index_name = self._index_name(llm_string)
# return vectorstore client for the specific llm string
if index_name in self._cache_dict:
return self._cache_dict[index_name]
# create new vectorstore client for the specific llm string
try:
self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
embedding=self.embedding,
index_name=index_name,
redis_url=self.redis_url,
schema=cast(Dict, self.DEFAULT_SCHEMA),
)
except ValueError:
redis = RedisVectorstore(
embedding=self.embedding,
index_name=index_name,
redis_url=self.redis_url,
index_schema=cast(Dict, self.DEFAULT_SCHEMA),
)
_embedding = self.embedding.embed_query(text="test")
redis._create_index_if_not_exist(dim=len(_embedding))
self._cache_dict[index_name] = redis
return self._cache_dict[index_name]
def clear(self, **kwargs: Any) -> None:
"""Clear semantic cache for a given llm_string."""
index_name = self._index_name(kwargs["llm_string"])
if index_name in self._cache_dict:
self._cache_dict[index_name].drop_index(
index_name=index_name, delete_documents=True, redis_url=self.redis_url
)
del self._cache_dict[index_name]
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
llm_cache = self._get_llm_cache(llm_string)
generations: List = []
# Read from a Hash
results = llm_cache.similarity_search(
query=prompt,
k=1,
distance_threshold=self.score_threshold,
)
if results:
for document in results:
try:
generations.extend(loads(document.metadata["return_val"]))
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
generations.extend(
_load_generations_from_json(document.metadata["return_val"])
)
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisSemanticCache only supports caching of "
f"normal LLM generations, got {type(gen)}"
)
llm_cache = self._get_llm_cache(llm_string)
metadata = {
"llm_string": llm_string,
"prompt": prompt,
"return_val": dumps([g for g in return_val]),
}
llm_cache.add_texts(texts=[prompt], metadatas=[metadata])
class GPTCache(BaseCache):
"""Cache that uses GPTCache as a backend."""
def __init__(
self,
init_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = None,
):
"""Initialize by passing in init function (default: `None`).
Args:
init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
(default: `None`)
Example:
.. code-block:: python
# Initialize GPTCache with a custom init function
import gptcache
from gptcache.processor.pre import get_prompt
from gptcache.manager.factory import manager_factory
from langchain.globals import set_llm_cache
# Avoid multiple caches using the same file,
# causing different llm model caches to affect each other
def init_gptcache(cache_obj: gptcache.Cache, llm: str):
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(
manager="map",
data_dir=f"map_cache_{llm}"
),
)
set_llm_cache(GPTCache(init_gptcache))
"""
try:
import gptcache # noqa: F401
except ImportError:
raise ImportError(
"Could not import gptcache python package. "
"Please install it with `pip install gptcache`."
)
self.init_gptcache_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = init_func
self.gptcache_dict: Dict[str, Any] = {}
def _new_gptcache(self, llm_string: str) -> Any:
"""New gptcache object"""
from gptcache import Cache
from gptcache.manager.factory import get_data_manager
from gptcache.processor.pre import get_prompt
_gptcache = Cache()
if self.init_gptcache_func is not None:
sig = inspect.signature(self.init_gptcache_func)
if len(sig.parameters) == 2:
self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg]
else:
self.init_gptcache_func(_gptcache) # type: ignore[call-arg]
else:
_gptcache.init(
pre_embedding_func=get_prompt,
data_manager=get_data_manager(data_path=llm_string),
)
self.gptcache_dict[llm_string] = _gptcache
return _gptcache
def _get_gptcache(self, llm_string: str) -> Any:
"""Get a cache object.
When the corresponding llm model cache does not exist, it will be created."""
_gptcache = self.gptcache_dict.get(llm_string, None)
if not _gptcache:
_gptcache = self._new_gptcache(llm_string)
return _gptcache
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up the cache data.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then retrieve the data from the cache based on the `prompt`.
"""
from gptcache.adapter.api import get
_gptcache = self._get_gptcache(llm_string)
res = get(prompt, cache_obj=_gptcache)
if res:
return [
Generation(**generation_dict) for generation_dict in json.loads(res)
]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then store the `prompt` and `return_val` in the cache object.
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"GPTCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
from gptcache.adapter.api import put
_gptcache = self._get_gptcache(llm_string)
handled_data = json.dumps([generation.dict() for generation in return_val])
put(prompt, handled_data, cache_obj=_gptcache)
return None
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
from gptcache import Cache
for gptcache_instance in self.gptcache_dict.values():
gptcache_instance = cast(Cache, gptcache_instance)
gptcache_instance.flush()
self.gptcache_dict.clear()
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
def _validate_ttl(ttl: Optional[timedelta]) -> None:
if ttl is not None and ttl <= timedelta(seconds=0):
raise ValueError(f"ttl must be positive but was {ttl}.")
class MomentoCache(BaseCache):
"""Cache that uses Momento as a backend. See https://gomomento.com/"""
def __init__(
self,
cache_client: momento.CacheClient,
cache_name: str,
*,
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a prompt cache using Momento as a backend.
Note: to instantiate the cache client passed to MomentoCache,
you must have a Momento account. See https://gomomento.com/.
Args:
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the data.
ttl (Optional[timedelta], optional): The time to live for the cache items.
Defaults to None, i.e. use the client default TTL.
ensure_cache_exists (bool, optional): Create the cache if it doesn't
exist. Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
ValueError: ttl is non-null and non-positive
"""
try:
from momento import CacheClient
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
_validate_ttl(ttl)
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.cache_client = cache_client
self.cache_name = cache_name
self.ttl = ttl
@classmethod
def from_client_params(
cls,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
api_key: Optional[str] = None,
auth_token: Optional[str] = None, # for backwards compatibility
**kwargs: Any,
) -> MomentoCache:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
# Try checking `MOMENTO_AUTH_TOKEN` first for backwards compatibility
try:
api_key = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
except ValueError:
api_key = api_key or get_from_env("api_key", "MOMENTO_API_KEY")
credentials = CredentialProvider.from_string(api_key)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(cache_client, cache_name, ttl=ttl, **kwargs)
def __key(self, prompt: str, llm_string: str) -> str:
"""Compute cache key from prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Returns:
str: The cache key.
"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Lookup llm generations in cache by prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Raises:
SdkException: Momento service or network error
Returns:
Optional[RETURN_VAL_TYPE]: A list of language model generations.
"""
from momento.responses import CacheGet
generations: RETURN_VAL_TYPE = []
get_response = self.cache_client.get(
self.cache_name, self.__key(prompt, llm_string)
)
if isinstance(get_response, CacheGet.Hit):
value = get_response.value_string
generations = _load_generations_from_json(value)
elif isinstance(get_response, CacheGet.Miss):
pass
elif isinstance(get_response, CacheGet.Error):
raise get_response.inner_exception
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Store llm generations in cache.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model string.
return_val (RETURN_VAL_TYPE): A list of language model generations.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"Momento only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
key = self.__key(prompt, llm_string)
value = _dump_generations_to_json(return_val)
set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
from momento.responses import CacheSet
if isinstance(set_response, CacheSet.Success):
pass
elif isinstance(set_response, CacheSet.Error):
raise set_response.inner_exception
else:
raise Exception(f"Unexpected response: {set_response}")
def clear(self, **kwargs: Any) -> None:
"""Clear the cache.
Raises:
SdkException: Momento service or network error
"""
from momento.responses import CacheFlush
flush_response = self.cache_client.flush_cache(self.cache_name)
if isinstance(flush_response, CacheFlush.Success):
pass
elif isinstance(flush_response, CacheFlush.Error):
raise flush_response.inner_exception
CASSANDRA_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_cache"
CASSANDRA_CACHE_DEFAULT_TTL_SECONDS = None
class CassandraCache(BaseCache):
"""
Cache that uses Cassandra / Astra DB as a backend.
It uses a single Cassandra table.
The lookup keys (which get to form the primary key) are:
- prompt, a string
- llm_string, a deterministic str representation of the model parameters.
(needed to prevent same-prompt-different-model collisions)
"""
def __init__(
self,
session: Optional[CassandraSession] = None,
keyspace: Optional[str] = None,
table_name: str = CASSANDRA_CACHE_DEFAULT_TABLE_NAME,
ttl_seconds: Optional[int] = CASSANDRA_CACHE_DEFAULT_TTL_SECONDS,
skip_provisioning: bool = False,
):
"""
Initialize with a ready session and a keyspace name.
Args:
session (cassandra.cluster.Session): an open Cassandra session
keyspace (str): the keyspace to use for storing the cache
table_name (str): name of the Cassandra table to use as cache
ttl_seconds (optional int): time-to-live for cache entries
(default: None, i.e. forever)
"""
try:
from cassio.table import ElasticCassandraTable
except (ImportError, ModuleNotFoundError):
raise ValueError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session = session
self.keyspace = keyspace
self.table_name = table_name
self.ttl_seconds = ttl_seconds
self.kv_cache = ElasticCassandraTable(
session=self.session,
keyspace=self.keyspace,
table=self.table_name,
keys=["llm_string", "prompt"],
primary_key_type=["TEXT", "TEXT"],
ttl_seconds=self.ttl_seconds,
skip_provisioning=skip_provisioning,
)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
item = self.kv_cache.get(
llm_string=_hash(llm_string),
prompt=_hash(prompt),
)
if item is not None:
generations = _loads_generations(item["body_blob"])
# this protects against malformed cached items:
if generations is not None:
return generations
else:
return None
else:
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
blob = _dumps_generations(return_val)
self.kv_cache.put(
llm_string=_hash(llm_string),
prompt=_hash(prompt),
body_blob=blob,
)
def delete_through_llm(
self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
) -> None:
"""
A wrapper around `delete` with the LLM being passed.
In case the llm(prompt) calls have a `stop` param, you should pass it here
"""
llm_string = get_prompts(
{**llm.dict(), **{"stop": stop}},
[],
)[1]
return self.delete(prompt, llm_string=llm_string)
def delete(self, prompt: str, llm_string: str) -> None:
"""Evict from cache if there's an entry."""
return self.kv_cache.delete(
llm_string=_hash(llm_string),
prompt=_hash(prompt),
)
def clear(self, **kwargs: Any) -> None:
"""Clear cache. This is for all LLMs at once."""
self.kv_cache.clear()
CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC = "dot"
CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD = 0.85
CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_semantic_cache"
CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS = None
CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE = 16
class CassandraSemanticCache(BaseCache):
"""
Cache that uses Cassandra as a vector-store backend for semantic
(i.e. similarity-based) lookup.
It uses a single (vector) Cassandra table and stores, in principle,
cached values from several LLMs, so the LLM's llm_string is part
of the rows' primary keys.
The similarity is based on one of several distance metrics (default: "dot").
If choosing another metric, the default threshold is to be re-tuned accordingly.
"""
def __init__(
self,
session: Optional[CassandraSession],
keyspace: Optional[str],
embedding: Embeddings,
table_name: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME,
distance_metric: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC,
score_threshold: float = CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD,
ttl_seconds: Optional[int] = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS,
skip_provisioning: bool = False,
):
"""
Initialize the cache with all relevant parameters.
Args:
session (cassandra.cluster.Session): an open Cassandra session
keyspace (str): the keyspace to use for storing the cache
embedding (Embedding): Embedding provider for semantic
encoding and search.
table_name (str): name of the Cassandra (vector) table
to use as cache
distance_metric (str, 'dot'): which measure to adopt for
similarity searches
score_threshold (optional float): numeric value to use as
cutoff for the similarity searches
ttl_seconds (optional int): time-to-live for cache entries
(default: None, i.e. forever)
The default score threshold is tuned to the default metric.
Tune it carefully yourself if switching to another distance metric.
"""
try:
from cassio.table import MetadataVectorCassandraTable
except (ImportError, ModuleNotFoundError):
raise ValueError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session = session
self.keyspace = keyspace
self.embedding = embedding
self.table_name = table_name
self.distance_metric = distance_metric
self.score_threshold = score_threshold
self.ttl_seconds = ttl_seconds
# The contract for this class has separate lookup and update:
# in order to spare some embedding calculations we cache them between
# the two calls.
# Note: each instance of this class has its own `_get_embedding` with
# its own lru.
@lru_cache(maxsize=CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE)
def _cache_embedding(text: str) -> List[float]:
return self.embedding.embed_query(text=text)
self._get_embedding = _cache_embedding
self.embedding_dimension = self._get_embedding_dimension()
self.table = MetadataVectorCassandraTable(
session=self.session,
keyspace=self.keyspace,
table=self.table_name,
primary_key_type=["TEXT"],
vector_dimension=self.embedding_dimension,
ttl_seconds=self.ttl_seconds,
metadata_indexing=("allow", {"_llm_string_hash"}),
skip_provisioning=skip_provisioning,
)
def _get_embedding_dimension(self) -> int:
return len(self._get_embedding(text="This is a sample sentence."))
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
embedding_vector = self._get_embedding(text=prompt)
llm_string_hash = _hash(llm_string)
body = _dumps_generations(return_val)
metadata = {
"_prompt": prompt,
"_llm_string_hash": llm_string_hash,
}
row_id = f"{_hash(prompt)}-{llm_string_hash}"
#
self.table.put(
body_blob=body,
vector=embedding_vector,
row_id=row_id,
metadata=metadata,
)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
hit_with_id = self.lookup_with_id(prompt, llm_string)
if hit_with_id is not None:
return hit_with_id[1]
else:
return None
def lookup_with_id(
self, prompt: str, llm_string: str
) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
"""
Look up based on prompt and llm_string.
If there are hits, return (document_id, cached_entry)
"""
prompt_embedding: List[float] = self._get_embedding(text=prompt)
hits = list(
self.table.metric_ann_search(
vector=prompt_embedding,
metadata={"_llm_string_hash": _hash(llm_string)},
n=1,
metric=self.distance_metric,
metric_threshold=self.score_threshold,
)
)
if hits:
hit = hits[0]
generations = _loads_generations(hit["body_blob"])
if generations is not None:
# this protects against malformed cached items:
return (
hit["row_id"],
generations,
)
else:
return None
else:
return None
def lookup_with_id_through_llm(
self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
llm_string = get_prompts(
{**llm.dict(), **{"stop": stop}},
[],
)[1]
return self.lookup_with_id(prompt, llm_string=llm_string)
def delete_by_document_id(self, document_id: str) -> None:
"""
Given this is a "similarity search" cache, an invalidation pattern
that makes sense is first a lookup to get an ID, and then deleting
with that ID. This is for the second step.
"""
self.table.delete(row_id=document_id)
def clear(self, **kwargs: Any) -> None:
"""Clear the *whole* semantic cache."""
self.table.clear()
class FullMd5LLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache (all generations)."""
__tablename__ = "full_md5_llm_cache"
id = Column(String, primary_key=True)
prompt_md5 = Column(String, index=True)
llm = Column(String, index=True)
idx = Column(Integer, index=True)
prompt = Column(String)
response = Column(String)
class SQLAlchemyMd5Cache(BaseCache):
"""Cache that uses SQAlchemy as a backend."""
def __init__(
self, engine: Engine, cache_schema: Type[FullMd5LLMCache] = FullMd5LLMCache
):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
rows = self._search_rows(prompt, llm_string)
if rows:
return [loads(row[0]) for row in rows]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update based on prompt and llm_string."""
self._delete_previous(prompt, llm_string)
prompt_md5 = self.get_md5(prompt)
items = [
self.cache_schema(
id=str(uuid.uuid1()),
prompt=prompt,
prompt_md5=prompt_md5,
llm=llm_string,
response=dumps(gen),
idx=i,
)
for i, gen in enumerate(return_val)
]
with Session(self.engine) as session, session.begin():
for item in items:
session.merge(item)
def _delete_previous(self, prompt: str, llm_string: str) -> None:
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt_md5 == self.get_md5(prompt)) # type: ignore
.where(self.cache_schema.llm == llm_string)
.where(self.cache_schema.prompt == prompt)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session, session.begin():
rows = session.execute(stmt).fetchall()
for item in rows:
session.delete(item)
def _search_rows(self, prompt: str, llm_string: str) -> List[Row]:
prompt_md5 = self.get_md5(prompt)
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt_md5 == prompt_md5) # type: ignore
.where(self.cache_schema.llm == llm_string)
.where(self.cache_schema.prompt == prompt)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session:
return session.execute(stmt).fetchall()
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
with Session(self.engine) as session:
session.execute(self.cache_schema.delete())
@staticmethod
def get_md5(input_string: str) -> str:
return hashlib.md5(input_string.encode()).hexdigest()
| [
"prompt"
] |
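A short sketch of how the caches defined in the row above are typically wired in. `InMemoryCache` and `SQLiteCache` need no extra dependencies; the other backends (Redis, Momento, Cassandra, GPTCache, ...) follow the same `set_llm_cache` pattern with their own constructors.

```python
# Hedged sketch: enable a process-wide LLM cache, then swap in a persistent one.
from langchain.globals import set_llm_cache
from langchain.cache import InMemoryCache, SQLiteCache

set_llm_cache(InMemoryCache())  # repeated identical calls are served from memory

# Or persist across runs with a local SQLite file.
set_llm_cache(SQLiteCache(database_path=".langchain.db"))

# Cache keys are derived from (prompt, llm_string), so the same prompt sent to
# different model settings never collides in the cache.
```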
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~rtf.py | """Loads rich text files."""
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
satisfies_min_unstructured_version,
)
class UnstructuredRTFLoader(UnstructuredFileLoader):
"""Load `RTF` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredRTFLoader
loader = UnstructuredRTFLoader(
"example.rtf", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-rtf
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Initialize with a file path.
Args:
file_path: The path to the file to load.
mode: The mode to use for partitioning. See unstructured for details.
Defaults to "single".
**unstructured_kwargs: Additional keyword arguments to pass
to unstructured.
"""
min_unstructured_version = "0.5.12"
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
"Partitioning rtf files is only supported in "
f"unstructured>={min_unstructured_version}."
)
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.rtf import partition_rtf
return partition_rtf(filename=self.file_path, **self.unstructured_kwargs)
| [] |
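A usage sketch for the loader above, mirroring its own docstring example. It assumes `unstructured>=0.5.12` is installed and a local `example.rtf` file exists.

```python
# Hedged sketch: load an RTF file as individual unstructured elements.
from langchain.document_loaders import UnstructuredRTFLoader

loader = UnstructuredRTFLoader("example.rtf", mode="elements", strategy="fast")
docs = loader.load()
# Each element (Title, NarrativeText, ...) becomes one Document in "elements" mode.
print(len(docs), docs[0].page_content[:80])
```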
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~memory~buffer.py | from typing import Any, Dict, List, Optional
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
from langchain.pydantic_v1 import root_validator
from langchain.schema.messages import BaseMessage, get_buffer_string
class ConversationBufferMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
return get_buffer_string(
self.chat_memory.messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
class ConversationStringBufferMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError(
"return_messages must be False for ConversationStringBufferMemory"
)
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
import warnings
from typing import Any, Dict, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded if not
already present.
        Returns:
            The imported ``nltk`` module, ready for sentence tokenization.
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
        except LookupError:
            nltk.download("punkt")
            return nltk
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
        Args:
            prompt_value (str): The input paragraph to be split into chunks
            max_size (int, optional): The maximum size limit in bytes for each chunk.
                Defaults to 1024 * 4.
Returns:
List[List[str]]: A list of chunks, where each chunk is a list of sentences
Note:
This function validates the maximum sentence size based on service limits
using the 'toxicity_init_validate' function. It uses the NLTK sentence
tokenizer to split the paragraph into sentences.
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = []
current_chunk = [] # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size or
# current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
"""
Check the toxicity of a given text prompt using AWS Comprehend service
and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
if config:
from langchain_experimental.comprehend_moderation.base_moderation_enums import ( # noqa: E501
BaseModerationActions,
)
toxicity_found = False
action = config.get("action", BaseModerationActions.STOP)
if action not in [
BaseModerationActions.STOP,
BaseModerationActions.ALLOW,
]:
raise ValueError("Action can either be stop or allow")
                threshold = config.get("threshold", 0.5)
                toxicity_labels = config.get("labels", [])
if action == BaseModerationActions.STOP:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label
and (
not toxicity_labels
or label["Name"] in toxicity_labels
)
and label["Score"] >= threshold
):
toxicity_found = True
break
if action == BaseModerationActions.ALLOW:
if not toxicity_labels:
warnings.warn(
"You have allowed toxic content without specifying "
"any toxicity labels."
)
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
else:
if response["ResultList"]:
detected_toxic_labels = list()
for item in response["ResultList"]:
detected_toxic_labels.extend(item["Labels"])
if any(item["Score"] >= 0.5 for item in detected_toxic_labels):
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
raise ModerationToxicityError
return prompt_value
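# Example usage (illustrative sketch; assumes AWS credentials and a recent
# boto3 with the Comprehend `detect_toxic_content` API; the text is made up):
#
#   import boto3
#   comprehend_client = boto3.client("comprehend", region_name="us-east-1")
#   moderator = ComprehendToxicity(client=comprehend_client)
#   checked = moderator.validate("Some user supplied text.")
#   # Raises ModerationToxicityError if any toxicity label scores >= 0.5.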
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~dalle_image_generator.py | """Utility that calls OpenAI's Dall-E Image Generator."""
from typing import Any, Dict, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class DallEAPIWrapper(BaseModel):
"""Wrapper for OpenAI's DALL-E Image Generator.
Docs for using:
1. pip install openai
2. save your OPENAI_API_KEY in an environment variable
"""
client: Any #: :meta private:
    openai_api_key: Optional[str] = None
    n: int = 1
    """Number of images to generate."""
    size: str = "1024x1024"
    """Size of image to generate."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _dalle_image_url(self, prompt: str) -> str:
params = {"prompt": prompt, "n": self.n, "size": self.size}
response = self.client.create(**params)
return response["data"][0]["url"]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
try:
import openai
openai.api_key = openai_api_key
values["client"] = openai.Image
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please it install it with `pip install openai`."
)
return values
def run(self, query: str) -> str:
"""Run query through OpenAI and parse result."""
image_url = self._dalle_image_url(query)
if image_url is None or image_url == "":
# We don't want to return the assumption alone if answer is empty
return "No image was generated"
else:
return image_url
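# Example usage (illustrative sketch; assumes OPENAI_API_KEY is set in the
# environment and that the prompt is arbitrary):
#
#   dalle = DallEAPIWrapper(n=1, size="512x512")
#   image_url = dalle.run("a watercolor painting of a lighthouse at dawn")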
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~chat_models~test_konko.py | """Evaluate ChatKonko Interface."""
from typing import Any
import pytest
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.konko import ChatKonko
from langchain.schema import (
ChatGeneration,
ChatResult,
LLMResult,
)
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_konko_chat_test() -> None:
"""Evaluate basic ChatKonko functionality."""
chat_instance = ChatKonko(max_tokens=10)
msg = HumanMessage(content="Hi")
chat_response = chat_instance([msg])
assert isinstance(chat_response, BaseMessage)
assert isinstance(chat_response.content, str)
def test_konko_chat_test_openai() -> None:
"""Evaluate basic ChatKonko functionality."""
chat_instance = ChatKonko(max_tokens=10, model="gpt-3.5-turbo")
msg = HumanMessage(content="Hi")
chat_response = chat_instance([msg])
assert isinstance(chat_response, BaseMessage)
assert isinstance(chat_response.content, str)
def test_konko_model_test() -> None:
"""Check how ChatKonko manages model_name."""
chat_instance = ChatKonko(model="alpha")
assert chat_instance.model == "alpha"
chat_instance = ChatKonko(model="beta")
assert chat_instance.model == "beta"
def test_konko_available_model_test() -> None:
"""Check how ChatKonko manages model_name."""
chat_instance = ChatKonko(max_tokens=10, n=2)
res = chat_instance.get_available_models()
assert isinstance(res, set)
def test_konko_system_msg_test() -> None:
"""Evaluate ChatKonko's handling of system messages."""
chat_instance = ChatKonko(max_tokens=10)
sys_msg = SystemMessage(content="Initiate user chat.")
user_msg = HumanMessage(content="Hi there")
chat_response = chat_instance([sys_msg, user_msg])
assert isinstance(chat_response, BaseMessage)
assert isinstance(chat_response.content, str)
def test_konko_generation_test() -> None:
"""Check ChatKonko's generation ability."""
chat_instance = ChatKonko(max_tokens=10, n=2)
msg = HumanMessage(content="Hi")
gen_response = chat_instance.generate([[msg], [msg]])
assert isinstance(gen_response, LLMResult)
assert len(gen_response.generations) == 2
for gen_list in gen_response.generations:
assert len(gen_list) == 2
for gen in gen_list:
assert isinstance(gen, ChatGeneration)
assert isinstance(gen.text, str)
assert gen.text == gen.message.content
def test_konko_multiple_outputs_test() -> None:
"""Test multiple completions with ChatKonko."""
chat_instance = ChatKonko(max_tokens=10, n=5)
msg = HumanMessage(content="Hi")
gen_response = chat_instance._generate([msg])
assert isinstance(gen_response, ChatResult)
assert len(gen_response.generations) == 5
for gen in gen_response.generations:
assert isinstance(gen.message, BaseMessage)
assert isinstance(gen.message.content, str)
def test_konko_streaming_callback_test() -> None:
"""Evaluate streaming's token callback functionality."""
callback_instance = FakeCallbackHandler()
callback_mgr = CallbackManager([callback_instance])
chat_instance = ChatKonko(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_mgr,
verbose=True,
)
msg = HumanMessage(content="Hi")
chat_response = chat_instance([msg])
assert callback_instance.llm_streams > 0
assert isinstance(chat_response, BaseMessage)
def test_konko_streaming_info_test() -> None:
"""Ensure generation details are retained during streaming."""
class TestCallback(FakeCallbackHandler):
data_store: dict = {}
def on_llm_end(self, *args: Any, **kwargs: Any) -> Any:
self.data_store["generation"] = args[0]
callback_instance = TestCallback()
callback_mgr = CallbackManager([callback_instance])
chat_instance = ChatKonko(
max_tokens=2,
temperature=0,
callback_manager=callback_mgr,
)
list(chat_instance.stream("hey"))
gen_data = callback_instance.data_store["generation"]
assert gen_data.generations[0][0].text == " Hey"
def test_konko_llm_model_name_test() -> None:
"""Check if llm_output has model info."""
chat_instance = ChatKonko(max_tokens=10)
msg = HumanMessage(content="Hi")
llm_data = chat_instance.generate([[msg]])
assert llm_data.llm_output is not None
assert llm_data.llm_output["model_name"] == chat_instance.model
def test_konko_streaming_model_name_test() -> None:
"""Check model info during streaming."""
chat_instance = ChatKonko(max_tokens=10, streaming=True)
msg = HumanMessage(content="Hi")
llm_data = chat_instance.generate([[msg]])
assert llm_data.llm_output is not None
assert llm_data.llm_output["model_name"] == chat_instance.model
def test_konko_streaming_param_validation_test() -> None:
"""Ensure correct token callback during streaming."""
with pytest.raises(ValueError):
ChatKonko(
max_tokens=10,
streaming=True,
temperature=0,
n=5,
)
def test_konko_additional_args_test() -> None:
"""Evaluate extra arguments for ChatKonko."""
chat_instance = ChatKonko(extra=3, max_tokens=10)
assert chat_instance.max_tokens == 10
assert chat_instance.model_kwargs == {"extra": 3}
chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2})
assert chat_instance.model_kwargs == {"extra": 3, "addition": 2}
with pytest.raises(ValueError):
ChatKonko(extra=3, model_kwargs={"extra": 2})
with pytest.raises(ValueError):
ChatKonko(model_kwargs={"temperature": 0.2})
with pytest.raises(ValueError):
ChatKonko(model_kwargs={"model": "text-davinci-003"})
def test_konko_token_streaming_test() -> None:
"""Check token streaming for ChatKonko."""
chat_instance = ChatKonko(max_tokens=10)
for token in chat_instance.stream("Just a test"):
assert isinstance(token.content, str)
| [
"Hi there",
"Hi",
"Initiate user chat."
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~mrkl~output_parser.py | import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class MRKLOutputParser(AgentOutputParser):
"""MRKL Output parser for the chat agent."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
text = re.sub(r"Observation:.*", "", text, 0, re.MULTILINE | re.DOTALL)
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match and includes_answer:
if text.find(FINAL_ANSWER_ACTION) < text.find(action_match.group(0)):
# if final answer is before the hallucination, return final answer
start_index = text.find(FINAL_ANSWER_ACTION) + len(FINAL_ANSWER_ACTION)
end_index = text.find("\n\n", start_index)
return AgentFinish(
{"output": text[start_index:end_index].strip()}, text[:end_index]
)
else:
raise OutputParserException(
f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
)
if action_match:
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
# ensure if its a well formed SQL query we don't remove any trailing " chars
if tool_input.startswith("SELECT ") is False:
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
@property
def _type(self) -> str:
return "mrkl"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~mongodb.py | import asyncio
import logging
from typing import Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class MongodbLoader(BaseLoader):
"""Load MongoDB documents."""
def __init__(
self,
connection_string: str,
db_name: str,
collection_name: str,
*,
filter_criteria: Optional[Dict] = None,
) -> None:
try:
from motor.motor_asyncio import AsyncIOMotorClient
except ImportError as e:
raise ImportError(
"Cannot import from motor, please install with `pip install motor`."
) from e
if not connection_string:
raise ValueError("connection_string must be provided.")
if not db_name:
raise ValueError("db_name must be provided.")
if not collection_name:
raise ValueError("collection_name must be provided.")
self.client = AsyncIOMotorClient(connection_string)
self.db_name = db_name
self.collection_name = collection_name
self.filter_criteria = filter_criteria or {}
self.db = self.client.get_database(db_name)
self.collection = self.db.get_collection(collection_name)
def load(self) -> List[Document]:
"""Load data into Document objects.
Attention:
This implementation starts an asyncio event loop which
will only work if running in a sync env. In an async env, it should
fail since there is already an event loop running.
This code should be updated to kick off the event loop from a separate
thread if running within an async context.
"""
return asyncio.run(self.aload())
async def aload(self) -> List[Document]:
"""Load data into Document objects."""
result = []
total_docs = await self.collection.count_documents(self.filter_criteria)
async for doc in self.collection.find(self.filter_criteria):
metadata = {
"database": self.db_name,
"collection": self.collection_name,
}
result.append(Document(page_content=str(doc), metadata=metadata))
if len(result) != total_docs:
logger.warning(
f"Only partial collection of documents returned. Loaded {len(result)} "
f"docs, expected {total_docs}."
)
return result
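# Example usage (illustrative sketch; connection string, database, collection
# and filter are placeholders):
#
#   loader = MongodbLoader(
#       connection_string="mongodb://localhost:27017/",
#       db_name="sample_restaurants",
#       collection_name="restaurants",
#       filter_criteria={"borough": "Bronx"},
#   )
#   docs = loader.load()           # sync entry point; starts its own event loop
#   # docs = await loader.aload()  # use this instead inside a running event loop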
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~graph_qa~falkordb.py | """Question answering over a graph."""
from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT
from langchain.chains.llm import LLMChain
from langchain.graphs import FalkorDBGraph
from langchain.pydantic_v1 import Field
from langchain.schema import BasePromptTemplate
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
def extract_cypher(text: str) -> str:
"""
Extract Cypher code from a text.
Args:
text: Text to extract Cypher code from.
Returns:
Cypher code extracted from the text.
"""
# The pattern to find Cypher code enclosed in triple backticks
pattern = r"```(.*?)```"
# Find all matches in the input text
matches = re.findall(pattern, text, re.DOTALL)
return matches[0] if matches else text
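# Example (illustrative; the LLM output below is hypothetical):
#
#   extract_cypher("Here is the query:\n```\nMATCH (n) RETURN n LIMIT 5\n```")
#   # -> "\nMATCH (n) RETURN n LIMIT 5\n"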
class FalkorDBQAChain(Chain):
"""Chain for question-answering against a graph by generating Cypher statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: FalkorDBGraph = Field(exclude=True)
cypher_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
top_k: int = 10
"""Number of results to return from the query"""
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the graph directly."""
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@property
def _chain_type(self) -> str:
return "graph_cypher_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
cypher_prompt: BasePromptTemplate = CYPHER_GENERATION_PROMPT,
**kwargs: Any,
) -> FalkorDBQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt)
return cls(
qa_chain=qa_chain,
cypher_generation_chain=cypher_generation_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
intermediate_steps: List = []
generated_cypher = self.cypher_generation_chain.run(
{"question": question, "schema": self.graph.schema}, callbacks=callbacks
)
# Extract Cypher code if it is wrapped in backticks
generated_cypher = extract_cypher(generated_cypher)
_run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_cypher, color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"query": generated_cypher})
# Retrieve and limit the number of results
context = self.graph.query(generated_cypher)[: self.top_k]
if self.return_direct:
final_result = context
else:
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
intermediate_steps.append({"context": context})
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
final_result = result[self.qa_chain.output_key]
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
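# Example usage (illustrative sketch; assumes a running FalkorDB instance, an
# LLM instance named `llm`, and placeholder database/question values):
#
#   graph = FalkorDBGraph(database="movies")
#   chain = FalkorDBQAChain.from_llm(llm, graph=graph, verbose=True)
#   chain.run("Which actors played in the movie Casino?")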
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~llm_requests.py | """Chain that hits a URL and then uses an LLM to parse results."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utilities.requests import TextRequestsWrapper
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501
}
class LLMRequestsChain(Chain):
"""Chain that requests a URL and then uses an LLM to parse results.
**Security Note**: This chain can make GET requests to arbitrary URLs,
including internal URLs.
Control access to who can run this chain and what network access
this chain has.
See https://python.langchain.com/docs/security for more information.
"""
llm_chain: LLMChain
requests_wrapper: TextRequestsWrapper = Field(
default_factory=lambda: TextRequestsWrapper(headers=DEFAULT_HEADERS),
exclude=True,
)
text_length: int = 8000
requests_key: str = "requests_result" #: :meta private:
input_key: str = "url" #: :meta private:
output_key: str = "output" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ImportError(
"Could not import bs4 python package. "
"Please install it with `pip install bs4`."
)
return values
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
from bs4 import BeautifulSoup
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
url = inputs[self.input_key]
res = self.requests_wrapper.get(url)
# extract the text from the html
soup = BeautifulSoup(res, "html.parser")
other_keys[self.requests_key] = soup.get_text()[: self.text_length]
result = self.llm_chain.predict(
callbacks=_run_manager.get_child(), **other_keys
)
return {self.output_key: result}
@property
def _chain_type(self) -> str:
return "llm_requests_chain"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~schema~runnable~test_utils.py | import sys
from typing import Callable
import pytest
from langchain.schema.runnable.utils import (
get_lambda_source,
indent_lines_after_first,
)
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run."
)
@pytest.mark.parametrize(
"func, expected_source",
[
(lambda x: x * 2, "lambda x: x * 2"),
(lambda a, b: a + b, "lambda a, b: a + b"),
(lambda x: x if x > 0 else 0, "lambda x: x if x > 0 else 0"),
],
)
def test_get_lambda_source(func: Callable, expected_source: str) -> None:
"""Test get_lambda_source function"""
source = get_lambda_source(func)
assert source == expected_source
@pytest.mark.parametrize(
"text,prefix,expected_output",
[
("line 1\nline 2\nline 3", "1", "line 1\n line 2\n line 3"),
("line 1\nline 2\nline 3", "ax", "line 1\n line 2\n line 3"),
],
)
def test_indent_lines_after_first(text: str, prefix: str, expected_output: str) -> None:
"""Test indent_lines_after_first function"""
indented_text = indent_lines_after_first(text, prefix)
assert indented_text == expected_output
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~test_nebulagraph.py | import unittest
from typing import Any
from unittest.mock import MagicMock, patch
from langchain.graphs import NebulaGraph
class TestNebulaGraph(unittest.TestCase):
def setUp(self) -> None:
self.space = "test_space"
self.username = "test_user"
self.password = "test_password"
self.address = "test_address"
self.port = 1234
self.session_pool_size = 10
@patch("nebula3.gclient.net.SessionPool.SessionPool")
def test_init(self, mock_session_pool: Any) -> None:
mock_session_pool.return_value = MagicMock()
nebula_graph = NebulaGraph(
self.space,
self.username,
self.password,
self.address,
self.port,
self.session_pool_size,
)
self.assertEqual(nebula_graph.space, self.space)
self.assertEqual(nebula_graph.username, self.username)
self.assertEqual(nebula_graph.password, self.password)
self.assertEqual(nebula_graph.address, self.address)
self.assertEqual(nebula_graph.port, self.port)
self.assertEqual(nebula_graph.session_pool_size, self.session_pool_size)
@patch("nebula3.gclient.net.SessionPool.SessionPool")
def test_get_session_pool(self, mock_session_pool: Any) -> None:
mock_session_pool.return_value = MagicMock()
nebula_graph = NebulaGraph(
self.space,
self.username,
self.password,
self.address,
self.port,
self.session_pool_size,
)
session_pool = nebula_graph._get_session_pool()
self.assertIsInstance(session_pool, MagicMock)
@patch("nebula3.gclient.net.SessionPool.SessionPool")
def test_del(self, mock_session_pool: Any) -> None:
mock_session_pool.return_value = MagicMock()
nebula_graph = NebulaGraph(
self.space,
self.username,
self.password,
self.address,
self.port,
self.session_pool_size,
)
nebula_graph.__del__()
mock_session_pool.return_value.close.assert_called_once()
@patch("nebula3.gclient.net.SessionPool.SessionPool")
def test_execute(self, mock_session_pool: Any) -> None:
mock_session_pool.return_value = MagicMock()
nebula_graph = NebulaGraph(
self.space,
self.username,
self.password,
self.address,
self.port,
self.session_pool_size,
)
query = "SELECT * FROM test_table"
result = nebula_graph.execute(query)
self.assertIsInstance(result, MagicMock)
@patch("nebula3.gclient.net.SessionPool.SessionPool")
def test_refresh_schema(self, mock_session_pool: Any) -> None:
mock_session_pool.return_value = MagicMock()
nebula_graph = NebulaGraph(
self.space,
self.username,
self.password,
self.address,
self.port,
self.session_pool_size,
)
nebula_graph.refresh_schema()
self.assertNotEqual(nebula_graph.get_schema, "")
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~docstore~arbitrary_fn.py | from typing import Callable, Union
from langchain.docstore.base import Docstore
from langchain.schema import Document
class DocstoreFn(Docstore):
"""Langchain Docstore via arbitrary lookup function.
This is useful when:
* it's expensive to construct an InMemoryDocstore/dict
* you retrieve documents from remote sources
* you just want to reuse existing objects
"""
def __init__(
self,
lookup_fn: Callable[[str], Union[Document, str]],
):
self._lookup_fn = lookup_fn
def search(self, search: str) -> Document:
"""Search for a document.
Args:
search: search string
Returns:
Document if found, else error message.
"""
r = self._lookup_fn(search)
if isinstance(r, str):
# NOTE: assume the search string is the source ID
return Document(page_content=r, metadata={"source": search})
elif isinstance(r, Document):
return r
raise ValueError(f"Unexpected type of document {type(r)}")
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~retrieval_qa~prompt.py | # flake8: noqa
from langchain.prompts import PromptTemplate
prompt_template = """Используй следующие части контекста, чтобы ответить на вопрос в конце. Если ты не знаешь ответа, просто скажи, что не знаешь, не пытайся придумать ответ.
{context}
Question: {question}
Полезный ответ:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
| [
"question",
"Используй следующие части контекста, чтобы ответить на вопрос в конце. Если ты не знаешь ответа, просто скажи, что не знаешь, не пытайся придумать ответ.\n\n{context}\n\nQuestion: {question}\nПолезный ответ:",
"context"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~trubrics_callback.py | import os
from typing import Any, Dict, List, Optional
from uuid import UUID
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult
from langchain.schema.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class TrubricsCallbackHandler(BaseCallbackHandler):
"""
Callback handler for Trubrics.
Args:
project: a trubrics project, default project is "default"
email: a trubrics account email, can equally be set in env variables
password: a trubrics account password, can equally be set in env variables
**kwargs: all other kwargs are parsed and set to trubrics prompt variables,
or added to the `metadata` dict
"""
def __init__(
self,
project: str = "default",
email: Optional[str] = None,
password: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__()
try:
from trubrics import Trubrics
except ImportError:
raise ImportError(
"The TrubricsCallbackHandler requires installation of "
"the trubrics package. "
"Please install it with `pip install trubrics`."
)
self.trubrics = Trubrics(
project=project,
email=email or os.environ["TRUBRICS_EMAIL"],
password=password or os.environ["TRUBRICS_PASSWORD"],
)
self.config_model: dict = {}
self.prompt: Optional[str] = None
self.messages: Optional[list] = None
self.trubrics_kwargs: Optional[dict] = kwargs if kwargs else None
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
self.prompt = prompts[0]
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> None:
self.messages = [_convert_message_to_dict(message) for message in messages[0]]
self.prompt = self.messages[-1]["content"]
def on_llm_end(self, response: LLMResult, run_id: UUID, **kwargs: Any) -> None:
tags = ["langchain"]
user_id = None
session_id = None
metadata: dict = {"langchain_run_id": run_id}
if self.messages:
metadata["messages"] = self.messages
if self.trubrics_kwargs:
if self.trubrics_kwargs.get("tags"):
                tags.extend(self.trubrics_kwargs.pop("tags"))
user_id = self.trubrics_kwargs.pop("user_id", None)
session_id = self.trubrics_kwargs.pop("session_id", None)
metadata.update(self.trubrics_kwargs)
for generation in response.generations:
self.trubrics.log_prompt(
config_model={
"model": response.llm_output.get("model_name")
if response.llm_output
else "NA"
},
prompt=self.prompt,
generation=generation[0].text,
user_id=user_id,
session_id=session_id,
tags=tags,
metadata=metadata,
)
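# Example usage (illustrative sketch; assumes TRUBRICS_EMAIL / TRUBRICS_PASSWORD
# and an OpenAI key are available in the environment):
#
#   from langchain.chat_models import ChatOpenAI
#
#   chat = ChatOpenAI(callbacks=[TrubricsCallbackHandler(project="default")])
#   chat.predict("Tell me a joke")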
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~memory~chat_message_histories~postgres.py | import json
import logging
from typing import List
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_STRING = "postgresql://postgres:mypassword@localhost/chat_history"
class PostgresChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Postgres database."""
def __init__(
self,
session_id: str,
connection_string: str = DEFAULT_CONNECTION_STRING,
table_name: str = "message_store",
):
import psycopg
from psycopg.rows import dict_row
try:
self.connection = psycopg.connect(connection_string)
self.cursor = self.connection.cursor(row_factory=dict_row)
except psycopg.OperationalError as error:
logger.error(error)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
id SERIAL PRIMARY KEY,
session_id TEXT NOT NULL,
message JSONB NOT NULL
);"""
self.cursor.execute(create_table_query)
self.connection.commit()
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from PostgreSQL"""
query = (
f"SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;"
)
self.cursor.execute(query, (self.session_id,))
items = [record["message"] for record in self.cursor.fetchall()]
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in PostgreSQL"""
from psycopg import sql
query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format(
sql.Identifier(self.table_name)
)
self.cursor.execute(
query,
(
self.session_id,
json.dumps(_message_to_dict(message), ensure_ascii=False),
),
)
self.connection.commit()
def clear(self) -> None:
"""Clear session memory from PostgreSQL"""
query = f"DELETE FROM {self.table_name} WHERE session_id = %s;"
self.cursor.execute(query, (self.session_id,))
self.connection.commit()
def __del__(self) -> None:
if self.cursor:
self.cursor.close()
if self.connection:
self.connection.close()
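# Example usage (illustrative sketch; the connection string is a placeholder):
#
#   history = PostgresChatMessageHistory(
#       session_id="example-session",
#       connection_string="postgresql://postgres:mypassword@localhost/chat_history",
#   )
#   history.add_user_message("hi!")
#   history.add_ai_message("hello, how can I help?")
#   history.messages  # -> [HumanMessage(...), AIMessage(...)]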
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~mhtml.py | import email
import logging
from typing import Dict, List, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class MHTMLLoader(BaseLoader):
"""Parse `MHTML` files with `BeautifulSoup`."""
def __init__(
self,
file_path: str,
open_encoding: Union[str, None] = None,
bs_kwargs: Union[dict, None] = None,
get_text_separator: str = "",
) -> None:
"""Initialise with path, and optionally, file encoding to use, and any kwargs
to pass to the BeautifulSoup object.
Args:
file_path: Path to file to load.
open_encoding: The encoding to use when opening the file.
bs_kwargs: Any kwargs to pass to the BeautifulSoup object.
get_text_separator: The separator to use when getting the text
from the soup.
"""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.file_path = file_path
self.open_encoding = open_encoding
if bs_kwargs is None:
bs_kwargs = {"features": "lxml"}
self.bs_kwargs = bs_kwargs
self.get_text_separator = get_text_separator
    def load(self) -> List[Document]:
        """Load MHTML document into document objects."""
        from bs4 import BeautifulSoup
with open(self.file_path, "r", encoding=self.open_encoding) as f:
message = email.message_from_string(f.read())
parts = message.get_payload()
if not isinstance(parts, list):
parts = [message]
for part in parts:
if part.get_content_type() == "text/html":
html = part.get_payload(decode=True).decode()
soup = BeautifulSoup(html, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": self.file_path,
"title": title,
}
return [Document(page_content=text, metadata=metadata)]
return []
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~autonomous_agents~hugginggpt~task_executor.py | import copy
import uuid
from typing import Dict, List
import numpy as np
from langchain.tools.base import BaseTool
from langchain_experimental.autonomous_agents.hugginggpt.task_planner import Plan
class Task:
def __init__(self, task: str, id: int, dep: List[int], args: Dict, tool: BaseTool):
self.task = task
self.id = id
self.dep = dep
self.args = args
self.tool = tool
self.status = "pending"
self.message = ""
self.result = ""
def __str__(self) -> str:
return f"{self.task}({self.args})"
def save_product(self) -> None:
import cv2
if self.task == "video_generator":
# ndarray to video
product = np.array(self.product)
nframe, height, width, _ = product.shape
video_filename = uuid.uuid4().hex[:6] + ".mp4"
fps = 30 # Frames per second
fourcc = cv2.VideoWriter_fourcc(*"mp4v") # type: ignore
video_out = cv2.VideoWriter(video_filename, fourcc, fps, (width, height))
for frame in self.product:
video_out.write(frame)
video_out.release()
self.result = video_filename
elif self.task == "image_generator":
# PIL.Image to image
filename = uuid.uuid4().hex[:6] + ".png"
self.product.save(filename) # type: ignore
self.result = filename
def completed(self) -> bool:
return self.status == "completed"
def failed(self) -> bool:
return self.status == "failed"
def pending(self) -> bool:
return self.status == "pending"
def run(self) -> str:
from diffusers.utils import load_image
try:
new_args = copy.deepcopy(self.args)
for k, v in new_args.items():
if k == "image":
new_args["image"] = load_image(v)
if self.task in ["video_generator", "image_generator", "text_reader"]:
self.product = self.tool(**new_args)
else:
self.result = self.tool(**new_args)
        except Exception as e:
            self.status = "failed"
            self.message = str(e)
            return self.message
        self.status = "completed"
self.save_product()
return self.result
class TaskExecutor:
"""Load tools to execute tasks."""
def __init__(self, plan: Plan):
self.plan = plan
self.tasks = []
self.id_task_map = {}
self.status = "pending"
for step in self.plan.steps:
task = Task(step.task, step.id, step.dep, step.args, step.tool)
self.tasks.append(task)
self.id_task_map[step.id] = task
def completed(self) -> bool:
return all(task.completed() for task in self.tasks)
def failed(self) -> bool:
return any(task.failed() for task in self.tasks)
def pending(self) -> bool:
return any(task.pending() for task in self.tasks)
def check_dependency(self, task: Task) -> bool:
for dep_id in task.dep:
if dep_id == -1:
continue
dep_task = self.id_task_map[dep_id]
if dep_task.failed() or dep_task.pending():
return False
return True
def update_args(self, task: Task) -> None:
for dep_id in task.dep:
if dep_id == -1:
continue
dep_task = self.id_task_map[dep_id]
for k, v in task.args.items():
if f"<resource-{dep_id}>" in v:
                    task.args[k] = v.replace(f"<resource-{dep_id}>", dep_task.result)
def run(self) -> str:
for task in self.tasks:
print(f"running {task}")
if task.pending() and self.check_dependency(task):
self.update_args(task)
task.run()
if self.completed():
self.status = "completed"
elif self.failed():
self.status = "failed"
else:
self.status = "pending"
return self.status
def __str__(self) -> str:
result = ""
for task in self.tasks:
result += f"{task}\n"
result += f"status: {task.status}\n"
if task.failed():
result += f"message: {task.message}\n"
if task.completed():
result += f"result: {task.result}\n"
return result
def __repr__(self) -> str:
return self.__str__()
def describe(self) -> str:
return self.__str__()
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~directory.py | import concurrent
import logging
import random
from pathlib import Path
from typing import Any, List, Optional, Type, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.html_bs import BSHTMLLoader
from langchain.document_loaders.text import TextLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
FILE_LOADER_TYPE = Union[
Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]
]
logger = logging.getLogger(__name__)
def _is_visible(p: Path) -> bool:
parts = p.parts
for _p in parts:
if _p.startswith("."):
return False
return True
class DirectoryLoader(BaseLoader):
"""Load from a directory."""
def __init__(
self,
path: str,
glob: str = "**/[!.]*",
silent_errors: bool = False,
load_hidden: bool = False,
loader_cls: FILE_LOADER_TYPE = UnstructuredFileLoader,
loader_kwargs: Union[dict, None] = None,
recursive: bool = False,
show_progress: bool = False,
use_multithreading: bool = False,
max_concurrency: int = 4,
*,
sample_size: int = 0,
randomize_sample: bool = False,
sample_seed: Union[int, None] = None,
):
"""Initialize with a path to directory and how to glob over it.
Args:
path: Path to directory.
glob: Glob pattern to use to find files. Defaults to "**/[!.]*"
(all files except hidden).
silent_errors: Whether to silently ignore errors. Defaults to False.
load_hidden: Whether to load hidden files. Defaults to False.
loader_cls: Loader class to use for loading files.
Defaults to UnstructuredFileLoader.
loader_kwargs: Keyword arguments to pass to loader_cls. Defaults to None.
recursive: Whether to recursively search for files. Defaults to False.
show_progress: Whether to show a progress bar. Defaults to False.
use_multithreading: Whether to use multithreading. Defaults to False.
max_concurrency: The maximum number of threads to use. Defaults to 4.
sample_size: The maximum number of files you would like to load from the
directory.
            randomize_sample: Shuffle the files to get a random sample.
            sample_seed: Set the seed of the random shuffle for reproducibility.
"""
if loader_kwargs is None:
loader_kwargs = {}
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.loader_cls = loader_cls
self.loader_kwargs = loader_kwargs
self.silent_errors = silent_errors
self.recursive = recursive
self.show_progress = show_progress
self.use_multithreading = use_multithreading
self.max_concurrency = max_concurrency
self.sample_size = sample_size
self.randomize_sample = randomize_sample
self.sample_seed = sample_seed
def load_file(
self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]
) -> None:
"""Load a file.
Args:
item: File path.
path: Directory path.
docs: List of documents to append to.
pbar: Progress bar. Defaults to None.
"""
if item.is_file():
if _is_visible(item.relative_to(path)) or self.load_hidden:
try:
logger.debug(f"Processing file: {str(item)}")
sub_docs = self.loader_cls(str(item), **self.loader_kwargs).load()
docs.extend(sub_docs)
except Exception as e:
if self.silent_errors:
logger.warning(f"Error loading file {str(item)}: {e}")
else:
raise e
finally:
if pbar:
pbar.update(1)
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.path)
if not p.exists():
raise FileNotFoundError(f"Directory not found: '{self.path}'")
if not p.is_dir():
raise ValueError(f"Expected directory, got file: '{self.path}'")
docs: List[Document] = []
items = list(p.rglob(self.glob) if self.recursive else p.glob(self.glob))
if self.sample_size > 0:
if self.randomize_sample:
randomizer = (
random.Random(self.sample_seed) if self.sample_seed else random
)
randomizer.shuffle(items) # type: ignore
items = items[: min(len(items), self.sample_size)]
pbar = None
if self.show_progress:
try:
from tqdm import tqdm
pbar = tqdm(total=len(items))
except ImportError as e:
logger.warning(
"To log the progress of DirectoryLoader you need to install tqdm, "
"`pip install tqdm`"
)
if self.silent_errors:
logger.warning(e)
else:
raise ImportError(
"To log the progress of DirectoryLoader "
"you need to install tqdm, "
"`pip install tqdm`"
)
if self.use_multithreading:
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.max_concurrency
) as executor:
executor.map(lambda i: self.load_file(i, p, docs, pbar), items)
else:
for i in items:
self.load_file(i, p, docs, pbar)
if pbar:
pbar.close()
return docs
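# Example usage (illustrative sketch; path and glob are placeholders, and
# TextLoader is just one of the supported loader classes):
#
#   loader = DirectoryLoader(
#       "docs/", glob="**/*.md", loader_cls=TextLoader, show_progress=True
#   )
#   docs = loader.load()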
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~memory~chat_message_histories~zep.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
if TYPE_CHECKING:
from zep_python import Memory, MemorySearchResult, Message, NotFoundError
logger = logging.getLogger(__name__)
class ZepChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that uses Zep as a backend.
Recommended usage::
# Set up Zep Chat History
zep_chat_history = ZepChatMessageHistory(
session_id=session_id,
url=ZEP_API_URL,
api_key=<your_api_key>,
)
# Use a standard ConversationBufferMemory to encapsulate the Zep chat history
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=zep_chat_history
)
Zep provides long-term conversation storage for LLM apps. The server stores,
summarizes, embeds, indexes, and enriches conversational AI chat
histories, and exposes them via simple, low-latency APIs.
For server installation instructions and more, see:
https://docs.getzep.com/deployment/quickstart/
This class is a thin wrapper around the zep-python package. Additional
Zep functionality is exposed via the `zep_summary` and `zep_messages`
properties.
For more information on the zep-python package, see:
https://github.com/getzep/zep-python
"""
def __init__(
self,
session_id: str,
url: str = "http://localhost:8000",
api_key: Optional[str] = None,
) -> None:
try:
from zep_python import ZepClient
except ImportError:
raise ImportError(
"Could not import zep-python package. "
"Please install it with `pip install zep-python`."
)
self.zep_client = ZepClient(base_url=url, api_key=api_key)
self.session_id = session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve messages from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory:
return []
messages: List[BaseMessage] = []
# Extract summary, if present, and messages
if zep_memory.summary:
if len(zep_memory.summary.content) > 0:
messages.append(SystemMessage(content=zep_memory.summary.content))
if zep_memory.messages:
msg: Message
for msg in zep_memory.messages:
metadata: Dict = {
"uuid": msg.uuid,
"created_at": msg.created_at,
"token_count": msg.token_count,
"metadata": msg.metadata,
}
if msg.role == "ai":
messages.append(
AIMessage(content=msg.content, additional_kwargs=metadata)
)
else:
messages.append(
HumanMessage(content=msg.content, additional_kwargs=metadata)
)
return messages
@property
def zep_messages(self) -> List[Message]:
"""Retrieve summary from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory:
return []
return zep_memory.messages
@property
def zep_summary(self) -> Optional[str]:
"""Retrieve summary from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory or not zep_memory.summary:
return None
return zep_memory.summary.content
def _get_memory(self) -> Optional[Memory]:
"""Retrieve memory from Zep"""
from zep_python import NotFoundError
try:
zep_memory: Memory = self.zep_client.memory.get_memory(self.session_id)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Returning None"
)
return None
return zep_memory
def add_user_message(
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding a human message string to the store.
Args:
message: The string contents of a human message.
metadata: Optional metadata to attach to the message.
"""
self.add_message(HumanMessage(content=message), metadata=metadata)
def add_ai_message(
self, message: str, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Convenience method for adding an AI message string to the store.
Args:
message: The string contents of an AI message.
metadata: Optional metadata to attach to the message.
"""
self.add_message(AIMessage(content=message), metadata=metadata)
def add_message(
self, message: BaseMessage, metadata: Optional[Dict[str, Any]] = None
) -> None:
"""Append the message to the Zep memory history"""
from zep_python import Memory, Message
zep_message = Message(
content=message.content, role=message.type, metadata=metadata
)
zep_memory = Memory(messages=[zep_message])
self.zep_client.memory.add_memory(self.session_id, zep_memory)
def search(
self, query: str, metadata: Optional[Dict] = None, limit: Optional[int] = None
) -> List[MemorySearchResult]:
"""Search Zep memory for messages matching the query"""
from zep_python import MemorySearchPayload
payload: MemorySearchPayload = MemorySearchPayload(
text=query, metadata=metadata
)
return self.zep_client.memory.search_memory(
self.session_id, payload, limit=limit
)
def clear(self) -> None:
"""Clear session memory from Zep. Note that Zep is long-term storage for memory
and this is not advised unless you have specific data retention requirements.
"""
try:
self.zep_client.memory.delete_memory(self.session_id)
except NotFoundError:
logger.warning(
f"Session {self.session_id} not found in Zep. Skipping delete."
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~parsers~docai.py | """Module contains a PDF parser based on Document AI from Google Cloud.
You need to install two libraries to use this parser:
pip install google-cloud-documentai
pip install google-cloud-documentai-toolbox
"""
import logging
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterator, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
from langchain.utilities.vertexai import get_client_info
from langchain.utils.iter import batch_iterate
if TYPE_CHECKING:
from google.api_core.operation import Operation
from google.cloud.documentai import DocumentProcessorServiceClient
logger = logging.getLogger(__name__)
@dataclass
class DocAIParsingResults:
"""A dataclass to store Document AI parsing results."""
source_path: str
parsed_path: str
class DocAIParser(BaseBlobParser):
"""`Google Cloud Document AI` parser.
For a detailed explanation of Document AI, refer to the product documentation.
https://cloud.google.com/document-ai/docs/overview
"""
def __init__(
self,
*,
client: Optional["DocumentProcessorServiceClient"] = None,
location: Optional[str] = None,
gcs_output_path: Optional[str] = None,
processor_name: Optional[str] = None,
):
"""Initializes the parser.
Args:
client: a DocumentProcessorServiceClient to use
location: a Google Cloud location where a Document AI processor is located
gcs_output_path: a path on Google Cloud Storage to store parsing results
processor_name: full resource name of a Document AI processor or processor
version
You should provide either a client or location (and then a client
would be instantiated).
"""
if bool(client) == bool(location):
raise ValueError(
"You must specify either a client or a location to instantiate "
"a client."
)
if processor_name and not processor_name.isalnum():
raise ValueError(
f"Processor name {processor_name} has a wrong format. Use only ID from"
"the `Basic information` section on the GCP console. E.g., if your "
"prediction endpoint looks like https://us-documentai.googleapis.com"
"/v1/projects/PROJECT_ID/locations/us/processors/PROCESSOR_ID:process"
", use only PROCESSOR_ID part."
)
self._gcs_output_path = gcs_output_path
self._processor_name = processor_name
if client:
self._client = client
else:
try:
from google.api_core.client_options import ClientOptions
from google.cloud.documentai import DocumentProcessorServiceClient
except ImportError as exc:
raise ImportError(
"documentai package not found, please install it with"
" `pip install google-cloud-documentai`"
) from exc
options = ClientOptions(
api_endpoint=f"{location}-documentai.googleapis.com"
)
self._client = DocumentProcessorServiceClient(
client_options=options,
client_info=get_client_info(module="document-ai"),
)
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Parses a blob lazily.
Args:
            blob: a Blob to parse
This is a long-running operation. A recommended way is to batch
documents together and use the `batch_parse()` method.
"""
yield from self.batch_parse([blob], gcs_output_path=self._gcs_output_path)
def online_process(
self,
blob: Blob,
enable_native_pdf_parsing: bool = True,
field_mask: Optional[str] = None,
page_range: Optional[List[int]] = None,
) -> Iterator[Document]:
"""Parses a blob lazily using online processing.
Args:
blob: a blob to parse.
enable_native_pdf_parsing: enable pdf embedded text extraction
field_mask: a comma-separated list of which fields to include in the
Document AI response.
suggested: "text,pages.pageNumber,pages.layout"
page_range: list of page numbers to parse. If `None`,
entire document will be parsed.
"""
try:
from google.cloud import documentai
from google.cloud.documentai_v1.types import (
IndividualPageSelector,
OcrConfig,
ProcessOptions,
)
except ImportError as exc:
raise ImportError(
"documentai package not found, please install it with"
" `pip install google-cloud-documentai`"
) from exc
try:
from google.cloud.documentai_toolbox.wrappers.document import (
Document as WrappedDocument,
)
except ImportError as exc:
raise ImportError(
"documentai_toolbox package not found, please install it with"
" `pip install google-cloud-documentai-toolbox`"
) from exc
ocr_config = (
OcrConfig(enable_native_pdf_parsing=enable_native_pdf_parsing)
if enable_native_pdf_parsing
else None
)
individual_page_selector = (
IndividualPageSelector(pages=page_range) if page_range else None
)
response = self._client.process_document(
documentai.ProcessRequest(
name=self._processor_name,
gcs_document=documentai.GcsDocument(
gcs_uri=blob.path,
mime_type=blob.mimetype or "application/pdf",
),
process_options=ProcessOptions(
ocr_config=ocr_config,
individual_page_selector=individual_page_selector,
),
skip_human_review=True,
field_mask=field_mask,
)
)
wrapped_document = WrappedDocument.from_documentai_document(response.document)
yield from (
Document(
page_content=page.text,
metadata={
"page": page.page_number,
"source": wrapped_document.gcs_input_uri,
},
)
for page in wrapped_document.pages
)
def batch_parse(
self,
blobs: Sequence[Blob],
gcs_output_path: Optional[str] = None,
timeout_sec: int = 3600,
check_in_interval_sec: int = 60,
) -> Iterator[Document]:
"""Parses a list of blobs lazily.
Args:
blobs: a list of blobs to parse.
gcs_output_path: a path on Google Cloud Storage to store parsing results.
timeout_sec: a timeout to wait for Document AI to complete, in seconds.
check_in_interval_sec: an interval to wait until next check
whether parsing operations have been completed, in seconds
This is a long-running operation. A recommended way is to decouple
parsing from creating LangChain Documents:
        >>> operations = parser.docai_parse(blobs, gcs_output_path=gcs_path)
>>> parser.is_running(operations)
You can get operations names and save them:
>>> names = [op.operation.name for op in operations]
And when all operations are finished, you can use their results:
>>> operations = parser.operations_from_names(operation_names)
>>> results = parser.get_results(operations)
>>> docs = parser.parse_from_results(results)
"""
output_path = gcs_output_path or self._gcs_output_path
if not output_path:
raise ValueError(
"An output path on Google Cloud Storage should be provided."
)
operations = self.docai_parse(blobs, gcs_output_path=output_path)
operation_names = [op.operation.name for op in operations]
logger.debug(
"Started parsing with Document AI, submitted operations %s", operation_names
)
time_elapsed = 0
while self.is_running(operations):
time.sleep(check_in_interval_sec)
time_elapsed += check_in_interval_sec
if time_elapsed > timeout_sec:
raise TimeoutError(
"Timeout exceeded! Check operations " f"{operation_names} later!"
)
logger.debug(".")
results = self.get_results(operations=operations)
yield from self.parse_from_results(results)
def parse_from_results(
self, results: List[DocAIParsingResults]
) -> Iterator[Document]:
try:
from google.cloud.documentai_toolbox.utilities.gcs_utilities import (
split_gcs_uri,
)
from google.cloud.documentai_toolbox.wrappers.document import (
Document as WrappedDocument,
)
except ImportError as exc:
raise ImportError(
"documentai_toolbox package not found, please install it with"
" `pip install google-cloud-documentai-toolbox`"
) from exc
for result in results:
gcs_bucket_name, gcs_prefix = split_gcs_uri(result.parsed_path)
wrapped_document = WrappedDocument.from_gcs(
gcs_bucket_name, gcs_prefix, gcs_input_uri=result.source_path
)
yield from (
Document(
page_content=page.text,
metadata={
"page": page.page_number,
"source": wrapped_document.gcs_input_uri,
},
)
for page in wrapped_document.pages
)
def operations_from_names(self, operation_names: List[str]) -> List["Operation"]:
"""Initializes Long-Running Operations from their names."""
try:
from google.longrunning.operations_pb2 import (
GetOperationRequest, # type: ignore
)
except ImportError as exc:
raise ImportError(
"long running operations package not found, please install it with"
" `pip install gapic-google-longrunning`"
) from exc
return [
self._client.get_operation(request=GetOperationRequest(name=name))
for name in operation_names
]
def is_running(self, operations: List["Operation"]) -> bool:
return any(not op.done() for op in operations)
def docai_parse(
self,
blobs: Sequence[Blob],
*,
gcs_output_path: Optional[str] = None,
processor_name: Optional[str] = None,
batch_size: int = 1000,
enable_native_pdf_parsing: bool = True,
field_mask: Optional[str] = None,
) -> List["Operation"]:
"""Runs Google Document AI PDF Batch Processing on a list of blobs.
Args:
blobs: a list of blobs to be parsed
gcs_output_path: a path (folder) on GCS to store results
processor_name: name of a Document AI processor.
batch_size: amount of documents per batch
enable_native_pdf_parsing: a config option for the parser
field_mask: a comma-separated list of which fields to include in the
Document AI response.
suggested: "text,pages.pageNumber,pages.layout"
Document AI has a 1000 file limit per batch, so batches larger than that need
to be split into multiple requests.
Batch processing is an async long-running operation
and results are stored in a output GCS bucket.
"""
try:
from google.cloud import documentai
from google.cloud.documentai_v1.types import OcrConfig, ProcessOptions
except ImportError as exc:
raise ImportError(
"documentai package not found, please install it with"
" `pip install google-cloud-documentai`"
) from exc
output_path = gcs_output_path or self._gcs_output_path
if output_path is None:
raise ValueError(
"An output path on Google Cloud Storage should be provided."
)
processor_name = processor_name or self._processor_name
if processor_name is None:
raise ValueError("A Document AI processor name should be provided.")
operations = []
for batch in batch_iterate(size=batch_size, iterable=blobs):
input_config = documentai.BatchDocumentsInputConfig(
gcs_documents=documentai.GcsDocuments(
documents=[
documentai.GcsDocument(
gcs_uri=blob.path,
mime_type=blob.mimetype or "application/pdf",
)
for blob in batch
]
)
)
output_config = documentai.DocumentOutputConfig(
gcs_output_config=documentai.DocumentOutputConfig.GcsOutputConfig(
gcs_uri=output_path, field_mask=field_mask
)
)
process_options = (
ProcessOptions(
ocr_config=OcrConfig(
enable_native_pdf_parsing=enable_native_pdf_parsing
)
)
if enable_native_pdf_parsing
else None
)
operations.append(
self._client.batch_process_documents(
documentai.BatchProcessRequest(
name=processor_name,
input_documents=input_config,
document_output_config=output_config,
process_options=process_options,
skip_human_review=True,
)
)
)
return operations
def get_results(self, operations: List["Operation"]) -> List[DocAIParsingResults]:
try:
from google.cloud.documentai_v1 import BatchProcessMetadata
except ImportError as exc:
raise ImportError(
"documentai package not found, please install it with"
" `pip install google-cloud-documentai`"
) from exc
return [
DocAIParsingResults(
source_path=status.input_gcs_source,
parsed_path=status.output_gcs_destination,
)
for op in operations
for status in (
op.metadata.individual_process_statuses
if isinstance(op.metadata, BatchProcessMetadata)
else BatchProcessMetadata.deserialize(
op.metadata.value
).individual_process_statuses
)
]
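# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of driving the parser defined above. It assumes the
# class is exported as ``DocAIParser``; the processor ID and the ``gs://`` paths
# are placeholders that must be replaced with real Google Cloud resources.
if __name__ == "__main__":
    from langchain.document_loaders.blob_loaders import Blob

    parser = DocAIParser(
        location="us",  # region where the Document AI processor is deployed
        processor_name="abcdef0123456789",  # processor ID from the GCP console
        gcs_output_path="gs://my-docai-bucket/output/",  # hypothetical bucket
    )
    blob = Blob(path="gs://my-docai-bucket/input/invoice-1.pdf")
    # Online processing: synchronous, suited to a single small document.
    for doc in parser.online_process(blob):
        print(doc.metadata["page"], doc.page_content[:80])
    # Batch processing: long-running; results land in the GCS output path.
    batch_docs = list(parser.batch_parse([blob]))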
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~sagemaker_endpoint.py | from typing import Any, Dict, List, Optional
from langchain.llms.sagemaker_endpoint import ContentHandlerBase
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]):
"""Content handler for LLM class."""
class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
"""Custom Sagemaker Inference Endpoints.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Example:
.. code-block:: python
from langchain.embeddings import SagemakerEndpointEmbeddings
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpointEmbeddings(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
#Use with boto3 client
client = boto3.client(
"sagemaker-runtime",
region_name=region_name
)
se = SagemakerEndpointEmbeddings(
endpoint_name=endpoint_name,
client=client
)
"""
client: Any = None
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: EmbeddingsContentHandler
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
"""
Example:
.. code-block:: python
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler
class ContentHandler(EmbeddingsContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"inputs": prompts, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> List[List[float]]:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["vectors"]
""" # noqa: E501
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_. docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Dont do anything if client provided externally"""
if values.get("client") is not None:
return values
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None:
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
return values
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""Call out to SageMaker Inference embedding endpoint."""
# replace newlines, which can negatively affect performance.
texts = list(map(lambda x: x.replace("\n", " "), texts))
_model_kwargs = self.model_kwargs or {}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(texts, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
# send request
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
return self.content_handler.transform_output(response["Body"])
def embed_documents(
self, texts: List[str], chunk_size: int = 64
) -> List[List[float]]:
"""Compute doc embeddings using a SageMaker Inference Endpoint.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size defines how many input texts will
                be grouped together as one request. If None, will use the
chunk size specified by the class.
Returns:
List of embeddings, one for each text.
"""
results = []
_chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
for i in range(0, len(texts), _chunk_size):
response = self._embedding_func(texts[i : i + _chunk_size])
results.extend(response)
return results
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a SageMaker inference endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func([text])[0]
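# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of wiring a content handler into the embeddings
# class defined above. The endpoint name, region and the JSON keys ("inputs",
# "vectors") are assumptions about a hypothetical SageMaker model container;
# adapt them to the payload format your endpoint actually expects.
if __name__ == "__main__":
    import json

    class DemoContentHandler(EmbeddingsContentHandler):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
            return json.dumps({"inputs": prompts, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: Any) -> List[List[float]]:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json["vectors"]

    embeddings = SagemakerEndpointEmbeddings(
        endpoint_name="my-embedding-endpoint",  # hypothetical endpoint name
        region_name="us-west-2",
        content_handler=DemoContentHandler(),
    )
    print(embeddings.embed_query("hello world")[:5])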
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~awadb.py | from __future__ import annotations
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Type
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import awadb
logger = logging.getLogger()
DEFAULT_TOPN = 4
class AwaDB(VectorStore):
"""`AwaDB` vector store."""
_DEFAULT_TABLE_NAME = "langchain_awadb"
def __init__(
self,
table_name: str = _DEFAULT_TABLE_NAME,
embedding: Optional[Embeddings] = None,
log_and_data_dir: Optional[str] = None,
client: Optional[awadb.Client] = None,
**kwargs: Any,
) -> None:
"""Initialize with AwaDB client.
If table_name is not specified,
a random table name of `_DEFAULT_TABLE_NAME + last segment of uuid`
would be created automatically.
Args:
table_name: Name of the table created, default _DEFAULT_TABLE_NAME.
embedding: Optional Embeddings initially set.
log_and_data_dir: Optional the root directory of log and data.
client: Optional AwaDB client.
kwargs: Any possible extend parameters in the future.
Returns:
None.
"""
try:
import awadb
except ImportError:
raise ImportError(
"Could not import awadb python package. "
"Please install it with `pip install awadb`."
)
if client is not None:
self.awadb_client = client
else:
if log_and_data_dir is not None:
self.awadb_client = awadb.Client(log_and_data_dir)
else:
self.awadb_client = awadb.Client()
if table_name == self._DEFAULT_TABLE_NAME:
table_name += "_"
table_name += str(uuid.uuid4()).split("-")[-1]
self.awadb_client.Create(table_name)
self.table2embeddings: dict[str, Embeddings] = {}
if embedding is not None:
self.table2embeddings[table_name] = embedding
self.using_table_name = table_name
@property
def embeddings(self) -> Optional[Embeddings]:
if self.using_table_name in self.table2embeddings:
return self.table2embeddings[self.using_table_name]
return None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
is_duplicate_texts: Optional[bool] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
            is_duplicate_texts: Optional; whether duplicate texts are allowed. Defaults to True.
kwargs: any possible extend parameters in the future.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
embeddings = None
if self.using_table_name in self.table2embeddings:
embeddings = self.table2embeddings[self.using_table_name].embed_documents(
list(texts)
)
return self.awadb_client.AddTexts(
"embedding_text",
"text_embedding",
texts,
embeddings,
metadatas,
is_duplicate_texts,
)
def load_local(
self,
table_name: str,
**kwargs: Any,
) -> bool:
"""Load the local specified table.
Args:
table_name: Table name
kwargs: Any possible extend parameters in the future.
Returns:
Success or failure of loading the local specified table
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
return self.awadb_client.Load(table_name)
def similarity_search(
self,
query: str,
k: int = DEFAULT_TOPN,
text_in_page_content: Optional[str] = None,
meta_filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text query.
k: The maximum number of documents to return.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter (Optional[dict]): Filter by metadata. Defaults to None.
E.g. `{"color" : "red", "price": 4.20}`. Optional.
E.g. `{"max_price" : 15.66, "min_price": 4.20}`
`price` is the metadata field, means range filter(4.20<'price'<15.66).
E.g. `{"maxe_price" : 15.66, "mine_price": 4.20}`
`price` is the metadata field, means range filter(4.20<='price'<=15.66).
kwargs: Any possible extend parameters in the future.
Returns:
Returns the k most similar documents to the specified text query.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
embedding = None
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(query)
else:
from awadb import AwaEmbedding
embedding = AwaEmbedding().Embedding(query)
not_include_fields: Set[str] = {"text_embedding", "_id", "score"}
return self.similarity_search_by_vector(
embedding,
k,
text_in_page_content=text_in_page_content,
meta_filter=meta_filter,
not_include_fields_in_metadata=not_include_fields,
)
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_TOPN,
text_in_page_content: Optional[str] = None,
meta_filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""The most k similar documents and scores of the specified query.
Args:
query: Text query.
k: The k most similar documents to the text query.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter: Filter by metadata. Defaults to None.
kwargs: Any possible extend parameters in the future.
Returns:
The k most similar documents to the specified text query.
0 is dissimilar, 1 is the most similar.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
embedding = None
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(query)
else:
from awadb import AwaEmbedding
embedding = AwaEmbedding().Embedding(query)
results: List[Tuple[Document, float]] = []
not_include_fields: Set[str] = {"text_embedding", "_id"}
retrieval_docs = self.similarity_search_by_vector(
embedding,
k,
text_in_page_content=text_in_page_content,
meta_filter=meta_filter,
not_include_fields_in_metadata=not_include_fields,
)
for doc in retrieval_docs:
score = doc.metadata["score"]
del doc.metadata["score"]
doc_tuple = (doc, score)
results.append(doc_tuple)
return results
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
return self.similarity_search_with_score(query, k, **kwargs)
def similarity_search_by_vector(
self,
embedding: Optional[List[float]] = None,
k: int = DEFAULT_TOPN,
text_in_page_content: Optional[str] = None,
meta_filter: Optional[dict] = None,
not_include_fields_in_metadata: Optional[Set[str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter: Filter by metadata. Defaults to None.
            not_include_fields_in_metadata: Metadata fields to exclude from each document.
Returns:
List of Documents which are the most similar to the query vector.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
results: List[Document] = []
if embedding is None:
return results
show_results = self.awadb_client.Search(
embedding,
k,
text_in_page_content=text_in_page_content,
meta_filter=meta_filter,
not_include_fields=not_include_fields_in_metadata,
)
if show_results.__len__() == 0:
return results
for item_detail in show_results[0]["ResultItems"]:
content = ""
meta_data = {}
for item_key in item_detail:
if item_key == "embedding_text":
content = item_detail[item_key]
continue
elif not_include_fields_in_metadata is not None:
if item_key in not_include_fields_in_metadata:
continue
meta_data[item_key] = item_detail[item_key]
results.append(Document(page_content=content, metadata=meta_data))
return results
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
text_in_page_content: Optional[str] = None,
meta_filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter (Optional[dict]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
embedding: List[float] = []
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(query)
else:
from awadb import AwaEmbedding
embedding = AwaEmbedding().Embedding(query)
if embedding.__len__() == 0:
return []
results = self.max_marginal_relevance_search_by_vector(
embedding,
k,
fetch_k,
lambda_mult=lambda_mult,
text_in_page_content=text_in_page_content,
meta_filter=meta_filter,
)
return results
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
text_in_page_content: Optional[str] = None,
meta_filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter (Optional[dict]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
results: List[Document] = []
if embedding is None:
return results
not_include_fields: set = {"_id", "score"}
retrieved_docs = self.similarity_search_by_vector(
embedding,
fetch_k,
text_in_page_content=text_in_page_content,
meta_filter=meta_filter,
not_include_fields_in_metadata=not_include_fields,
)
top_embeddings = []
for doc in retrieved_docs:
top_embeddings.append(doc.metadata["text_embedding"])
selected_docs = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32), embedding_list=top_embeddings
)
for s_id in selected_docs:
if "text_embedding" in retrieved_docs[s_id].metadata:
del retrieved_docs[s_id].metadata["text_embedding"]
results.append(retrieved_docs[s_id])
return results
def get(
self,
ids: Optional[List[str]] = None,
text_in_page_content: Optional[str] = None,
meta_filter: Optional[dict] = None,
not_include_fields: Optional[Set[str]] = None,
limit: Optional[int] = None,
**kwargs: Any,
) -> Dict[str, Document]:
"""Return docs according ids.
Args:
ids: The ids of the embedding vectors.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter: Filter by any metadata of the document.
            not_include_fields: Fields to exclude from each returned document.
limit: The number of documents to return. Defaults to 5. Optional.
Returns:
Documents which satisfy the input conditions.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
docs_detail = self.awadb_client.Get(
ids=ids,
text_in_page_content=text_in_page_content,
meta_filter=meta_filter,
not_include_fields=not_include_fields,
limit=limit,
)
results: Dict[str, Document] = {}
for doc_detail in docs_detail:
content = ""
meta_info = {}
for field in doc_detail:
if field == "embedding_text":
content = doc_detail[field]
continue
elif field == "text_embedding" or field == "_id":
continue
meta_info[field] = doc_detail[field]
doc = Document(page_content=content, metadata=meta_info)
results[doc_detail["_id"]] = doc
return results
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete the documents which have the specified ids.
Args:
ids: The ids of the embedding vectors.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful.
False otherwise, None if not implemented.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
ret: Optional[bool] = None
if ids is None or ids.__len__() == 0:
return ret
ret = self.awadb_client.Delete(ids)
return ret
def update(
self,
ids: List[str],
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Update the documents which have the specified ids.
Args:
ids: The id list of the updating embedding vector.
texts: The texts of the updating documents.
metadatas: The metadatas of the updating documents.
Returns:
the ids of the updated documents.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
return self.awadb_client.UpdateTexts(
ids=ids, text_field_name="embedding_text", texts=texts, metadatas=metadatas
)
def create_table(
self,
table_name: str,
**kwargs: Any,
) -> bool:
"""Create a new table."""
if self.awadb_client is None:
return False
ret = self.awadb_client.Create(table_name)
if ret:
self.using_table_name = table_name
return ret
def use(
self,
table_name: str,
**kwargs: Any,
) -> bool:
"""Use the specified table. Don't know the tables, please invoke list_tables."""
if self.awadb_client is None:
return False
ret = self.awadb_client.Use(table_name)
if ret:
self.using_table_name = table_name
return ret
def list_tables(
self,
**kwargs: Any,
) -> List[str]:
"""List all the tables created by the client."""
if self.awadb_client is None:
return []
return self.awadb_client.ListAllTables()
def get_current_table(
self,
**kwargs: Any,
) -> str:
"""Get the current table."""
return self.using_table_name
@classmethod
def from_texts(
cls: Type[AwaDB],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
table_name: str = _DEFAULT_TABLE_NAME,
log_and_data_dir: Optional[str] = None,
client: Optional[awadb.Client] = None,
**kwargs: Any,
) -> AwaDB:
"""Create an AwaDB vectorstore from a raw documents.
Args:
texts (List[str]): List of texts to add to the table.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
table_name (str): Name of the table to create.
log_and_data_dir (Optional[str]): Directory of logging and persistence.
client (Optional[awadb.Client]): AwaDB client
Returns:
AwaDB: AwaDB vectorstore.
"""
awadb_client = cls(
table_name=table_name,
embedding=embedding,
log_and_data_dir=log_and_data_dir,
client=client,
)
awadb_client.add_texts(texts=texts, metadatas=metadatas)
return awadb_client
@classmethod
def from_documents(
cls: Type[AwaDB],
documents: List[Document],
embedding: Optional[Embeddings] = None,
table_name: str = _DEFAULT_TABLE_NAME,
log_and_data_dir: Optional[str] = None,
client: Optional[awadb.Client] = None,
**kwargs: Any,
) -> AwaDB:
"""Create an AwaDB vectorstore from a list of documents.
If a log_and_data_dir specified, the table will be persisted there.
Args:
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
table_name (str): Name of the table to create.
log_and_data_dir (Optional[str]): Directory to persist the table.
client (Optional[awadb.Client]): AwaDB client.
Any: Any possible parameters in the future
Returns:
AwaDB: AwaDB vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
table_name=table_name,
log_and_data_dir=log_and_data_dir,
client=client,
)
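# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of building an AwaDB store from raw texts and
# querying it. No embedding function is passed, so the bundled AwaEmbedding
# fallback is used; the ``awadb`` package must be installed, and the table name
# and metadata values are placeholders.
if __name__ == "__main__":
    store = AwaDB.from_texts(
        texts=["AwaDB is an embedded vector database.", "LangChain wraps many stores."],
        metadatas=[{"source": "doc-1"}, {"source": "doc-2"}],
        table_name="langchain_awadb_demo",
    )
    for doc in store.similarity_search("what is AwaDB?", k=1):
        print(doc.page_content, doc.metadata)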
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~deepinfra.py | import json
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional
import aiohttp
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM, GenerationChunk
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utilities.requests import Requests
from langchain.utils import get_from_dict_or_env
DEFAULT_MODEL_ID = "google/flan-t5-xl"
class DeepInfra(LLM):
"""DeepInfra models.
To use, you should have the environment variable ``DEEPINFRA_API_TOKEN``
set with your API token, or pass it as a named parameter to the
constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import DeepInfra
di = DeepInfra(model_id="google/flan-t5-xl",
deepinfra_api_token="my-api-key")
"""
model_id: str = DEFAULT_MODEL_ID
model_kwargs: Optional[Dict] = None
deepinfra_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "deepinfra"
def _url(self) -> str:
return f"https://api.deepinfra.com/v1/inference/{self.model_id}"
def _headers(self) -> Dict:
return {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
def _body(self, prompt: str, kwargs: Any) -> Dict:
model_kwargs = self.model_kwargs or {}
model_kwargs = {**model_kwargs, **kwargs}
return {
"input": prompt,
**model_kwargs,
}
def _handle_status(self, code: int, text: Any) -> None:
if code >= 500:
raise Exception(f"DeepInfra Server: Error {code}")
elif code >= 400:
raise ValueError(f"DeepInfra received an invalid payload: {text}")
elif code != 200:
raise Exception(
f"DeepInfra returned an unexpected response with status "
f"{code}: {text}"
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to DeepInfra's inference API endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = di("Tell me a joke.")
"""
request = Requests(headers=self._headers())
response = request.post(url=self._url(), data=self._body(prompt, kwargs))
self._handle_status(response.status_code, response.text)
data = response.json()
return data["results"][0]["generated_text"]
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(prompt, kwargs)
) as response:
            self._handle_status(response.status, await response.text())
data = await response.json()
return data["results"][0]["generated_text"]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
request = Requests(headers=self._headers())
response = request.post(
url=self._url(), data=self._body(prompt, {**kwargs, "stream": True})
)
self._handle_status(response.status_code, response.text)
for line in _parse_stream(response.iter_lines()):
chunk = _handle_sse_line(line)
if chunk:
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(prompt, {**kwargs, "stream": True})
) as response:
            self._handle_status(response.status, await response.text())
async for line in _parse_stream_async(response.content):
chunk = _handle_sse_line(line)
if chunk:
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
def _parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
async def _parse_stream_async(rbody: aiohttp.StreamReader) -> AsyncIterator[str]:
async for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
def _parse_stream_helper(line: bytes) -> Optional[str]:
if line and line.startswith(b"data:"):
if line.startswith(b"data: "):
            # SSE event may be valid when it contains whitespace
line = line[len(b"data: ") :]
else:
line = line[len(b"data:") :]
if line.strip() == b"[DONE]":
# return here will cause GeneratorExit exception in urllib3
# and it will close http connection with TCP Reset
return None
else:
return line.decode("utf-8")
return None
def _handle_sse_line(line: str) -> Optional[GenerationChunk]:
try:
obj = json.loads(line)
return GenerationChunk(
text=obj.get("token", {}).get("text"),
)
except Exception:
return None
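# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of calling the wrapper defined above, once as a
# one-shot completion and once as a token stream. The API token and model id
# are placeholders, and streaming assumes the deployed model supports it.
if __name__ == "__main__":
    llm = DeepInfra(
        model_id="meta-llama/Llama-2-7b-chat-hf",  # placeholder model id
        deepinfra_api_token="YOUR_DEEPINFRA_TOKEN",  # placeholder token
        model_kwargs={"temperature": 0.7, "max_new_tokens": 64},
    )
    print(llm("Give me one fun fact about llamas."))
    for chunk in llm.stream("Count to five."):
        print(chunk, end="", flush=True)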
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~polars_dataframe.py | from typing import Any, Iterator
from langchain.docstore.document import Document
from langchain.document_loaders.dataframe import BaseDataFrameLoader
class PolarsDataFrameLoader(BaseDataFrameLoader):
"""Load `Polars` DataFrame."""
def __init__(self, data_frame: Any, *, page_content_column: str = "text"):
"""Initialize with dataframe object.
Args:
data_frame: Polars DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "text".
"""
import polars as pl
if not isinstance(data_frame, pl.DataFrame):
raise ValueError(
f"Expected data_frame to be a pl.DataFrame, got {type(data_frame)}"
)
super().__init__(data_frame, page_content_column=page_content_column)
def lazy_load(self) -> Iterator[Document]:
"""Lazy load records from dataframe."""
for row in self.data_frame.iter_rows(named=True):
text = row[self.page_content_column]
row.pop(self.page_content_column)
yield Document(page_content=text, metadata=row)
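# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of loading documents from a Polars DataFrame with
# the loader defined above; the column names are illustrative.
if __name__ == "__main__":
    import polars as pl

    df = pl.DataFrame(
        {
            "text": ["First row of content.", "Second row of content."],
            "source": ["a.txt", "b.txt"],
        }
    )
    loader = PolarsDataFrameLoader(df, page_content_column="text")
    for doc in loader.lazy_load():
        print(doc.page_content, doc.metadata)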
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~chromium.py | import asyncio
import logging
from typing import Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class AsyncChromiumLoader(BaseLoader):
"""Scrape HTML pages from URLs using a
    headless instance of Chromium."""
def __init__(
self,
urls: List[str],
):
"""
Initialize the loader with a list of URL paths.
Args:
urls (List[str]): A list of URLs to scrape content from.
Raises:
ImportError: If the required 'playwright' package is not installed.
"""
self.urls = urls
try:
import playwright # noqa: F401
except ImportError:
raise ImportError(
"playwright is required for AsyncChromiumLoader. "
"Please install it with `pip install playwright`."
)
async def ascrape_playwright(self, url: str) -> str:
"""
Asynchronously scrape the content of a given URL using Playwright's async API.
Args:
url (str): The URL to scrape.
Returns:
str: The scraped HTML content or an error message if an exception occurs.
"""
from playwright.async_api import async_playwright
logger.info("Starting scraping...")
results = ""
async with async_playwright() as p:
browser = await p.chromium.launch(headless=True)
try:
page = await browser.new_page()
await page.goto(url)
results = await page.content() # Simply get the HTML content
logger.info("Content scraped")
except Exception as e:
results = f"Error: {e}"
await browser.close()
return results
def lazy_load(self) -> Iterator[Document]:
"""
Lazily load text content from the provided URLs.
This method yields Documents one at a time as they're scraped,
instead of waiting to scrape all URLs before returning.
Yields:
Document: The scraped content encapsulated within a Document object.
"""
for url in self.urls:
html_content = asyncio.run(self.ascrape_playwright(url))
metadata = {"source": url}
yield Document(page_content=html_content, metadata=metadata)
def load(self) -> List[Document]:
"""
Load and return all Documents from the provided URLs.
Returns:
List[Document]: A list of Document objects
containing the scraped content from each URL.
"""
return list(self.lazy_load())
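# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of scraping a page with the loader defined above.
# It assumes ``playwright`` is installed and its Chromium build has been
# fetched (``playwright install chromium``); the URL is just an example.
if __name__ == "__main__":
    loader = AsyncChromiumLoader(["https://example.com"])
    docs = loader.load()
    print(docs[0].metadata["source"], len(docs[0].page_content))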
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~larksuite.py | import json
import urllib.request
from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class LarkSuiteDocLoader(BaseLoader):
"""Load from `LarkSuite` (`FeiShu`)."""
def __init__(self, domain: str, access_token: str, document_id: str):
"""Initialize with domain, access_token (tenant / user), and document_id.
Args:
domain: The domain to load the LarkSuite.
access_token: The access_token to use.
document_id: The document_id to load.
"""
self.domain = domain
self.access_token = access_token
self.document_id = document_id
def _get_larksuite_api_json_data(self, api_url: str) -> Any:
"""Get LarkSuite (FeiShu) API response json data."""
headers = {"Authorization": f"Bearer {self.access_token}"}
request = urllib.request.Request(api_url, headers=headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
return json_data
def lazy_load(self) -> Iterator[Document]:
"""Lazy load LarkSuite (FeiShu) document."""
api_url_prefix = f"{self.domain}/open-apis/docx/v1/documents"
metadata_json = self._get_larksuite_api_json_data(
f"{api_url_prefix}/{self.document_id}"
)
raw_content_json = self._get_larksuite_api_json_data(
f"{api_url_prefix}/{self.document_id}/raw_content"
)
text = raw_content_json["data"]["content"]
metadata = {
"document_id": self.document_id,
"revision_id": metadata_json["data"]["document"]["revision_id"],
"title": metadata_json["data"]["document"]["title"],
}
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
"""Load LarkSuite (FeiShu) document."""
return list(self.lazy_load())
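# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of loading a LarkSuite (FeiShu) document with the
# loader defined above. The domain, access token and document id are
# placeholders; the token must be allowed to read the document.
if __name__ == "__main__":
    loader = LarkSuiteDocLoader(
        domain="https://open.larksuite.com",
        access_token="t-xxxxxxxxxxxxxxxx",  # placeholder tenant/user token
        document_id="doxcn0123456789abcdef",  # placeholder document id
    )
    docs = loader.load()
    print(docs[0].metadata["title"], docs[0].page_content[:80])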
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~chat_loaders~test_whatsapp.py | import pathlib
from langchain.chat_loaders import utils, whatsapp
def test_whatsapp_chat_loader() -> None:
chat_path = pathlib.Path(__file__).parent / "data" / "whatsapp_chat.txt"
loader = whatsapp.WhatsAppChatLoader(str(chat_path))
chat_sessions = list(
utils.map_ai_messages(loader.lazy_load(), sender="Dr. Feather")
)
assert chat_sessions, "Chat sessions should not be empty"
assert chat_sessions[0]["messages"], "Chat messages should not be empty"
assert (
"I spotted a rare Hyacinth Macaw yesterday in the Amazon Rainforest."
" Such a magnificent creature!" in chat_sessions[0]["messages"][0].content
), "Chat content mismatch"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~zapier.py | """Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/start/
Note: this wrapper currently only implemented the `api_key` auth method for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to [email protected] for
developer support.
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from requests import Request, Session
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class ZapierNLAWrapper(BaseModel):
"""Wrapper for Zapier NLA.
Full docs here: https://nla.zapier.com/start/
This wrapper supports both API Key and OAuth Credential auth methods. API Key
is the fastest way to get started using this wrapper.
Call this wrapper with either `zapier_nla_api_key` or
`zapier_nla_oauth_access_token` arguments, or set the `ZAPIER_NLA_API_KEY`
environment variable. If both arguments are set, the Access Token will take
precedence.
For use-cases where LangChain + Zapier NLA is powering a user-facing application,
and LangChain needs access to the end-user's connected accounts on Zapier.com,
you'll need to use OAuth. Review the full docs above to learn how to create
your own provider and generate credentials.
"""
zapier_nla_api_key: str
zapier_nla_oauth_access_token: str
zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _format_headers(self) -> Dict[str, str]:
"""Format headers for requests."""
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
}
if self.zapier_nla_oauth_access_token:
headers.update(
{"Authorization": f"Bearer {self.zapier_nla_oauth_access_token}"}
)
else:
headers.update({"X-API-Key": self.zapier_nla_api_key})
return headers
def _get_session(self) -> Session:
session = requests.Session()
session.headers.update(self._format_headers())
return session
async def _arequest(self, method: str, url: str, **kwargs: Any) -> Dict[str, Any]:
"""Make an async request."""
async with aiohttp.ClientSession(headers=self._format_headers()) as session:
async with session.request(method, url, **kwargs) as response:
response.raise_for_status()
return await response.json()
def _create_action_payload( # type: ignore[no-untyped-def]
self, instructions: str, params: Optional[Dict] = None, preview_only=False
) -> Dict:
"""Create a payload for an action."""
data = params if params else {}
data.update(
{
"instructions": instructions,
}
)
if preview_only:
data.update({"preview_only": True})
return data
def _create_action_url(self, action_id: str) -> str:
"""Create a url for an action."""
return self.zapier_nla_api_base + f"exposed/{action_id}/execute/"
def _create_action_request( # type: ignore[no-untyped-def]
self,
action_id: str,
instructions: str,
params: Optional[Dict] = None,
preview_only=False,
) -> Request:
data = self._create_action_payload(instructions, params, preview_only)
return Request(
"POST",
self._create_action_url(action_id),
json=data,
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
zapier_nla_api_key_default = None
        # If an oauth_access_token is passed in the values,
        # we don't need an nla_api_key; it can be blank.
if "zapier_nla_oauth_access_token" in values:
zapier_nla_api_key_default = ""
else:
values["zapier_nla_oauth_access_token"] = ""
# we require at least one API Key
zapier_nla_api_key = get_from_dict_or_env(
values,
"zapier_nla_api_key",
"ZAPIER_NLA_API_KEY",
zapier_nla_api_key_default,
)
values["zapier_nla_api_key"] = zapier_nla_api_key
return values
async def alist(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/docs)
"""
response = await self._arequest("GET", self.zapier_nla_api_base + "exposed/")
return response["results"]
def list(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/docs/using-the-api#ai-guessing)
"""
session = self._get_session()
try:
response = session.get(self.zapier_nla_api_base + "exposed/")
response.raise_for_status()
except requests.HTTPError as http_err:
if response.status_code == 401:
if self.zapier_nla_oauth_access_token:
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your "
f"access token is correct and doesn't need to be "
f"refreshed. Err: {http_err}",
response=response,
)
raise requests.HTTPError(
f"An unauthorized response occurred. Check that your api "
f"key is correct. Err: {http_err}",
response=response,
)
raise http_err
return response.json()["results"]
def run(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._create_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["result"]
async def arun(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
response = await self._arequest(
"POST",
self._create_action_url(action_id),
json=self._create_action_payload(instructions, params),
)
return response["result"]
def preview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
params = params if params else {}
params.update({"preview_only": True})
request = self._create_action_request(action_id, instructions, params, True)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["input_params"]
async def apreview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
response = await self._arequest(
"POST",
self._create_action_url(action_id),
json=self._create_action_payload(instructions, params, preview_only=True),
)
return response["result"]
def run_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = self.run(*args, **kwargs)
return json.dumps(data)
async def arun_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = await self.arun(*args, **kwargs)
return json.dumps(data)
def preview_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as preview, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = self.preview(*args, **kwargs)
return json.dumps(data)
async def apreview_as_str( # type: ignore[no-untyped-def]
self, *args, **kwargs
) -> str:
"""Same as preview, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = await self.apreview(*args, **kwargs)
return json.dumps(data)
def list_as_str(self) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
        inserting back into an LLM."""
actions = self.list()
return json.dumps(actions)
async def alist_as_str(self) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
        inserting back into an LLM."""
actions = await self.alist()
return json.dumps(actions)
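# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of the list -> preview -> run flow with the wrapper
# defined above. The API key is a placeholder, and the example assumes at least
# one action has been exposed at https://nla.zapier.com/demo/start/.
if __name__ == "__main__":
    zapier = ZapierNLAWrapper(zapier_nla_api_key="YOUR_ZAPIER_NLA_KEY")
    actions = zapier.list()
    if actions:
        action_id = actions[0]["id"]
        instructions = "Send a direct message saying hello"
        print(zapier.preview(action_id, instructions))  # review the guessed params
        print(zapier.run(action_id, instructions))  # actually execute the action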
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~predictionguard.py | import logging
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class PredictionGuard(LLM):
"""Prediction Guard large language models.
To use, you should have the ``predictionguard`` python package installed, and the
environment variable ``PREDICTIONGUARD_TOKEN`` set with your access token, or pass
it as a named parameter to the constructor. To use Prediction Guard's API along
with OpenAI models, set the environment variable ``OPENAI_API_KEY`` with your
OpenAI API key as well.
Example:
.. code-block:: python
pgllm = PredictionGuard(model="MPT-7B-Instruct",
token="my-access-token",
output={
"type": "boolean"
})
"""
client: Any #: :meta private:
model: Optional[str] = "MPT-7B-Instruct"
"""Model name to use."""
output: Optional[Dict[str, Any]] = None
"""The output type or structure for controlling the LLM output."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
token: Optional[str] = None
"""Your Prediction Guard access token."""
stop: Optional[List[str]] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the access token and python package exists in environment."""
token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN")
try:
import predictionguard as pg
values["client"] = pg.Client(token=token)
except ImportError:
raise ImportError(
"Could not import predictionguard python package. "
"Please install it with `pip install predictionguard`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Prediction Guard API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "predictionguard"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Prediction Guard's model API.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = pgllm("Tell me a joke.")
"""
import predictionguard as pg
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
response = pg.Completion.create(
model=self.model,
prompt=prompt,
output=self.output,
temperature=params["temperature"],
max_tokens=params["max_tokens"],
**kwargs,
)
text = response["choices"][0]["text"]
# If stop tokens are provided, Prediction Guard's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
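# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of the wrapper defined above with a structured
# output constraint, mirroring the class docstring. The access token is a
# placeholder.
if __name__ == "__main__":
    pgllm = PredictionGuard(
        model="MPT-7B-Instruct",
        token="YOUR_PREDICTIONGUARD_TOKEN",  # placeholder access token
        output={"type": "boolean"},
    )
    print(pgllm("Is the sky blue on a clear day?"))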
| [] |
 2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~human.py | """ChatModel wrapper which returns user input as the response."""
import asyncio
from functools import partial
from io import StringIO
from typing import Any, Callable, Dict, List, Mapping, Optional
import yaml
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Field
from langchain.schema.messages import (
BaseMessage,
HumanMessage,
_message_from_dict,
messages_to_dict,
)
from langchain.schema.output import ChatGeneration, ChatResult
def _display_messages(messages: List[BaseMessage]) -> None:
dict_messages = messages_to_dict(messages)
for message in dict_messages:
yaml_string = yaml.dump(
message,
default_flow_style=False,
sort_keys=False,
allow_unicode=True,
width=10000,
line_break=None,
)
print("\n", "======= start of message =======", "\n\n")
print(yaml_string)
print("======= end of message =======", "\n\n")
def _collect_yaml_input(
messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> BaseMessage:
"""Collects and returns user input as a single string."""
lines = []
while True:
line = input()
if not line.strip():
break
if stop and any(seq in line for seq in stop):
break
lines.append(line)
yaml_string = "\n".join(lines)
# Try to parse the input string as YAML
try:
message = _message_from_dict(yaml.safe_load(StringIO(yaml_string)))
if message is None:
return HumanMessage(content="")
if stop:
message.content = enforce_stop_tokens(message.content, stop)
return message
except yaml.YAMLError:
raise ValueError("Invalid YAML string entered.")
except ValueError:
raise ValueError("Invalid message entered.")
class HumanInputChatModel(BaseChatModel):
"""ChatModel which returns user input as the response."""
input_func: Callable = Field(default_factory=lambda: _collect_yaml_input)
message_func: Callable = Field(default_factory=lambda: _display_messages)
separator: str = "\n"
input_kwargs: Mapping[str, Any] = {}
message_kwargs: Mapping[str, Any] = {}
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
"input_func": self.input_func.__name__,
"message_func": self.message_func.__name__,
}
@property
def _llm_type(self) -> str:
"""Returns the type of LLM."""
return "human-input-chat-model"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""
Displays the messages to the user and returns their input as a response.
Args:
messages (List[BaseMessage]): The messages to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
ChatResult: The user's input as a response.
"""
self.message_func(messages, **self.message_kwargs)
user_input = self.input_func(messages, stop=stop, **self.input_kwargs)
return ChatResult(generations=[ChatGeneration(message=user_input)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
func = partial(
self._generate, messages, stop=stop, run_manager=run_manager, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, func)
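# --- Usage sketch (illustrative only, not part of the original module) ------
# A minimal, hedged example of the human-in-the-loop chat model defined above.
# Running it prints the conversation as YAML and then blocks on stdin until a
# YAML-encoded message is entered, followed by an empty line.
if __name__ == "__main__":
    chat = HumanInputChatModel()
    reply = chat([HumanMessage(content="What is the weather like today?")])
    print(reply)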
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_loaders~facebook_messenger.py | import json
import logging
from pathlib import Path
from typing import Iterator, Union
from langchain.chat_loaders.base import BaseChatLoader
from langchain.schema.chat import ChatSession
from langchain.schema.messages import HumanMessage
logger = logging.getLogger(__file__)
class SingleFileFacebookMessengerChatLoader(BaseChatLoader):
"""Load `Facebook Messenger` chat data from a single file.
Args:
path (Union[Path, str]): The path to the chat file.
Attributes:
        file_path (Path): The path to the chat file.
"""
def __init__(self, path: Union[Path, str]) -> None:
super().__init__()
self.file_path = path if isinstance(path, Path) else Path(path)
def lazy_load(self) -> Iterator[ChatSession]:
"""Lazy loads the chat data from the file.
Yields:
ChatSession: A chat session containing the loaded messages.
"""
with open(self.file_path) as f:
data = json.load(f)
sorted_data = sorted(data["messages"], key=lambda x: x["timestamp_ms"])
messages = []
for m in sorted_data:
messages.append(
HumanMessage(
content=m["content"], additional_kwargs={"sender": m["sender_name"]}
)
)
yield ChatSession(messages=messages)
class FolderFacebookMessengerChatLoader(BaseChatLoader):
"""Load `Facebook Messenger` chat data from a folder.
Args:
path (Union[str, Path]): The path to the directory
containing the chat files.
Attributes:
        directory_path (Path): The path to the directory containing the chat files.
"""
def __init__(self, path: Union[str, Path]) -> None:
super().__init__()
self.directory_path = Path(path) if isinstance(path, str) else path
def lazy_load(self) -> Iterator[ChatSession]:
"""Lazy loads the chat data from the folder.
Yields:
ChatSession: A chat session containing the loaded messages.
"""
inbox_path = self.directory_path / "inbox"
for _dir in inbox_path.iterdir():
if _dir.is_dir():
for _file in _dir.iterdir():
if _file.suffix.lower() == ".json":
file_loader = SingleFileFacebookMessengerChatLoader(path=_file)
for result in file_loader.lazy_load():
yield result
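# Usage sketch (illustrative paths): point the loaders at a Messenger data
# export; each ChatSession holds the messages of one conversation.
if __name__ == "__main__":
    single = SingleFileFacebookMessengerChatLoader(
        path="export/inbox/friend/message_1.json"
    )
    for session in single.lazy_load():
        print(f"{len(session['messages'])} messages in this conversation")
    folder = FolderFacebookMessengerChatLoader(path="export")
    sessions = list(folder.lazy_load())
    print(f"{len(sessions)} conversations found")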
| [
"content"
] |
2024-01-10 | ai-forever/gigachain | libs~streamlit_agent~gigachat_streaming.py | """Example of working with chat via gigachain."""
import streamlit as st
# Try demo - https://gigachat-streaming.streamlit.app/
from langchain.chat_models import GigaChat
from langchain.schema import ChatMessage
st.title("GigaChain Bot")
with st.sidebar:
st.title("GIGACHAT API")
base_url = st.selectbox(
"GIGACHAT_BASE_URL",
(
"https://gigachat.devices.sberbank.ru/api/v1",
"https://beta.saluteai.sberdevices.ru/v1",
),
)
st.title("Авторизационные данные")
credentials = st.text_input("GIGACHAT_CREDENTIALS", type="password")
st.title("OR")
access_token = st.text_input("GIGACHAT_ACCESS_TOKEN", type="password")
st.title("OR")
user = st.text_input("GIGACHAT_USER")
password = st.text_input("GIGACHAT_PASSWORD", type="password")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = [
ChatMessage(
role="system",
content="Ты - умный ИИ ассистент, который всегда готов помочь пользователю.",
),
ChatMessage(role="assistant", content="Как я могу помочь вам?"),
]
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message.role):
st.markdown(message.content)
if prompt := st.chat_input():
if not access_token and not credentials and not (user and password):
st.info("Заполните данные GigaChat для того, чтобы продолжить")
st.stop()
chat = GigaChat(
base_url=base_url,
credentials=credentials,
access_token=st.session_state.get("token")
        or access_token,  # Reuse the token
user=user,
password=password,
verify_ssl_certs=False,
)
message = ChatMessage(role="user", content=prompt)
st.session_state.messages.append(message)
with st.chat_message(message.role):
st.markdown(message.content)
message = ChatMessage(role="assistant", content="")
st.session_state.messages.append(message)
with st.chat_message(message.role):
message_placeholder = st.empty()
for chunk in chat.stream(st.session_state.messages):
message.content += chunk.content
message_placeholder.markdown(message.content + "▌")
message_placeholder.markdown(message.content)
    # Every time the user interacts with the interface, the whole script reruns.
    # Save the token and close the connections
st.session_state.token = chat._client.token
chat._client.close()
| [
"Как я могу помочь вам?",
"Ты - умный ИИ ассистент, который всегда готов помочь пользователю."
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~retrievers~docarray~fixtures.py | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, Generator, Tuple
import numpy as np
import pytest
from langchain.pydantic_v1 import Field
if TYPE_CHECKING:
from docarray.index import (
ElasticDocIndex,
HnswDocumentIndex,
InMemoryExactNNIndex,
QdrantDocumentIndex,
WeaviateDocumentIndex,
)
from docarray.typing import NdArray
from qdrant_client.http import models as rest
from langchain.embeddings import FakeEmbeddings
@pytest.fixture
def init_weaviate() -> (
Generator[
Tuple[WeaviateDocumentIndex, Dict[str, Any], FakeEmbeddings],
None,
None,
]
):
"""
cd tests/integration_tests/vectorstores/docker-compose
docker compose -f weaviate.yml up
"""
from docarray import BaseDoc
from docarray.index import (
WeaviateDocumentIndex,
)
class WeaviateDoc(BaseDoc):
# When initializing the Weaviate index, denote the field
# you want to search on with `is_embedding=True`
title: str
title_embedding: NdArray[32] = Field(is_embedding=True) # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
# initialize WeaviateDocumentIndex
dbconfig = WeaviateDocumentIndex.DBConfig(host="http://localhost:8080")
weaviate_db = WeaviateDocumentIndex[WeaviateDoc](
db_config=dbconfig, index_name="docarray_retriever"
)
# index data
weaviate_db.index(
[
WeaviateDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = {"path": ["year"], "operator": "LessThanEqual", "valueInt": "90"}
yield weaviate_db, filter_query, embeddings
weaviate_db._client.schema.delete_all()
@pytest.fixture
def init_elastic() -> (
Generator[Tuple[ElasticDocIndex, Dict[str, Any], FakeEmbeddings], None, None]
):
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f elasticsearch.yml up
"""
from docarray import BaseDoc
from docarray.index import (
ElasticDocIndex,
)
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32] # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
# initialize ElasticDocIndex
elastic_db = ElasticDocIndex[MyDoc](
hosts="http://localhost:9200", index_name="docarray_retriever"
)
# index data
elastic_db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = {"range": {"year": {"lte": 90}}}
yield elastic_db, filter_query, embeddings
elastic_db._client.indices.delete(index="docarray_retriever")
@pytest.fixture
def init_qdrant() -> Tuple[QdrantDocumentIndex, rest.Filter, FakeEmbeddings]:
from docarray import BaseDoc
from docarray.index import QdrantDocumentIndex
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32] # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
# initialize QdrantDocumentIndex
qdrant_config = QdrantDocumentIndex.DBConfig(path=":memory:")
qdrant_db = QdrantDocumentIndex[MyDoc](qdrant_config)
# index data
qdrant_db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = rest.Filter(
must=[
rest.FieldCondition(
key="year",
range=rest.Range(
gte=10,
lt=90,
),
)
]
)
return qdrant_db, filter_query, embeddings
@pytest.fixture
def init_in_memory() -> Tuple[InMemoryExactNNIndex, Dict[str, Any], FakeEmbeddings]:
from docarray import BaseDoc
from docarray.index import InMemoryExactNNIndex
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32] # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
# initialize InMemoryExactNNIndex
in_memory_db = InMemoryExactNNIndex[MyDoc]()
# index data
in_memory_db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = {"year": {"$lte": 90}}
return in_memory_db, filter_query, embeddings
@pytest.fixture
def init_hnsw(
tmp_path: Path,
) -> Tuple[HnswDocumentIndex, Dict[str, Any], FakeEmbeddings]:
from docarray import BaseDoc
from docarray.index import (
HnswDocumentIndex,
)
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32] # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
    # initialize HnswDocumentIndex
hnsw_db = HnswDocumentIndex[MyDoc](work_dir=tmp_path)
# index data
hnsw_db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = {"year": {"$lte": 90}}
return hnsw_db, filter_query, embeddings
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~graphs~graph_document.py | from __future__ import annotations
from typing import List, Union
from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import Field
from langchain.schema import Document
class Node(Serializable):
"""Represents a node in a graph with associated properties.
Attributes:
id (Union[str, int]): A unique identifier for the node.
type (str): The type or label of the node, default is "Node".
properties (dict): Additional properties and metadata associated with the node.
"""
id: Union[str, int]
type: str = "Node"
properties: dict = Field(default_factory=dict)
class Relationship(Serializable):
"""Represents a directed relationship between two nodes in a graph.
Attributes:
source (Node): The source node of the relationship.
target (Node): The target node of the relationship.
type (str): The type of the relationship.
properties (dict): Additional properties associated with the relationship.
"""
source: Node
target: Node
type: str
properties: dict = Field(default_factory=dict)
class GraphDocument(Serializable):
"""Represents a graph document consisting of nodes and relationships.
Attributes:
nodes (List[Node]): A list of nodes in the graph.
relationships (List[Relationship]): A list of relationships in the graph.
source (Document): The document from which the graph information is derived.
"""
nodes: List[Node]
relationships: List[Relationship]
source: Document
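# Construction sketch (illustrative data): two nodes, one relationship, and the
# source document the triple was extracted from.
if __name__ == "__main__":
    alice = Node(id="alice", type="Person", properties={"age": 30})
    acme = Node(id="acme", type="Company")
    works_at = Relationship(source=alice, target=acme, type="WORKS_AT")
    graph_doc = GraphDocument(
        nodes=[alice, acme],
        relationships=[works_at],
        source=Document(page_content="Alice works at Acme."),
    )
    print(graph_doc.nodes)
    print(graph_doc.relationships)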
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~retrievers~test_web_research.py | from typing import List
import pytest
from langchain.retrievers.web_research import QuestionListOutputParser
@pytest.mark.parametrize(
"text,expected",
(
(
"1. Line one.\n",
["1. Line one.\n"],
),
(
"1. Line one.",
["1. Line one."],
),
(
"1. Line one.\n2. Line two.\n",
["1. Line one.\n", "2. Line two.\n"],
),
(
"1. Line one.\n2. Line two.",
["1. Line one.\n", "2. Line two."],
),
(
"1. Line one.\n2. Line two.\n3. Line three.",
["1. Line one.\n", "2. Line two.\n", "3. Line three."],
),
),
)
def test_list_output_parser(text: str, expected: List[str]) -> None:
parser = QuestionListOutputParser()
result = parser.parse(text)
assert result.lines == expected
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~embeddings~test_xinference.py | """Test Xinference embeddings."""
import time
from typing import AsyncGenerator, Tuple
import pytest_asyncio
from langchain.embeddings import XinferenceEmbeddings
@pytest_asyncio.fixture
async def setup() -> AsyncGenerator[Tuple[str, str], None]:
import xoscar as xo
from xinference.deploy.supervisor import start_supervisor_components
from xinference.deploy.utils import create_worker_actor_pool
from xinference.deploy.worker import start_worker_components
pool = await create_worker_actor_pool(
f"test://127.0.0.1:{xo.utils.get_next_port()}"
)
print(f"Pool running on localhost:{pool.external_address}")
endpoint = await start_supervisor_components(
pool.external_address, "127.0.0.1", xo.utils.get_next_port()
)
await start_worker_components(
address=pool.external_address, supervisor_address=pool.external_address
)
# wait for the api.
time.sleep(3)
async with pool:
yield endpoint, pool.external_address
def test_xinference_embedding_documents(setup: Tuple[str, str]) -> None:
"""Test xinference embeddings for documents."""
from xinference.client import RESTfulClient
endpoint, _ = setup
client = RESTfulClient(endpoint)
model_uid = client.launch_model(
model_name="vicuna-v1.3",
model_size_in_billions=7,
model_format="ggmlv3",
quantization="q4_0",
)
xinference = XinferenceEmbeddings(server_url=endpoint, model_uid=model_uid)
documents = ["foo bar", "bar foo"]
output = xinference.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 4096
def test_xinference_embedding_query(setup: Tuple[str, str]) -> None:
"""Test xinference embeddings for query."""
from xinference.client import RESTfulClient
endpoint, _ = setup
client = RESTfulClient(endpoint)
model_uid = client.launch_model(
model_name="vicuna-v1.3", model_size_in_billions=7, quantization="q4_0"
)
xinference = XinferenceEmbeddings(server_url=endpoint, model_uid=model_uid)
document = "foo bar"
output = xinference.embed_query(document)
assert len(output) == 4096
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~edenai~audio_text_to_speech.py | from __future__ import annotations
import logging
from typing import Dict, List, Literal, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import Field, root_validator, validator
from langchain.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiTextToSpeechTool(EdenaiTool):
"""Tool that queries the Eden AI Text to speech API.
for api reference check edenai documentation:
https://docs.edenai.co/reference/audio_text_to_speech_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name = "edenai_text_to_speech"
description = (
"A wrapper around edenai Services text to speech."
"Useful for when you need to convert text to speech."
"""the output is a string representing the URL of the audio file,
or the path to the downloaded wav file """
)
language: Optional[str] = "en"
"""
language of the text passed to the model.
"""
# optional params see api documentation for more info
return_type: Literal["url", "wav"] = "url"
rate: Optional[int]
pitch: Optional[int]
volume: Optional[int]
audio_format: Optional[str]
sampling_rate: Optional[int]
voice_models: Dict[str, str] = Field(default_factory=dict)
voice: Literal["MALE", "FEMALE"]
"""voice option : 'MALE' or 'FEMALE' """
feature: str = "audio"
subfeature: str = "text_to_speech"
@validator("providers")
def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
"""
        This tool has no feature to combine providers' results.
        Therefore we only allow one provider.
"""
if len(v) > 1:
raise ValueError(
"Please select only one provider. "
"The feature to combine providers results is not available "
"for this tool."
)
return v
@root_validator
def check_voice_models_key_is_provider_name(cls, values: dict) -> dict:
for key in values.get("voice_models", {}).keys():
if key not in values.get("providers", []):
raise ValueError(
"voice_model should be formatted like this "
"{<provider_name>: <its_voice_model>}"
)
return values
def _download_wav(self, url: str, save_path: str) -> None:
response = requests.get(url)
if response.status_code == 200:
with open(save_path, "wb") as f:
f.write(response.content)
else:
raise ValueError("Error while downloading wav file")
def _parse_response(self, response: list) -> str:
result = response[0]
if self.return_type == "url":
return result["audio_resource_url"]
else:
self._download_wav(result["audio_resource_url"], "audio.wav")
return "audio.wav"
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
all_params = {
"text": query,
"language": self.language,
"option": self.voice,
"return_type": self.return_type,
"rate": self.rate,
"pitch": self.pitch,
"volume": self.volume,
"audio_format": self.audio_format,
"sampling_rate": self.sampling_rate,
"settings": self.voice_models,
}
        # filter so we don't send val to the api when val is `None`
query_params = {k: v for k, v in all_params.items() if v is not None}
return self._call_eden_ai(query_params)
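# Usage sketch (illustrative): "amazon" and the voice settings are placeholder
# choices, and EDENAI_API_KEY must be set in the environment for the request to
# succeed; only one provider may be passed (see the validator above).
if __name__ == "__main__":
    tts = EdenAiTextToSpeechTool(
        providers=["amazon"],
        language="en",
        voice="FEMALE",
    )
    # Returns the URL of the generated audio (return_type defaults to "url").
    print(tts.run("Hello from the text to speech tool"))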
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~url.py | """Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class UnstructuredURLLoader(BaseLoader):
"""Load files from remote URLs using `Unstructured`.
Use the unstructured partition function to detect the MIME type
and route the file to the appropriate partitioner.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredURLLoader
loader = UnstructuredURLLoader(
urls=["<url-1>", "<url-2>"], mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
"""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
mode: str = "single",
show_progress_bar: bool = False,
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
from unstructured.__version__ import __version__ as __unstructured_version__
self.__version = __unstructured_version__
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self._validate_mode(mode)
self.mode = mode
headers = unstructured_kwargs.pop("headers", {})
if len(headers.keys()) != 0:
warn_about_headers = False
if self.__is_non_html_available():
warn_about_headers = not self.__is_headers_available_for_non_html()
else:
warn_about_headers = not self.__is_headers_available_for_html()
if warn_about_headers:
logger.warning(
"You are using an old version of unstructured. "
"The headers parameter is ignored"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headers = headers
self.unstructured_kwargs = unstructured_kwargs
self.show_progress_bar = show_progress_bar
def _validate_mode(self, mode: str) -> None:
_valid_modes = {"single", "elements"}
if mode not in _valid_modes:
raise ValueError(
f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
)
def __is_headers_available_for_html(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 7)
def __is_headers_available_for_non_html(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 13)
def __is_non_html_available(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 12)
def load(self) -> List[Document]:
"""Load file."""
from unstructured.partition.auto import partition
from unstructured.partition.html import partition_html
docs: List[Document] = list()
if self.show_progress_bar:
try:
from tqdm import tqdm
except ImportError as e:
raise ImportError(
"Package tqdm must be installed if show_progress_bar=True. "
"Please install with 'pip install tqdm' or set "
"show_progress_bar=False."
) from e
urls = tqdm(self.urls)
else:
urls = self.urls
for url in urls:
try:
if self.__is_non_html_available():
if self.__is_headers_available_for_non_html():
elements = partition(
url=url, headers=self.headers, **self.unstructured_kwargs
)
else:
elements = partition(url=url, **self.unstructured_kwargs)
else:
if self.__is_headers_available_for_html():
elements = partition_html(
url=url, headers=self.headers, **self.unstructured_kwargs
)
else:
elements = partition_html(url=url, **self.unstructured_kwargs)
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
else:
raise e
if self.mode == "single":
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
elif self.mode == "elements":
for element in elements:
metadata = element.metadata.to_dict()
metadata["category"] = element.category
docs.append(Document(page_content=str(element), metadata=metadata))
return docs
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~tongyi.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: Tongyi) -> Callable[[Any], Any]:
min_seconds = 1
max_seconds = 4
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _generate_with_retry(**_kwargs: Any) -> Any:
resp = llm.client.call(**_kwargs)
if resp.status_code == 200:
return resp
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}",
response=resp,
)
return _generate_with_retry(**kwargs)
def stream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _stream_generate_with_retry(**_kwargs: Any) -> Any:
stream_resps = []
resps = llm.client.call(**_kwargs)
for resp in resps:
if resp.status_code == 200:
stream_resps.append(resp)
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}",
response=resp,
)
return stream_resps
return _stream_generate_with_retry(**kwargs)
class Tongyi(LLM):
"""Tongyi Qwen large language models.
To use, you should have the ``dashscope`` python package installed, and the
environment variable ``DASHSCOPE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Tongyi
            tongyi = Tongyi()
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
client: Any #: :meta private:
model_name: str = "qwen-plus-v1"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
top_p: float = 0.8
"""Total probability mass of tokens to consider at each step."""
dashscope_api_key: Optional[str] = None
"""Dashscope api key provide by alicloud."""
n: int = 1
"""How many completions to generate for each prompt."""
streaming: bool = False
"""Whether to stream the results or not."""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
"""Series of messages for Chat input."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "tongyi"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
get_from_dict_or_env(values, "dashscope_api_key", "DASHSCOPE_API_KEY")
try:
import dashscope
except ImportError:
raise ImportError(
"Could not import dashscope python package. "
"Please install it with `pip install dashscope`."
)
try:
values["client"] = dashscope.Generation
except AttributeError:
raise ValueError(
"`dashscope` has no `Generation` attribute, this is likely "
"due to an old version of the dashscope package. Try upgrading it "
"with `pip install --upgrade dashscope`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params = {
"top_p": self.top_p,
}
return {**normal_params, **self.model_kwargs}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Tongyi's generate endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = tongyi("Tell me a joke.")
"""
params: Dict[str, Any] = {
**{"model": self.model_name},
**self._default_params,
**kwargs,
}
completion = generate_with_retry(
self,
prompt=prompt,
**params,
)
return completion["output"]["text"]
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
generations = []
params: Dict[str, Any] = {
**{"model": self.model_name},
**self._default_params,
**kwargs,
}
if self.streaming:
if len(prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
params["stream"] = True
for stream_resp in stream_generate_with_retry(
self, prompt=prompts[0], **params
):
generations.append(
[
Generation(
text=stream_resp["output"]["text"],
generation_info=dict(
finish_reason=stream_resp["output"]["finish_reason"],
),
)
]
)
else:
for prompt in prompts:
completion = generate_with_retry(
self,
prompt=prompt,
**params,
)
generations.append(
[
Generation(
text=completion["output"]["text"],
generation_info=dict(
finish_reason=completion["output"]["finish_reason"],
),
)
]
)
return LLMResult(generations=generations)
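# Usage sketch (illustrative): requires the ``dashscope`` package and a
# DASHSCOPE_API_KEY in the environment (or passed as ``dashscope_api_key``).
if __name__ == "__main__":
    llm = Tongyi(model_name="qwen-plus-v1")
    print(llm("Tell me a joke."))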
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~self_query~myscale.py | import datetime
import re
from typing import Any, Callable, Tuple
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
def _DEFAULT_COMPOSER(op_name: str) -> Callable:
"""
Default composer for logical operators.
Args:
op_name: Name of the operator.
Returns:
Callable that takes a list of arguments and returns a string.
"""
def f(*args: Any) -> str:
args_: map[str] = map(str, args)
return f" {op_name} ".join(args_)
return f
def _FUNCTION_COMPOSER(op_name: str) -> Callable:
"""
Composer for functions.
Args:
op_name: Name of the function.
Returns:
Callable that takes a list of arguments and returns a string.
"""
def f(*args: Any) -> str:
args_: map[str] = map(str, args)
return f"{op_name}({','.join(args_)})"
return f
class MyScaleTranslator(Visitor):
"""Translate `MyScale` internal query language elements to valid filters."""
allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
"""Subset of allowed logical operators."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
Comparator.CONTAIN,
Comparator.LIKE,
]
map_dict = {
Operator.AND: _DEFAULT_COMPOSER("AND"),
Operator.OR: _DEFAULT_COMPOSER("OR"),
Operator.NOT: _DEFAULT_COMPOSER("NOT"),
Comparator.EQ: _DEFAULT_COMPOSER("="),
Comparator.GT: _DEFAULT_COMPOSER(">"),
Comparator.GTE: _DEFAULT_COMPOSER(">="),
Comparator.LT: _DEFAULT_COMPOSER("<"),
Comparator.LTE: _DEFAULT_COMPOSER("<="),
Comparator.CONTAIN: _FUNCTION_COMPOSER("has"),
Comparator.LIKE: _DEFAULT_COMPOSER("ILIKE"),
}
def __init__(self, metadata_key: str = "metadata") -> None:
super().__init__()
self.metadata_key = metadata_key
    def visit_operation(self, operation: Operation) -> str:
args = [arg.accept(self) for arg in operation.arguments]
func = operation.operator
self._validate_func(func)
return self.map_dict[func](*args)
    def visit_comparison(self, comparison: Comparison) -> str:
regex = r"\((.*?)\)"
matched = re.search(r"\(\w+\)", comparison.attribute)
# If arbitrary function is applied to an attribute
if matched:
attr = re.sub(
regex,
f"({self.metadata_key}.{matched.group(0)[1:-1]})",
comparison.attribute,
)
else:
attr = f"{self.metadata_key}.{comparison.attribute}"
value = comparison.value
comp = comparison.comparator
value = f"'{value}'" if isinstance(value, str) else value
# convert timestamp for datetime objects
if type(value) is datetime.date:
attr = f"parseDateTime32BestEffort({attr})"
value = f"parseDateTime32BestEffort('{value.strftime('%Y-%m-%d')}')"
# string pattern match
if comp is Comparator.LIKE:
value = f"'%{value[1:-1]}%'"
return self.map_dict[comp](attr, value)
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
print(structured_query)
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"where_str": structured_query.filter.accept(self)}
return structured_query.query, kwargs
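# Translation sketch (illustrative values): a single comparison becomes a WHERE
# clause string against the default ``metadata`` column.
if __name__ == "__main__":
    translator = MyScaleTranslator()
    structured = StructuredQuery(
        query="books about dinosaurs",
        filter=Comparison(comparator=Comparator.GTE, attribute="year", value=1995),
    )
    print(translator.visit_structured_query(structured))
    # -> ('books about dinosaurs', {'where_str': 'metadata.year >= 1995'})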
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~output_parsers~pydantic.py | import json
import re
from typing import Type, TypeVar
from langchain.output_parsers.format_instructions import PYDANTIC_FORMAT_INSTRUCTIONS
from langchain.pydantic_v1 import BaseModel, ValidationError
from langchain.schema import BaseOutputParser, OutputParserException
T = TypeVar("T", bound=BaseModel)
class PydanticOutputParser(BaseOutputParser[T]):
"""Parse an output using a pydantic model."""
pydantic_object: Type[T]
"""The pydantic model to parse."""
def parse(self, text: str) -> T:
try:
# Greedy search for 1st json candidate.
match = re.search(
r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL
)
json_str = ""
if match:
json_str = match.group()
json_object = json.loads(json_str, strict=False)
return self.pydantic_object.parse_obj(json_object)
except (json.JSONDecodeError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text)
def get_format_instructions(self) -> str:
schema = self.pydantic_object.schema()
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~tools~file_management~test_write.py | """Test the WriteFile tool."""
from pathlib import Path
from tempfile import TemporaryDirectory
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
)
from langchain.tools.file_management.write import WriteFileTool
def test_write_file_with_root_dir() -> None:
"""Test the WriteFile tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = WriteFileTool(root_dir=temp_dir)
tool.run({"file_path": "file.txt", "text": "Hello, world!"})
assert (Path(temp_dir) / "file.txt").exists()
assert (Path(temp_dir) / "file.txt").read_text() == "Hello, world!"
def test_write_file_errs_outside_root_dir() -> None:
"""Test the WriteFile tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = WriteFileTool(root_dir=temp_dir)
result = tool.run({"file_path": "../file.txt", "text": "Hello, world!"})
assert result == INVALID_PATH_TEMPLATE.format(
arg_name="file_path", value="../file.txt"
)
def test_write_file() -> None:
"""Test the WriteFile tool."""
with TemporaryDirectory() as temp_dir:
file_path = str(Path(temp_dir) / "file.txt")
tool = WriteFileTool()
tool.run({"file_path": file_path, "text": "Hello, world!"})
assert (Path(temp_dir) / "file.txt").exists()
assert (Path(temp_dir) / "file.txt").read_text() == "Hello, world!"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~graph_qa~nebulagraph.py | """Question answering over a graph."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, NGQL_GENERATION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.graphs.nebula_graph import NebulaGraph
from langchain.pydantic_v1 import Field
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
class NebulaGraphQAChain(Chain):
"""Chain for question-answering against a graph by generating nGQL statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: NebulaGraph = Field(exclude=True)
ngql_generation_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
ngql_prompt: BasePromptTemplate = NGQL_GENERATION_PROMPT,
**kwargs: Any,
) -> NebulaGraphQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
ngql_generation_chain = LLMChain(llm=llm, prompt=ngql_prompt)
return cls(
qa_chain=qa_chain,
ngql_generation_chain=ngql_generation_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Generate nGQL statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
generated_ngql = self.ngql_generation_chain.run(
{"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
)
_run_manager.on_text("Generated nGQL:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_ngql, color="green", end="\n", verbose=self.verbose
)
context = self.graph.query(generated_ngql)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain(
{"question": question, "context": context},
callbacks=callbacks,
)
return {self.output_key: result[self.qa_chain.output_key]}
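# Usage sketch (illustrative): the connection details are placeholders, and a
# running NebulaGraph instance plus valid LLM credentials are required.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI

    graph = NebulaGraph(space="langchain", address="127.0.0.1", port=9669)
    chain = NebulaGraphQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph)
    print(chain.run("How many people played in The Godfather?"))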
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~tomarkdown.py | from __future__ import annotations
from typing import Iterator, List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class ToMarkdownLoader(BaseLoader):
"""Load `HTML` using `2markdown API`."""
def __init__(self, url: str, api_key: str):
"""Initialize with url and api key."""
self.url = url
self.api_key = api_key
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load the file."""
response = requests.post(
"https://2markdown.com/api/2md",
headers={"X-Api-Key": self.api_key},
json={"url": self.url},
)
text = response.json()["article"]
metadata = {"source": self.url}
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
"""Load file."""
return list(self.lazy_load())
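# Usage sketch (illustrative): the URL and API key below are placeholders for a
# real 2markdown account.
if __name__ == "__main__":
    loader = ToMarkdownLoader(
        url="https://python.langchain.com", api_key="YOUR_2MARKDOWN_API_KEY"
    )
    for doc in loader.lazy_load():
        print(doc.metadata["source"], len(doc.page_content))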
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~vectorstores~test_neo4jvector.py | """Test Neo4jVector functionality."""
import os
from typing import List
from langchain.docstore.document import Document
from langchain.vectorstores.neo4j_vector import Neo4jVector, SearchType
from langchain.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
url = os.environ.get("NEO4J_URL", "bolt://localhost:7687")
username = os.environ.get("NEO4J_USERNAME", "neo4j")
password = os.environ.get("NEO4J_PASSWORD", "pleaseletmein")
OS_TOKEN_COUNT = 1536
texts = ["foo", "bar", "baz"]
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f neo4j.yml up
"""
def drop_vector_indexes(store: Neo4jVector) -> None:
"""Cleanup all vector indexes"""
all_indexes = store.query(
"""
SHOW INDEXES YIELD name, type
WHERE type IN ["VECTOR", "FULLTEXT"]
RETURN name
"""
)
for index in all_indexes:
store.query(f"DROP INDEX {index['name']}")
class FakeEmbeddingsWithOsDimension(FakeEmbeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, embedding_texts: List[str]) -> List[List[float]]:
"""Return simple embeddings."""
return [
[float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(i + 1)]
for i in range(len(embedding_texts))
]
def embed_query(self, text: str) -> List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(texts.index(text) + 1)]
def test_neo4jvector() -> None:
"""Test end to end construction and search."""
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_euclidean() -> None:
"""Test euclidean distance"""
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_embeddings() -> None:
"""Test end to end construction with embeddings and search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_catch_wrong_index_name() -> None:
"""Test if index name is misspelled, but node label and property are correct."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
existing = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="test",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(existing)
def test_neo4jvector_catch_wrong_node_label() -> None:
"""Test if node label is misspelled, but index name is correct."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
existing = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="test",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(existing)
def test_neo4jvector_with_metadatas() -> None:
"""Test end to end construction and search."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_with_metadatas_with_scores() -> None:
"""Test end to end construction and search."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
drop_vector_indexes(docsearch)
def test_neo4jvector_relevance_score() -> None:
"""Test to make sure the relevance score is scaled to 0-1."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
assert output == [
(Document(page_content="foo", metadata={"page": "0"}), 1.0),
(Document(page_content="bar", metadata={"page": "1"}), 0.9998376369476318),
(Document(page_content="baz", metadata={"page": "2"}), 0.9993523359298706),
]
drop_vector_indexes(docsearch)
def test_neo4jvector_retriever_search_threshold() -> None:
"""Test using retriever for searching with threshold."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
retriever = docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"k": 3, "score_threshold": 0.9999},
)
output = retriever.get_relevant_documents("foo")
assert output == [
Document(page_content="foo", metadata={"page": "0"}),
]
drop_vector_indexes(docsearch)
def test_custom_return_neo4jvector() -> None:
"""Test end to end construction and search."""
docsearch = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
retrieval_query="RETURN 'foo' AS text, score, {test: 'test'} AS metadata",
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"test": "test"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_prefer_indexname() -> None:
"""Test using when two indexes are found, prefer by index_name."""
Neo4jVector.from_texts(
texts=["foo"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
Neo4jVector.from_texts(
texts=["bar"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Test",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
existing_index = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
text_node_property="info",
)
output = existing_index.similarity_search("bar", k=1)
assert output == [Document(page_content="bar", metadata={})]
drop_vector_indexes(existing_index)
def test_neo4jvector_prefer_indexname_insert() -> None:
"""Test using when two indexes are found, prefer by index_name."""
Neo4jVector.from_texts(
texts=["baz"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
Neo4jVector.from_texts(
texts=["foo"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Test",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
existing_index = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
text_node_property="info",
)
existing_index.add_documents([Document(page_content="bar", metadata={})])
output = existing_index.similarity_search("bar", k=2)
assert output == [
Document(page_content="bar", metadata={}),
Document(page_content="foo", metadata={}),
]
drop_vector_indexes(existing_index)
def test_neo4jvector_hybrid() -> None:
"""Test end to end construction with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_deduplicate() -> None:
"""Test result deduplication with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
output = docsearch.similarity_search("foo", k=3)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_retrieval_query() -> None:
"""Test custom retrieval_query with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
retrieval_query="RETURN 'moo' AS text, score, {test: 'test'} AS metadata",
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="moo", metadata={"test": "test"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_retrieval_query2() -> None:
"""Test custom retrieval_query with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
retrieval_query="RETURN node.text AS text, score, {test: 'test'} AS metadata",
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"test": "test"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_missing_keyword() -> None:
"""Test hybrid search with missing keyword_index_search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
try:
Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
search_type=SearchType.HYBRID,
)
except ValueError as e:
assert str(e) == (
"keyword_index name has to be specified when " "using hybrid search option"
)
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_from_existing() -> None:
"""Test hybrid search with missing keyword_index_search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
existing = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
keyword_index_name="keyword",
search_type=SearchType.HYBRID,
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph() -> None:
"""Test from_existing_graph with a single property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'Foo'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name"],
embedding_node_property="embedding",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: Foo")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_hybrid() -> None:
"""Test from_existing_graph hybrid with a single property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'foo'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name"],
embedding_node_property="embedding",
search_type=SearchType.HYBRID,
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: foo")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_multiple_properties() -> None:
"""Test from_existing_graph with a two property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name", "name2"],
embedding_node_property="embedding",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: Foo\nname2: Fooz")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_multiple_properties_hybrid() -> None:
"""Test from_existing_graph with a two property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name", "name2"],
embedding_node_property="embedding",
search_type=SearchType.HYBRID,
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: Foo\nname2: Fooz")]
drop_vector_indexes(existing)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~qa_with_sources~refine_prompts.py | # flake8: noqa
from langchain.prompts import PromptTemplate
DEFAULT_REFINE_PROMPT_TMPL = (
"Исходный вопрос звучит так: {question}\n"
"Мы предоставили существующий ответ, включая источники: {existing_answer}\n"
"У нас есть возможность уточнить существующий ответ"
"(только если это необходимо) с некоторым дополнительным контекстом ниже.\n"
"------------\n"
"{context_str}\n"
"------------\n"
"Учитывая новый контекст, уточни исходный ответ, чтобы лучше "
"ответить на вопрос. "
"Если ты обновляешь его, пожалуйста, обнови и источники. "
"Если контекст не полезен, верни исходный ответ."
)
DEFAULT_REFINE_PROMPT = PromptTemplate(
input_variables=["question", "existing_answer", "context_str"],
template=DEFAULT_REFINE_PROMPT_TMPL,
)
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"Информация контекста ниже. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Учитывая информацию контекста и не имея предварительных знаний, "
"ответь на вопрос: {question}\n"
)
DEFAULT_TEXT_QA_PROMPT = PromptTemplate(
input_variables=["context_str", "question"], template=DEFAULT_TEXT_QA_PROMPT_TMPL
)
EXAMPLE_PROMPT = PromptTemplate(
template="Содержание: {page_content}\nИсточник: {source}",
input_variables=["page_content", "source"],
)
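# Usage sketch (illustrative values): filling the QA prompt before sending it
# to an LLM; the refine prompt is formatted the same way with its own variables.
if __name__ == "__main__":
    filled = DEFAULT_TEXT_QA_PROMPT.format(
        context_str="GigaChain is a fork of LangChain adapted for GigaChat.",
        question="What is GigaChain?",
    )
    print(filled)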
| [
"existing_answer",
"context_str",
"question",
"Исходный вопрос звучит так: {question}\nМы предоставили существующий ответ, включая источники: {existing_answer}\nУ нас есть возможность уточнить существующий ответ(только если это необходимо) с некоторым дополнительным контекстом ниже.\n------------\n{context_str}\n------------\nУчитывая новый контекст, уточни исходный ответ, чтобы лучше ответить на вопрос. Если ты обновляешь его, пожалуйста, обнови и источники. Если контекст не полезен, верни исходный ответ.",
"Информация контекста ниже. \n---------------------\n{context_str}\n---------------------\nУчитывая информацию контекста и не имея предварительных знаний, ответь на вопрос: {question}\n",
"page_content",
"Содержание: {page_content}\nИсточник: {source}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~agent_toolkits~openapi~planner.py | """Agent that interacts with OpenAPI APIs via a hierarchical planning approach."""
import json
import re
from functools import partial
from typing import Any, Callable, Dict, List, Optional
import yaml
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.openapi.planner_prompt import (
API_CONTROLLER_PROMPT,
API_CONTROLLER_TOOL_DESCRIPTION,
API_CONTROLLER_TOOL_NAME,
API_ORCHESTRATOR_PROMPT,
API_PLANNER_PROMPT,
API_PLANNER_TOOL_DESCRIPTION,
API_PLANNER_TOOL_NAME,
PARSING_DELETE_PROMPT,
PARSING_GET_PROMPT,
PARSING_PATCH_PROMPT,
PARSING_POST_PROMPT,
PARSING_PUT_PROMPT,
REQUESTS_DELETE_TOOL_DESCRIPTION,
REQUESTS_GET_TOOL_DESCRIPTION,
REQUESTS_PATCH_TOOL_DESCRIPTION,
REQUESTS_POST_TOOL_DESCRIPTION,
REQUESTS_PUT_TOOL_DESCRIPTION,
)
from langchain.agents.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.memory import ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain.pydantic_v1 import Field
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.base import BaseTool
from langchain.tools.requests.tool import BaseRequestsTool
from langchain.utilities.requests import RequestsWrapper
#
# Requests tools with LLM-instructed extraction of truncated responses.
#
# Of course, truncating so bluntly may lose a lot of valuable
# information in the response.
# However, the goal for now is to have only a single inference step.
MAX_RESPONSE_LENGTH = 5000
"""Maximum length of the response to be returned."""
def _get_default_llm_chain(prompt: BasePromptTemplate) -> LLMChain:
return LLMChain(
llm=OpenAI(),
prompt=prompt,
)
def _get_default_llm_chain_factory(
prompt: BasePromptTemplate,
) -> Callable[[], LLMChain]:
"""Returns a default LLMChain factory."""
return partial(_get_default_llm_chain, prompt)
class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):
"""Requests GET tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_get"
"""Tool name."""
description = REQUESTS_GET_TOOL_DESCRIPTION
"""Tool description."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_GET_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
data_params = data.get("params")
response = self.requests_wrapper.get(data["url"], params=data_params)
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool):
"""Requests POST tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_post"
"""Tool name."""
description = REQUESTS_POST_TOOL_DESCRIPTION
"""Tool description."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_POST_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.post(data["url"], data["data"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool):
"""Requests PATCH tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_patch"
"""Tool name."""
description = REQUESTS_PATCH_TOOL_DESCRIPTION
"""Tool description."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_PATCH_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.patch(data["url"], data["data"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPutToolWithParsing(BaseRequestsTool, BaseTool):
"""Requests PUT tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_put"
"""Tool name."""
description = REQUESTS_PUT_TOOL_DESCRIPTION
"""Tool description."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_PUT_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.put(data["url"], data["data"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool):
"""A tool that sends a DELETE request and parses the response."""
name: str = "requests_delete"
"""The name of the tool."""
description = REQUESTS_DELETE_TOOL_DESCRIPTION
"""The description of the tool."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""The maximum length of the response."""
llm_chain: LLMChain = Field(
default_factory=_get_default_llm_chain_factory(PARSING_DELETE_PROMPT)
)
"""The LLM chain used to parse the response."""
def _run(self, text: str) -> str:
try:
data = json.loads(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.delete(data["url"])
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
#
# Orchestrator, planner, controller.
#
def _create_api_planner_tool(
api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel
) -> Tool:
endpoint_descriptions = [
f"{name} {description}" for name, description, _ in api_spec.endpoints
]
prompt = PromptTemplate(
template=API_PLANNER_PROMPT,
input_variables=["query"],
partial_variables={"endpoints": "- " + "- ".join(endpoint_descriptions)},
)
chain = LLMChain(llm=llm, prompt=prompt)
tool = Tool(
name=API_PLANNER_TOOL_NAME,
description=API_PLANNER_TOOL_DESCRIPTION,
func=chain.run,
)
return tool
def _create_api_controller_agent(
api_url: str,
api_docs: str,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
) -> AgentExecutor:
get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT)
post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
tools: List[BaseTool] = [
RequestsGetToolWithParsing(
requests_wrapper=requests_wrapper, llm_chain=get_llm_chain
),
RequestsPostToolWithParsing(
requests_wrapper=requests_wrapper, llm_chain=post_llm_chain
),
]
prompt = PromptTemplate(
template=API_CONTROLLER_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"api_url": api_url,
"api_docs": api_docs,
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt),
allowed_tools=[tool.name for tool in tools],
)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
def _create_api_controller_tool(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
) -> Tool:
"""Expose controller as a tool.
The tool is invoked with a plan from the planner, and dynamically
creates a controller agent with relevant documentation only to
constrain the context.
"""
base_url = api_spec.servers[0]["url"] # TODO: do better.
def _create_and_run_api_controller_agent(plan_str: str) -> str:
pattern = r"\b(GET|POST|PATCH|DELETE)\s+(/\S+)*"
matches = re.findall(pattern, plan_str)
endpoint_names = [
"{method} {route}".format(method=method, route=route.split("?")[0])
for method, route in matches
]
docs_str = ""
for endpoint_name in endpoint_names:
found_match = False
for name, _, docs in api_spec.endpoints:
                regex_name = re.compile(re.sub(r"\{.*?\}", ".*", name))
if regex_name.match(endpoint_name):
found_match = True
docs_str += f"== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n"
if not found_match:
raise ValueError(f"{endpoint_name} endpoint does not exist.")
agent = _create_api_controller_agent(base_url, docs_str, requests_wrapper, llm)
return agent.run(plan_str)
return Tool(
name=API_CONTROLLER_TOOL_NAME,
func=_create_and_run_api_controller_agent,
description=API_CONTROLLER_TOOL_DESCRIPTION,
)
def create_openapi_agent(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
shared_memory: Optional[ReadOnlySharedMemory] = None,
callback_manager: Optional[BaseCallbackManager] = None,
verbose: bool = True,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Instantiate OpenAI API planner and controller for a given spec.
Inject credentials via requests_wrapper.
We use a top-level "orchestrator" agent to invoke the planner and controller,
rather than a top-level planner
that invokes a controller with its plan. This is to keep the planner simple.
"""
tools = [
_create_api_planner_tool(api_spec, llm),
_create_api_controller_tool(api_spec, requests_wrapper, llm),
]
prompt = PromptTemplate(
template=API_ORCHESTRATOR_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt, memory=shared_memory),
allowed_tools=[tool.name for tool in tools],
**kwargs,
)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
)
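# Example usage sketch: "openapi.yaml" and the bearer token below are
# placeholders — point them at a real OpenAPI spec and real credentials.
if __name__ == "__main__":
    from langchain.agents.agent_toolkits.openapi.spec import reduce_openapi_spec

    with open("openapi.yaml") as f:
        raw_spec = yaml.safe_load(f)
    requests_wrapper = RequestsWrapper(headers={"Authorization": "Bearer <token>"})
    openapi_agent = create_openapi_agent(
        reduce_openapi_spec(raw_spec), requests_wrapper, OpenAI(temperature=0.0)
    )
    print(openapi_agent.run("What endpoints does this API expose?"))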
| [
"tool_descriptions",
"\n",
"tool_names",
"agent_scratchpad",
"- ",
"input",
", ",
"endpoints"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~rwkv.py | """RWKV models.
Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
"""
from typing import Any, Dict, List, Mapping, Optional, Set
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
class RWKV(LLM, BaseModel):
"""RWKV language models.
To use, you should have the ``rwkv`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain.llms import RWKV
model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32")
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained RWKV model file."""
tokens_path: str
"""Path to the RWKV tokens file."""
strategy: str = "cpu fp32"
"""Token context window."""
rwkv_verbose: bool = True
"""Print debug information."""
temperature: float = 1.0
"""The temperature to use for sampling."""
top_p: float = 0.5
"""The top-p value to use for sampling."""
penalty_alpha_frequency: float = 0.4
"""Positive values penalize new tokens based on their existing frequency
in the text so far, decreasing the model's likelihood to repeat the same
line verbatim.."""
penalty_alpha_presence: float = 0.4
"""Positive values penalize new tokens based on whether they appear
in the text so far, increasing the model's likelihood to talk about
new topics.."""
CHUNK_LEN: int = 256
"""Batch size for prompt processing."""
max_tokens_per_generation: int = 256
"""Maximum number of tokens to generate."""
client: Any = None #: :meta private:
tokenizer: Any = None #: :meta private:
pipeline: Any = None #: :meta private:
model_tokens: Any = None #: :meta private:
model_state: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"verbose": self.verbose,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_alpha_frequency": self.penalty_alpha_frequency,
"penalty_alpha_presence": self.penalty_alpha_presence,
"CHUNK_LEN": self.CHUNK_LEN,
"max_tokens_per_generation": self.max_tokens_per_generation,
}
@staticmethod
def _rwkv_param_names() -> Set[str]:
"""Get the identifying parameters."""
return {
"verbose",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
import tokenizers
except ImportError:
raise ImportError(
"Could not import tokenizers python package. "
"Please install it with `pip install tokenizers`."
)
try:
from rwkv.model import RWKV as RWKVMODEL
from rwkv.utils import PIPELINE
values["tokenizer"] = tokenizers.Tokenizer.from_file(values["tokens_path"])
rwkv_keys = cls._rwkv_param_names()
model_kwargs = {k: v for k, v in values.items() if k in rwkv_keys}
model_kwargs["verbose"] = values["rwkv_verbose"]
values["client"] = RWKVMODEL(
values["model"], strategy=values["strategy"], **model_kwargs
)
values["pipeline"] = PIPELINE(values["client"], values["tokens_path"])
except ImportError:
raise ImportError(
"Could not import rwkv python package. "
"Please install it with `pip install rwkv`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params,
**{k: v for k, v in self.__dict__.items() if k in RWKV._rwkv_param_names()},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "rwkv"
def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any:
AVOID_REPEAT_TOKENS = []
AVOID_REPEAT = ",:?!"
for i in AVOID_REPEAT:
dd = self.pipeline.encode(i)
assert len(dd) == 1
AVOID_REPEAT_TOKENS += dd
tokens = [int(x) for x in _tokens]
self.model_tokens += tokens
out: Any = None
while len(tokens) > 0:
out, self.model_state = self.client.forward(
tokens[: self.CHUNK_LEN], self.model_state
)
tokens = tokens[self.CHUNK_LEN :]
END_OF_LINE = 187
out[END_OF_LINE] += newline_adj # adjust \n probability
if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
out[self.model_tokens[-1]] = -999999999
return out
def rwkv_generate(self, prompt: str) -> str:
self.model_state = None
self.model_tokens = []
logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
begin = len(self.model_tokens)
out_last = begin
occurrence: Dict = {}
decoded = ""
for i in range(self.max_tokens_per_generation):
for n in occurrence:
logits[n] -= (
self.penalty_alpha_presence
+ occurrence[n] * self.penalty_alpha_frequency
)
token = self.pipeline.sample_logits(
logits, temperature=self.temperature, top_p=self.top_p
)
END_OF_TEXT = 0
if token == END_OF_TEXT:
break
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
logits = self.run_rnn([token])
xxx = self.tokenizer.decode(self.model_tokens[out_last:])
if "\ufffd" not in xxx: # avoid utf-8 display issues
decoded += xxx
out_last = begin + i + 1
if i >= self.max_tokens_per_generation - 100:
break
return decoded
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""RWKV generation
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
                response = model(prompt)
"""
text = self.rwkv_generate(prompt)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
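# Example usage sketch: the model and tokenizer paths below are placeholders
# for locally downloaded RWKV weights and vocabulary files.
if __name__ == "__main__":
    rwkv = RWKV(
        model="./models/rwkv-3b-fp16.bin",
        tokens_path="./models/20B_tokenizer.json",
        strategy="cpu fp32",
        max_tokens_per_generation=64,
    )
    # `stop` truncates the completion at the first matching stop string.
    print(rwkv("Once upon a time, ", stop=["\n\n"]))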
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~airbyte_json.py | import json
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict
class AirbyteJSONLoader(BaseLoader):
"""Load local `Airbyte` json files."""
def __init__(self, file_path: str):
"""Initialize with a file path. This should start with '/tmp/airbyte_local/'."""
self.file_path = file_path
"""Path to the directory containing the json files."""
def load(self) -> List[Document]:
text = ""
        with open(self.file_path, "r") as f:
            for line in f:
                data = json.loads(line)["_airbyte_data"]
                text += stringify_dict(data)
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
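# Example usage sketch: the file name below is a placeholder for a stream
# written by Airbyte's local JSON destination under /tmp/airbyte_local/.
if __name__ == "__main__":
    loader = AirbyteJSONLoader("/tmp/airbyte_local/json_data/_airbyte_raw_users.jsonl")
    for doc in loader.load():
        print(doc.metadata["source"], doc.page_content[:200])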
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~pubmed.py | import json
import logging
import time
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Dict, Iterator, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
class PubMedAPIWrapper(BaseModel):
"""
Wrapper around PubMed API.
This wrapper will use the PubMed API to conduct searches and fetch
document summaries. By default, it will return the document summaries
of the top-k results of an input search.
Parameters:
top_k_results: number of the top-scored document used for the PubMed tool
MAX_QUERY_LENGTH: maximum length of the query.
Default is 300 characters.
doc_content_chars_max: maximum length of the document content.
Content will be truncated if it exceeds this length.
Default is 2000 characters.
max_retry: maximum number of retries for a request. Default is 5.
sleep_time: time to wait between retries.
Default is 0.2 seconds.
email: email address to be used for the PubMed API.
"""
parse: Any #: :meta private:
base_url_esearch: str = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
)
base_url_efetch: str = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
max_retry: int = 5
sleep_time: float = 0.2
# Default values for the parameters
top_k_results: int = 3
MAX_QUERY_LENGTH: int = 300
doc_content_chars_max: int = 2000
email: str = "[email protected]"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import xmltodict
values["parse"] = xmltodict.parse
except ImportError:
raise ImportError(
"Could not import xmltodict python package. "
"Please install it with `pip install xmltodict`."
)
return values
def run(self, query: str) -> str:
"""
Run PubMed search and get the article meta information.
See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
It uses only the most informative fields of article meta information.
"""
try:
# Retrieve the top-k results for the query
docs = [
f"Published: {result['Published']}\n"
f"Title: {result['Title']}\n"
f"Copyright Information: {result['Copyright Information']}\n"
f"Summary::\n{result['Summary']}"
for result in self.load(query[: self.MAX_QUERY_LENGTH])
]
# Join the results and limit the character count
return (
"\n\n".join(docs)[: self.doc_content_chars_max]
if docs
else "No good PubMed Result was found"
)
except Exception as ex:
return f"PubMed exception: {ex}"
def lazy_load(self, query: str) -> Iterator[dict]:
"""
Search PubMed for documents matching the query.
Return an iterator of dictionaries containing the document metadata.
"""
url = (
self.base_url_esearch
+ "db=pubmed&term="
            + urllib.parse.quote(query)
+ f"&retmode=json&retmax={self.top_k_results}&usehistory=y"
)
result = urllib.request.urlopen(url)
text = result.read().decode("utf-8")
json_text = json.loads(text)
webenv = json_text["esearchresult"]["webenv"]
for uid in json_text["esearchresult"]["idlist"]:
yield self.retrieve_article(uid, webenv)
def load(self, query: str) -> List[dict]:
"""
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
"""
return list(self.lazy_load(query))
def _dict2document(self, doc: dict) -> Document:
summary = doc.pop("Summary")
return Document(page_content=summary, metadata=doc)
def lazy_load_docs(self, query: str) -> Iterator[Document]:
for d in self.lazy_load(query=query):
yield self._dict2document(d)
def load_docs(self, query: str) -> List[Document]:
return list(self.lazy_load_docs(query=query))
def retrieve_article(self, uid: str, webenv: str) -> dict:
url = (
self.base_url_efetch
+ "db=pubmed&retmode=xml&id="
+ uid
+ "&webenv="
+ webenv
)
retry = 0
while True:
try:
result = urllib.request.urlopen(url)
break
except urllib.error.HTTPError as e:
if e.code == 429 and retry < self.max_retry:
# Too Many Requests errors
# wait for an exponentially increasing amount of time
print(
f"Too Many Requests, "
f"waiting for {self.sleep_time:.2f} seconds..."
)
time.sleep(self.sleep_time)
self.sleep_time *= 2
retry += 1
else:
raise e
xml_text = result.read().decode("utf-8")
text_dict = self.parse(xml_text)
return self._parse_article(uid, text_dict)
def _parse_article(self, uid: str, text_dict: dict) -> dict:
try:
ar = text_dict["PubmedArticleSet"]["PubmedArticle"]["MedlineCitation"][
"Article"
]
except KeyError:
ar = text_dict["PubmedArticleSet"]["PubmedBookArticle"]["BookDocument"]
abstract_text = ar.get("Abstract", {}).get("AbstractText", [])
summaries = [
f"{txt['@Label']}: {txt['#text']}"
for txt in abstract_text
if "#text" in txt and "@Label" in txt
]
summary = (
"\n".join(summaries)
if summaries
else (
abstract_text
if isinstance(abstract_text, str)
else (
"\n".join(str(value) for value in abstract_text.values())
if isinstance(abstract_text, dict)
else "No abstract available"
)
)
)
a_d = ar.get("ArticleDate", {})
pub_date = "-".join(
[a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
)
return {
"uid": uid,
"Title": ar.get("ArticleTitle", ""),
"Published": pub_date,
"Copyright Information": ar.get("Abstract", {}).get(
"CopyrightInformation", ""
),
"Summary": summary,
}
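# Example usage sketch: running it performs live requests against the NCBI
# E-utilities endpoints; the e-mail address below is a placeholder.
if __name__ == "__main__":
    pubmed = PubMedAPIWrapper(top_k_results=2, email="[email protected]")
    # `run` returns a formatted summary string, `load_docs` returns Documents.
    print(pubmed.run("chloroquine"))
    for doc in pubmed.load_docs("chloroquine"):
        print(doc.metadata["Title"])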
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~gradient_ai.py | import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Mapping, Optional, Sequence, TypedDict
import aiohttp
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env
class TrainResult(TypedDict):
"""Train result."""
loss: float
class GradientLLM(BaseLLM):
"""Gradient.ai LLM Endpoints.
GradientLLM is a class to interact with LLMs on gradient.ai
To use, set the environment variable ``GRADIENT_ACCESS_TOKEN`` with your
API token and ``GRADIENT_WORKSPACE_ID`` for your gradient workspace,
or alternatively provide them as keywords to the constructor of this class.
Example:
.. code-block:: python
from langchain.llms import GradientLLM
GradientLLM(
model="99148c6d-c2a0-4fbe-a4a7-e7c05bdb8a09_base_ml_model",
model_kwargs={
"max_generated_token_count": 128,
"temperature": 0.75,
"top_p": 0.95,
"top_k": 20,
"stop": [],
},
gradient_workspace_id="12345614fc0_workspace",
gradient_access_token="gradientai-access_token",
)
"""
model_id: str = Field(alias="model", min_length=2)
"Underlying gradient.ai model id (base or fine-tuned)."
gradient_workspace_id: Optional[str] = None
"Underlying gradient.ai workspace_id."
gradient_access_token: Optional[str] = None
"""gradient.ai API Token, which can be generated by going to
https://auth.gradient.ai/select-workspace
and selecting "Access tokens" under the profile drop-down.
"""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
gradient_api_url: str = "https://api.gradient.ai/api"
"""Endpoint URL to use."""
aiosession: Optional[aiohttp.ClientSession] = None #: :meta private:
"""ClientSession, private, subject to change in upcoming releases."""
# LLM call kwargs
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
extra = Extra.forbid
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["gradient_access_token"] = get_from_dict_or_env(
values, "gradient_access_token", "GRADIENT_ACCESS_TOKEN"
)
values["gradient_workspace_id"] = get_from_dict_or_env(
values, "gradient_workspace_id", "GRADIENT_WORKSPACE_ID"
)
if (
values["gradient_access_token"] is None
or len(values["gradient_access_token"]) < 10
):
raise ValueError("env variable `GRADIENT_ACCESS_TOKEN` must be set")
if (
values["gradient_workspace_id"] is None
or len(values["gradient_access_token"]) < 3
):
raise ValueError("env variable `GRADIENT_WORKSPACE_ID` must be set")
if values["model_kwargs"]:
kw = values["model_kwargs"]
if not 0 <= kw.get("temperature", 0.5) <= 1:
raise ValueError("`temperature` must be in the range [0.0, 1.0]")
if not 0 <= kw.get("top_p", 0.5) <= 1:
raise ValueError("`top_p` must be in the range [0.0, 1.0]")
if 0 >= kw.get("top_k", 0.5):
raise ValueError("`top_k` must be positive")
if 0 >= kw.get("max_generated_token_count", 1):
raise ValueError("`max_generated_token_count` must be positive")
values["gradient_api_url"] = get_from_dict_or_env(
values, "gradient_api_url", "GRADIENT_API_URL"
)
try:
import gradientai # noqa
except ImportError:
logging.warning(
"DeprecationWarning: `GradientLLM` will use "
"`pip install gradientai` in future releases of langchain."
)
except Exception:
pass
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"gradient_api_url": self.gradient_api_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gradient"
def _kwargs_post_fine_tune_request(
self, inputs: Sequence[str], kwargs: Mapping[str, Any]
) -> Mapping[str, Any]:
"""Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,dict]]: _description_
"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
multipliers = _params.get("multipliers", None)
return dict(
url=f"{self.gradient_api_url}/models/{self.model_id}/fine-tune",
headers={
"authorization": f"Bearer {self.gradient_access_token}",
"x-gradient-workspace-id": f"{self.gradient_workspace_id}",
"accept": "application/json",
"content-type": "application/json",
},
json=dict(
samples=tuple(
{
"inputs": input,
}
for input in inputs
)
if multipliers is None
else tuple(
{
"inputs": input,
"fineTuningParameters": {
"multiplier": multiplier,
},
}
for input, multiplier in zip(inputs, multipliers)
),
),
)
def _kwargs_post_request(
self, prompt: str, kwargs: Mapping[str, Any]
) -> Mapping[str, Any]:
"""Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,dict]]: _description_
"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
return dict(
url=f"{self.gradient_api_url}/models/{self.model_id}/complete",
headers={
"authorization": f"Bearer {self.gradient_access_token}",
"x-gradient-workspace-id": f"{self.gradient_workspace_id}",
"accept": "application/json",
"content-type": "application/json",
},
json=dict(
query=prompt,
maxGeneratedTokenCount=_params.get("max_generated_token_count", None),
temperature=_params.get("temperature", None),
topK=_params.get("top_k", None),
topP=_params.get("top_p", None),
),
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Gradients API `model/{id}/complete`.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
try:
response = requests.post(**self._kwargs_post_request(prompt, kwargs))
if response.status_code != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
except requests.exceptions.RequestException as e:
raise Exception(f"RequestException while calling Gradient Endpoint: {e}")
text = response.json()["generatedOutput"]
if stop is not None:
# Apply stop tokens when making calls to Gradient
text = enforce_stop_tokens(text, stop)
return text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Async Call to Gradients API `model/{id}/complete`.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(
**self._kwargs_post_request(prompt=prompt, kwargs=kwargs)
) as response:
if response.status != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status}: {response.text}"
)
text = (await response.json())["generatedOutput"]
else:
async with self.aiosession.post(
**self._kwargs_post_request(prompt=prompt, kwargs=kwargs)
) as response:
if response.status != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status}: {response.text}"
)
text = (await response.json())["generatedOutput"]
if stop is not None:
# Apply stop tokens when making calls to Gradient
text = enforce_stop_tokens(text, stop)
return text
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# same thing with threading
def _inner_generate(prompt: str) -> List[Generation]:
return [
Generation(
text=self._call(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
)
)
]
if len(prompts) <= 1:
generations = list(map(_inner_generate, prompts))
else:
with ThreadPoolExecutor(min(8, len(prompts))) as p:
generations = list(p.map(_inner_generate, prompts))
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
generations = []
        # Run all completions concurrently and await the gathered results.
        for generation in await asyncio.gather(
            *(
                self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs)
                for prompt in prompts
            )
        ):
generations.append([Generation(text=generation)])
return LLMResult(generations=generations)
def train_unsupervised(
self,
inputs: Sequence[str],
**kwargs: Any,
) -> TrainResult:
try:
response = requests.post(
**self._kwargs_post_fine_tune_request(inputs, kwargs)
)
if response.status_code != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
except requests.exceptions.RequestException as e:
raise Exception(f"RequestException while calling Gradient Endpoint: {e}")
response_json = response.json()
loss = response_json["sumLoss"] / response_json["numberOfTrainableTokens"]
return TrainResult(loss=loss)
async def atrain_unsupervised(
self,
inputs: Sequence[str],
**kwargs: Any,
) -> TrainResult:
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(
**self._kwargs_post_fine_tune_request(inputs, kwargs)
) as response:
if response.status != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
loss = (
response_json["sumLoss"]
/ response_json["numberOfTrainableTokens"]
)
else:
async with self.aiosession.post(
**self._kwargs_post_fine_tune_request(inputs, kwargs)
) as response:
if response.status != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
loss = (
response_json["sumLoss"] / response_json["numberOfTrainableTokens"]
)
return TrainResult(loss=loss)
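# Example usage sketch: the model id, workspace id and access token below are
# placeholders for real Gradient.ai credentials.
if __name__ == "__main__":
    llm = GradientLLM(
        model="99148c6d-c2a0-4fbe-a4a7-e7c05bdb8a09_base_ml_model",
        gradient_workspace_id="<gradient-workspace-id>",
        gradient_access_token="<gradient-access-token>",
        model_kwargs={"max_generated_token_count": 64, "temperature": 0.7},
    )
    print(llm("What is Gradient.ai?"))
    # Unsupervised fine-tuning posts raw samples to the fine-tune endpoint and
    # reports the mean loss per trainable token.
    print(llm.train_unsupervised(["Gradient.ai hosts and fine-tunes LLMs."])["loss"])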
| [
"application/json"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~huggingface_text_gen_inference.py | import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils import get_pydantic_field_names
logger = logging.getLogger(__name__)
class HuggingFaceTextGenInference(LLM):
"""
HuggingFace text generation API.
To use, you should have the `text-generation` python package installed and
a text-generation server running.
Example:
.. code-block:: python
# Basic Example (no streaming)
llm = HuggingFaceTextGenInference(
inference_server_url="http://localhost:8010/",
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
)
print(llm("What is Deep Learning?"))
# Streaming response example
from langchain.callbacks import streaming_stdout
callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()]
llm = HuggingFaceTextGenInference(
inference_server_url="http://localhost:8010/",
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
callbacks=callbacks,
streaming=True
)
print(llm("What is Deep Learning?"))
"""
max_new_tokens: int = 512
"""Maximum number of generated tokens"""
top_k: Optional[int] = None
"""The number of highest probability vocabulary tokens to keep for
top-k-filtering."""
top_p: Optional[float] = 0.95
"""If set to < 1, only the smallest set of most probable tokens with probabilities
that add up to `top_p` or higher are kept for generation."""
typical_p: Optional[float] = 0.95
"""Typical Decoding mass. See [Typical Decoding for Natural Language
Generation](https://arxiv.org/abs/2202.00666) for more information."""
temperature: Optional[float] = 0.8
"""The value used to module the logits distribution."""
repetition_penalty: Optional[float] = None
"""The parameter for repetition penalty. 1.0 means no penalty.
See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details."""
return_full_text: bool = False
"""Whether to prepend the prompt to the generated text"""
truncate: Optional[int] = None
"""Truncate inputs tokens to the given size"""
stop_sequences: List[str] = Field(default_factory=list)
"""Stop generating tokens if a member of `stop_sequences` is generated"""
seed: Optional[int] = None
"""Random sampling seed"""
inference_server_url: str = ""
"""text-generation-inference instance base url"""
timeout: int = 120
"""Timeout in seconds"""
streaming: bool = False
"""Whether to generate a stream of tokens asynchronously"""
do_sample: bool = False
"""Activate logits sampling"""
watermark: bool = False
"""Watermarking with [A Watermark for Large Language Models]
(https://arxiv.org/abs/2301.10226)"""
server_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any text-generation-inference server parameters not explicitly specified"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `call` not explicitly specified"""
client: Any
async_client: Any
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
import text_generation
values["client"] = text_generation.Client(
values["inference_server_url"],
timeout=values["timeout"],
**values["server_kwargs"],
)
values["async_client"] = text_generation.AsyncClient(
values["inference_server_url"],
timeout=values["timeout"],
**values["server_kwargs"],
)
except ImportError:
raise ImportError(
"Could not import text_generation python package. "
"Please install it with `pip install text_generation`."
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_textgen_inference"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling text generation inference API."""
return {
"max_new_tokens": self.max_new_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"typical_p": self.typical_p,
"temperature": self.temperature,
"repetition_penalty": self.repetition_penalty,
"truncate": self.truncate,
"stop_sequences": self.stop_sequences,
"seed": self.seed,
"do_sample": self.do_sample,
"watermark": self.watermark,
**self.model_kwargs,
}
def _invocation_params(
self, runtime_stop: Optional[List[str]], **kwargs: Any
) -> Dict[str, Any]:
params = {**self._default_params, **kwargs}
params["stop_sequences"] = params["stop_sequences"] + (runtime_stop or [])
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
invocation_params = self._invocation_params(stop, **kwargs)
res = self.client.generate(prompt, **invocation_params)
# remove stop sequences from the end of the generated text
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in res.generated_text:
res.generated_text = res.generated_text[
: res.generated_text.index(stop_seq)
]
return res.generated_text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.streaming:
completion = ""
async for chunk in self._astream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
invocation_params = self._invocation_params(stop, **kwargs)
res = await self.async_client.generate(prompt, **invocation_params)
# remove stop sequences from the end of the generated text
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in res.generated_text:
res.generated_text = res.generated_text[
: res.generated_text.index(stop_seq)
]
return res.generated_text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
for res in self.client.generate_stream(prompt, **invocation_params):
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in res.token.text:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if res.token.special:
text = None
elif stop_seq_found:
text = res.token.text[: res.token.text.index(stop_seq_found)]
else:
text = res.token.text
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text)
# break if stop sequence found
if stop_seq_found:
break
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
async for res in self.async_client.generate_stream(prompt, **invocation_params):
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in res.token.text:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if res.token.special:
text = None
elif stop_seq_found:
text = res.token.text[: res.token.text.index(stop_seq_found)]
else:
text = res.token.text
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
# break if stop sequence found
if stop_seq_found:
break
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~pal_chain~math_prompt.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
template = (
'''
Q: У Оливии было $23. Она купила пять бейглов по $3 каждый. Сколько денег у неё осталось?
# solution in Python:
def solution():
"""У Оливии было $23. Она купила пять бейглов по $3 каждый. Сколько денег у неё осталось?"""
money_initial = 23
bagels = 5
bagel_cost = 3
money_spent = bagels * bagel_cost
money_left = money_initial - money_spent
result = money_left
return result
Q: У Майкла было 58 мячей для гольфа. Во вторник он потерял 23 мяча. В среду он потерял еще 2. Сколько мячей для гольфа у него осталось в конце среды?
# solution in Python:
def solution():
"""У Майкла было 58 мячей для гольфа. Во вторник он потерял 23 мяча. В среду он потерял еще 2. Сколько мячей для гольфа у него осталось в конце среды?"""
golf_balls_initial = 58
golf_balls_lost_tuesday = 23
golf_balls_lost_wednesday = 2
golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday
result = golf_balls_left
return result
Q: В серверной было девять компьютеров. С понедельника по четверг каждый день устанавливали по пять новых компьютеров. Сколько компьютеров теперь в серверной?
# solution in Python:
def solution():
"""В серверной было девять компьютеров. С понедельника по четверг каждый день устанавливали по пять новых компьютеров. Сколько компьютеров теперь в серверной?"""
computers_initial = 9
computers_per_day = 5
num_days = 4 # 4 days between monday and thursday
computers_added = computers_per_day * num_days
computers_total = computers_initial + computers_added
result = computers_total
return result
Q: У Шона было пять игрушек. На Рождество он получил по две игрушки от мамы и папы. Сколько игрушек у него теперь?
# solution in Python:
def solution():
"""У Шона было пять игрушек. На Рождество он получил по две игрушки от мамы и папы. Сколько игрушек у него теперь?"""
toys_initial = 5
mom_toys = 2
dad_toys = 2
total_received = mom_toys + dad_toys
total_toys = toys_initial + total_received
result = total_toys
return result
Q: У Джейсона было 20 леденцов. Он отдал некоторые леденцы Денни. Теперь у Джейсона 12 леденцов. Сколько леденцов Джейсон отдал Денни?
# solution in Python:
def solution():
"""У Джейсона было 20 леденцов. Он отдал некоторые леденцы Денни. Теперь у Джейсона 12 леденцов. Сколько леденцов Джейсон отдал Денни?"""
jason_lollipops_initial = 20
jason_lollipops_after = 12
denny_lollipops = jason_lollipops_initial - jason_lollipops_after
result = denny_lollipops
return result
Q: У Лии было 32 шоколадки, а у её сестры - 42. Если они съели 35, сколько шоколадок у них осталось в общей сложности?
# solution in Python:
def solution():
"""У Лии было 32 шоколадки, а у её сестры - 42. Если они съели 35, сколько шоколадок у них осталось в общей сложности?"""
leah_chocolates = 32
sister_chocolates = 42
total_chocolates = leah_chocolates + sister_chocolates
chocolates_eaten = 35
chocolates_left = total_chocolates - chocolates_eaten
result = chocolates_left
return result
Q: Если на парковке 3 автомобиля и приезжают еще 2, сколько автомобилей теперь на парковке?
# solution in Python:
def solution():
"""Если на парковке 3 автомобиля и приезжают еще 2, сколько автомобилей теперь на парковке?"""
cars_initial = 3
cars_arrived = 2
total_cars = cars_initial + cars_arrived
result = total_cars
return result
Q: В роще 15 деревьев. Сегодня садовники посадят еще деревья. Когда они закончат, в роще будет 21 дерево. Сколько деревьев садовники посадили сегодня?
# solution in Python:
def solution():
"""В роще 15 деревьев. Сегодня садовники посадят еще деревья. Когда они закончат, в роще будет 21 дерево. Сколько деревьев садовники посадили сегодня?"""
trees_initial = 15
trees_after = 21
trees_added = trees_after - trees_initial
result = trees_added
return result
Q: {question}
# solution in Python:
'''.strip()
+ "\n\n\n"
)
MATH_PROMPT = PromptTemplate(input_variables=["question"], template=template)
| [
"question",
"Q: У Оливии было $23. Она купила пять бейглов по $3 каждый. Сколько денег у неё осталось?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"У Оливии было $23. Она купила пять бейглов по $3 каждый. Сколько денег у неё осталось?\"\"\"\n money_initial = 23\n bagels = 5\n bagel_cost = 3\n money_spent = bagels * bagel_cost\n money_left = money_initial - money_spent\n result = money_left\n return result\n\n\n\n\n\nQ: У Майкла было 58 мячей для гольфа. Во вторник он потерял 23 мяча. В среду он потерял еще 2. Сколько мячей для гольфа у него осталось в конце среды?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"У Майкла было 58 мячей для гольфа. Во вторник он потерял 23 мяча. В среду он потерял еще 2. Сколько мячей для гольфа у него осталось в конце среды?\"\"\"\n golf_balls_initial = 58\n golf_balls_lost_tuesday = 23\n golf_balls_lost_wednesday = 2\n golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday\n result = golf_balls_left\n return result\n\n\n\n\n\nQ: В серверной было девять компьютеров. С понедельника по четверг каждый день устанавливали по пять новых компьютеров. Сколько компьютеров теперь в серверной?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"В серверной было девять компьютеров. С понедельника по четверг каждый день устанавливали по пять новых компьютеров. Сколько компьютеров теперь в серверной?\"\"\"\n computers_initial = 9\n computers_per_day = 5\n num_days = 4 # 4 days between monday and thursday\n computers_added = computers_per_day * num_days\n computers_total = computers_initial + computers_added\n result = computers_total\n return result\n\n\n\n\n\nQ: У Шона было пять игрушек. На Рождество он получил по две игрушки от мамы и папы. Сколько игрушек у него теперь?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"У Шона было пять игрушек. На Рождество он получил по две игрушки от мамы и папы. Сколько игрушек у него теперь?\"\"\"\n toys_initial = 5\n mom_toys = 2\n dad_toys = 2\n total_received = mom_toys + dad_toys\n total_toys = toys_initial + total_received\n result = total_toys\n return result\n\n\n\n\n\nQ: У Джейсона было 20 леденцов. Он отдал некоторые леденцы Денни. Теперь у Джейсона 12 леденцов. Сколько леденцов Джейсон отдал Денни?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"У Джейсона было 20 леденцов. Он отдал некоторые леденцы Денни. Теперь у Джейсона 12 леденцов. Сколько леденцов Джейсон отдал Денни?\"\"\"\n jason_lollipops_initial = 20\n jason_lollipops_after = 12\n denny_lollipops = jason_lollipops_initial - jason_lollipops_after\n result = denny_lollipops\n return result\n\n\n\n\n\nQ: У Лии было 32 шоколадки, а у её сестры - 42. Если они съели 35, сколько шоколадок у них осталось в общей сложности?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"У Лии было 32 шоколадки, а у её сестры - 42. Если они съели 35, сколько шоколадок у них осталось в общей сложности?\"\"\"\n leah_chocolates = 32\n sister_chocolates = 42\n total_chocolates = leah_chocolates + sister_chocolates\n chocolates_eaten = 35\n chocolates_left = total_chocolates - chocolates_eaten\n result = chocolates_left\n return result\n\n\n\n\n\nQ: Если на парковке 3 автомобиля и приезжают еще 2, сколько автомобилей теперь на парковке?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Если на парковке 3 автомобиля и приезжают еще 2, сколько автомобилей теперь на парковке?\"\"\"\n cars_initial = 3\n cars_arrived = 2\n total_cars = cars_initial + cars_arrived\n result = total_cars\n return result\n\n\n\n\n\nQ: В роще 15 деревьев. 
Сегодня садовники посадят еще деревья. Когда они закончат, в роще будет 21 дерево. Сколько деревьев садовники посадили сегодня?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"В роще 15 деревьев. Сегодня садовники посадят еще деревья. Когда они закончат, в роще будет 21 дерево. Сколько деревьев садовники посадили сегодня?\"\"\"\n trees_initial = 15\n trees_after = 21\n trees_added = trees_after - trees_initial\n result = trees_added\n return result\n\n\n\n\n\nQ: {question}\n\n# solution in Python:\n\n\n"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~usearch.py | from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
def dependable_usearch_import() -> Any:
"""
Import usearch if available, otherwise raise error.
"""
try:
import usearch.index
except ImportError:
raise ImportError(
"Could not import usearch python package. "
"Please install it with `pip install usearch` "
)
return usearch.index
class USearch(VectorStore):
"""`USearch` vector store.
To use, you should have the ``usearch`` python package installed.
"""
def __init__(
self,
embedding: Embeddings,
index: Any,
docstore: Docstore,
ids: List[str],
):
"""Initialize with necessary components."""
self.embedding = embedding
self.index = index
self.docstore = docstore
self.ids = ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict]] = None,
ids: Optional[np.ndarray] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if not isinstance(self.docstore, AddableMixin):
raise ValueError(
"If trying to add texts, the underlying docstore should support "
f"adding items, which {self.docstore} does not"
)
embeddings = self.embedding.embed_documents(list(texts))
documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
last_id = int(self.ids[-1]) + 1
if ids is None:
ids = np.array([str(last_id + id) for id, _ in enumerate(texts)])
self.index.add(np.array(ids), np.array(embeddings))
self.docstore.add(dict(zip(ids, documents)))
self.ids.extend(ids)
return ids.tolist()
def similarity_search_with_score(
self,
query: str,
k: int = 4,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of documents most similar to the query with distance.
"""
query_embedding = self.embedding.embed_query(query)
matches = self.index.search(np.array(query_embedding), k)
docs_with_scores: List[Tuple[Document, float]] = []
for id, score in zip(matches.keys, matches.distances):
doc = self.docstore.search(str(id))
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {id}, got {doc}")
docs_with_scores.append((doc, score))
return docs_with_scores
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
query_embedding = self.embedding.embed_query(query)
matches = self.index.search(np.array(query_embedding), k)
docs: List[Document] = []
for id in matches.keys:
doc = self.docstore.search(str(id))
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {id}, got {doc}")
docs.append(doc)
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict]] = None,
ids: Optional[np.ndarray] = None,
metric: str = "cos",
**kwargs: Any,
) -> USearch:
"""Construct USearch wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the USearch database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import USearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
usearch = USearch.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
documents: List[Document] = []
if ids is None:
ids = np.array([str(id) for id, _ in enumerate(texts)])
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
docstore = InMemoryDocstore(dict(zip(ids, documents)))
usearch = dependable_usearch_import()
index = usearch.Index(ndim=len(embeddings[0]), metric=metric)
index.add(np.array(ids), np.array(embeddings))
return cls(embedding, index, docstore, ids.tolist())
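# Example usage sketch: it assumes the `usearch` package is installed and an
# OpenAI API key is configured for the embeddings.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings

    store = USearch.from_texts(
        ["usearch is a compact vector index", "gigachain wraps LLM tooling"],
        OpenAIEmbeddings(),
    )
    for doc, score in store.similarity_search_with_score("vector index", k=1):
        print(score, doc.page_content)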
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~max_compute.py | from __future__ import annotations
from typing import TYPE_CHECKING, Iterator, List, Optional
from langchain.utils import get_from_env
if TYPE_CHECKING:
from odps import ODPS
class MaxComputeAPIWrapper:
"""Interface for querying Alibaba Cloud MaxCompute tables."""
def __init__(self, client: ODPS):
"""Initialize MaxCompute document loader.
Args:
client: odps.ODPS MaxCompute client object.
"""
self.client = client
@classmethod
def from_params(
cls,
endpoint: str,
project: str,
*,
access_id: Optional[str] = None,
secret_access_key: Optional[str] = None,
) -> MaxComputeAPIWrapper:
"""Convenience constructor that builds the odsp.ODPS MaxCompute client from
given parameters.
Args:
endpoint: MaxCompute endpoint.
project: A project is a basic organizational unit of MaxCompute, which is
similar to a database.
access_id: MaxCompute access ID. Should be passed in directly or set as the
environment variable `MAX_COMPUTE_ACCESS_ID`.
secret_access_key: MaxCompute secret access key. Should be passed in
directly or set as the environment variable
`MAX_COMPUTE_SECRET_ACCESS_KEY`.
"""
try:
from odps import ODPS
except ImportError as ex:
raise ImportError(
"Could not import pyodps python package. "
"Please install it with `pip install pyodps` or refer to "
"https://pyodps.readthedocs.io/."
) from ex
access_id = access_id or get_from_env("access_id", "MAX_COMPUTE_ACCESS_ID")
secret_access_key = secret_access_key or get_from_env(
"secret_access_key", "MAX_COMPUTE_SECRET_ACCESS_KEY"
)
client = ODPS(
access_id=access_id,
secret_access_key=secret_access_key,
project=project,
endpoint=endpoint,
)
if not client.exist_project(project):
raise ValueError(f'The project "{project}" does not exist.')
return cls(client)
def lazy_query(self, query: str) -> Iterator[dict]:
# Execute SQL query.
with self.client.execute_sql(query).open_reader() as reader:
if reader.count == 0:
raise ValueError("Table contains no data.")
for record in reader:
yield {k: v for k, v in record}
def query(self, query: str) -> List[dict]:
return list(self.lazy_query(query))
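# Usage sketch: building the wrapper above with `from_params` and running a
# query lazily. The endpoint, project name, and SQL text are placeholders;
# credentials are read from MAX_COMPUTE_ACCESS_ID and
# MAX_COMPUTE_SECRET_ACCESS_KEY as described in `from_params`.
if __name__ == "__main__":
    wrapper = MaxComputeAPIWrapper.from_params(
        endpoint="http://service.cn-hangzhou.maxcompute.aliyun.com/api",  # placeholder endpoint
        project="my_project",  # placeholder project name
    )
    for row in wrapper.lazy_query("SELECT 1;"):
        print(row)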
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~brave_search.py | import json
from typing import List
import requests
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema import Document
class BraveSearchWrapper(BaseModel):
"""Wrapper around the Brave search engine."""
api_key: str
"""The API key to use for the Brave search engine."""
search_kwargs: dict = Field(default_factory=dict)
"""Additional keyword arguments to pass to the search request."""
base_url: str = "https://api.search.brave.com/res/v1/web/search"
"""The base URL for the Brave search engine."""
def run(self, query: str) -> str:
"""Query the Brave search engine and return the results as a JSON string.
Args:
query: The query to search for.
Returns: The results as a JSON string.
"""
web_search_results = self._search_request(query=query)
final_results = [
{
"title": item.get("title"),
"link": item.get("url"),
"snippet": item.get("description"),
}
for item in web_search_results
]
return json.dumps(final_results)
def download_documents(self, query: str) -> List[Document]:
"""Query the Brave search engine and return the results as a list of Documents.
Args:
query: The query to search for.
Returns: The results as a list of Documents.
"""
results = self._search_request(query)
return [
Document(
page_content=item.get("description"),
metadata={"title": item.get("title"), "link": item.get("url")},
)
for item in results
]
def _search_request(self, query: str) -> List[dict]:
headers = {
"X-Subscription-Token": self.api_key,
"Accept": "application/json",
}
req = requests.PreparedRequest()
params = {**self.search_kwargs, **{"q": query}}
req.prepare_url(self.base_url, params)
if req.url is None:
raise ValueError("prepared url is None, this should not happen")
response = requests.get(req.url, headers=headers)
if not response.ok:
raise Exception(f"HTTP error {response.status_code}")
return response.json().get("web", {}).get("results", [])
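# Usage sketch: the two public entry points of the wrapper above, `run`
# (JSON string) and `download_documents` (list of Documents). Reading the key
# from a BRAVE_SEARCH_API_KEY environment variable and the `count` search
# parameter are assumptions made for this example.
if __name__ == "__main__":
    import os
    wrapper = BraveSearchWrapper(
        api_key=os.environ["BRAVE_SEARCH_API_KEY"],
        search_kwargs={"count": 3},
    )
    print(wrapper.run("langchain vector stores"))
    for doc in wrapper.download_documents("langchain vector stores"):
        print(doc.metadata["title"], "->", doc.metadata["link"])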
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~edenai~edenai_base_tool.py | from __future__ import annotations
import logging
from abc import abstractmethod
from typing import Any, Dict, List, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import root_validator
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class EdenaiTool(BaseTool):
"""
    Base tool for all EdenAI tools.
    To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
feature: str
subfeature: str
edenai_api_key: Optional[str] = None
is_async: bool = False
providers: List[str]
"""provider to use for the API call."""
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = get_from_dict_or_env(
values, "edenai_api_key", "EDENAI_API_KEY"
)
return values
@staticmethod
def get_user_agent() -> str:
from langchain import __version__
return f"langchain/{__version__}"
def _call_eden_ai(self, query_params: Dict[str, Any]) -> str:
"""
Make an API call to the EdenAI service with the specified query parameters.
Args:
query_params (dict): The parameters to include in the API call.
Returns:
            str: The parsed response from the EdenAI API call.
"""
        # Make the API call
headers = {
"Authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
url = f"https://api.edenai.run/v2/{self.feature}/{self.subfeature}"
payload = {
"providers": str(self.providers),
"response_as_dict": False,
"attributes_as_list": True,
"show_original_response": False,
}
payload.update(query_params)
response = requests.post(url, json=payload, headers=headers)
self._raise_on_error(response)
try:
return self._parse_response(response.json())
except Exception as e:
raise RuntimeError(f"An error occurred while running tool: {e}")
def _raise_on_error(self, response: requests.Response) -> None:
if response.status_code >= 500:
raise Exception(f"EdenAI Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"EdenAI received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
# case where edenai call succeeded but provider returned an error
# (eg: rate limit, server error, etc.)
if self.is_async is False:
# async call are different and only return a job_id,
# not the provider response directly
provider_response = response.json()[0]
if provider_response.get("status") == "fail":
err_msg = provider_response["error"]["message"]
raise ValueError(err_msg)
@abstractmethod
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
pass
@abstractmethod
def _parse_response(self, response: Any) -> str:
"""Take a dict response and condense it's data in a human readable string"""
pass
def _get_edenai(self, url: str) -> requests.Response:
headers = {
"accept": "application/json",
"authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
response = requests.get(url, headers=headers)
self._raise_on_error(response)
return response
def _parse_json_multilevel(
self, extracted_data: dict, formatted_list: list, level: int = 0
) -> None:
for section, subsections in extracted_data.items():
indentation = " " * level
if isinstance(subsections, str):
subsections = subsections.replace("\n", ",")
formatted_list.append(f"{indentation}{section} : {subsections}")
elif isinstance(subsections, list):
formatted_list.append(f"{indentation}{section} : ")
self._list_handling(subsections, formatted_list, level + 1)
elif isinstance(subsections, dict):
formatted_list.append(f"{indentation}{section} : ")
self._parse_json_multilevel(subsections, formatted_list, level + 1)
def _list_handling(
self, subsection_list: list, formatted_list: list, level: int
) -> None:
for list_item in subsection_list:
if isinstance(list_item, dict):
self._parse_json_multilevel(list_item, formatted_list, level)
elif isinstance(list_item, list):
self._list_handling(list_item, formatted_list, level + 1)
else:
formatted_list.append(f"{' ' * level}{list_item}")
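# Illustration sketch: EdenaiTool is abstract, so a concrete subclass has to
# provide `feature`/`subfeature` (which form the endpoint URL), `_run` (which
# builds the query parameters), and `_parse_response` (which condenses the
# JSON answer). The sentiment-analysis feature pair, the parameter names, and
# the tool name below are assumptions for illustration only; `providers` still
# has to be supplied when the tool is instantiated.
class _ExampleSentimentTool(EdenaiTool):
    name: str = "edenai_sentiment_example"
    description: str = "Illustrative sentiment tool built on the base class above."
    feature: str = "text"
    subfeature: str = "sentiment_analysis"
    def _parse_response(self, response: Any) -> str:
        # Flatten the (assumed) provider payload into a readable string.
        formatted: List[str] = []
        first = response[0] if isinstance(response, list) else response
        self._parse_json_multilevel(first, formatted)
        return "\n".join(formatted)
    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        return self._call_eden_ai({"text": query, "language": "en"})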
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~schema~runnable~router.py | from __future__ import annotations
from typing import (
Any,
AsyncIterator,
Callable,
Iterator,
List,
Mapping,
Optional,
Sequence,
Union,
cast,
)
from typing_extensions import TypedDict
from langchain.schema.runnable.base import (
Input,
Output,
Runnable,
RunnableSerializable,
coerce_to_runnable,
)
from langchain.schema.runnable.config import (
RunnableConfig,
get_config_list,
get_executor_for_config,
)
from langchain.schema.runnable.utils import (
ConfigurableFieldSpec,
gather_with_concurrency,
get_unique_config_specs,
)
class RouterInput(TypedDict):
"""A Router input.
Attributes:
key: The key to route on.
input: The input to pass to the selected runnable.
"""
key: str
input: Any
class RouterRunnable(RunnableSerializable[RouterInput, Output]):
"""
A runnable that routes to a set of runnables based on Input['key'].
Returns the output of the selected runnable.
"""
runnables: Mapping[str, Runnable[Any, Output]]
@property
def config_specs(self) -> Sequence[ConfigurableFieldSpec]:
return get_unique_config_specs(
spec for step in self.runnables.values() for spec in step.config_specs
)
def __init__(
self,
runnables: Mapping[str, Union[Runnable[Any, Output], Callable[[Any], Output]]],
) -> None:
super().__init__(
runnables={key: coerce_to_runnable(r) for key, r in runnables.items()}
)
class Config:
arbitrary_types_allowed = True
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
def invoke(
self, input: RouterInput, config: Optional[RunnableConfig] = None
) -> Output:
key = input["key"]
actual_input = input["input"]
if key not in self.runnables:
raise ValueError(f"No runnable associated with key '{key}'")
runnable = self.runnables[key]
return runnable.invoke(actual_input, config)
async def ainvoke(
self,
input: RouterInput,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Output:
key = input["key"]
actual_input = input["input"]
if key not in self.runnables:
raise ValueError(f"No runnable associated with key '{key}'")
runnable = self.runnables[key]
return await runnable.ainvoke(actual_input, config)
def batch(
self,
inputs: List[RouterInput],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
if not inputs:
return []
keys = [input["key"] for input in inputs]
actual_inputs = [input["input"] for input in inputs]
if any(key not in self.runnables for key in keys):
raise ValueError("One or more keys do not have a corresponding runnable")
def invoke(
runnable: Runnable, input: Input, config: RunnableConfig
) -> Union[Output, Exception]:
if return_exceptions:
try:
return runnable.invoke(input, config, **kwargs)
except Exception as e:
return e
else:
return runnable.invoke(input, config, **kwargs)
runnables = [self.runnables[key] for key in keys]
configs = get_config_list(config, len(inputs))
with get_executor_for_config(configs[0]) as executor:
return cast(
List[Output],
list(executor.map(invoke, runnables, actual_inputs, configs)),
)
async def abatch(
self,
inputs: List[RouterInput],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
if not inputs:
return []
keys = [input["key"] for input in inputs]
actual_inputs = [input["input"] for input in inputs]
if any(key not in self.runnables for key in keys):
raise ValueError("One or more keys do not have a corresponding runnable")
async def ainvoke(
runnable: Runnable, input: Input, config: RunnableConfig
) -> Union[Output, Exception]:
if return_exceptions:
try:
return await runnable.ainvoke(input, config, **kwargs)
except Exception as e:
return e
else:
return await runnable.ainvoke(input, config, **kwargs)
runnables = [self.runnables[key] for key in keys]
configs = get_config_list(config, len(inputs))
return await gather_with_concurrency(
configs[0].get("max_concurrency"),
*(
ainvoke(runnable, input, config)
for runnable, input, config in zip(runnables, actual_inputs, configs)
),
)
def stream(
self,
input: RouterInput,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
key = input["key"]
actual_input = input["input"]
if key not in self.runnables:
raise ValueError(f"No runnable associated with key '{key}'")
runnable = self.runnables[key]
yield from runnable.stream(actual_input, config)
async def astream(
self,
input: RouterInput,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
key = input["key"]
actual_input = input["input"]
if key not in self.runnables:
raise ValueError(f"No runnable associated with key '{key}'")
runnable = self.runnables[key]
async for output in runnable.astream(actual_input, config):
yield output
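# Usage sketch: the RouterInput contract ({"key": ..., "input": ...}) routed
# over two plain Python callables, so it runs without any model or API key.
# The key names and the callables are made up for illustration.
if __name__ == "__main__":
    router = RouterRunnable(
        {
            "upper": lambda text: text.upper(),
            "reverse": lambda text: text[::-1],
        }
    )
    print(router.invoke({"key": "upper", "input": "hello"}))    # HELLO
    print(router.invoke({"key": "reverse", "input": "hello"}))  # olleh
    print(router.batch([
        {"key": "upper", "input": "a"},
        {"key": "reverse", "input": "ab"},
    ]))  # ['A', 'ba']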
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~fireworks.py | from typing import Any, AsyncIterator, Callable, Dict, Iterator, List, Optional, Union
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM, create_base_retry_decorator
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils.env import get_from_dict_or_env
def _stream_response_to_generation_chunk(
stream_response: Any,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
return GenerationChunk(
text=stream_response.choices[0].text,
generation_info=dict(
finish_reason=stream_response.choices[0].finish_reason,
logprobs=stream_response.choices[0].logprobs,
),
)
class Fireworks(LLM):
"""Fireworks models."""
model: str = "accounts/fireworks/models/llama-v2-7b-chat"
model_kwargs: dict = Field(
default_factory=lambda: {
"temperature": 0.7,
"max_tokens": 512,
"top_p": 1,
}.copy()
)
fireworks_api_key: Optional[str] = None
max_retries: int = 20
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key in environment."""
try:
import fireworks.client
except ImportError as e:
raise ImportError(
"Could not import fireworks-ai python package. "
"Please install it with `pip install fireworks-ai`."
) from e
fireworks_api_key = get_from_dict_or_env(
values, "fireworks_api_key", "FIREWORKS_API_KEY"
)
fireworks.client.api_key = fireworks_api_key
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fireworks"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Run the LLM on the given prompt and input."""
params: dict = {
"model": self.model,
"prompt": prompt,
**self.model_kwargs,
}
response = completion_with_retry(
self, run_manager=run_manager, stop=stop, **params
)
return response.choices[0].text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Run the LLM on the given prompt and input."""
params = {
"model": self.model,
"prompt": prompt,
**self.model_kwargs,
}
response = await acompletion_with_retry(
self, run_manager=run_manager, stop=stop, **params
)
return response.choices[0].text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {
"model": self.model,
"prompt": prompt,
"stream": True,
**self.model_kwargs,
}
for stream_resp in completion_with_retry(
self, run_manager=run_manager, stop=stop, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {
"model": self.model,
"prompt": prompt,
"stream": True,
**self.model_kwargs,
}
async for stream_resp in await acompletion_with_retry_streaming(
self, run_manager=run_manager, stop=stop, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
def completion_with_retry(
llm: Fireworks,
*,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return fireworks.client.Completion.create(
**kwargs,
)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: Fireworks,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
return await fireworks.client.Completion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
async def acompletion_with_retry_streaming(
llm: Fireworks,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call for streaming."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
return fireworks.client.Completion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
def _create_retry_decorator(
llm: Fireworks,
*,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Define retry mechanism."""
import fireworks.client
errors = [
fireworks.client.error.RateLimitError,
fireworks.client.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
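# Usage sketch: a plain completion and a streamed completion through the
# wrapper above. FIREWORKS_API_KEY must be set in the environment (see
# validate_environment); the prompt and the overridden model kwargs are
# placeholders.
if __name__ == "__main__":
    llm = Fireworks(model_kwargs={"temperature": 0.2, "max_tokens": 64, "top_p": 1})
    print(llm("Name three planets of the solar system."))
    for token in llm.stream("Count from one to five."):
        print(token, end="", flush=True)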
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_transformers~beautiful_soup_transformer.py | from typing import Any, List, Sequence
from langchain.schema import BaseDocumentTransformer, Document
class BeautifulSoupTransformer(BaseDocumentTransformer):
"""Transform HTML content by extracting specific tags and removing unwanted ones.
Example:
.. code-block:: python
from langchain.document_transformers import BeautifulSoupTransformer
bs4_transformer = BeautifulSoupTransformer()
docs_transformed = bs4_transformer.transform_documents(docs)
"""
def __init__(self) -> None:
"""
Initialize the transformer.
This checks if the BeautifulSoup4 package is installed.
If not, it raises an ImportError.
"""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"BeautifulSoup4 is required for BeautifulSoupTransformer. "
"Please install it with `pip install beautifulsoup4`."
)
def transform_documents(
self,
documents: Sequence[Document],
unwanted_tags: List[str] = ["script", "style"],
tags_to_extract: List[str] = ["p", "li", "div", "a"],
remove_lines: bool = True,
**kwargs: Any,
) -> Sequence[Document]:
"""
Transform a list of Document objects by cleaning their HTML content.
Args:
documents: A sequence of Document objects containing HTML content.
unwanted_tags: A list of tags to be removed from the HTML.
tags_to_extract: A list of tags whose content will be extracted.
remove_lines: If set to True, unnecessary lines will be
removed from the HTML content.
Returns:
A sequence of Document objects with transformed content.
"""
for doc in documents:
cleaned_content = doc.page_content
cleaned_content = self.remove_unwanted_tags(cleaned_content, unwanted_tags)
cleaned_content = self.extract_tags(cleaned_content, tags_to_extract)
if remove_lines:
cleaned_content = self.remove_unnecessary_lines(cleaned_content)
doc.page_content = cleaned_content
return documents
@staticmethod
def remove_unwanted_tags(html_content: str, unwanted_tags: List[str]) -> str:
"""
Remove unwanted tags from a given HTML content.
Args:
html_content: The original HTML content string.
unwanted_tags: A list of tags to be removed from the HTML.
Returns:
A cleaned HTML string with unwanted tags removed.
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_content, "html.parser")
for tag in unwanted_tags:
for element in soup.find_all(tag):
element.decompose()
return str(soup)
@staticmethod
def extract_tags(html_content: str, tags: List[str]) -> str:
"""
Extract specific tags from a given HTML content.
Args:
html_content: The original HTML content string.
tags: A list of tags to be extracted from the HTML.
Returns:
A string combining the content of the extracted tags.
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_content, "html.parser")
text_parts = []
for tag in tags:
elements = soup.find_all(tag)
for element in elements:
if tag == "a":
href = element.get("href")
if href:
text_parts.append(f"{element.get_text()} ({href})")
else:
text_parts.append(element.get_text())
else:
text_parts.append(element.get_text())
return " ".join(text_parts)
@staticmethod
def remove_unnecessary_lines(content: str) -> str:
"""
Clean up the content by removing unnecessary lines.
Args:
content: A string, which may contain unnecessary lines or spaces.
Returns:
A cleaned string with unnecessary lines removed.
"""
lines = content.split("\n")
stripped_lines = [line.strip() for line in lines]
non_empty_lines = [line for line in stripped_lines if line]
seen = set()
deduped_lines = []
for line in non_empty_lines:
if line not in seen:
seen.add(line)
deduped_lines.append(line)
cleaned_content = " ".join(deduped_lines)
return cleaned_content
async def atransform_documents(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> Sequence[Document]:
raise NotImplementedError
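# Usage sketch: running the transformer above on an in-memory Document rather
# than a crawled page, keeping only <p> and <a> content. The HTML snippet is
# made up for illustration.
if __name__ == "__main__":
    html = (
        "<html><body><script>var x = 1;</script>"
        "<p>First paragraph.</p>"
        "<a href='https://example.com'>A link</a>"
        "</body></html>"
    )
    docs = [Document(page_content=html)]
    transformed = BeautifulSoupTransformer().transform_documents(
        docs, tags_to_extract=["p", "a"]
    )
    print(transformed[0].page_content)  # First paragraph. A link (https://example.com)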
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~ollama.py | import json
from typing import Any, Iterator, List, Optional
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.ollama import _OllamaCommon
from langchain.schema import ChatResult
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.output import ChatGeneration, ChatGenerationChunk
def _stream_response_to_chat_generation_chunk(
stream_response: str,
) -> ChatGenerationChunk:
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
generation_info = parsed_response if parsed_response.get("done") is True else None
return ChatGenerationChunk(
message=AIMessageChunk(content=parsed_response.get("response", "")),
generation_info=generation_info,
)
class ChatOllama(BaseChatModel, _OllamaCommon):
"""Ollama locally runs large language models.
To use, follow the instructions at https://ollama.ai/.
Example:
.. code-block:: python
from langchain.chat_models import ChatOllama
ollama = ChatOllama(model="llama2")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "ollama-chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
def _format_message_as_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
message_text = f"[INST] {message.content} [/INST]"
elif isinstance(message, AIMessage):
message_text = f"{message.content}"
elif isinstance(message, SystemMessage):
message_text = f"<<SYS>> {message.content} <</SYS>>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _format_messages_as_text(self, messages: List[BaseMessage]) -> str:
return "\n".join(
[self._format_message_as_text(message) for message in messages]
)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to Ollama's generate endpoint.
Args:
messages: The list of base messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
Chat generations from the model
Example:
.. code-block:: python
response = ollama([
HumanMessage(content="Tell me about the history of AI")
])
"""
prompt = self._format_messages_as_text(messages)
final_chunk = super()._stream_with_aggregation(
prompt, stop=stop, run_manager=run_manager, verbose=self.verbose, **kwargs
)
chat_generation = ChatGeneration(
message=AIMessage(content=final_chunk.text),
generation_info=final_chunk.generation_info,
)
return ChatResult(generations=[chat_generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
prompt = self._format_messages_as_text(messages)
for stream_resp in self._create_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_chat_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
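# Usage sketch: streaming tokens from a locally running Ollama server through
# the chat model above. This assumes the Ollama daemon is up and the "llama2"
# model has been pulled; the question is a placeholder.
if __name__ == "__main__":
    chat = ChatOllama(model="llama2")
    for chunk in chat.stream([HumanMessage(content="Why is the sky blue?")]):
        print(chunk.content, end="", flush=True)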
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~stochasticai.py | import logging
import time
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class StochasticAI(LLM):
"""StochasticAI large language models.
To use, you should have the environment variable ``STOCHASTICAI_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain.llms import StochasticAI
stochasticai = StochasticAI(api_url="")
"""
api_url: str = ""
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
stochasticai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
stochasticai_api_key = get_from_dict_or_env(
values, "stochasticai_api_key", "STOCHASTICAI_API_KEY"
)
values["stochasticai_api_key"] = stochasticai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.api_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "stochasticai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to StochasticAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
                response = stochasticai("Tell me a joke.")
"""
params = self.model_kwargs or {}
params = {**params, **kwargs}
response_post = requests.post(
url=self.api_url,
json={"prompt": prompt, "params": params},
headers={
"apiKey": f"{self.stochasticai_api_key}",
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_post.raise_for_status()
response_post_json = response_post.json()
completed = False
while not completed:
response_get = requests.get(
url=response_post_json["data"]["responseUrl"],
headers={
"apiKey": f"{self.stochasticai_api_key}",
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_get.raise_for_status()
response_get_json = response_get.json()["data"]
text = response_get_json.get("completion")
completed = text is not None
time.sleep(0.5)
text = text[0]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~multi_query.py | import asyncio
import json
import logging
from typing import List
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.prompts.prompt import PromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema import BaseRetriever, Document
logger = logging.getLogger(__name__)
class LineList(BaseModel):
"""List of lines."""
lines: List[str] = Field(description="Lines of text")
"""List of lines."""
class LineListOutputParser(PydanticOutputParser):
"""Output parser for a list of lines."""
def __init__(self) -> None:
super().__init__(pydantic_object=LineList)
def parse(self, text: str) -> LineList:
lines = text.strip().split("\n")
return LineList(lines=lines)
class JSONLineListOutputParser(PydanticOutputParser):
"""Output parser for a list of lines."""
def __init__(self) -> None:
super().__init__(pydantic_object=LineList)
def parse(self, text: str) -> LineList:
lines = json.loads(text)
return LineList(lines=lines)
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate(
input_variables=["question"],
template="""Ты - помощник на основе AI. Твоя задача -
сгенерировать 3 разные версии заданного пользователем
вопроса для извлечения соответствующих документов из векторной базы данных.
Генерируя разные варианты вопроса пользователя,
твоя цель - помочь пользователю преодолеть некоторые ограничения
поиска по сходству на основе расстояния. Предоставь эти альтернативные
вопросы, разделенные новыми строками. Исходный вопрос: {question}""",
)
class MultiQueryRetriever(BaseRetriever):
"""Given a query, use an LLM to write a set of queries.
    Retrieve docs for each query. Return the unique union of all retrieved docs.
"""
retriever: BaseRetriever
llm_chain: LLMChain
verbose: bool = True
parser_key: str = "lines"
@classmethod
def from_llm(
cls,
retriever: BaseRetriever,
llm: BaseLLM,
prompt: PromptTemplate = DEFAULT_QUERY_PROMPT,
parser_key: str = "lines",
) -> "MultiQueryRetriever":
"""Initialize from llm using default template.
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
Returns:
MultiQueryRetriever
"""
output_parser = LineListOutputParser()
llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser)
return cls(
retriever=retriever,
llm_chain=llm_chain,
parser_key=parser_key,
)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
"""Get relevant documents given a user query.
Args:
question: user query
Returns:
Unique union of relevant documents from all generated queries
"""
queries = await self.agenerate_queries(query, run_manager)
documents = await self.aretrieve_documents(queries, run_manager)
return self.unique_union(documents)
async def agenerate_queries(
self, question: str, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[str]:
"""Generate queries based upon user input.
Args:
question: user query
Returns:
List of LLM generated queries that are similar to the user input
"""
response = await self.llm_chain.acall(
inputs={"question": question}, callbacks=run_manager.get_child()
)
lines = getattr(response["text"], self.parser_key, [])
if self.verbose:
logger.info(f"Generated queries: {lines}")
return lines
async def aretrieve_documents(
self, queries: List[str], run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
"""Run all LLM generated queries.
Args:
queries: query list
Returns:
List of retrieved Documents
"""
document_lists = await asyncio.gather(
*(
self.retriever.aget_relevant_documents(
query, callbacks=run_manager.get_child()
)
for query in queries
)
)
return [doc for docs in document_lists for doc in docs]
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Get relevated documents given a user query.
Args:
question: user query
Returns:
Unique union of relevant documents from all generated queries
"""
queries = self.generate_queries(query, run_manager)
documents = self.retrieve_documents(queries, run_manager)
unique_documents = self.unique_union(documents)
return unique_documents
def generate_queries(
self, question: str, run_manager: CallbackManagerForRetrieverRun
) -> List[str]:
"""Generate queries based upon user input.
Args:
question: user query
Returns:
List of LLM generated queries that are similar to the user input
"""
response = self.llm_chain(
{"question": question}, callbacks=run_manager.get_child()
)
lines = getattr(response["text"], self.parser_key, [])
if self.verbose:
logger.info(f"Generated queries: {lines}")
return lines
def retrieve_documents(
self, queries: List[str], run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Run all LLM generated queries.
Args:
queries: query list
Returns:
List of retrieved Documents
"""
documents = []
for query in queries:
docs = self.retriever.get_relevant_documents(
query, callbacks=run_manager.get_child()
)
documents.extend(docs)
return documents
def unique_union(self, documents: List[Document]) -> List[Document]:
"""Get unique Documents.
Args:
documents: List of retrieved Documents
Returns:
List of unique retrieved Documents
"""
# Create a dictionary with page_content as keys to remove duplicates
# TODO: Add Document ID property (e.g., UUID)
unique_documents_dict = {
(doc.page_content, tuple(sorted(doc.metadata.items()))): doc
for doc in documents
}
unique_documents = list(unique_documents_dict.values())
return unique_documents
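# Usage sketch: wiring the retriever above to a FAISS store and an OpenAI LLM
# via `from_llm`, so one user question is expanded into several queries before
# retrieval. The sample texts, the FAISS/OpenAI choices, and the question are
# illustrative assumptions (OPENAI_API_KEY required); any BaseRetriever and
# BaseLLM pair would work the same way.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI
    from langchain.vectorstores import FAISS
    vectorstore = FAISS.from_texts(
        ["Harrison worked at Kensho.", "Ankush worked at Facebook."],
        OpenAIEmbeddings(),
    )
    retriever = MultiQueryRetriever.from_llm(
        retriever=vectorstore.as_retriever(), llm=OpenAI(temperature=0)
    )
    for doc in retriever.get_relevant_documents("Where did Harrison work?"):
        print(doc.page_content)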
| [
"question",
"Ты - помощник на основе AI. Твоя задача - \n сгенерировать 3 разные версии заданного пользователем \n вопроса для извлечения соответствующих документов из векторной базы данных. \n Генерируя разные варианты вопроса пользователя, \n твоя цель - помочь пользователю преодолеть некоторые ограничения \n поиска по сходству на основе расстояния. Предоставь эти альтернативные \n вопросы, разделенные новыми строками. Исходный вопрос: {question}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~concurrent.py | from __future__ import annotations
import concurrent.futures
from pathlib import Path
from typing import Iterator, Literal, Optional, Sequence, Union
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import BlobLoader, FileSystemBlobLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers.registry import get_parser
from langchain.schema import Document
_PathLike = Union[str, Path]
DEFAULT = Literal["default"]
class ConcurrentLoader(GenericLoader):
"""Load and pars Documents concurrently."""
def __init__(
self, blob_loader: BlobLoader, blob_parser: BaseBlobParser, num_workers: int = 4
) -> None:
super().__init__(blob_loader, blob_parser)
self.num_workers = num_workers
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily with concurrent parsing."""
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.num_workers
) as executor:
futures = {
executor.submit(self.blob_parser.lazy_parse, blob)
for blob in self.blob_loader.yield_blobs()
}
for future in concurrent.futures.as_completed(futures):
yield from future.result()
@classmethod
def from_filesystem(
cls,
path: _PathLike,
*,
glob: str = "**/[!.]*",
exclude: Sequence[str] = (),
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
parser: Union[DEFAULT, BaseBlobParser] = "default",
num_workers: int = 4,
) -> ConcurrentLoader:
"""
Create a concurrent generic document loader using a
filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
exclude: A list of patterns to exclude from the loader.
show_progress: Whether to show a progress bar or not (requires tqdm).
Proxies to the file system loader.
parser: A blob parser which knows how to parse blobs into documents
num_workers: Max number of concurrent workers to use.
"""
blob_loader = FileSystemBlobLoader(
path,
glob=glob,
exclude=exclude,
suffixes=suffixes,
show_progress=show_progress,
)
if isinstance(parser, str):
blob_parser = get_parser(parser)
else:
blob_parser = parser
return cls(blob_loader, blob_parser, num_workers=num_workers)
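# Usage sketch: building the loader above over a local directory and loading
# plain-text files with four worker threads. The "./docs" path and the
# "**/*.txt" glob are placeholders.
if __name__ == "__main__":
    loader = ConcurrentLoader.from_filesystem(
        "./docs", glob="**/*.txt", num_workers=4
    )
    for document in loader.lazy_load():
        print(document.metadata.get("source"), len(document.page_content))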
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~embaas.py | import base64
import warnings
from typing import Any, Dict, Iterator, List, Optional
import requests
from typing_extensions import NotRequired, TypedDict
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser, BaseLoader
from langchain.document_loaders.blob_loaders import Blob
from langchain.pydantic_v1 import BaseModel, root_validator, validator
from langchain.text_splitter import TextSplitter
from langchain.utils import get_from_dict_or_env
EMBAAS_DOC_API_URL = "https://api.embaas.io/v1/document/extract-text/bytes/"
class EmbaasDocumentExtractionParameters(TypedDict):
"""Parameters for the embaas document extraction API."""
mime_type: NotRequired[str]
"""The mime type of the document."""
file_extension: NotRequired[str]
"""The file extension of the document."""
file_name: NotRequired[str]
"""The file name of the document."""
should_chunk: NotRequired[bool]
"""Whether to chunk the document into pages."""
chunk_size: NotRequired[int]
"""The maximum size of the text chunks."""
chunk_overlap: NotRequired[int]
"""The maximum overlap allowed between chunks."""
chunk_splitter: NotRequired[str]
"""The text splitter class name for creating chunks."""
separators: NotRequired[List[str]]
"""The separators for chunks."""
should_embed: NotRequired[bool]
"""Whether to create embeddings for the document in the response."""
model: NotRequired[str]
"""The model to pass to the Embaas document extraction API."""
instruction: NotRequired[str]
"""The instruction to pass to the Embaas document extraction API."""
class EmbaasDocumentExtractionPayload(EmbaasDocumentExtractionParameters):
"""Payload for the Embaas document extraction API."""
bytes: str
"""The base64 encoded bytes of the document to extract text from."""
class BaseEmbaasLoader(BaseModel):
"""Base loader for `Embaas` document extraction API."""
embaas_api_key: Optional[str] = None
"""The API key for the Embaas document extraction API."""
api_url: str = EMBAAS_DOC_API_URL
"""The URL of the Embaas document extraction API."""
params: EmbaasDocumentExtractionParameters = EmbaasDocumentExtractionParameters()
"""Additional parameters to pass to the Embaas document extraction API."""
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
embaas_api_key = get_from_dict_or_env(
values, "embaas_api_key", "EMBAAS_API_KEY"
)
values["embaas_api_key"] = embaas_api_key
return values
class EmbaasBlobLoader(BaseEmbaasLoader, BaseBlobParser):
"""Load `Embaas` blob.
To use, you should have the
environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
# Default parsing
from langchain.document_loaders.embaas import EmbaasBlobLoader
loader = EmbaasBlobLoader()
blob = Blob.from_path(path="example.mp3")
documents = loader.parse(blob=blob)
# Custom api parameters (create embeddings automatically)
from langchain.document_loaders.embaas import EmbaasBlobLoader
loader = EmbaasBlobLoader(
params={
"should_embed": True,
"model": "e5-large-v2",
"chunk_size": 256,
"chunk_splitter": "CharacterTextSplitter"
}
)
blob = Blob.from_path(path="example.pdf")
documents = loader.parse(blob=blob)
"""
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Parses the blob lazily.
Args:
blob: The blob to parse.
"""
yield from self._get_documents(blob=blob)
@staticmethod
def _api_response_to_documents(chunks: List[Dict[str, Any]]) -> List[Document]:
"""Convert the API response to a list of documents."""
docs = []
for chunk in chunks:
metadata = chunk["metadata"]
if chunk.get("embedding", None) is not None:
metadata["embedding"] = chunk["embedding"]
doc = Document(page_content=chunk["text"], metadata=metadata)
docs.append(doc)
return docs
def _generate_payload(self, blob: Blob) -> EmbaasDocumentExtractionPayload:
"""Generates payload for the API request."""
base64_byte_str = base64.b64encode(blob.as_bytes()).decode()
payload: EmbaasDocumentExtractionPayload = EmbaasDocumentExtractionPayload(
bytes=base64_byte_str,
# Workaround for mypy issue: https://github.com/python/mypy/issues/9408
# type: ignore
**self.params,
)
if blob.mimetype is not None and payload.get("mime_type", None) is None:
payload["mime_type"] = blob.mimetype
return payload
def _handle_request(
self, payload: EmbaasDocumentExtractionPayload
) -> List[Document]:
"""Sends a request to the embaas API and handles the response."""
headers = {
"Authorization": f"Bearer {self.embaas_api_key}",
"Content-Type": "application/json",
}
response = requests.post(self.api_url, headers=headers, json=payload)
response.raise_for_status()
parsed_response = response.json()
return EmbaasBlobLoader._api_response_to_documents(
chunks=parsed_response["data"]["chunks"]
)
def _get_documents(self, blob: Blob) -> Iterator[Document]:
"""Get the documents from the blob."""
payload = self._generate_payload(blob=blob)
try:
documents = self._handle_request(payload=payload)
except requests.exceptions.RequestException as e:
if e.response is None or not e.response.text:
raise ValueError(
f"Error raised by Embaas document text extraction API: {e}"
)
parsed_response = e.response.json()
if "message" in parsed_response:
raise ValueError(
f"Validation Error raised by Embaas document text extraction API:"
f" {parsed_response['message']}"
)
raise
yield from documents
class EmbaasLoader(BaseEmbaasLoader, BaseLoader):
"""Load from `Embaas`.
To use, you should have the
environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
# Default parsing
from langchain.document_loaders.embaas import EmbaasLoader
loader = EmbaasLoader(file_path="example.mp3")
documents = loader.load()
# Custom api parameters (create embeddings automatically)
from langchain.document_loaders.embaas import EmbaasBlobLoader
loader = EmbaasBlobLoader(
file_path="example.pdf",
params={
"should_embed": True,
"model": "e5-large-v2",
"chunk_size": 256,
"chunk_splitter": "CharacterTextSplitter"
}
)
documents = loader.load()
"""
file_path: str
"""The path to the file to load."""
blob_loader: Optional[EmbaasBlobLoader]
"""The blob loader to use. If not provided, a default one will be created."""
@validator("blob_loader", always=True)
def validate_blob_loader(
cls, v: EmbaasBlobLoader, values: Dict
) -> EmbaasBlobLoader:
return v or EmbaasBlobLoader(
embaas_api_key=values["embaas_api_key"],
api_url=values["api_url"],
params=values["params"],
)
def lazy_load(self) -> Iterator[Document]:
"""Load the documents from the file path lazily."""
blob = Blob.from_path(path=self.file_path)
assert self.blob_loader is not None
# Should never be None, but mypy doesn't know that.
yield from self.blob_loader.lazy_parse(blob=blob)
def load(self) -> List[Document]:
return list(self.lazy_load())
def load_and_split(
self, text_splitter: Optional[TextSplitter] = None
) -> List[Document]:
if self.params.get("should_embed", False):
warnings.warn(
"Embeddings are not supported with load_and_split."
" Use the API splitter to properly generate embeddings."
" For more information see embaas.io docs."
)
return super().load_and_split(text_splitter=text_splitter)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~embeddings~test_jina.py | """Test jina embeddings."""
from langchain.embeddings.jina import JinaEmbeddings
def test_jina_embedding_documents() -> None:
"""Test jina embeddings for documents."""
documents = ["foo bar", "bar foo"]
embedding = JinaEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 512
def test_jina_embedding_query() -> None:
"""Test jina embeddings for query."""
document = "foo bar"
embedding = JinaEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 512
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~serpapi.py | """Chain that calls SerpAPI.
Heavily borrowed from https://github.com/ofirpress/self-ask
"""
import os
import sys
from typing import Any, Dict, Optional, Tuple
import aiohttp
from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
class HiddenPrints:
"""Context manager to hide prints."""
def __enter__(self) -> None:
"""Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, *_: Any) -> None:
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
class SerpAPIWrapper(BaseModel):
"""Wrapper around SerpAPI.
To use, you should have the ``google-search-results`` python package installed,
and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass
`serpapi_api_key` as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.utilities import SerpAPIWrapper
serpapi = SerpAPIWrapper()
"""
search_engine: Any #: :meta private:
params: dict = Field(
default={
"engine": "google",
"google_domain": "google.com",
"gl": "us",
"hl": "en",
}
)
serpapi_api_key: Optional[str] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
serpapi_api_key = get_from_dict_or_env(
values, "serpapi_api_key", "SERPAPI_API_KEY"
)
values["serpapi_api_key"] = serpapi_api_key
try:
from serpapi import GoogleSearch
values["search_engine"] = GoogleSearch
except ImportError:
raise ValueError(
"Could not import serpapi python package. "
"Please install it with `pip install google-search-results`."
)
return values
async def arun(self, query: str, **kwargs: Any) -> str:
"""Run query through SerpAPI and parse result async."""
return self._process_response(await self.aresults(query))
def run(self, query: str, **kwargs: Any) -> str:
"""Run query through SerpAPI and parse result."""
return self._process_response(self.results(query))
def results(self, query: str) -> dict:
"""Run query through SerpAPI and return the raw result."""
params = self.get_params(query)
with HiddenPrints():
search = self.search_engine(params)
res = search.get_dict()
return res
async def aresults(self, query: str) -> dict:
"""Use aiohttp to run query through SerpAPI and return the results async."""
def construct_url_and_params() -> Tuple[str, Dict[str, str]]:
params = self.get_params(query)
params["source"] = "python"
if self.serpapi_api_key:
params["serp_api_key"] = self.serpapi_api_key
params["output"] = "json"
url = "https://serpapi.com/search"
return url, params
url, params = construct_url_and_params()
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as response:
res = await response.json()
else:
async with self.aiosession.get(url, params=params) as response:
res = await response.json()
return res
def get_params(self, query: str) -> Dict[str, str]:
"""Get parameters for SerpAPI."""
_params = {
"api_key": self.serpapi_api_key,
"q": query,
}
params = {**self.params, **_params}
return params
@staticmethod
def _process_response(res: dict) -> str:
"""Process response from SerpAPI."""
if "error" in res.keys():
raise ValueError(f"Got error from SerpAPI: {res['error']}")
if "answer_box_list" in res.keys():
res["answer_box"] = res["answer_box_list"]
if "answer_box" in res.keys():
answer_box = res["answer_box"]
if isinstance(answer_box, list):
answer_box = answer_box[0]
if "result" in answer_box.keys():
return answer_box["result"]
elif "answer" in answer_box.keys():
return answer_box["answer"]
elif "snippet" in answer_box.keys():
return answer_box["snippet"]
elif "snippet_highlighted_words" in answer_box.keys():
return answer_box["snippet_highlighted_words"]
else:
answer = {}
for key, value in answer_box.items():
if not isinstance(value, (list, dict)) and not (
isinstance(value, str) and value.startswith("http")
):
answer[key] = value
return str(answer)
elif "events_results" in res.keys():
return res["events_results"][:10]
elif "sports_results" in res.keys():
return res["sports_results"]
elif "top_stories" in res.keys():
return res["top_stories"]
elif "news_results" in res.keys():
return res["news_results"]
elif "jobs_results" in res.keys() and "jobs" in res["jobs_results"].keys():
return res["jobs_results"]["jobs"]
elif (
"shopping_results" in res.keys()
and "title" in res["shopping_results"][0].keys()
):
toret = res["shopping_results"][:3]
elif (
"knowledge_graph" in res.keys()
and "description" in res["knowledge_graph"].keys()
):
toret = res["knowledge_graph"]["description"]
elif "snippet" in res["organic_results"][0].keys():
toret = res["organic_results"][0]["snippet"]
elif "link" in res["organic_results"][0].keys():
toret = res["organic_results"][0]["link"]
elif (
"images_results" in res.keys()
and "thumbnail" in res["images_results"][0].keys()
):
return str([item["thumbnail"] for item in res["images_results"][:10]])
snippets = []
if "knowledge_graph" in res.keys():
knowledge_graph = res["knowledge_graph"]
title = knowledge_graph["title"] if "title" in knowledge_graph else ""
if "description" in knowledge_graph.keys():
snippets.append(knowledge_graph["description"])
for key, value in knowledge_graph.items():
if (
isinstance(key, str)
and isinstance(value, str)
and key not in ["title", "description"]
and not key.endswith("_stick")
and not key.endswith("_link")
and not value.startswith("http")
):
snippets.append(f"{title} {key}: {value}.")
if "organic_results" in res.keys():
first_organic_result = res["organic_results"][0]
if "snippet" in first_organic_result.keys():
snippets.append(first_organic_result["snippet"])
elif "snippet_highlighted_words" in first_organic_result.keys():
snippets.append(first_organic_result["snippet_highlighted_words"])
elif "rich_snippet" in first_organic_result.keys():
snippets.append(first_organic_result["rich_snippet"])
elif "rich_snippet_table" in first_organic_result.keys():
snippets.append(first_organic_result["rich_snippet_table"])
elif "link" in first_organic_result.keys():
snippets.append(first_organic_result["link"])
if "buying_guide" in res.keys():
snippets.append(res["buying_guide"])
if "local_results" in res.keys() and "places" in res["local_results"].keys():
snippets.append(res["local_results"]["places"])
if len(snippets) > 0:
return str(snippets)
else:
toret = "No good search result found"
return toret
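# Usage sketch: one query through the wrapper above with the default engine
# parameters and one with overridden localisation. SERPAPI_API_KEY must be set
# in the environment; the queries and the gl/hl/google_domain overrides are
# placeholders.
if __name__ == "__main__":
    search = SerpAPIWrapper()
    print(search.run("Obama's first name"))
    localized = SerpAPIWrapper(
        params={"engine": "google", "google_domain": "google.de", "gl": "de", "hl": "de"}
    )
    print(localized.run("Hauptstadt von Deutschland"))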
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~vearch.py | from __future__ import annotations
import os
import time
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
if TYPE_CHECKING:
import vearch
DEFAULT_TOPN = 4
class Vearch(VectorStore):
_DEFAULT_TABLE_NAME = "langchain_vearch"
_DEFAULT_CLUSTER_DB_NAME = "cluster_client_db"
_DEFAULT_VERSION = 1
def __init__(
self,
embedding_function: Embeddings,
path_or_url: Optional[str] = None,
table_name: str = _DEFAULT_TABLE_NAME,
db_name: str = _DEFAULT_CLUSTER_DB_NAME,
flag: int = _DEFAULT_VERSION,
**kwargs: Any,
) -> None:
"""Initialize vearch vector store
flag 1 for cluster,0 for standalone
"""
try:
if flag:
import vearch_cluster
else:
import vearch
except ImportError:
raise ValueError(
"Could not import suitable python package. "
"Please install it with `pip install vearch or vearch_cluster`."
)
if flag:
if path_or_url is None:
raise ValueError("Please input url of cluster")
if not db_name:
db_name = self._DEFAULT_CLUSTER_DB_NAME
db_name += "_"
db_name += str(uuid.uuid4()).split("-")[-1]
self.using_db_name = db_name
self.url = path_or_url
self.vearch = vearch_cluster.VearchCluster(path_or_url)
else:
if path_or_url is None:
metadata_path = os.getcwd().replace("\\", "/")
else:
metadata_path = path_or_url
if not os.path.isdir(metadata_path):
os.makedirs(metadata_path)
log_path = os.path.join(metadata_path, "log")
if not os.path.isdir(log_path):
os.makedirs(log_path)
self.vearch = vearch.Engine(metadata_path, log_path)
self.using_metapath = metadata_path
if not table_name:
table_name = self._DEFAULT_TABLE_NAME
table_name += "_"
table_name += str(uuid.uuid4()).split("-")[-1]
self.using_table_name = table_name
self.embedding_func = embedding_function
self.flag = flag
@property
def embeddings(self) -> Optional[Embeddings]:
return self.embedding_func
@classmethod
def from_documents(
cls: Type[Vearch],
documents: List[Document],
embedding: Embeddings,
path_or_url: Optional[str] = None,
table_name: str = _DEFAULT_TABLE_NAME,
db_name: str = _DEFAULT_CLUSTER_DB_NAME,
flag: int = _DEFAULT_VERSION,
**kwargs: Any,
) -> Vearch:
"""Return Vearch VectorStore"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
path_or_url=path_or_url,
table_name=table_name,
db_name=db_name,
flag=flag,
**kwargs,
)
@classmethod
def from_texts(
cls: Type[Vearch],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
path_or_url: Optional[str] = None,
table_name: str = _DEFAULT_TABLE_NAME,
db_name: str = _DEFAULT_CLUSTER_DB_NAME,
flag: int = _DEFAULT_VERSION,
**kwargs: Any,
) -> Vearch:
"""Return Vearch VectorStore"""
vearch_db = cls(
embedding_function=embedding,
embedding=embedding,
path_or_url=path_or_url,
db_name=db_name,
table_name=table_name,
flag=flag,
)
vearch_db.add_texts(texts=texts, metadatas=metadatas)
return vearch_db
def _create_table(
self,
dim: int = 1024,
field_list: List[dict] = [
{"field": "text", "type": "str"},
{"field": "metadata", "type": "str"},
],
) -> int:
"""
Create VectorStore Table
Args:
dim:dimension of vector
fields_list: the field you want to store
Return:
            code: 0 for success, 1 for failed.
"""
type_dict = {"int": vearch.dataType.INT, "str": vearch.dataType.STRING}
engine_info = {
"index_size": 10000,
"retrieval_type": "IVFPQ",
"retrieval_param": {"ncentroids": 2048, "nsubvector": 32},
}
fields = [
vearch.GammaFieldInfo(fi["field"], type_dict[fi["type"]])
for fi in field_list
]
vector_field = vearch.GammaVectorInfo(
name="text_embedding",
type=vearch.dataType.VECTOR,
is_index=True,
dimension=dim,
model_id="",
store_type="MemoryOnly",
store_param={"cache_size": 10000},
has_source=False,
)
response_code = self.vearch.create_table(
engine_info,
name=self.using_table_name,
fields=fields,
vector_field=vector_field,
)
return response_code
def _create_space(
self,
dim: int = 1024,
) -> int:
"""
Create VectorStore space
Args:
dim:dimension of vector
Return:
            code: 0 for failed, 1 for success.
"""
space_config = {
"name": self.using_table_name,
"partition_num": 1,
"replica_num": 1,
"engine": {
"name": "gamma",
"index_size": 1,
"retrieval_type": "FLAT",
"retrieval_param": {
"metric_type": "L2",
},
},
"properties": {
"text": {
"type": "string",
},
"metadata": {
"type": "string",
},
"text_embedding": {
"type": "vector",
"index": True,
"dimension": dim,
"store_type": "MemoryOnly",
},
},
}
response_code = self.vearch.create_space(self.using_db_name, space_config)
return response_code
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = None
if self.embedding_func is not None:
embeddings = self.embedding_func.embed_documents(list(texts))
if embeddings is None:
raise ValueError("embeddings is None")
if self.flag:
dbs_list = self.vearch.list_dbs()
if self.using_db_name not in dbs_list:
create_db_code = self.vearch.create_db(self.using_db_name)
if not create_db_code:
raise ValueError("create db failed!!!")
space_list = self.vearch.list_spaces(self.using_db_name)
if self.using_table_name not in space_list:
create_space_code = self._create_space(len(embeddings[0]))
if not create_space_code:
raise ValueError("create space failed!!!")
docid = []
if embeddings is not None and metadatas is not None:
for text, metadata, embed in zip(texts, metadatas, embeddings):
profiles: dict[str, Any] = {}
profiles["text"] = text
profiles["metadata"] = metadata["source"]
embed_np = np.array(embed)
profiles["text_embedding"] = {
"feature": (embed_np / np.linalg.norm(embed_np)).tolist()
}
insert_res = self.vearch.insert_one(
self.using_db_name, self.using_table_name, profiles
)
if insert_res["status"] == 200:
docid.append(insert_res["_id"])
continue
else:
retry_insert = self.vearch.insert_one(
self.using_db_name, self.using_table_name, profiles
)
docid.append(retry_insert["_id"])
continue
else:
table_path = os.path.join(
self.using_metapath, self.using_table_name + ".schema"
)
if not os.path.exists(table_path):
dim = len(embeddings[0])
response_code = self._create_table(dim)
if response_code:
raise ValueError("create table failed!!!")
if embeddings is not None and metadatas is not None:
doc_items = []
for text, metadata, embed in zip(texts, metadatas, embeddings):
profiles_v: dict[str, Any] = {}
profiles_v["text"] = text
profiles_v["metadata"] = metadata["source"]
embed_np = np.array(embed)
profiles_v["text_embedding"] = embed_np / np.linalg.norm(embed_np)
doc_items.append(profiles_v)
docid = self.vearch.add(doc_items)
t_time = 0
while len(docid) != len(embeddings):
time.sleep(0.5)
if t_time > 6:
break
t_time += 1
self.vearch.dump()
return docid
def _load(self) -> None:
"""
load vearch engine for standalone vearch
"""
self.vearch.load()
@classmethod
def load_local(
cls,
embedding: Embeddings,
path_or_url: Optional[str] = None,
table_name: str = _DEFAULT_TABLE_NAME,
db_name: str = _DEFAULT_CLUSTER_DB_NAME,
flag: int = _DEFAULT_VERSION,
**kwargs: Any,
) -> Vearch:
"""Load the local specified table of standalone vearch.
Returns:
Success or failure of loading the local specified table
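Example (illustrative sketch; the embedding model, metadata path and table
name below are placeholders, and the import path is assumed):
    .. code-block:: python

        from langchain.embeddings import HuggingFaceEmbeddings
        from langchain.vectorstores import Vearch

        embeddings = HuggingFaceEmbeddings()
        # flag=0 selects the standalone code path of this class
        store = Vearch.load_local(
            embedding=embeddings,
            path_or_url="/data/vearch_meta",
            table_name="my_docs",
            flag=0,
        )
        docs = store.similarity_search("query text", k=4)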
"""
if not path_or_url:
raise ValueError("No metadata path!!!")
if not table_name:
raise ValueError("No table name!!!")
table_path = os.path.join(path_or_url, table_name + ".schema")
if not os.path.exists(table_path):
raise ValueError("vearch vectorbase table not exist!!!")
vearch_db = cls(
embedding_function=embedding,
path_or_url=path_or_url,
table_name=table_name,
db_name=db_name,
flag=flag,
)
vearch_db._load()
return vearch_db
def similarity_search(
self,
query: str,
k: int = DEFAULT_TOPN,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to query.
"""
if self.embedding_func is None:
raise ValueError("embedding_func is None!!!")
embeddings = self.embedding_func.embed_query(query)
docs = self.similarity_search_by_vector(embeddings, k)
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_TOPN,
**kwargs: Any,
) -> List[Document]:
"""The most k similar documents and scores of the specified query.
Args:
embeddings: embedding vector of the query.
k: The k most similar documents to the text query.
min_score: the score of similar documents to the text query
Returns:
The k most similar documents to the specified text query.
0 is dissimilar, 1 is the most similar.
"""
embed = np.array(embedding)
if self.flag:
query_data = {
"query": {
"sum": [
{
"field": "text_embedding",
"feature": (embed / np.linalg.norm(embed)).tolist(),
}
],
},
"size": k,
"fields": ["text", "metadata"],
}
query_result = self.vearch.search(
self.using_db_name, self.using_table_name, query_data
)
res = query_result["hits"]["hits"]
else:
query_data = {
"vector": [
{
"field": "text_embedding",
"feature": embed / np.linalg.norm(embed),
}
],
"fields": [],
"is_brute_search": 1,
"retrieval_param": {"metric_type": "InnerProduct", "nprobe": 20},
"topn": k,
}
query_result = self.vearch.search(query_data)
res = query_result[0]["result_items"]
docs = []
for item in res:
content = ""
meta_data = {}
if self.flag:
item = item["_source"]
for item_key in item:
if item_key == "text":
content = item[item_key]
continue
if item_key == "metadata":
meta_data["source"] = item[item_key]
continue
docs.append(Document(page_content=content, metadata=meta_data))
return docs
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_TOPN,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""The most k similar documents and scores of the specified query.
Args:
embeddings: embedding vector of the query.
k: The k most similar documents to the text query.
min_score: the score of similar documents to the text query
Returns:
The k most similar documents to the specified text query.
0 is dissimilar, 1 is the most similar.
"""
if self.embedding_func is None:
raise ValueError("embedding_func is None!!!")
embeddings = self.embedding_func.embed_query(query)
embed = np.array(embeddings)
if self.flag:
query_data = {
"query": {
"sum": [
{
"field": "text_embedding",
"feature": (embed / np.linalg.norm(embed)).tolist(),
}
],
},
"size": k,
"fields": ["text_embedding", "text", "metadata"],
}
query_result = self.vearch.search(
self.using_db_name, self.using_table_name, query_data
)
res = query_result["hits"]["hits"]
else:
query_data = {
"vector": [
{
"field": "text_embedding",
"feature": embed / np.linalg.norm(embed),
}
],
"fields": [],
"is_brute_search": 1,
"retrieval_param": {"metric_type": "InnerProduct", "nprobe": 20},
"topn": k,
}
query_result = self.vearch.search(query_data)
res = query_result[0]["result_items"]
results: List[Tuple[Document, float]] = []
for item in res:
content = ""
meta_data = {}
if self.flag:
score = item["_score"]
item = item["_source"]
for item_key in item:
if item_key == "text":
content = item[item_key]
continue
if item_key == "metadata":
meta_data["source"] = item[item_key]
continue
if self.flag != 1 and item_key == "score":
score = item[item_key]
continue
tmp_res = (Document(page_content=content, metadata=meta_data), score)
results.append(tmp_res)
return results
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
return self.similarity_search_with_score(query, k, **kwargs)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete the documents which have the specified ids.
Args:
ids: The ids of the embedding vectors.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful.
False otherwise, None if not implemented.
"""
ret: Optional[bool] = None
tmp_res = []
if ids is None or len(ids) == 0:
return ret
for _id in ids:
if self.flag:
ret = self.vearch.delete(self.using_db_name, self.using_table_name, _id)
else:
ret = self.vearch.del_doc(_id)
tmp_res.append(ret)
ret = all(i == 0 for i in tmp_res)
return ret
def get(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Dict[str, Document]:
"""Return docs according ids.
Args:
ids: The ids of the embedding vectors.
Returns:
Documents which satisfy the input conditions.
"""
results: Dict[str, Document] = {}
if ids is None or len(ids) == 0:
return results
if self.flag:
query_data = {"query": {"ids": ids}}
docs_detail = self.vearch.mget_by_ids(
self.using_db_name, self.using_table_name, query_data
)
for record in docs_detail:
if record["found"] is False:
continue
content = ""
meta_info = {}
for field in record["_source"]:
if field == "text":
content = record["_source"][field]
continue
elif field == "metadata":
meta_info["source"] = record["_source"][field]
continue
results[record["_id"]] = Document(
page_content=content, metadata=meta_info
)
else:
for id in ids:
docs_detail = self.vearch.get_doc_by_id(id)
if docs_detail == {}:
continue
content = ""
meta_info = {}
for field in docs_detail:
if field == "text":
content = docs_detail[field]
continue
elif field == "metadata":
meta_info["source"] = docs_detail[field]
continue
results[docs_detail["_id"]] = Document(
page_content=content, metadata=meta_info
)
return results
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~re_phraser.py | import logging
from typing import List
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseRetriever, Document
logger = logging.getLogger(__name__)
# Default template
DEFAULT_TEMPLATE = """Ты помощник, задача которого состоит в том, \
чтобы принять запрос на естественном языке от пользователя и \
преобразовать его в запрос для векторного хранилища. \
В этом процессе ты отсеиваешь информацию, которая не имеет отношения \
к задаче извлечения. Вот пользовательский запрос: {question}"""
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate.from_template(DEFAULT_TEMPLATE)
class RePhraseQueryRetriever(BaseRetriever):
"""Given a query, use an LLM to re-phrase it.
Then, retrieve docs for the re-phrased query."""
retriever: BaseRetriever
llm_chain: LLMChain
@classmethod
def from_llm(
cls,
retriever: BaseRetriever,
llm: BaseLLM,
prompt: PromptTemplate = DEFAULT_QUERY_PROMPT,
) -> "RePhraseQueryRetriever":
"""Initialize from llm using default template.
The prompt used here expects a single input: `question`
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
prompt: prompt template for query generation
Returns:
RePhraseQueryRetriever
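Example (illustrative sketch; the vector store and LLM below are
placeholders; any BaseRetriever / BaseLLM pair can be used):
    .. code-block:: python

        from langchain.embeddings import OpenAIEmbeddings
        from langchain.llms import OpenAI
        from langchain.retrievers.re_phraser import RePhraseQueryRetriever
        from langchain.vectorstores import FAISS

        vectorstore = FAISS.from_texts(["some document text"], OpenAIEmbeddings())
        retriever = RePhraseQueryRetriever.from_llm(
            retriever=vectorstore.as_retriever(),
            llm=OpenAI(temperature=0),
        )
        docs = retriever.get_relevant_documents("I want to know about X")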
"""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(
retriever=retriever,
llm_chain=llm_chain,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Get relevated documents given a user question.
Args:
query: user question
Returns:
Relevant documents for re-phrased question
"""
response = self.llm_chain(query, callbacks=run_manager.get_child())
re_phrased_question = response["text"]
logger.info(f"Re-phrased question: {re_phrased_question}")
docs = self.retriever.get_relevant_documents(
re_phrased_question, callbacks=run_manager.get_child()
)
return docs
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
raise NotImplementedError
| [
"Ты помощник, задача которого состоит в том, чтобы принять запрос на естественном языке от пользователя и преобразовать его в запрос для векторного хранилища. В этом процессе ты отсеиваешь информацию, которая не имеет отношения к задаче извлечения. Вот пользовательский запрос: {question}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~memory~entity.py | import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_SUMMARIZATION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import BaseMessage, get_buffer_string
from langchain.utilities.redis import get_client
logger = logging.getLogger(__name__)
class BaseEntityStore(BaseModel, ABC):
"""Abstract base class for Entity store."""
@abstractmethod
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
"""Get entity value from store."""
pass
@abstractmethod
def set(self, key: str, value: Optional[str]) -> None:
"""Set entity value in store."""
pass
@abstractmethod
def delete(self, key: str) -> None:
"""Delete entity value from store."""
pass
@abstractmethod
def exists(self, key: str) -> bool:
"""Check if entity exists in store."""
pass
@abstractmethod
def clear(self) -> None:
"""Delete all entities from store."""
pass
class InMemoryEntityStore(BaseEntityStore):
"""In-memory Entity store."""
store: Dict[str, Optional[str]] = {}
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
return self.store.get(key, default)
def set(self, key: str, value: Optional[str]) -> None:
self.store[key] = value
def delete(self, key: str) -> None:
del self.store[key]
def exists(self, key: str) -> bool:
return key in self.store
def clear(self) -> None:
return self.store.clear()
class UpstashRedisEntityStore(BaseEntityStore):
"""Upstash Redis backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
def __init__(
self,
session_id: str = "default",
url: str = "",
token: str = "",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
from upstash_redis import Redis
except ImportError:
raise ImportError(
"Could not import upstash_redis python package. "
"Please install it with `pip install upstash_redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = Redis(url=url, token=token)
except Exception:
logger.error("Upstash Redis instance could not be initiated.")
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
def scan_and_delete(cursor: int) -> int:
cursor, keys_to_delete = self.redis_client.scan(
cursor, f"{self.full_key_prefix}:*"
)
self.redis_client.delete(*keys_to_delete)
return cursor
cursor = scan_and_delete(0)
while cursor != 0:
scan_and_delete(cursor)
class RedisEntityStore(BaseEntityStore):
"""Redis-backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
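Example (illustrative; the Redis URL, session id and `llm` below are
placeholders for your own instances):
    .. code-block:: python

        entity_store = RedisEntityStore(
            session_id="user-123",
            url="redis://localhost:6379/0",
        )
        memory = ConversationEntityMemory(llm=llm, entity_store=entity_store)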
"""
redis_client: Any
session_id: str = "default"
key_prefix: str = "memory_store"
ttl: Optional[int] = 60 * 60 * 24
recall_ttl: Optional[int] = 60 * 60 * 24 * 3
def __init__(
self,
session_id: str = "default",
url: str = "redis://localhost:6379/0",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = get_client(redis_url=url, decode_responses=True)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
def clear(self) -> None:
# iterate a list in batches of size batch_size
def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]:
iterator = iter(iterable)
while batch := list(islice(iterator, batch_size)):
yield batch
for keybatch in batched(
self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
):
self.redis_client.delete(*keybatch)
class SQLiteEntityStore(BaseEntityStore):
"""SQLite-backed Entity store"""
session_id: str = "default"
table_name: str = "memory_store"
def __init__(
self,
session_id: str = "default",
db_file: str = "entities.db",
table_name: str = "memory_store",
*args: Any,
**kwargs: Any,
):
try:
import sqlite3
except ImportError:
raise ImportError(
"Could not import sqlite3 python package. "
"Please install it with `pip install sqlite3`."
)
super().__init__(*args, **kwargs)
self.conn = sqlite3.connect(db_file)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
@property
def full_table_name(self) -> str:
return f"{self.table_name}_{self.session_id}"
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
query = f"""
SELECT value
FROM {self.full_table_name}
WHERE key = ?
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
if result is not None:
value = result[0]
return value
return default
def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
query = f"""
INSERT OR REPLACE INTO {self.full_table_name} (key, value)
VALUES (?, ?)
"""
with self.conn:
self.conn.execute(query, (key, value))
def delete(self, key: str) -> None:
query = f"""
DELETE FROM {self.full_table_name}
WHERE key = ?
"""
with self.conn:
self.conn.execute(query, (key,))
def exists(self, key: str) -> bool:
query = f"""
SELECT 1
FROM {self.full_table_name}
WHERE key = ?
LIMIT 1
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
return result is not None
def clear(self) -> None:
query = f"""
DELETE FROM {self.full_table_name}
"""
with self.conn:
self.conn.execute(query)
class ConversationEntityMemory(BaseChatMemory):
"""Entity extractor & summarizer memory.
Extracts named entities from the recent chat history and generates summaries.
Uses a swappable entity store to persist entities across conversations.
Defaults to an in-memory entity store, and can be swapped out for a Redis,
SQLite, or other entity store.
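Example (illustrative sketch; the OpenAI LLM is a placeholder; any
language model supported by the library can be used):
    .. code-block:: python

        from langchain.llms import OpenAI
        from langchain.memory import ConversationEntityMemory

        llm = OpenAI(temperature=0)
        memory = ConversationEntityMemory(llm=llm, k=3)
        # load_memory_variables() extracts entities and fills the cache;
        # save_context() then summarizes them into the entity store.
        memory.load_memory_variables({"input": "Deven is building a RAG demo."})
        memory.save_context(
            {"input": "Deven is building a RAG demo."},
            {"output": "Sounds interesting, tell me more."},
        )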
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
# Cache of recently detected entity names, if any
# It is updated when load_memory_variables is called:
entity_cache: List[str] = []
# Number of recent message pairs to consider when updating entities:
k: int = 3
chat_history_key: str = "history"
# Store to manage entity-related data:
entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore)
@property
def buffer(self) -> List[BaseMessage]:
"""Access chat memory messages."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return ["entities", self.chat_history_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns chat history and all generated entities with summaries if available,
and updates or clears the recent entity cache.
New entity names may be found when calling this method, before their
summaries are generated, so the entity cache values may be empty if no entity
descriptions have been generated yet.
"""
# Create an LLMChain for predicting entity names from the recent chat history:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
# Generates a comma-separated list of named entities,
# e.g. "Jane, White House, UFO"
# or "NONE" if no named entities are extracted:
output = chain.predict(
history=buffer_string,
input=inputs[prompt_input_key],
)
# If no named entities are extracted, assigns an empty list.
if output.strip() == "NONE":
entities = []
else:
# Make a list of the extracted entities:
entities = [w.strip() for w in output.split(",")]
# Make a dictionary of entities with summary if exists:
entity_summaries = {}
for entity in entities:
entity_summaries[entity] = self.entity_store.get(entity, "")
# Replaces the entity name cache with the most recently discussed entities,
# or if no entities were extracted, clears the cache:
self.entity_cache = entities
# Should we return as message objects or as a string?
if self.return_messages:
# Get last `k` pair of chat messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
# Reuse the string we made earlier:
buffer = buffer_string
return {
self.chat_history_key: buffer,
"entities": entity_summaries,
}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""
Save context from this conversation history to the entity store.
Generates a summary for each entity in the entity cache by prompting
the model, and saves these summaries to the entity store.
"""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
# Extract an arbitrary window of the last message pairs from
# the chat history, where the hyperparameter k is the
# number of message pairs:
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
input_data = inputs[prompt_input_key]
# Create an LLMChain for predicting entity summarization from the context
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
# Generate new summaries for entities and save them in the entity store
for entity in self.entity_cache:
# Get existing summary if it exists
existing_summary = self.entity_store.get(entity, "")
output = chain.predict(
summary=existing_summary,
entity=entity,
history=buffer_string,
input=input_data,
)
# Save the updated summary to the entity store
self.entity_store.set(entity, output.strip())
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
self.entity_cache.clear()
self.entity_store.clear()
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_loaders~gmail.py | import base64
import re
from typing import Any, Iterator
from langchain.chat_loaders.base import BaseChatLoader
from langchain.schema.chat import ChatSession
from langchain.schema.messages import HumanMessage
def _extract_email_content(msg: Any) -> HumanMessage:
from_email = None
for values in msg["payload"]["headers"]:
name = values["name"]
if name == "From":
from_email = values["value"]
if from_email is None:
raise ValueError
for part in msg["payload"]["parts"]:
if part["mimeType"] == "text/plain":
data = part["body"]["data"]
data = base64.urlsafe_b64decode(data).decode("utf-8")
# Regular expression to split the email body at the first
# occurrence of a line that starts with "On ... wrote:"
pattern = re.compile(r"\r\nOn .+(\r\n)*wrote:\r\n")
# Split the email body and extract the first part
newest_response = re.split(pattern, data)[0]
message = HumanMessage(
content=newest_response, additional_kwargs={"sender": from_email}
)
return message
raise ValueError
def _get_message_data(service: Any, message: Any) -> ChatSession:
msg = service.users().messages().get(userId="me", id=message["id"]).execute()
message_content = _extract_email_content(msg)
in_reply_to = None
email_data = msg["payload"]["headers"]
for values in email_data:
name = values["name"]
if name == "In-Reply-To":
in_reply_to = values["value"]
if in_reply_to is None:
raise ValueError
thread_id = msg["threadId"]
thread = service.users().threads().get(userId="me", id=thread_id).execute()
messages = thread["messages"]
response_email = None
for message in messages:
email_data = message["payload"]["headers"]
for values in email_data:
if values["name"] == "Message-ID":
message_id = values["value"]
if message_id == in_reply_to:
response_email = message
if response_email is None:
raise ValueError
starter_content = _extract_email_content(response_email)
return ChatSession(messages=[starter_content, message_content])
class GMailLoader(BaseChatLoader):
"""Load data from `GMail`.
There are many ways you could want to load data from GMail.
This loader is currently fairly opinionated in how to do so.
It works as follows: it first looks for all messages that you have sent.
It then looks for messages where you are responding to a previous email.
It then fetches that previous email, and creates a training example
of that email, followed by your email.
Note that there are clear limitations here. For example,
all examples created are only looking at the previous email for context.
To use:
- Set up a Google Developer Account:
Go to the Google Developer Console, create a project,
and enable the Gmail API for that project.
This will give you a credentials.json file that you'll need later.
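Example (illustrative sketch; the token file path and scope are
placeholders for credentials produced by your own OAuth flow):
    .. code-block:: python

        from google.oauth2.credentials import Credentials

        from langchain.chat_loaders.gmail import GMailLoader

        creds = Credentials.from_authorized_user_file(
            "token.json",
            scopes=["https://www.googleapis.com/auth/gmail.readonly"],
        )
        loader = GMailLoader(creds=creds, n=100, raise_error=False)
        chat_sessions = list(loader.lazy_load())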
"""
def __init__(self, creds: Any, n: int = 100, raise_error: bool = False) -> None:
super().__init__()
self.creds = creds
self.n = n
self.raise_error = raise_error
def lazy_load(self) -> Iterator[ChatSession]:
from googleapiclient.discovery import build
service = build("gmail", "v1", credentials=self.creds)
results = (
service.users()
.messages()
.list(userId="me", labelIds=["SENT"], maxResults=self.n)
.execute()
)
messages = results.get("messages", [])
for message in messages:
try:
yield _get_message_data(service, message)
except Exception as e:
# TODO: handle errors better
if self.raise_error:
raise e
else:
pass
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~sagemaker_endpoint.py | """Sagemaker InvokeEndpoint API."""
import io
import json
from abc import abstractmethod
from typing import Any, Dict, Generic, Iterator, List, Mapping, Optional, TypeVar, Union
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
INPUT_TYPE = TypeVar("INPUT_TYPE", bound=Union[str, List[str]])
OUTPUT_TYPE = TypeVar("OUTPUT_TYPE", bound=Union[str, List[List[float]], Iterator])
class LineIterator:
"""
A helper class for parsing the byte stream input.
The output of the model will be in the following format:
b'{"outputs": [" a"]}\n'
b'{"outputs": [" challenging"]}\n'
b'{"outputs": [" problem"]}\n'
...
While usually each PayloadPart event from the event stream will
contain a byte array with a full JSON object, this is not guaranteed
and some of the JSON objects may be split across PayloadPart events.
For example:
{'PayloadPart': {'Bytes': b'{"outputs": '}}
{'PayloadPart': {'Bytes': b'[" problem"]}\n'}}
This class accounts for this by concatenating bytes written via the 'write' function
and then exposing a method which will return lines (ending with a '\n' character)
within the buffer via the 'scan_lines' function.
It maintains the position of the last read position to ensure
that previous bytes are not exposed again.
For more details see:
https://aws.amazon.com/blogs/machine-learning/elevating-the-generative-ai-experience-introducing-streaming-support-in-amazon-sagemaker-hosting/
"""
def __init__(self, stream: Any) -> None:
self.byte_iterator = iter(stream)
self.buffer = io.BytesIO()
self.read_pos = 0
def __iter__(self) -> "LineIterator":
return self
def __next__(self) -> Any:
while True:
self.buffer.seek(self.read_pos)
line = self.buffer.readline()
if line and line[-1] == ord("\n"):
self.read_pos += len(line)
return line[:-1]
try:
chunk = next(self.byte_iterator)
except StopIteration:
if self.read_pos < self.buffer.getbuffer().nbytes:
continue
raise
if "PayloadPart" not in chunk:
# Unknown Event Type
continue
self.buffer.seek(0, io.SEEK_END)
self.buffer.write(chunk["PayloadPart"]["Bytes"])
class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]):
"""A handler class to transform input from LLM to a
format that SageMaker endpoint expects.
Similarly, the class handles transforming output from the
SageMaker endpoint to a format that LLM class expects.
"""
"""
Example:
.. code-block:: python
class ContentHandler(ContentHandlerBase):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({prompt: prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
content_type: Optional[str] = "text/plain"
"""The MIME type of the input data passed to endpoint"""
accepts: Optional[str] = "text/plain"
"""The MIME type of the response data returned from endpoint"""
@abstractmethod
def transform_input(self, prompt: INPUT_TYPE, model_kwargs: Dict) -> bytes:
"""Transforms the input to a format that model can accept
as the request Body. Should return bytes or seekable file
like object in the format specified in the content_type
request header.
"""
@abstractmethod
def transform_output(self, output: bytes) -> OUTPUT_TYPE:
"""Transforms the output from the model to string that
the LLM class expects.
"""
class LLMContentHandler(ContentHandlerBase[str, str]):
"""Content handler for LLM class."""
class SagemakerEndpoint(LLM):
"""Sagemaker Inference Endpoint models.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Args:
region_name: The aws region e.g., `us-west-2`.
Falls back to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config.
credentials_profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
client: boto3 client for Sagemaker Endpoint
content_handler: Implementation for model specific LLMContentHandler
Example:
.. code-block:: python
from langchain.llms import SagemakerEndpoint
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
#Use with boto3 client
client = boto3.client(
"sagemaker-runtime",
region_name=region_name
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
client=client
)
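#Use with streaming (illustrative; requires an endpoint deployed
#with response streaming enabled)
se = SagemakerEndpoint(
    endpoint_name=endpoint_name,
    region_name=region_name,
    credentials_profile_name=credentials_profile_name,
    streaming=True
)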
"""
client: Any = None
"""Boto3 client for sagemaker runtime"""
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: LLMContentHandler
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
streaming: bool = False
"""Whether to stream the results."""
"""
Example:
.. code-block:: python
from langchain.llms.sagemaker_endpoint import LLMContentHandler
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({prompt: prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_. docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Dont do anything if client provided externally"""
if values.get("client") is not None:
return values
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None:
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_name": self.endpoint_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "sagemaker_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Sagemaker inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs = {**_model_kwargs, **kwargs}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(prompt, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
if self.streaming and run_manager:
try:
resp = self.client.invoke_endpoint_with_response_stream(
EndpointName=self.endpoint_name,
Body=body,
ContentType=self.content_handler.content_type,
**_endpoint_kwargs,
)
iterator = LineIterator(resp["Body"])
current_completion: str = ""
for line in iterator:
resp = json.loads(line)
resp_output = resp.get("outputs")[0]
if stop is not None:
# Uses same approach as below
resp_output = enforce_stop_tokens(resp_output, stop)
current_completion += resp_output
run_manager.on_llm_new_token(resp_output)
return current_completion
except Exception as e:
raise ValueError(f"Error raised by streaming inference endpoint: {e}")
else:
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
text = self.content_handler.transform_output(response["Body"])
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to the sagemaker endpoint.
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~prompts~few_shot.py | """Prompt template that contains few shot examples."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union
from langchain.prompts.base import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
check_valid_template,
get_template_variables,
)
from langchain.prompts.chat import BaseChatPromptTemplate, BaseMessagePromptTemplate
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.schema.messages import BaseMessage, get_buffer_string
class _FewShotPromptTemplateMixin(BaseModel):
"""Prompt template that contains few shot examples."""
examples: Optional[List[dict]] = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Optional[BaseExampleSelector] = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) -> Dict:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
return values
def _get_examples(self, **kwargs: Any) -> List[dict]:
"""Get the examples to use for formatting the prompt.
Args:
**kwargs: Keyword arguments to be passed to the example selector.
Returns:
List of examples.
"""
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
"""Prompt template that contains few shot examples."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
validate_template: bool = False
"""Whether or not to try validating the template."""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: str
"""A prompt template string to put after the examples."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: str = ""
"""A prompt template string to put before the examples."""
template_format: Union[Literal["f-string"], Literal["jinja2"]] = "f-string"
"""The format of the prompt template. Options are: 'f-string', 'jinja2'."""
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that prefix, suffix, and input variables are consistent."""
if values["validate_template"]:
check_valid_template(
values["prefix"] + values["suffix"],
values["template_format"],
values["input_variables"] + list(values["partial_variables"]),
)
elif values.get("template_format"):
values["input_variables"] = [
var
for var in get_template_variables(
values["prefix"] + values["suffix"], values["template_format"]
)
if var not in values["partial_variables"]
]
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall template.
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "few_shot"
def save(self, file_path: Union[Path, str]) -> None:
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
return super().save(file_path)
class FewShotChatMessagePromptTemplate(
BaseChatPromptTemplate, _FewShotPromptTemplateMixin
):
"""Chat prompt template that supports few-shot examples.
The high-level structure produced by this prompt template is a list of messages
consisting of prefix message(s), example message(s), and suffix message(s).
This structure enables creating a conversation with intermediate examples like:
System: You are a helpful AI Assistant
Human: What is 2+2?
AI: 4
Human: What is 2+3?
AI: 5
Human: What is 4+4?
This prompt template can be used to generate a fixed list of examples or else
to dynamically select examples based on the input.
Examples:
Prompt template with a fixed list of examples (matching the sample
conversation above):
.. code-block:: python
from langchain.prompts import (
FewShotChatMessagePromptTemplate,
ChatPromptTemplate
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[('human', '{input}'), ('ai', '{output}')]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
examples=examples,
# This is a prompt template used to format each individual example.
example_prompt=example_prompt,
)
final_prompt = ChatPromptTemplate.from_messages(
[
('system', 'You are a helpful AI Assistant'),
few_shot_prompt,
('human', '{input}'),
]
)
final_prompt.format(input="What is 4+4?")
Prompt template with dynamically selected examples:
.. code-block:: python
from langchain.prompts import SemanticSimilarityExampleSelector
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
{"input": "2+4", "output": "6"},
# ...
]
to_vectorize = [
" ".join(example.values())
for example in examples
]
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(
to_vectorize, embeddings, metadatas=examples
)
example_selector = SemanticSimilarityExampleSelector(
vectorstore=vectorstore
)
from langchain.schema import SystemMessage
from langchain.prompts import AIMessagePromptTemplate, HumanMessagePromptTemplate
from langchain.prompts.few_shot import FewShotChatMessagePromptTemplate
few_shot_prompt = FewShotChatMessagePromptTemplate(
# Which variable(s) will be passed to the example selector.
input_variables=["input"],
example_selector=example_selector,
# Define how each example will be formatted.
# In this case, each example will become 2 messages:
# 1 human, and 1 AI
example_prompt=(
HumanMessagePromptTemplate.from_template("{input}")
+ AIMessagePromptTemplate.from_template("{output}")
),
)
# Define the overall prompt.
final_prompt = (
SystemMessagePromptTemplate.from_template(
"You are a helpful AI Assistant"
)
+ few_shot_prompt
+ HumanMessagePromptTemplate.from_template("{input}")
)
# Show the prompt
print(final_prompt.format_messages(input="What's 3+3?"))
# Use within an LLM
from langchain.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic()
chain.invoke({"input": "What's 3+3?"})
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return False
input_variables: List[str] = Field(default_factory=list)
"""A list of the names of the variables the prompt template will use
to pass to the example_selector, if provided."""
example_prompt: Union[BaseMessagePromptTemplate, BaseChatPromptTemplate]
"""The class to format each example."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
"""Format kwargs into a list of messages.
Args:
**kwargs: keyword arguments to use for filling in templates in messages.
Returns:
A list of formatted messages with all template variables filled in.
"""
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
messages = [
message
for example in examples
for message in self.example_prompt.format_messages(**example)
]
return messages
def format(self, **kwargs: Any) -> str:
"""Format the prompt with inputs generating a string.
Use this method to generate a string representation of a prompt consisting
of chat messages.
Useful for feeding into a string based completion language model or debugging.
Args:
**kwargs: keyword arguments to use for formatting.
Returns:
A string representation of the prompt
"""
messages = self.format_messages(**kwargs)
return get_buffer_string(messages)
| [
"f-string",
"False"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~graphs~falkordb_graph.py | from typing import Any, Dict, List
from langchain.graphs.graph_document import GraphDocument
from langchain.graphs.graph_store import GraphStore
node_properties_query = """
MATCH (n)
WITH keys(n) as keys, labels(n) AS labels
WITH CASE WHEN keys = [] THEN [NULL] ELSE keys END AS keys, labels
UNWIND labels AS label
UNWIND keys AS key
WITH label, collect(DISTINCT key) AS keys
RETURN {label:label, keys:keys} AS output
"""
rel_properties_query = """
MATCH ()-[r]->()
WITH keys(r) as keys, type(r) AS types
WITH CASE WHEN keys = [] THEN [NULL] ELSE keys END AS keys, types
UNWIND types AS type
UNWIND keys AS key WITH type,
collect(DISTINCT key) AS keys
RETURN {types:type, keys:keys} AS output
"""
rel_query = """
MATCH (n)-[r]->(m)
UNWIND labels(n) as src_label
UNWIND labels(m) as dst_label
UNWIND type(r) as rel_type
RETURN DISTINCT {start: src_label, type: rel_type, end: dst_label} AS output
"""
class FalkorDBGraph(GraphStore):
"""FalkorDB wrapper for graph operations.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
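Example (illustrative sketch; the database name, host/port and the
Cypher query are placeholders):
    .. code-block:: python

        from langchain.graphs import FalkorDBGraph

        graph = FalkorDBGraph(database="movies", host="localhost", port=6379)
        print(graph.get_schema)
        rows = graph.query("MATCH (m:Movie) RETURN m.title LIMIT 5")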
"""
def __init__(
self, database: str, host: str = "localhost", port: int = 6379
) -> None:
"""Create a new FalkorDB graph wrapper instance."""
try:
import redis
from redis.commands.graph import Graph
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
self._driver = redis.Redis(host=host, port=port)
self._graph = Graph(self._driver, database)
self.schema: str = ""
self.structured_schema: Dict[str, Any] = {}
try:
self.refresh_schema()
except Exception as e:
raise ValueError(f"Could not refresh schema. Error: {e}")
@property
def get_schema(self) -> str:
"""Returns the schema of the FalkorDB database"""
return self.schema
@property
def get_structured_schema(self) -> Dict[str, Any]:
"""Returns the structured schema of the Graph"""
return self.structured_schema
def refresh_schema(self) -> None:
"""Refreshes the schema of the FalkorDB database"""
node_properties: List[Any] = self.query(node_properties_query)
rel_properties: List[Any] = self.query(rel_properties_query)
relationships: List[Any] = self.query(rel_query)
self.structured_schema = {
"node_props": {el[0]["label"]: el[0]["keys"] for el in node_properties},
"rel_props": {el[0]["types"]: el[0]["keys"] for el in rel_properties},
"relationships": [el[0] for el in relationships],
}
self.schema = (
f"Node properties: {node_properties}\n"
f"Relationships properties: {rel_properties}\n"
f"Relationships: {relationships}\n"
)
def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
"""Query FalkorDB database."""
try:
data = self._graph.query(query, params)
return data.result_set
except Exception as e:
raise ValueError("Generated Cypher Statement is not valid\n" f"{e}")
def add_graph_documents(
self, graph_documents: List[GraphDocument], include_source: bool = False
) -> None:
"""
Take GraphDocument as input and use it to construct a graph.
"""
for document in graph_documents:
# Import nodes
for node in document.nodes:
self.query(
(
f"MERGE (n:{node.type} {{id:'{node.id}'}}) "
"SET n += $properties "
"RETURN distinct 'done' AS result"
),
{"properties": node.properties},
)
# Import relationships
for rel in document.relationships:
self.query(
(
f"MATCH (a:{rel.source.type} {{id:'{rel.source.id}'}}), "
f"(b:{rel.target.type} {{id:'{rel.target.id}'}}) "
f"MERGE (a)-[r:{(rel.type.replace(' ', '_').upper())}]->(b) "
"SET r += $properties "
"RETURN distinct 'done' AS result"
),
{"properties": rel.properties},
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~vectorstores~test_utils.py | """Test vector store utility functions."""
import numpy as np
from langchain.docstore.document import Document
from langchain.vectorstores.utils import (
filter_complex_metadata,
maximal_marginal_relevance,
)
def test_maximal_marginal_relevance_lambda_zero() -> None:
query_embedding = np.random.random(size=5)
embedding_list = [query_embedding, query_embedding, np.zeros(5)]
expected = [0, 2]
actual = maximal_marginal_relevance(
query_embedding, embedding_list, lambda_mult=0, k=2
)
assert expected == actual
def test_maximal_marginal_relevance_lambda_one() -> None:
query_embedding = np.random.random(size=5)
embedding_list = [query_embedding, query_embedding, np.zeros(5)]
expected = [0, 1]
actual = maximal_marginal_relevance(
query_embedding, embedding_list, lambda_mult=1, k=2
)
assert expected == actual
def test_maximal_marginal_relevance() -> None:
query_embedding = np.array([1, 0])
# Vectors that are 30, 45 and 75 degrees from query vector (cosine similarity of
# 0.87, 0.71, 0.26) and the latter two are 15 and 45 degrees from the first
# (cosine similarity 0.97 and 0.71). So for the 3rd vector to be chosen, it
# must be the case that
# 0.71lambda - 0.97(1 - lambda) < 0.26lambda - 0.71(1-lambda)
# -> lambda ~< .26 / .71
embedding_list = [[3**0.5, 1], [1, 1], [1, 2 + (3**0.5)]]
expected = [0, 2]
actual = maximal_marginal_relevance(
query_embedding, embedding_list, lambda_mult=(25 / 71), k=2
)
assert expected == actual
expected = [0, 1]
actual = maximal_marginal_relevance(
query_embedding, embedding_list, lambda_mult=(27 / 71), k=2
)
assert expected == actual
def test_maximal_marginal_relevance_query_dim() -> None:
query_embedding = np.random.random(size=5)
query_embedding_2d = query_embedding.reshape((1, 5))
embedding_list = np.random.random(size=(4, 5)).tolist()
first = maximal_marginal_relevance(query_embedding, embedding_list)
second = maximal_marginal_relevance(query_embedding_2d, embedding_list)
assert first == second
def test_filter_list_metadata() -> None:
documents = [
Document(
page_content="",
metadata={
"key1": "this is a string!",
"key2": ["a", "list", "of", "strings"],
},
),
Document(
page_content="",
metadata={
"key1": "this is another string!",
"key2": {"foo"},
},
),
Document(
page_content="",
metadata={
"key1": "this is another string!",
"key2": {"foo": "bar"},
},
),
Document(
page_content="",
metadata={
"key1": "this is another string!",
"key2": True,
},
),
Document(
page_content="",
metadata={
"key1": "this is another string!",
"key2": 1,
},
),
Document(
page_content="",
metadata={
"key1": "this is another string!",
"key2": 1.0,
},
),
Document(
page_content="",
metadata={
"key1": "this is another string!",
"key2": "foo",
},
),
]
updated_documents = filter_complex_metadata(documents)
filtered_metadata = [doc.metadata for doc in updated_documents]
assert filtered_metadata == [
{"key1": "this is a string!"},
{"key1": "this is another string!"},
{"key1": "this is another string!"},
{"key1": "this is another string!", "key2": True},
{"key1": "this is another string!", "key2": 1},
{"key1": "this is another string!", "key2": 1.0},
{"key1": "this is another string!", "key2": "foo"},
]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~duckdb_loader.py | from typing import Dict, List, Optional, cast
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class DuckDBLoader(BaseLoader):
"""Load from `DuckDB`.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
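Example (illustrative sketch; the query, database file and column names
are placeholders):
    .. code-block:: python

        from langchain.document_loaders import DuckDBLoader

        loader = DuckDBLoader(
            "SELECT title, body FROM posts",
            database="blog.duckdb",
            page_content_columns=["body"],
            metadata_columns=["title"],
        )
        documents = loader.load()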
"""
def __init__(
self,
query: str,
database: str = ":memory:",
read_only: bool = False,
config: Optional[Dict[str, str]] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
):
"""
Args:
query: The query to execute.
database: The database to connect to. Defaults to ":memory:".
read_only: Whether to open the database in read-only mode.
Defaults to False.
config: A dictionary of configuration options to pass to the database.
Optional.
page_content_columns: The columns to write into the `page_content`
of the document. Optional.
metadata_columns: The columns to write into the `metadata` of the document.
Optional.
"""
self.query = query
self.database = database
self.read_only = read_only
self.config = config or {}
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
def load(self) -> List[Document]:
try:
import duckdb
except ImportError:
raise ImportError(
"Could not import duckdb python package. "
"Please install it with `pip install duckdb`."
)
docs = []
with duckdb.connect(
database=self.database, read_only=self.read_only, config=self.config
) as con:
query_result = con.execute(self.query)
results = query_result.fetchall()
description = cast(list, query_result.description)
field_names = [c[0] for c in description]
if self.page_content_columns is None:
page_content_columns = field_names
else:
page_content_columns = self.page_content_columns
if self.metadata_columns is None:
metadata_columns = []
else:
metadata_columns = self.metadata_columns
for result in results:
page_content = "\n".join(
f"{column}: {result[field_names.index(column)]}"
for column in page_content_columns
)
metadata = {
column: result[field_names.index(column)]
for column in metadata_columns
}
doc = Document(page_content=page_content, metadata=metadata)
docs.append(doc)
return docs
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~smith~evaluation~progress.py | """A simple progress bar for the console."""
import threading
from typing import Any, Dict, Optional, Sequence
from uuid import UUID
from langchain.callbacks import base as base_callbacks
from langchain.schema.document import Document
from langchain.schema.output import LLMResult
class ProgressBarCallback(base_callbacks.BaseCallbackHandler):
"""A simple progress bar for the console."""
def __init__(self, total: int, ncols: int = 50, **kwargs: Any):
"""Initialize the progress bar.
Args:
total: int, the total number of items to be processed.
ncols: int, the character width of the progress bar.
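Example (illustrative sketch; `chain` and `examples` are placeholders
for an existing chain and its list of inputs):
    .. code-block:: python

        progress = ProgressBarCallback(total=len(examples))
        for inputs in examples:
            chain(inputs, callbacks=[progress])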
"""
self.total = total
self.ncols = ncols
self.counter = 0
self.lock = threading.Lock()
self._print_bar()
def increment(self) -> None:
"""Increment the counter and update the progress bar."""
with self.lock:
self.counter += 1
self._print_bar()
def _print_bar(self) -> None:
"""Print the progress bar to the console."""
progress = self.counter / self.total
arrow = "-" * int(round(progress * self.ncols) - 1) + ">"
spaces = " " * (self.ncols - len(arrow))
print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end="")
def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_chain_end(
self,
outputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_end(
self,
documents: Sequence[Document],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_end(
self,
output: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
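# Usage sketch (illustrative): the bar only counts top-level runs (parent_run_id is
# None), so a single handler instance is shared across a batch of runs. The `chain`
# and `dataset` names below are placeholder assumptions.
#
#     progress = ProgressBarCallback(total=10)
#     for inputs in dataset:  # 10 items
#         chain.run(inputs, callbacks=[progress])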
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~azuresearch.py | from __future__ import annotations
import base64
import json
import logging
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
)
import numpy as np
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.docstore.document import Document
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_env
logger = logging.getLogger()
if TYPE_CHECKING:
from azure.search.documents import SearchClient
from azure.search.documents.indexes.models import (
ScoringProfile,
SearchField,
SemanticSettings,
VectorSearch,
)
# Allow overriding field names for Azure Search
FIELDS_ID = get_from_env(
key="AZURESEARCH_FIELDS_ID", env_key="AZURESEARCH_FIELDS_ID", default="id"
)
FIELDS_CONTENT = get_from_env(
key="AZURESEARCH_FIELDS_CONTENT",
env_key="AZURESEARCH_FIELDS_CONTENT",
default="content",
)
FIELDS_CONTENT_VECTOR = get_from_env(
key="AZURESEARCH_FIELDS_CONTENT_VECTOR",
env_key="AZURESEARCH_FIELDS_CONTENT_VECTOR",
default="content_vector",
)
FIELDS_METADATA = get_from_env(
key="AZURESEARCH_FIELDS_TAG", env_key="AZURESEARCH_FIELDS_TAG", default="metadata"
)
MAX_UPLOAD_BATCH_SIZE = 1000
def _get_search_client(
endpoint: str,
key: str,
index_name: str,
semantic_configuration_name: Optional[str] = None,
fields: Optional[List[SearchField]] = None,
vector_search: Optional[VectorSearch] = None,
semantic_settings: Optional[SemanticSettings] = None,
scoring_profiles: Optional[List[ScoringProfile]] = None,
default_scoring_profile: Optional[str] = None,
default_fields: Optional[List[SearchField]] = None,
user_agent: Optional[str] = "langchain",
) -> SearchClient:
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import ResourceNotFoundError
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
HnswVectorSearchAlgorithmConfiguration,
PrioritizedFields,
SearchIndex,
SemanticConfiguration,
SemanticField,
SemanticSettings,
VectorSearch,
)
default_fields = default_fields or []
if key is None:
credential = DefaultAzureCredential()
elif key.upper() == "INTERACTIVE":
credential = InteractiveBrowserCredential()
credential.get_token("https://search.azure.com/.default")
else:
credential = AzureKeyCredential(key)
index_client: SearchIndexClient = SearchIndexClient(
endpoint=endpoint, credential=credential, user_agent=user_agent
)
try:
index_client.get_index(name=index_name)
except ResourceNotFoundError:
# Fields configuration
if fields is not None:
# Check mandatory fields
fields_types = {f.name: f.type for f in fields}
mandatory_fields = {df.name: df.type for df in default_fields}
# Check for missing keys
missing_fields = {
key: mandatory_fields[key]
for key, value in set(mandatory_fields.items())
- set(fields_types.items())
}
if len(missing_fields) > 0:
fmt_err = lambda x: ( # noqa: E731
f"{x} current type: '{fields_types.get(x, 'MISSING')}'. It has to "
f"be '{mandatory_fields.get(x)}' or you can point to a different "
f"'{mandatory_fields.get(x)}' field name by using the env variable "
f"'AZURESEARCH_FIELDS_{x.upper()}'"
)
error = "\n".join([fmt_err(x) for x in missing_fields])
raise ValueError(
f"You need to specify at least the following fields "
f"{missing_fields} or provide alternative field names in the env "
f"variables.\n\n{error}"
)
else:
fields = default_fields
# Vector search configuration
if vector_search is None:
vector_search = VectorSearch(
algorithm_configurations=[
HnswVectorSearchAlgorithmConfiguration(
name="default",
kind="hnsw",
parameters={ # type: ignore
"m": 4,
"efConstruction": 400,
"efSearch": 500,
"metric": "cosine",
},
)
]
)
# Create the semantic settings with the configuration
if semantic_settings is None and semantic_configuration_name is not None:
semantic_settings = SemanticSettings(
configurations=[
SemanticConfiguration(
name=semantic_configuration_name,
prioritized_fields=PrioritizedFields(
prioritized_content_fields=[
SemanticField(field_name=FIELDS_CONTENT)
],
),
)
]
)
# Create the search index with the semantic settings and vector search
index = SearchIndex(
name=index_name,
fields=fields,
vector_search=vector_search,
semantic_settings=semantic_settings,
scoring_profiles=scoring_profiles,
default_scoring_profile=default_scoring_profile,
)
index_client.create_index(index)
# Create the search client
return SearchClient(
endpoint=endpoint,
index_name=index_name,
credential=credential,
user_agent=user_agent,
)
class AzureSearch(VectorStore):
"""`Azure Cognitive Search` vector store."""
def __init__(
self,
azure_search_endpoint: str,
azure_search_key: str,
index_name: str,
embedding_function: Callable,
search_type: str = "hybrid",
semantic_configuration_name: Optional[str] = None,
semantic_query_language: str = "en-us",
fields: Optional[List[SearchField]] = None,
vector_search: Optional[VectorSearch] = None,
semantic_settings: Optional[SemanticSettings] = None,
scoring_profiles: Optional[List[ScoringProfile]] = None,
default_scoring_profile: Optional[str] = None,
**kwargs: Any,
):
from azure.search.documents.indexes.models import (
SearchableField,
SearchField,
SearchFieldDataType,
SimpleField,
)
"""Initialize with necessary components."""
# Initialize base class
self.embedding_function = embedding_function
default_fields = [
SimpleField(
name=FIELDS_ID,
type=SearchFieldDataType.String,
key=True,
filterable=True,
),
SearchableField(
name=FIELDS_CONTENT,
type=SearchFieldDataType.String,
),
SearchField(
name=FIELDS_CONTENT_VECTOR,
type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
searchable=True,
vector_search_dimensions=len(embedding_function("Text")),
vector_search_configuration="default",
),
SearchableField(
name=FIELDS_METADATA,
type=SearchFieldDataType.String,
),
]
user_agent = "langchain"
if "user_agent" in kwargs and kwargs["user_agent"]:
user_agent += " " + kwargs["user_agent"]
self.client = _get_search_client(
azure_search_endpoint,
azure_search_key,
index_name,
semantic_configuration_name=semantic_configuration_name,
fields=fields,
vector_search=vector_search,
semantic_settings=semantic_settings,
scoring_profiles=scoring_profiles,
default_scoring_profile=default_scoring_profile,
default_fields=default_fields,
user_agent=user_agent,
)
self.search_type = search_type
self.semantic_configuration_name = semantic_configuration_name
self.semantic_query_language = semantic_query_language
self.fields = fields if fields else default_fields
@property
def embeddings(self) -> Optional[Embeddings]:
# TODO: Support embedding object directly
return None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts data to an existing index."""
keys = kwargs.get("keys")
ids = []
# Write data to index
data = []
for i, text in enumerate(texts):
# Use provided key otherwise use default key
key = keys[i] if keys else str(uuid.uuid4())
# Encoding key for Azure Search valid characters
key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")
metadata = metadatas[i] if metadatas else {}
# Add data to index
# Additional metadata to fields mapping
doc = {
"@search.action": "upload",
FIELDS_ID: key,
FIELDS_CONTENT: text,
FIELDS_CONTENT_VECTOR: np.array(
self.embedding_function(text), dtype=np.float32
).tolist(),
FIELDS_METADATA: json.dumps(metadata),
}
if metadata:
additional_fields = {
k: v
for k, v in metadata.items()
if k in [x.name for x in self.fields]
}
doc.update(additional_fields)
data.append(doc)
ids.append(key)
# Upload data in batches
if len(data) == MAX_UPLOAD_BATCH_SIZE:
response = self.client.upload_documents(documents=data)
# Check if all documents were successfully uploaded
if not all([r.succeeded for r in response]):
raise Exception(response)
# Reset data
data = []
# Considering case where data is an exact multiple of batch-size entries
if len(data) == 0:
return ids
# Upload data to index
response = self.client.upload_documents(documents=data)
# Check if all documents were successfully uploaded
if all([r.succeeded for r in response]):
return ids
else:
raise Exception(response)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
search_type = kwargs.get("search_type", self.search_type)
if search_type == "similarity":
docs = self.vector_search(query, k=k, **kwargs)
elif search_type == "hybrid":
docs = self.hybrid_search(query, k=k, **kwargs)
elif search_type == "semantic_hybrid":
docs = self.semantic_hybrid_search(query, k=k, **kwargs)
else:
raise ValueError(f"search_type of {search_type} not allowed.")
return docs
def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
score_threshold = kwargs.pop("score_threshold", None)
result = self.vector_search_with_score(query, k=k, **kwargs)
return (
result
if score_threshold is None
else [r for r in result if r[1] >= score_threshold]
)
def vector_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.vector_search_with_score(
query, k=k, filters=kwargs.get("filters", None)
)
return [doc for doc, _ in docs_and_scores]
def vector_search_with_score(
self, query: str, k: int = 4, filters: Optional[str] = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
from azure.search.documents.models import Vector
results = self.client.search(
search_text="",
vectors=[
Vector(
value=np.array(
self.embedding_function(query), dtype=np.float32
).tolist(),
k=k,
fields=FIELDS_CONTENT_VECTOR,
)
],
filter=filters,
)
# Convert results to Document objects
docs = [
(
Document(
page_content=result.pop(FIELDS_CONTENT),
metadata=json.loads(result[FIELDS_METADATA])
if FIELDS_METADATA in result
else {
k: v for k, v in result.items() if k != FIELDS_CONTENT_VECTOR
},
),
float(result["@search.score"]),
)
for result in results
]
return docs
def hybrid_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.hybrid_search_with_score(
query, k=k, filters=kwargs.get("filters", None)
)
return [doc for doc, _ in docs_and_scores]
def hybrid_search_with_score(
self, query: str, k: int = 4, filters: Optional[str] = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query with an hybrid query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
from azure.search.documents.models import Vector
results = self.client.search(
search_text=query,
vectors=[
Vector(
value=np.array(
self.embedding_function(query), dtype=np.float32
).tolist(),
k=k,
fields=FIELDS_CONTENT_VECTOR,
)
],
filter=filters,
top=k,
)
# Convert results to Document objects
docs = [
(
Document(
page_content=result.pop(FIELDS_CONTENT),
metadata=json.loads(result[FIELDS_METADATA])
if FIELDS_METADATA in result
else {
k: v for k, v in result.items() if k != FIELDS_CONTENT_VECTOR
},
),
float(result["@search.score"]),
)
for result in results
]
return docs
def semantic_hybrid_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.semantic_hybrid_search_with_score(
query, k=k, filters=kwargs.get("filters", None)
)
return [doc for doc, _ in docs_and_scores]
def semantic_hybrid_search_with_score(
self, query: str, k: int = 4, filters: Optional[str] = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query with an hybrid query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
from azure.search.documents.models import Vector
results = self.client.search(
search_text=query,
vectors=[
Vector(
value=np.array(
self.embedding_function(query), dtype=np.float32
).tolist(),
k=50,
fields=FIELDS_CONTENT_VECTOR,
)
],
filter=filters,
query_type="semantic",
query_language=self.semantic_query_language,
semantic_configuration_name=self.semantic_configuration_name,
query_caption="extractive",
query_answer="extractive",
top=k,
)
# Get Semantic Answers
semantic_answers = results.get_answers() or []
semantic_answers_dict: Dict = {}
for semantic_answer in semantic_answers:
semantic_answers_dict[semantic_answer.key] = {
"text": semantic_answer.text,
"highlights": semantic_answer.highlights,
}
# Convert results to Document objects
docs = [
(
Document(
page_content=result.pop(FIELDS_CONTENT),
metadata={
**(
json.loads(result[FIELDS_METADATA])
if FIELDS_METADATA in result
else {
k: v
for k, v in result.items()
if k != FIELDS_CONTENT_VECTOR
}
),
**{
"captions": {
"text": result.get("@search.captions", [{}])[0].text,
"highlights": result.get("@search.captions", [{}])[
0
].highlights,
}
if result.get("@search.captions")
else {},
"answers": semantic_answers_dict.get(
json.loads(result["metadata"]).get("key"), ""
),
},
},
),
float(result["@search.score"]),
)
for result in results
]
return docs
@classmethod
def from_texts(
cls: Type[AzureSearch],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
azure_search_endpoint: str = "",
azure_search_key: str = "",
index_name: str = "langchain-index",
**kwargs: Any,
) -> AzureSearch:
# Creating a new Azure Search instance
azure_search = cls(
azure_search_endpoint,
azure_search_key,
index_name,
embedding.embed_query,
)
azure_search.add_texts(texts, metadatas, **kwargs)
return azure_search
class AzureSearchVectorStoreRetriever(BaseRetriever):
"""Retriever that uses `Azure Cognitive Search`."""
vectorstore: AzureSearch
"""Azure Search instance used to find similar documents."""
search_type: str = "hybrid"
"""Type of search to perform. Options are "similarity", "hybrid",
"semantic_hybrid"."""
k: int = 4
"""Number of documents to return."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "hybrid", "semantic_hybrid"):
raise ValueError(f"search_type of {search_type} not allowed.")
return values
def _get_relevant_documents(
self,
query: str,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.vector_search(query, k=self.k, **kwargs)
elif self.search_type == "hybrid":
docs = self.vectorstore.hybrid_search(query, k=self.k, **kwargs)
elif self.search_type == "semantic_hybrid":
docs = self.vectorstore.semantic_hybrid_search(query, k=self.k, **kwargs)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
raise NotImplementedError(
"AzureSearchVectorStoreRetriever does not support async"
)
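# Usage sketch (illustrative; the endpoint, key, index name and embedding model are
# placeholder assumptions):
#
#     from langchain.embeddings.openai import OpenAIEmbeddings
#
#     embeddings = OpenAIEmbeddings()
#     store = AzureSearch(
#         azure_search_endpoint="https://<service>.search.windows.net",
#         azure_search_key="<admin-key>",
#         index_name="langchain-index",
#         embedding_function=embeddings.embed_query,
#     )
#     store.add_texts(["hello world"], metadatas=[{"source": "demo"}])
#     docs = store.similarity_search("hello", k=1, search_type="hybrid")
#     retriever = AzureSearchVectorStoreRetriever(vectorstore=store, search_type="hybrid", k=4)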
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~memory~chat_message_histories~cosmos_db.py | """Azure CosmosDB Memory History."""
from __future__ import annotations
import logging
from types import TracebackType
from typing import TYPE_CHECKING, Any, List, Optional, Type
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, messages_from_dict, messages_to_dict
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from azure.cosmos import ContainerProxy
class CosmosDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history backed by Azure CosmosDB."""
def __init__(
self,
cosmos_endpoint: str,
cosmos_database: str,
cosmos_container: str,
session_id: str,
user_id: str,
credential: Any = None,
connection_string: Optional[str] = None,
ttl: Optional[int] = None,
cosmos_client_kwargs: Optional[dict] = None,
):
"""
Initializes a new instance of the CosmosDBChatMessageHistory class.
Make sure to call prepare_cosmos or use the context manager to make
sure your database is ready.
Either a credential or a connection string must be provided.
:param cosmos_endpoint: The connection endpoint for the Azure Cosmos DB account.
:param cosmos_database: The name of the database to use.
:param cosmos_container: The name of the container to use.
:param session_id: The session ID to use, can be overwritten while loading.
:param user_id: The user ID to use, can be overwritten while loading.
:param credential: The credential to use to authenticate to Azure Cosmos DB.
:param connection_string: The connection string to use to authenticate.
:param ttl: The time to live (in seconds) to use for documents in the container.
:param cosmos_client_kwargs: Additional kwargs to pass to the CosmosClient.
"""
self.cosmos_endpoint = cosmos_endpoint
self.cosmos_database = cosmos_database
self.cosmos_container = cosmos_container
self.credential = credential
self.conn_string = connection_string
self.session_id = session_id
self.user_id = user_id
self.ttl = ttl
self.messages: List[BaseMessage] = []
try:
from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501
CosmosClient,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
"Please install it with `pip install azure-cosmos`."
) from exc
if self.credential:
self._client = CosmosClient(
url=self.cosmos_endpoint,
credential=self.credential,
**cosmos_client_kwargs or {},
)
elif self.conn_string:
self._client = CosmosClient.from_connection_string(
conn_str=self.conn_string,
**cosmos_client_kwargs or {},
)
else:
raise ValueError("Either a connection string or a credential must be set.")
self._container: Optional[ContainerProxy] = None
def prepare_cosmos(self) -> None:
"""Prepare the CosmosDB client.
Use this function or the context manager to make sure your database is ready.
"""
try:
from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501
PartitionKey,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
"Please install it with `pip install azure-cosmos`."
) from exc
database = self._client.create_database_if_not_exists(self.cosmos_database)
self._container = database.create_container_if_not_exists(
self.cosmos_container,
partition_key=PartitionKey("/user_id"),
default_ttl=self.ttl,
)
self.load_messages()
def __enter__(self) -> "CosmosDBChatMessageHistory":
"""Context manager entry point."""
self._client.__enter__()
self.prepare_cosmos()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Context manager exit"""
self.upsert_messages()
self._client.__exit__(exc_type, exc_val, traceback)
def load_messages(self) -> None:
"""Retrieve the messages from Cosmos"""
if not self._container:
raise ValueError("Container not initialized")
try:
from azure.cosmos.exceptions import ( # pylint: disable=import-outside-toplevel # noqa: E501
CosmosHttpResponseError,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
"Please install it with `pip install azure-cosmos`."
) from exc
try:
item = self._container.read_item(
item=self.session_id, partition_key=self.user_id
)
except CosmosHttpResponseError:
logger.info("no session found")
return
if "messages" in item and len(item["messages"]) > 0:
self.messages = messages_from_dict(item["messages"])
def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
self.messages.append(message)
self.upsert_messages()
def upsert_messages(self) -> None:
"""Update the cosmosdb item."""
if not self._container:
raise ValueError("Container not initialized")
self._container.upsert_item(
body={
"id": self.session_id,
"user_id": self.user_id,
"messages": messages_to_dict(self.messages),
}
)
def clear(self) -> None:
"""Clear session memory from this memory and cosmos."""
self.messages = []
if self._container:
self._container.delete_item(
item=self.session_id, partition_key=self.user_id
)
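# Usage sketch (illustrative; the endpoint, database, container and connection string
# are placeholder assumptions). The context manager calls prepare_cosmos() on entry
# and upsert_messages() on exit.
#
#     from langchain.schema.messages import HumanMessage
#
#     history = CosmosDBChatMessageHistory(
#         cosmos_endpoint="https://<account>.documents.azure.com:443/",
#         cosmos_database="chat",
#         cosmos_container="messages",
#         session_id="session-1",
#         user_id="user-1",
#         connection_string="<connection-string>",
#     )
#     with history:
#         history.add_message(HumanMessage(content="hi"))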
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~tot~thought_generation.py | """
We provide two strategies for generating thoughts in the Tree of Thoughts (ToT)
framework to avoid repetition: sampling thoughts from a Chain-of-Thought prompt
(SampleCoTStrategy) and proposing several thoughts in one completion
(ProposePromptStrategy).
These strategies ensure that the language model generates diverse and
non-repeating thoughts, which are crucial for problem-solving tasks that require
exploration.
"""
from abc import abstractmethod
from typing import Any, Dict, List, Tuple
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain_experimental.pydantic_v1 import Field
from langchain_experimental.tot.prompts import COT_PROMPT, PROPOSE_PROMPT
class BaseThoughtGenerationStrategy(LLMChain):
"""
Base class for a thought generation strategy.
"""
c: int = 3
"""The number of children thoughts to propose at each step."""
@abstractmethod
def next_thought(
self,
problem_description: str,
thoughts_path: Tuple[str, ...] = (),
**kwargs: Any
) -> str:
"""
Generate the next thought given the problem description and the thoughts
generated so far.
"""
class SampleCoTStrategy(BaseThoughtGenerationStrategy):
"""
Sample thoughts from a Chain-of-Thought (CoT) prompt.
This strategy works better when the thought space is rich, such as when each
thought is a paragraph. Independent and identically distributed samples
lead to diversity, which helps to avoid repetition.
"""
prompt: BasePromptTemplate = COT_PROMPT
def next_thought(
self,
problem_description: str,
thoughts_path: Tuple[str, ...] = (),
**kwargs: Any
) -> str:
response_text = self.predict_and_parse(
problem_description=problem_description, thoughts=thoughts_path, **kwargs
)
return response_text if isinstance(response_text, str) else ""
class ProposePromptStrategy(BaseThoughtGenerationStrategy):
"""
Propose thoughts sequentially using a "propose prompt".
This strategy works better when the thought space is more constrained, such
as when each thought is just a word or a line. Proposing different thoughts
in the same prompt completion helps to avoid duplication.
"""
prompt: BasePromptTemplate = PROPOSE_PROMPT
tot_memory: Dict[Tuple[str, ...], List[str]] = Field(default_factory=dict)
def next_thought(
self,
problem_description: str,
thoughts_path: Tuple[str, ...] = (),
**kwargs: Any
) -> str:
if thoughts_path not in self.tot_memory or not self.tot_memory[thoughts_path]:
new_thoughts = self.predict_and_parse(
problem_description=problem_description,
thoughts=thoughts_path,
n=self.c,
**kwargs
)
if not new_thoughts:
return ""
if isinstance(new_thoughts, list):
self.tot_memory[thoughts_path] = new_thoughts[::-1]
else:
return ""
return self.tot_memory[thoughts_path].pop()
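# Usage sketch (illustrative; `llm` and the problem description are placeholder
# assumptions). Both strategies are LLMChains, so they are built from an LLM and
# queried one step at a time:
#
#     strategy = ProposePromptStrategy(llm=llm, c=3)
#     thought = strategy.next_thought(
#         problem_description="Solve the puzzle ...",
#         thoughts_path=("first partial solution",),
#     )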
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~markdown.py | from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class UnstructuredMarkdownLoader(UnstructuredFileLoader):
"""Load `Markdown` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredMarkdownLoader
loader = UnstructuredMarkdownLoader(
"example.md", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-md
"""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.partition.md import partition_md
# NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
# versions of unstructured like 0.4.17-dev1
_unstructured_version = __unstructured_version__.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
if unstructured_version < (0, 4, 16):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning markdown files is only supported in unstructured>=0.4.16."
)
return partition_md(filename=self.file_path, **self.unstructured_kwargs)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~vectorstores~test_usearch.py | """Test USearch functionality."""
import pytest
from langchain.docstore.document import Document
from langchain.vectorstores.usearch import USearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_usearch_from_texts() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings())
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_usearch_from_documents() -> None:
"""Test from_documents constructor."""
texts = ["foo", "bar", "baz"]
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = USearch.from_documents(docs, FakeEmbeddings())
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"a": "b"})]
def test_usearch_add_texts() -> None:
"""Test adding a new document"""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings())
docsearch.add_texts(["foo"])
output = docsearch.similarity_search("foo", k=2)
assert output == [Document(page_content="foo"), Document(page_content="foo")]
def test_ip() -> None:
"""Test inner product distance."""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric="ip")
output = docsearch.similarity_search_with_score("far", k=2)
_, score = output[1]
assert score == -8.0
def test_l2() -> None:
"""Test Flat L2 distance."""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric="l2_sq")
output = docsearch.similarity_search_with_score("far", k=2)
_, score = output[1]
assert score == 1.0
def test_cos() -> None:
"""Test cosine distance."""
texts = ["foo", "bar", "baz"]
docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric="cos")
output = docsearch.similarity_search_with_score("far", k=2)
_, score = output[1]
assert score == pytest.approx(0.05, abs=0.002)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~college_confidential.py | from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader
class CollegeConfidentialLoader(WebBaseLoader):
"""Load `College Confidential` webpages."""
def load(self) -> List[Document]:
"""Load webpages as Documents."""
soup = self.scrape()
text = soup.select_one("main[class='skin-handler']").text
metadata = {"source": self.web_path}
return [Document(page_content=text, metadata=metadata)]
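# Usage sketch (illustrative; the URL is a placeholder assumption):
#
#     loader = CollegeConfidentialLoader(
#         "https://www.collegeconfidential.com/colleges/brown-university/"
#     )
#     docs = loader.load()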
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~openllm.py | from __future__ import annotations
import copy
import json
import logging
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Literal,
Optional,
TypedDict,
Union,
overload,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.pydantic_v1 import PrivateAttr
if TYPE_CHECKING:
import openllm
ServerType = Literal["http", "grpc"]
class IdentifyingParams(TypedDict):
"""Parameters for identifying a model as a typed dict."""
model_name: str
model_id: Optional[str]
server_url: Optional[str]
server_type: Optional[ServerType]
embedded: bool
llm_kwargs: Dict[str, Any]
logger = logging.getLogger(__name__)
class OpenLLM(LLM):
"""OpenLLM, supporting both in-process model
instance and remote OpenLLM servers.
To use, you should have the openllm library installed:
.. code-block:: bash
pip install openllm
Learn more at: https://github.com/bentoml/openllm
Example running an LLM model locally managed by OpenLLM:
.. code-block:: python
from langchain.llms import OpenLLM
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
)
llm("What is the difference between a duck and a goose?")
For all available supported models, you can run 'openllm models'.
If you have a OpenLLM server running, you can also use it remotely:
.. code-block:: python
from langchain.llms import OpenLLM
llm = OpenLLM(server_url='http://localhost:3000')
llm("What is the difference between a duck and a goose?")
"""
model_name: Optional[str] = None
"""Model name to use. See 'openllm models' for all available models."""
model_id: Optional[str] = None
"""Model Id to use. If not provided, will use the default model for the model name.
See 'openllm models' for all available model variants."""
server_url: Optional[str] = None
"""Optional server URL that currently runs a LLMServer with 'openllm start'."""
server_type: ServerType = "http"
"""Optional server type. Either 'http' or 'grpc'."""
embedded: bool = True
"""Initialize this LLM instance in current process by default. Should
only be set to False when using in conjunction with a BentoML Service."""
llm_kwargs: Dict[str, Any]
"""Keyword arguments to be passed to openllm.LLM"""
_runner: Optional[openllm.LLMRunner] = PrivateAttr(default=None)
_client: Union[
openllm.client.HTTPClient, openllm.client.GrpcClient, None
] = PrivateAttr(default=None)
class Config:
extra = "forbid"
@overload
def __init__(
self,
model_name: Optional[str] = ...,
*,
model_id: Optional[str] = ...,
embedded: Literal[True, False] = ...,
**llm_kwargs: Any,
) -> None:
...
@overload
def __init__(
self,
*,
server_url: str = ...,
server_type: Literal["grpc", "http"] = ...,
**llm_kwargs: Any,
) -> None:
...
def __init__(
self,
model_name: Optional[str] = None,
*,
model_id: Optional[str] = None,
server_url: Optional[str] = None,
server_type: Literal["grpc", "http"] = "http",
embedded: bool = True,
**llm_kwargs: Any,
):
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm.'"
) from e
llm_kwargs = llm_kwargs or {}
if server_url is not None:
logger.debug("'server_url' is provided, returning a openllm.Client")
assert (
model_id is None and model_name is None
), "'server_url' and {'model_id', 'model_name'} are mutually exclusive"
client_cls = (
openllm.client.HTTPClient
if server_type == "http"
else openllm.client.GrpcClient
)
client = client_cls(server_url)
super().__init__(
**{
"server_url": server_url,
"server_type": server_type,
"llm_kwargs": llm_kwargs,
}
)
self._runner = None # type: ignore
self._client = client
else:
assert model_name is not None, "Must provide 'model_name' or 'server_url'"
# since the LLM are relatively huge, we don't actually want to convert the
# Runner with embedded when running the server. Instead, we will only set
# the init_local here so that LangChain users can still use the LLM
# in-process. Wrt to BentoML users, setting embedded=False is the expected
# behaviour to invoke the runners remotely.
# We need to also enable ensure_available to download and setup the model.
runner = openllm.Runner(
model_name=model_name,
model_id=model_id,
init_local=embedded,
ensure_available=True,
**llm_kwargs,
)
super().__init__(
**{
"model_name": model_name,
"model_id": model_id,
"embedded": embedded,
"llm_kwargs": llm_kwargs,
}
)
self._client = None # type: ignore
self._runner = runner
@property
def runner(self) -> openllm.LLMRunner:
"""
Get the underlying openllm.LLMRunner instance for integration with BentoML.
Example:
.. code-block:: python
llm = OpenLLM(
model_name='flan-t5',
model_id='google/flan-t5-large',
embedded=False,
)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
svc = bentoml.Service("langchain-openllm", runners=[llm.runner])
@svc.api(input=Text(), output=Text())
def chat(input_text: str):
return agent.run(input_text)
"""
if self._runner is None:
raise ValueError("OpenLLM must be initialized locally with 'model_name'")
return self._runner
@property
def _identifying_params(self) -> IdentifyingParams:
"""Get the identifying parameters."""
if self._client is not None:
self.llm_kwargs.update(self._client._config())
model_name = self._client._metadata()["model_name"]
model_id = self._client._metadata()["model_id"]
else:
if self._runner is None:
raise ValueError("Runner must be initialized.")
model_name = self.model_name
model_id = self.model_id
try:
self.llm_kwargs.update(
json.loads(self._runner.identifying_params["configuration"])
)
except (TypeError, json.JSONDecodeError):
pass
return IdentifyingParams(
server_url=self.server_url,
server_type=self.server_type,
embedded=self.embedded,
llm_kwargs=self.llm_kwargs,
model_name=model_name,
model_id=model_id,
)
@property
def _llm_type(self) -> str:
return "openllm_client" if self._client else "openllm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
res = self._client.generate(
prompt, **config.model_dump(flatten=True)
).responses[0]
else:
assert self._runner is not None
res = self._runner(prompt, **config.model_dump(flatten=True))
if isinstance(res, dict) and "text" in res:
return res["text"]
elif isinstance(res, str):
return res
else:
raise ValueError(
"Expected result to be a dict with key 'text' or a string. "
f"Received {res}"
)
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with "
"'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(
self._identifying_params["model_name"], **copied
)
if self._client:
async_client = openllm.client.AsyncHTTPClient(self.server_url)
res = (
await async_client.generate(prompt, **config.model_dump(flatten=True))
).responses[0]
else:
assert self._runner is not None
(
prompt,
generate_kwargs,
postprocess_kwargs,
) = self._runner.llm.sanitize_parameters(prompt, **kwargs)
generated_result = await self._runner.generate.async_run(
prompt, **generate_kwargs
)
res = self._runner.llm.postprocess_generate(
prompt, generated_result, **postprocess_kwargs
)
if isinstance(res, dict) and "text" in res:
return res["text"]
elif isinstance(res, str):
return res
else:
raise ValueError(
"Expected result to be a dict with key 'text' or a string. "
f"Received {res}"
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~openai_functions~openapi.py | from __future__ import annotations
import json
import re
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import requests
from requests import Response
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sequential import SequentialChain
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain.prompts import ChatPromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import APIOperation
from langchain.utilities.openapi import OpenAPISpec
from langchain.utils.input import get_colored_text
if TYPE_CHECKING:
from openapi_pydantic import Parameter
def _get_description(o: Any, prefer_short: bool) -> Optional[str]:
summary = getattr(o, "summary", None)
description = getattr(o, "description", None)
if prefer_short:
return summary or description
return description or summary
def _format_url(url: str, path_params: dict) -> str:
expected_path_param = re.findall(r"{(.*?)}", url)
new_params = {}
for param in expected_path_param:
clean_param = param.lstrip(".;").rstrip("*")
val = path_params[clean_param]
if isinstance(val, list):
if param[0] == ".":
sep = "." if param[-1] == "*" else ","
new_val = "." + sep.join(val)
elif param[0] == ";":
sep = f"{clean_param}=" if param[-1] == "*" else ","
new_val = f"{clean_param}=" + sep.join(val)
else:
new_val = ",".join(val)
elif isinstance(val, dict):
kv_sep = "=" if param[-1] == "*" else ","
kv_strs = [kv_sep.join((k, v)) for k, v in val.items()]
if param[0] == ".":
sep = "."
new_val = "."
elif param[0] == ";":
sep = ";"
new_val = ";"
else:
sep = ","
new_val = ""
new_val += sep.join(kv_strs)
else:
if param[0] == ".":
new_val = f".{val}"
elif param[0] == ";":
new_val = f";{clean_param}={val}"
else:
new_val = val
new_params[param] = new_val
return url.format(**new_params)
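# Example of the path expansion handled above (illustrative): simple parameters are
# substituted directly, while label (".") and matrix (";") style parameters get their
# OpenAPI-specific separators.
#
#     _format_url("/pets/{petId}", {"petId": 42})  # -> "/pets/42"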
def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec) -> dict:
properties = {}
required = []
for p in params:
if p.param_schema:
schema = spec.get_schema(p.param_schema)
else:
media_type_schema = list(p.content.values())[0].media_type_schema # type: ignore # noqa: E501
schema = spec.get_schema(media_type_schema)
if p.description and not schema.description:
schema.description = p.description
properties[p.name] = json.loads(schema.json(exclude_none=True))
if p.required:
required.append(p.name)
return {"type": "object", "properties": properties, "required": required}
def openapi_spec_to_openai_fn(
spec: OpenAPISpec,
) -> Tuple[List[Dict[str, Any]], Callable]:
"""Convert a valid OpenAPI spec to the JSON Schema format expected for OpenAI
functions.
Args:
spec: OpenAPI spec to convert.
Returns:
Tuple of the OpenAI functions JSON schema and a default function for executing
a request based on the OpenAI function schema.
"""
if not spec.paths:
return [], lambda: None
functions = []
_name_to_call_map = {}
for path in spec.paths:
path_params = {
(p.name, p.param_in): p for p in spec.get_parameters_for_path(path)
}
for method in spec.get_methods_for_path(path):
request_args = {}
op = spec.get_operation(path, method)
op_params = path_params.copy()
for param in spec.get_parameters_for_operation(op):
op_params[(param.name, param.param_in)] = param
params_by_type = defaultdict(list)
for name_loc, p in op_params.items():
params_by_type[name_loc[1]].append(p)
param_loc_to_arg_name = {
"query": "params",
"header": "headers",
"cookie": "cookies",
"path": "path_params",
}
for param_loc, arg_name in param_loc_to_arg_name.items():
if params_by_type[param_loc]:
request_args[arg_name] = _openapi_params_to_json_schema(
params_by_type[param_loc], spec
)
request_body = spec.get_request_body_for_operation(op)
# TODO: Support more MIME types.
if request_body and request_body.content:
media_types = {}
for media_type, media_type_object in request_body.content.items():
if media_type_object.media_type_schema:
schema = spec.get_schema(media_type_object.media_type_schema)
media_types[media_type] = json.loads(
schema.json(exclude_none=True)
)
if len(media_types) == 1:
media_type, schema_dict = list(media_types.items())[0]
key = "json" if media_type == "application/json" else "data"
request_args[key] = schema_dict
elif len(media_types) > 1:
request_args["data"] = {"anyOf": list(media_types.values())}
api_op = APIOperation.from_openapi_spec(spec, path, method)
fn = {
"name": api_op.operation_id,
"description": api_op.description,
"parameters": {
"type": "object",
"properties": request_args,
},
}
functions.append(fn)
_name_to_call_map[fn["name"]] = {
"method": method,
"url": api_op.base_url + api_op.path,
}
def default_call_api(
name: str,
fn_args: dict,
headers: Optional[dict] = None,
params: Optional[dict] = None,
**kwargs: Any,
) -> Any:
method = _name_to_call_map[name]["method"]
url = _name_to_call_map[name]["url"]
path_params = fn_args.pop("path_params", {})
url = _format_url(url, path_params)
if "data" in fn_args and isinstance(fn_args["data"], dict):
fn_args["data"] = json.dumps(fn_args["data"], ensure_ascii=False)
_kwargs = {**fn_args, **kwargs}
if headers is not None:
if "headers" in _kwargs:
_kwargs["headers"].update(headers)
else:
_kwargs["headers"] = headers
if params is not None:
if "params" in _kwargs:
_kwargs["params"].update(params)
else:
_kwargs["params"] = params
return requests.request(method, url, **_kwargs)
return functions, default_call_api
class SimpleRequestChain(Chain):
"""Chain for making a simple request to an API endpoint."""
request_method: Callable
"""Method to use for making the request."""
output_key: str = "response"
"""Key to use for the output of the request."""
input_key: str = "function"
"""Key to use for the input of the request."""
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
return [self.output_key]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the logic of this chain and return the output."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
name = inputs[self.input_key].pop("name")
args = inputs[self.input_key].pop("arguments")
_pretty_name = get_colored_text(name, "green")
_pretty_args = get_colored_text(
json.dumps(args, indent=2, ensure_ascii=False), "green"
)
_text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args
_run_manager.on_text(_text)
api_response: Response = self.request_method(name, args)
if api_response.status_code != 200:
response = (
f"{api_response.status_code}: {api_response.reason}"
+ f"\nFor {name} "
+ f"Called with args: {args.get('params','')}"
)
else:
try:
response = api_response.json()
except Exception: # noqa: E722
response = api_response.text
return {self.output_key: response}
def get_openapi_chain(
spec: Union[OpenAPISpec, str],
llm: Optional[BaseLanguageModel] = None,
prompt: Optional[BasePromptTemplate] = None,
request_chain: Optional[Chain] = None,
llm_chain_kwargs: Optional[Dict] = None,
verbose: bool = False,
headers: Optional[Dict] = None,
params: Optional[Dict] = None,
**kwargs: Any,
) -> SequentialChain:
"""Create a chain for querying an API from a OpenAPI spec.
Args:
spec: OpenAPISpec or url/file/text string corresponding to one.
llm: language model, should be an OpenAI function-calling model, e.g.
`ChatOpenAI(model="gpt-3.5-turbo-0613")`.
prompt: Main prompt template to use.
request_chain: Chain for taking the functions output and executing the request.
"""
if isinstance(spec, str):
for conversion in (
OpenAPISpec.from_url,
OpenAPISpec.from_file,
OpenAPISpec.from_text,
):
try:
spec = conversion(spec) # type: ignore[arg-type]
break
except ImportError as e:
raise e
except Exception: # noqa: E722
pass
if isinstance(spec, str):
raise ValueError(f"Unable to parse spec from source {spec}")
openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
llm = llm or ChatOpenAI(
model="gpt-3.5-turbo-0613",
)
prompt = prompt or ChatPromptTemplate.from_template(
"Use the provided API's to respond to this user query:\n\n{query}"
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs={"functions": openai_fns},
output_parser=JsonOutputFunctionsParser(args_only=False),
output_key="function",
verbose=verbose,
**(llm_chain_kwargs or {}),
)
request_chain = request_chain or SimpleRequestChain(
request_method=lambda name, args: call_api_fn(
name, args, headers=headers, params=params
),
verbose=verbose,
)
return SequentialChain(
chains=[llm_chain, request_chain],
input_variables=llm_chain.input_keys,
output_variables=["response"],
verbose=verbose,
**kwargs,
)
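# Usage sketch (illustrative; the spec URL is a placeholder assumption, and the
# default LLM requires an OpenAI function-calling model plus OPENAI_API_KEY):
#
#     chain = get_openapi_chain("https://example.com/openapi.json")
#     result = chain({"query": "What pets are available?"})
#     print(result["response"])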
| [
"Use the provided API's to respond to this user query:\n\n{query}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~sqlitevss.py | from __future__ import annotations
import json
import logging
import sqlite3
import warnings
from typing import (
Any,
Iterable,
List,
Optional,
Tuple,
Type,
)
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
logger = logging.getLogger(__name__)
class SQLiteVSS(VectorStore):
"""Wrapper around SQLite with vss extension as a vector database.
To use, you should have the ``sqlite-vss`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import SQLiteVSS
from langchain.embeddings.openai import OpenAIEmbeddings
...
"""
def __init__(
self,
table: str,
connection: Optional[sqlite3.Connection],
embedding: Embeddings,
db_file: str = "vss.db",
):
"""Initialize with sqlite client with vss extension."""
try:
import sqlite_vss # noqa # pylint: disable=unused-import
except ImportError:
raise ImportError(
"Could not import sqlite-vss python package. "
"Please install it with `pip install sqlite-vss`."
)
if not connection:
connection = self.create_connection(db_file)
if not isinstance(embedding, Embeddings):
warnings.warn("embeddings input must be Embeddings object.")
self._connection = connection
self._table = table
self._embedding = embedding
self.create_table_if_not_exists()
def create_table_if_not_exists(self) -> None:
self._connection.execute(
f"""
CREATE TABLE IF NOT EXISTS {self._table}
(
rowid INTEGER PRIMARY KEY AUTOINCREMENT,
text TEXT,
metadata BLOB,
text_embedding BLOB
)
;
"""
)
self._connection.execute(
f"""
CREATE VIRTUAL TABLE IF NOT EXISTS vss_{self._table} USING vss0(
text_embedding({self.get_dimensionality()})
);
"""
)
self._connection.execute(
f"""
CREATE TRIGGER IF NOT EXISTS embed_text
AFTER INSERT ON {self._table}
BEGIN
INSERT INTO vss_{self._table}(rowid, text_embedding)
VALUES (new.rowid, new.text_embedding)
;
END;
"""
)
self._connection.commit()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add more texts to the vectorstore index.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
max_id = self._connection.execute(
f"SELECT max(rowid) as rowid FROM {self._table}"
).fetchone()["rowid"]
if max_id is None: # no text added yet
max_id = 0
embeds = self._embedding.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
data_input = [
(text, json.dumps(metadata), json.dumps(embed))
for text, metadata, embed in zip(texts, metadatas, embeds)
]
self._connection.executemany(
f"INSERT INTO {self._table}(text, metadata, text_embedding) "
f"VALUES (?,?,?)",
data_input,
)
self._connection.commit()
# pulling every ids we just inserted
results = self._connection.execute(
f"SELECT rowid FROM {self._table} WHERE rowid > {max_id}"
)
return [row["rowid"] for row in results]
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
sql_query = f"""
SELECT
text,
metadata,
distance
FROM {self._table} e
INNER JOIN vss_{self._table} v on v.rowid = e.rowid
WHERE vss_search(
v.text_embedding,
vss_search_params('{json.dumps(embedding)}', {k})
)
"""
cursor = self._connection.cursor()
cursor.execute(sql_query)
results = cursor.fetchall()
documents = []
for row in results:
metadata = json.loads(row["metadata"]) or {}
doc = Document(page_content=row["text"], metadata=metadata)
documents.append((doc, row["distance"]))
return documents
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query."""
embedding = self._embedding.embed_query(query)
documents = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k
)
return [doc for doc, _ in documents]
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query."""
embedding = self._embedding.embed_query(query)
documents = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k
)
return documents
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
documents = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k
)
return [doc for doc, _ in documents]
@classmethod
def from_texts(
cls: Type[SQLiteVSS],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
table: str = "langchain",
db_file: str = "vss.db",
**kwargs: Any,
) -> SQLiteVSS:
"""Return VectorStore initialized from texts and embeddings."""
connection = cls.create_connection(db_file)
vss = cls(
table=table, connection=connection, db_file=db_file, embedding=embedding
)
vss.add_texts(texts=texts, metadatas=metadatas)
return vss
@staticmethod
def create_connection(db_file: str) -> sqlite3.Connection:
import sqlite_vss
connection = sqlite3.connect(db_file)
connection.row_factory = sqlite3.Row
connection.enable_load_extension(True)
sqlite_vss.load(connection)
connection.enable_load_extension(False)
return connection
def get_dimensionality(self) -> int:
"""
Function that does a dummy embedding to figure out how many dimensions
this embedding function returns. Needed for the virtual table DDL.
"""
dummy_text = "This is a dummy text"
dummy_embedding = self._embedding.embed_query(dummy_text)
return len(dummy_embedding)
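# Usage sketch (illustrative; the embedding model and db file path are placeholder
# assumptions - any Embeddings implementation works):
#
#     from langchain.embeddings.openai import OpenAIEmbeddings
#
#     db = SQLiteVSS.from_texts(
#         texts=["Ketanji Brown Jackson was nominated to the Supreme Court."],
#         embedding=OpenAIEmbeddings(),
#         table="state_of_union",
#         db_file="/tmp/vss.db",
#     )
#     docs = db.similarity_search("Who was nominated to the Supreme Court?", k=1)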
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~myscale.py | from __future__ import annotations
import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.pydantic_v1 import BaseSettings
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
logger = logging.getLogger()
def has_mul_sub_str(s: str, *args: Any) -> bool:
"""
Check if a string contains multiple substrings.
Args:
s: string to check.
*args: substrings to check.
Returns:
True if all substrings are in the string, False otherwise.
"""
for a in args:
if a not in s:
return False
return True
class MyScaleSettings(BaseSettings):
"""MyScale client configuration.
Attributes:
myscale_host (str) : A URL to connect to MyScale backend.
Defaults to 'localhost'.
myscale_port (int) : URL port to connect with HTTP. Defaults to 8443.
username (str) : Username to login. Defaults to None.
password (str) : Password to login. Defaults to None.
index_type (str): index type string.
index_param (dict): index build parameter.
database (str) : Database name to find the table. Defaults to 'default'.
table (str) : Table name to operate on.
Defaults to 'vector_table'.
metric (str) : Metric to compute distance,
supported are ('L2', 'Cosine', 'IP'). Defaults to 'Cosine'.
column_map (Dict) : Column type map to project column name onto langchain
semantics. Must have keys: `text`, `id`, `vector`,
must be same size to number of columns. For example:
.. code-block:: python
{
'id': 'text_id',
'vector': 'text_embedding',
'text': 'text_plain',
'metadata': 'metadata_dictionary_in_json',
}
Defaults to identity map.
"""
host: str = "localhost"
port: int = 8443
username: Optional[str] = None
password: Optional[str] = None
index_type: str = "MSTG"
index_param: Optional[Dict[str, str]] = None
column_map: Dict[str, str] = {
"id": "id",
"text": "text",
"vector": "vector",
"metadata": "metadata",
}
database: str = "default"
table: str = "langchain"
metric: str = "Cosine"
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
class Config:
env_file = ".env"
env_prefix = "myscale_"
env_file_encoding = "utf-8"
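# Configuration sketch (illustrative; the host and credentials are placeholder
# assumptions): every field above can also be supplied through environment variables
# with the "myscale_" prefix (or a .env file), e.g.
#
#     export MYSCALE_HOST=msc-xxxx.us-east-1.aws.myscale.com
#     export MYSCALE_PORT=443
#     export MYSCALE_USERNAME=<user>
#     export MYSCALE_PASSWORD=<password>
#
# or passed directly: MyScaleSettings(host="...", username="...", password="...").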
class MyScale(VectorStore):
"""`MyScale` vector store.
You need a `clickhouse-connect` python package, and a valid account
to connect to MyScale.
MyScale can not only search with simple vector indexes.
It also supports a complex query with multiple conditions,
constraints and even sub-queries.
For more information, please visit
[myscale official site](https://docs.myscale.com/en/overview/)
"""
def __init__(
self,
embedding: Embeddings,
config: Optional[MyScaleSettings] = None,
**kwargs: Any,
) -> None:
"""MyScale Wrapper to LangChain
embedding (Embeddings):
config (MyScaleSettings): Configuration to MyScale Client
Other keyword arguments will pass into
[clickhouse-connect](https://docs.myscale.com/)
"""
try:
from clickhouse_connect import get_client
except ImportError:
raise ImportError(
"Could not import clickhouse connect python package. "
"Please install it with `pip install clickhouse-connect`."
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
# Just in case if tqdm is not installed
self.pgbar = lambda x, **kwargs: x
super().__init__()
if config is not None:
self.config = config
else:
self.config = MyScaleSettings()
assert self.config
assert self.config.host and self.config.port
assert (
self.config.column_map
and self.config.database
and self.config.table
and self.config.metric
)
for k in ["id", "vector", "text", "metadata"]:
assert k in self.config.column_map
assert self.config.metric.upper() in ["IP", "COSINE", "L2"]
if self.config.metric in ["ip", "cosine", "l2"]:
logger.warning(
"Lower case metric types will be deprecated "
"the future. Please use one of ('IP', 'Cosine', 'L2')"
)
# initialize the schema
dim = len(embedding.embed_query("try this out"))
index_params = (
", " + ",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()])
if self.config.index_param
else ""
)
schema_ = f"""
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} String,
{self.config.column_map['text']} String,
{self.config.column_map['vector']} Array(Float32),
{self.config.column_map['metadata']} JSON,
CONSTRAINT cons_vec_len CHECK length(\
{self.config.column_map['vector']}) = {dim},
VECTOR INDEX vidx {self.config.column_map['vector']} \
TYPE {self.config.index_type}(\
'metric_type={self.config.metric}'{index_params})
) ENGINE = MergeTree ORDER BY {self.config.column_map['id']}
"""
self.dim = dim
self.BS = "\\"
self.must_escape = ("\\", "'")
self._embeddings = embedding
self.dist_order = (
"ASC" if self.config.metric.upper() in ["COSINE", "L2"] else "DESC"
)
# Create a connection to myscale
self.client = get_client(
host=self.config.host,
port=self.config.port,
username=self.config.username,
password=self.config.password,
**kwargs,
)
self.client.command("SET allow_experimental_object_type=1")
self.client.command(schema_)
@property
def embeddings(self) -> Embeddings:
return self._embeddings
def escape_str(self, value: str) -> str:
return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)
def _build_istr(self, transac: Iterable, column_names: Iterable[str]) -> str:
ks = ",".join(column_names)
_data = []
for n in transac:
n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n])
_data.append(f"({n})")
i_str = f"""
INSERT INTO TABLE
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None:
_i_str = self._build_istr(transac, column_names)
self.client.command(_i_str)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
batch_size: int = 32,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
ids: Optional list of ids to associate with the texts.
batch_size: Batch size of insertion
            metadatas: Optional list of metadata dicts to attach to the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents
ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts]
colmap_ = self.config.column_map
transac = []
column_names = {
colmap_["id"]: ids,
colmap_["text"]: texts,
colmap_["vector"]: map(self._embeddings.embed_query, texts),
}
metadatas = metadatas or [{} for _ in texts]
column_names[colmap_["metadata"]] = map(json.dumps, metadatas)
assert len(set(colmap_) - set(column_names)) >= 0
keys, values = zip(*column_names.items())
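        # Rows are accumulated into batches; each full batch is flushed on a
        # background thread so embedding the next batch overlaps with the
        # previous INSERT.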
try:
t = None
for v in self.pgbar(
zip(*values), desc="Inserting data...", total=len(metadatas)
):
assert len(v[keys.index(self.config.column_map["vector"])]) == self.dim
transac.append(v)
if len(transac) == batch_size:
if t:
t.join()
t = Thread(target=self._insert, args=[transac, keys])
t.start()
transac = []
if len(transac) > 0:
if t:
t.join()
self._insert(transac, keys)
return [i for i in ids]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
@classmethod
def from_texts(
cls,
texts: Iterable[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
config: Optional[MyScaleSettings] = None,
text_ids: Optional[Iterable[str]] = None,
batch_size: int = 32,
**kwargs: Any,
) -> MyScale:
"""Create Myscale wrapper with existing texts
Args:
texts (Iterable[str]): List or tuple of strings to be added
embedding (Embeddings): Function to extract text embedding
config (MyScaleSettings, Optional): Myscale configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
            batch_size (int, optional): Batch size when transmitting data to MyScale.
                Defaults to 32.
            metadatas (List[dict], optional): Metadata for each text. Defaults to None.
Other keyword arguments will pass into
[clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)
Returns:
MyScale Index
"""
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
return ctx
def __repr__(self) -> str:
"""Text representation for myscale, prints backends, username and schemas.
Easy to use with `str(Myscale())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ "
_repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
_repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n"
_repr += "-" * 51 + "\n"
for r in self.client.query(
f"DESC {self.config.database}.{self.config.table}"
).named_results():
_repr += (
f"|\033[94m{r['name']:24s}\033[0m|\033[96m{r['type']:24s}\033[0m|\n"
)
_repr += "-" * 51 + "\n"
return _repr
def _build_qstr(
self, q_emb: List[float], topk: int, where_str: Optional[str] = None
) -> str:
q_emb_str = ",".join(map(str, q_emb))
if where_str:
where_str = f"PREWHERE {where_str}"
else:
where_str = ""
q_str = f"""
SELECT {self.config.column_map['text']},
{self.config.column_map['metadata']}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk}
"""
return q_str
def similarity_search(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Document]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(
self._embeddings.embed_query(query), k, where_str, **kwargs
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search with MyScale by vectors
Args:
            embedding (List[float]): Embedding vector to search with.
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
            List[Document]: List of documents most similar to the embedding.
"""
q_str = self._build_qstr(embedding, k, where_str)
try:
return [
Document(
page_content=r[self.config.column_map["text"]],
metadata=r[self.config.column_map["metadata"]],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of documents most similar to the query text
and cosine distance in float for each.
Lower score represents more similarity.
"""
q_str = self._build_qstr(self._embeddings.embed_query(query), k, where_str)
try:
return [
(
Document(
page_content=r[self.config.column_map["text"]],
metadata=r[self.config.column_map["metadata"]],
),
r["dist"],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def drop(self) -> None:
"""
Helper function: Drop data
"""
self.client.command(
f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}"
)
def delete(
self,
ids: Optional[List[str]] = None,
where_str: Optional[str] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
assert not (
ids is None and where_str is None
), "You need to specify where to be deleted! Either with `ids` or `where_str`"
conds = []
if ids:
conds.extend([f"{self.config.column_map['id']} = '{id}'" for id in ids])
if where_str:
conds.append(where_str)
assert len(conds) > 0
where_str_final = " AND ".join(conds)
qstr = (
f"DELETE FROM {self.config.database}.{self.config.table} "
f"WHERE {where_str_final}"
)
try:
self.client.command(qstr)
return True
except Exception as e:
logger.error(str(e))
return False
@property
def metadata_column(self) -> str:
return self.config.column_map["metadata"]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~clarifai.py | from __future__ import annotations
import logging
import os
import traceback
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Iterable, List, Optional, Tuple
import requests
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
logger = logging.getLogger(__name__)
class Clarifai(VectorStore):
"""`Clarifai AI` vector store.
To use, you should have the ``clarifai`` python package installed.
Example:
.. code-block:: python
            from langchain.vectorstores import Clarifai
            clarifai_vector_db = Clarifai(
                user_id="USER_ID", app_id="APP_ID", pat="CLARIFAI_PAT"
            )
"""
def __init__(
self,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
pat: Optional[str] = None,
number_of_docs: Optional[int] = None,
api_base: Optional[str] = None,
) -> None:
"""Initialize with Clarifai client.
Args:
user_id (Optional[str], optional): User ID. Defaults to None.
app_id (Optional[str], optional): App ID. Defaults to None.
pat (Optional[str], optional): Personal access token. Defaults to None.
number_of_docs (Optional[int], optional): Number of documents to return
during vector search. Defaults to None.
api_base (Optional[str], optional): API base. Defaults to None.
Raises:
ValueError: If user ID, app ID or personal access token is not provided.
"""
try:
from clarifai.auth.helper import DEFAULT_BASE, ClarifaiAuthHelper
from clarifai.client import create_stub
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
        if api_base is None:
            self._api_base = DEFAULT_BASE
        else:
            self._api_base = api_base
self._user_id = user_id or os.environ.get("CLARIFAI_USER_ID")
self._app_id = app_id or os.environ.get("CLARIFAI_APP_ID")
self._pat = pat or os.environ.get("CLARIFAI_PAT")
if self._user_id is None or self._app_id is None or self._pat is None:
raise ValueError(
"Could not find CLARIFAI_USER_ID, CLARIFAI_APP_ID or\
CLARIFAI_PAT in your environment. "
"Please set those env variables with a valid user ID, \
app ID and personal access token \
from https://clarifai.com/settings/security."
)
self._auth = ClarifaiAuthHelper(
user_id=self._user_id,
app_id=self._app_id,
pat=self._pat,
base=self._api_base,
)
self._stub = create_stub(self._auth)
self._userDataObject = self._auth.get_user_app_id_proto()
self._number_of_docs = number_of_docs
def _post_texts_as_inputs(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> List[str]:
"""Post text to Clarifai and return the ID of the input.
Args:
text (str): Text to post.
metadata (dict): Metadata to post.
Returns:
str: ID of the input.
"""
try:
from clarifai_grpc.grpc.api import resources_pb2, service_pb2
from clarifai_grpc.grpc.api.status import status_code_pb2
from google.protobuf.struct_pb2 import Struct # type: ignore
except ImportError as e:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
) from e
if metadatas is not None:
assert len(list(texts)) == len(
metadatas
), "Number of texts and metadatas should be the same."
inputs = []
for idx, text in enumerate(texts):
if metadatas is not None:
input_metadata = Struct()
input_metadata.update(metadatas[idx])
inputs.append(
resources_pb2.Input(
data=resources_pb2.Data(
text=resources_pb2.Text(raw=text),
metadata=input_metadata,
)
)
)
post_inputs_response = self._stub.PostInputs(
service_pb2.PostInputsRequest(
user_app_id=self._userDataObject,
inputs=inputs,
)
)
if post_inputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_inputs_response.status)
raise Exception(
"Post inputs failed, status: " + post_inputs_response.status.description
)
input_ids = []
for input in post_inputs_response.inputs:
input_ids.append(input.id)
return input_ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts to the Clarifai vectorstore. This will push the text
to a Clarifai application.
Application use a base workflow that create and store embedding for each text.
Make sure you are using a base workflow that is compatible with text
(such as Language Understanding).
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
ltexts = list(texts)
length = len(ltexts)
assert length > 0, "No texts provided to add to the vectorstore."
if metadatas is not None:
assert length == len(
metadatas
), "Number of texts and metadatas should be the same."
batch_size = 32
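        # Post the texts in batches of 32 inputs per API call and collect the
        # returned input IDs; a failed batch is logged and skipped.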
input_ids = []
for idx in range(0, length, batch_size):
try:
batch_texts = ltexts[idx : idx + batch_size]
batch_metadatas = (
metadatas[idx : idx + batch_size] if metadatas else None
)
result_ids = self._post_texts_as_inputs(batch_texts, batch_metadatas)
input_ids.extend(result_ids)
logger.debug(f"Input {result_ids} posted successfully.")
except Exception as error:
logger.warning(f"Post inputs failed: {error}")
traceback.print_exc()
return input_ids
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run similarity search with score using Clarifai.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
try:
from clarifai_grpc.grpc.api import resources_pb2, service_pb2
from clarifai_grpc.grpc.api.status import status_code_pb2
from google.protobuf import json_format # type: ignore
from google.protobuf.struct_pb2 import Struct # type: ignore
except ImportError as e:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
) from e
# Get number of docs to return
if self._number_of_docs is not None:
k = self._number_of_docs
req = service_pb2.PostAnnotationsSearchesRequest(
user_app_id=self._userDataObject,
searches=[
resources_pb2.Search(
query=resources_pb2.Query(
ranks=[
resources_pb2.Rank(
annotation=resources_pb2.Annotation(
data=resources_pb2.Data(
text=resources_pb2.Text(raw=query),
)
)
)
]
)
)
],
pagination=service_pb2.Pagination(page=1, per_page=k),
)
# Add filter by metadata if provided.
if filter is not None:
search_metadata = Struct()
search_metadata.update(filter)
f = req.searches[0].query.filters.add()
f.annotation.data.metadata.update(search_metadata)
post_annotations_searches_response = self._stub.PostAnnotationsSearches(req)
# Check if search was successful
if post_annotations_searches_response.status.code != status_code_pb2.SUCCESS:
raise Exception(
"Post searches failed, status: "
+ post_annotations_searches_response.status.description
)
# Retrieve hits
hits = post_annotations_searches_response.hits
executor = ThreadPoolExecutor(max_workers=10)
def hit_to_document(hit: resources_pb2.Hit) -> Tuple[Document, float]:
metadata = json_format.MessageToDict(hit.input.data.metadata)
h = {"Authorization": f"Key {self._auth.pat}"}
request = requests.get(hit.input.data.text.url, headers=h)
# override encoding by real educated guess as provided by chardet
request.encoding = request.apparent_encoding
requested_text = request.text
logger.debug(
f"\tScore {hit.score:.2f} for annotation: {hit.annotation.id}\
off input: {hit.input.id}, text: {requested_text[:125]}"
)
return (Document(page_content=requested_text, metadata=metadata), hit.score)
# Iterate over hits and retrieve metadata and text
futures = [executor.submit(hit_to_document, hit) for hit in hits]
docs_and_scores = [future.result() for future in futures]
return docs_and_scores
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search using Clarifai.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
            List of Documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, **kwargs)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
pat: Optional[str] = None,
number_of_docs: Optional[int] = None,
api_base: Optional[str] = None,
**kwargs: Any,
) -> Clarifai:
"""Create a Clarifai vectorstore from a list of texts.
Args:
user_id (str): User ID.
app_id (str): App ID.
texts (List[str]): List of texts to add.
pat (Optional[str]): Personal access token. Defaults to None.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
api_base (Optional[str]): API base. Defaults to None.
metadatas (Optional[List[dict]]): Optional list of metadatas.
Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
"""
clarifai_vector_db = cls(
user_id=user_id,
app_id=app_id,
pat=pat,
number_of_docs=number_of_docs,
api_base=api_base,
)
clarifai_vector_db.add_texts(texts=texts, metadatas=metadatas)
return clarifai_vector_db
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Optional[Embeddings] = None,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
pat: Optional[str] = None,
number_of_docs: Optional[int] = None,
api_base: Optional[str] = None,
**kwargs: Any,
) -> Clarifai:
"""Create a Clarifai vectorstore from a list of documents.
Args:
user_id (str): User ID.
app_id (str): App ID.
documents (List[Document]): List of documents to add.
pat (Optional[str]): Personal access token. Defaults to None.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
api_base (Optional[str]): API base. Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
user_id=user_id,
app_id=app_id,
texts=texts,
pat=pat,
number_of_docs=number_of_docs,
api_base=api_base,
metadatas=metadatas,
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~azureml_endpoint.py | import json
import urllib.request
import warnings
from abc import abstractmethod
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import BaseModel, validator
from langchain.utils import get_from_dict_or_env
class AzureMLEndpointClient(object):
"""AzureML Managed Endpoint client."""
def __init__(
self, endpoint_url: str, endpoint_api_key: str, deployment_name: str = ""
) -> None:
"""Initialize the class."""
if not endpoint_api_key or not endpoint_url:
raise ValueError(
"""A key/token and REST endpoint should
be provided to invoke the endpoint"""
)
self.endpoint_url = endpoint_url
self.endpoint_api_key = endpoint_api_key
self.deployment_name = deployment_name
def call(self, body: bytes, **kwargs: Any) -> bytes:
"""call."""
# The azureml-model-deployment header will force the request to go to a
# specific deployment. Remove this header to have the request observe the
# endpoint traffic rules.
headers = {
"Content-Type": "application/json",
"Authorization": ("Bearer " + self.endpoint_api_key),
}
if self.deployment_name != "":
headers["azureml-model-deployment"] = self.deployment_name
req = urllib.request.Request(self.endpoint_url, body, headers)
response = urllib.request.urlopen(req, timeout=kwargs.get("timeout", 50))
result = response.read()
return result
class ContentFormatterBase:
"""Transform request and response of AzureML endpoint to match with
required schema.
"""
"""
Example:
.. code-block:: python
class ContentFormatter(ContentFormatterBase):
content_type = "application/json"
accepts = "application/json"
def format_request_payload(
self,
prompt: str,
model_kwargs: Dict
) -> bytes:
input_str = json.dumps(
{
"inputs": {"input_string": [prompt]},
"parameters": model_kwargs,
}
)
return str.encode(input_str)
def format_response_payload(self, output: str) -> str:
response_json = json.loads(output)
return response_json[0]["0"]
"""
content_type: Optional[str] = "application/json"
"""The MIME type of the input data passed to the endpoint"""
accepts: Optional[str] = "application/json"
"""The MIME type of the response data returned from the endpoint"""
@staticmethod
def escape_special_characters(prompt: str) -> str:
"""Escapes any special characters in `prompt`"""
escape_map = {
"\\": "\\\\",
'"': '\\"',
"\b": "\\b",
"\f": "\\f",
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
}
# Replace each occurrence of the specified characters with escaped versions
for escape_sequence, escaped_sequence in escape_map.items():
prompt = prompt.replace(escape_sequence, escaped_sequence)
return prompt
@abstractmethod
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request body according to the input schema of
the model. Returns bytes or seekable file like object in the
format specified in the content_type request header.
"""
@abstractmethod
def format_response_payload(self, output: bytes) -> str:
"""Formats the response body according to the output
schema of the model. Returns the data type that is
received from the response.
"""
class GPT2ContentFormatter(ContentFormatterBase):
"""Content handler for GPT2"""
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{"inputs": {"input_string": [f'"{prompt}"']}, "parameters": model_kwargs}
)
return str.encode(request_payload)
def format_response_payload(self, output: bytes) -> str:
return json.loads(output)[0]["0"]
class OSSContentFormatter(GPT2ContentFormatter):
"""Deprecated: Kept for backwards compatibility
Content handler for LLMs from the OSS catalog."""
content_formatter: Any = None
def __init__(self) -> None:
super().__init__()
warnings.warn(
"""`OSSContentFormatter` will be deprecated in the future.
Please use `GPT2ContentFormatter` instead.
"""
)
class HFContentFormatter(ContentFormatterBase):
"""Content handler for LLMs from the HuggingFace catalog."""
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{"inputs": [f'"{prompt}"'], "parameters": model_kwargs}
)
return str.encode(request_payload)
def format_response_payload(self, output: bytes) -> str:
return json.loads(output)[0]["generated_text"]
class DollyContentFormatter(ContentFormatterBase):
"""Content handler for the Dolly-v2-12b model"""
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{
"input_data": {"input_string": [f'"{prompt}"']},
"parameters": model_kwargs,
}
)
return str.encode(request_payload)
def format_response_payload(self, output: bytes) -> str:
return json.loads(output)[0]
class LlamaContentFormatter(ContentFormatterBase):
"""Content formatter for LLaMa"""
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request according to the chosen api"""
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
{
"input_data": {
"input_string": [f'"{prompt}"'],
"parameters": model_kwargs,
}
}
)
return str.encode(request_payload)
def format_response_payload(self, output: bytes) -> str:
"""Formats response"""
return json.loads(output)[0]["0"]
class AzureMLOnlineEndpoint(LLM, BaseModel):
"""Azure ML Online Endpoint models.
Example:
.. code-block:: python
azure_llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_key="my-api-key",
content_formatter=content_formatter,
)
""" # noqa: E501
endpoint_url: str = ""
"""URL of pre-existing Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_URL`."""
endpoint_api_key: str = ""
"""Authentication Key for Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_API_KEY`."""
deployment_name: str = ""
"""Deployment Name for Endpoint. NOT REQUIRED to call endpoint. Should be passed
to constructor or specified as env var `AZUREML_DEPLOYMENT_NAME`."""
http_client: Any = None #: :meta private:
content_formatter: Any = None
"""The content formatter that provides an input and output
transform function to handle formats between the LLM and
the endpoint"""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
@validator("http_client", always=True, allow_reuse=True)
@classmethod
def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient:
"""Validate that api key and python package exists in environment."""
endpoint_key = get_from_dict_or_env(
values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY"
)
endpoint_url = get_from_dict_or_env(
values, "endpoint_url", "AZUREML_ENDPOINT_URL"
)
deployment_name = get_from_dict_or_env(
values, "deployment_name", "AZUREML_DEPLOYMENT_NAME", ""
)
http_client = AzureMLEndpointClient(endpoint_url, endpoint_key, deployment_name)
return http_client
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"deployment_name": self.deployment_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azureml_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to an AzureML Managed Online endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
request_payload = self.content_formatter.format_request_payload(
prompt, _model_kwargs
)
response_payload = self.http_client.call(request_payload, **kwargs)
generated_text = self.content_formatter.format_response_payload(
response_payload
)
return generated_text
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~prompts~test_utils.py | """Test functionality related to prompt utils."""
from langchain.prompts.example_selector.semantic_similarity import sorted_values
def test_sorted_vals() -> None:
"""Test sorted values from dictionary."""
test_dict = {"key2": "val2", "key1": "val1"}
expected_response = ["val1", "val2"]
assert sorted_values(test_dict) == expected_response
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~conversational~output_parser.py | import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
class ConvoOutputParser(AgentOutputParser):
"""Output parser for the conversational agent."""
ai_prefix: str = "AI"
"""Prefix to use before AI output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
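        # Drop any trailing "Observation:" block the model may have hallucinated,
        # then either finish (reply starts with the AI prefix) or extract an
        # "Action: <tool>\nAction Input: <input>" pair.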
text = re.sub(r"Observation:.*", "", text, 0, re.MULTILINE | re.DOTALL)
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
)
regex = r"Action: (.*?)[\n]*Action Input: (.*)"
match = re.search(regex, text)
if not match:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
@property
def _type(self) -> str:
return "conversational"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~jira.py | """Util that calls Jira."""
from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
# TODO: think about error handling, more specific api specs, and jql/project limits
class JiraAPIWrapper(BaseModel):
"""Wrapper for Jira API."""
jira: Any #: :meta private:
confluence: Any
jira_username: Optional[str] = None
jira_api_token: Optional[str] = None
jira_instance_url: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
jira_username = get_from_dict_or_env(values, "jira_username", "JIRA_USERNAME")
values["jira_username"] = jira_username
jira_api_token = get_from_dict_or_env(
values, "jira_api_token", "JIRA_API_TOKEN"
)
values["jira_api_token"] = jira_api_token
jira_instance_url = get_from_dict_or_env(
values, "jira_instance_url", "JIRA_INSTANCE_URL"
)
values["jira_instance_url"] = jira_instance_url
try:
from atlassian import Confluence, Jira
except ImportError:
raise ImportError(
"atlassian-python-api is not installed. "
"Please install it with `pip install atlassian-python-api`"
)
jira = Jira(
url=jira_instance_url,
username=jira_username,
password=jira_api_token,
cloud=True,
)
confluence = Confluence(
url=jira_instance_url,
username=jira_username,
password=jira_api_token,
cloud=True,
)
values["jira"] = jira
values["confluence"] = confluence
return values
def parse_issues(self, issues: Dict) -> List[dict]:
parsed = []
for issue in issues["issues"]:
key = issue["key"]
summary = issue["fields"]["summary"]
created = issue["fields"]["created"][0:10]
priority = issue["fields"]["priority"]["name"]
status = issue["fields"]["status"]["name"]
try:
assignee = issue["fields"]["assignee"]["displayName"]
except Exception:
assignee = "None"
rel_issues = {}
for related_issue in issue["fields"]["issuelinks"]:
if "inwardIssue" in related_issue.keys():
rel_type = related_issue["type"]["inward"]
rel_key = related_issue["inwardIssue"]["key"]
rel_summary = related_issue["inwardIssue"]["fields"]["summary"]
if "outwardIssue" in related_issue.keys():
rel_type = related_issue["type"]["outward"]
rel_key = related_issue["outwardIssue"]["key"]
rel_summary = related_issue["outwardIssue"]["fields"]["summary"]
rel_issues = {"type": rel_type, "key": rel_key, "summary": rel_summary}
parsed.append(
{
"key": key,
"summary": summary,
"created": created,
"assignee": assignee,
"priority": priority,
"status": status,
"related_issues": rel_issues,
}
)
return parsed
def parse_projects(self, projects: List[dict]) -> List[dict]:
parsed = []
for project in projects:
id = project["id"]
key = project["key"]
name = project["name"]
type = project["projectTypeKey"]
style = project["style"]
parsed.append(
{"id": id, "key": key, "name": name, "type": type, "style": style}
)
return parsed
def search(self, query: str) -> str:
issues = self.jira.jql(query)
parsed_issues = self.parse_issues(issues)
parsed_issues_str = (
"Found " + str(len(parsed_issues)) + " issues:\n" + str(parsed_issues)
)
return parsed_issues_str
def project(self) -> str:
projects = self.jira.projects()
parsed_projects = self.parse_projects(projects)
parsed_projects_str = (
"Found " + str(len(parsed_projects)) + " projects:\n" + str(parsed_projects)
)
return parsed_projects_str
def issue_create(self, query: str) -> str:
        import json
params = json.loads(query)
return self.jira.issue_create(fields=dict(params))
def page_create(self, query: str) -> str:
        import json
params = json.loads(query)
return self.confluence.create_page(**dict(params))
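    # `other` expects `query` to be a JSON string such as the illustrative,
    # hypothetical payload {"function": "issue", "args": ["PROJ-1"], "kwargs": {}};
    # the named method is looked up on the Jira client and called with args/kwargs.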
def other(self, query: str) -> str:
        import json
params = json.loads(query)
jira_function = getattr(self.jira, params["function"])
return jira_function(*params.get("args", []), **params.get("kwargs", {}))
def run(self, mode: str, query: str) -> str:
if mode == "jql":
return self.search(query)
elif mode == "get_projects":
return self.project()
elif mode == "create_issue":
return self.issue_create(query)
elif mode == "other":
return self.other(query)
elif mode == "create_page":
return self.page_create(query)
else:
raise ValueError(f"Got unexpected mode {mode}")
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~onedrive.py | """Loads data from OneDrive"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Iterator, List, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base_o365 import (
O365BaseLoader,
_FileType,
)
from langchain.document_loaders.parsers.registry import get_parser
from langchain.pydantic_v1 import Field
if TYPE_CHECKING:
from O365.drive import Drive, Folder
logger = logging.getLogger(__name__)
class OneDriveLoader(O365BaseLoader):
"""Load from `Microsoft OneDrive`."""
drive_id: str = Field(...)
""" The ID of the OneDrive drive to load data from."""
folder_path: Optional[str] = None
""" The path to the folder to load data from."""
object_ids: Optional[List[str]] = None
""" The IDs of the objects to load data from."""
@property
def _file_types(self) -> Sequence[_FileType]:
"""Return supported file types."""
return _FileType.DOC, _FileType.DOCX, _FileType.PDF
@property
def _scopes(self) -> List[str]:
"""Return required scopes."""
return ["offline_access", "Files.Read.All"]
def _get_folder_from_path(self, drive: Drive) -> Union[Folder, Drive]:
"""
Returns the folder or drive object located at the
specified path relative to the given drive.
Args:
drive (Drive): The root drive from which the folder path is relative.
Returns:
Union[Folder, Drive]: The folder or drive object
located at the specified path.
Raises:
FileNotFoundError: If the path does not exist.
"""
subfolder_drive = drive
if self.folder_path is None:
return subfolder_drive
subfolders = [f for f in self.folder_path.split("/") if f != ""]
if len(subfolders) == 0:
return subfolder_drive
items = subfolder_drive.get_items()
for subfolder in subfolders:
try:
subfolder_drive = list(filter(lambda x: subfolder in x.name, items))[0]
items = subfolder_drive.get_items()
except (IndexError, AttributeError):
                raise FileNotFoundError(
                    "Path {} does not exist.".format(self.folder_path)
                )
return subfolder_drive
def lazy_load(self) -> Iterator[Document]:
"""Load documents lazily. Use this when working at a large scale."""
try:
from O365.drive import Drive
except ImportError:
raise ImportError(
"O365 package not found, please install it with `pip install o365`"
)
drive = self._auth().storage().get_drive(self.drive_id)
if not isinstance(drive, Drive):
raise ValueError(f"There isn't a Drive with id {self.drive_id}.")
blob_parser = get_parser("default")
if self.folder_path:
folder = self._get_folder_from_path(drive)
for blob in self._load_from_folder(folder):
yield from blob_parser.lazy_parse(blob)
if self.object_ids:
for blob in self._load_from_object_ids(drive, self.object_ids):
yield from blob_parser.lazy_parse(blob)
def load(self) -> List[Document]:
"""Load all documents."""
return list(self.lazy_load())
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~anyscale.py | """Wrapper around Anyscale Endpoint"""
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.openai import (
BaseOpenAI,
acompletion_with_retry,
completion_with_retry,
)
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import Generation, LLMResult
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env
def update_token_usage(
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response["usage"][_key]
else:
token_usage[_key] += response["usage"][_key]
def create_llm_result(
choices: Any, prompts: List[str], token_usage: Dict[str, int], model_name: str
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
choice = choices[i]
generations.append(
[
Generation(
text=choice["message"]["content"],
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
]
)
llm_output = {"token_usage": token_usage, "model_name": model_name}
return LLMResult(generations=generations, llm_output=llm_output)
class Anyscale(BaseOpenAI):
"""Anyscale large language models.
    To use, you should have the environment variables ``ANYSCALE_API_BASE`` and
    ``ANYSCALE_API_KEY`` set with your Anyscale Endpoint, or pass them as named
    parameters to the constructor.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscalellm = Anyscale(anyscale_api_base="ANYSCALE_API_BASE",
anyscale_api_key="ANYSCALE_API_KEY",
model_name="meta-llama/Llama-2-7b-chat-hf")
# To leverage Ray for parallel processing
@ray.remote(num_cpus=1)
def send_query(llm, text):
resp = llm(text)
return resp
futures = [send_query.remote(anyscalellm, text) for text in texts]
results = ray.get(futures)
"""
"""Key word arguments to pass to the model."""
anyscale_api_base: Optional[str] = None
anyscale_api_key: Optional[str] = None
prefix_messages: List = Field(default_factory=list)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["anyscale_api_base"] = get_from_dict_or_env(
values, "anyscale_api_base", "ANYSCALE_API_BASE"
)
values["anyscale_api_key"] = get_from_dict_or_env(
values, "anyscale_api_key", "ANYSCALE_API_KEY"
)
try:
import openai
## Always create ChatComplete client, replacing the legacy Complete client
values["client"] = openai.ChatCompletion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_name": self.model_name},
**super()._identifying_params,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {
"api_key": self.anyscale_api_key,
"api_base": self.anyscale_api_base,
}
return {**openai_creds, **{"model": self.model_name}, **super()._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "Anyscale LLM"
def _get_chat_messages(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> Tuple:
if len(prompts) > 1:
raise ValueError(
f"Anyscale currently only supports single prompt, got {prompts}"
)
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
params: Dict[str, Any] = self._invocation_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params.get("max_tokens") == -1:
# for Chat api, omitting max_tokens is equivalent to having no limit
del params["max_tokens"]
return messages, params
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs, "stream": True}
for stream_resp in completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs, "stream": True}
async for stream_resp in await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(token, chunk=chunk)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
choices = []
token_usage: Dict[str, int] = {}
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
for prompt in prompts:
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"message": {"content": generation.text},
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs}
response = completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return create_llm_result(choices, prompts, token_usage, self.model_name)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
choices = []
token_usage: Dict[str, int] = {}
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
for prompt in prompts:
messages = self.prefix_messages + [{"role": "user", "content": prompt}]
if self.streaming:
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(prompt, stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"message": {"content": generation.text},
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs}
response = await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return create_llm_result(choices, prompts, token_usage, self.model_name)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~dashscope.py | from __future__ import annotations
import logging
from typing import (
Any,
Callable,
Dict,
List,
Optional,
)
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: DashScopeEmbeddings) -> Callable[[Any], Any]:
multiplier = 1
min_seconds = 1
max_seconds = 4
# Wait 2^x * 1 second between each retry starting with
# 1 seconds, then up to 4 seconds, then 4 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
resp = embeddings.client.call(**kwargs)
if resp.status_code == 200:
return resp.output["embeddings"]
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}",
response=resp,
)
return _embed_with_retry(**kwargs)
class DashScopeEmbeddings(BaseModel, Embeddings):
"""DashScope embedding models.
To use, you should have the ``dashscope`` python package installed, and the
environment variable ``DASHSCOPE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import DashScopeEmbeddings
embeddings = DashScopeEmbeddings(dashscope_api_key="my-api-key")
Example:
.. code-block:: python
import os
os.environ["DASHSCOPE_API_KEY"] = "your DashScope API KEY"
from langchain.embeddings.dashscope import DashScopeEmbeddings
embeddings = DashScopeEmbeddings(
model="text-embedding-v1",
)
text = "This is a test query."
query_result = embeddings.embed_query(text)
"""
client: Any #: :meta private:
"""The DashScope client."""
model: str = "text-embedding-v1"
dashscope_api_key: Optional[str] = None
max_retries: int = 5
"""Maximum number of retries to make when generating."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["dashscope_api_key"] = get_from_dict_or_env(
            values, "dashscope_api_key", "DASHSCOPE_API_KEY"
        )
        try:
            import dashscope
        except ImportError:
            raise ImportError(
                "Could not import dashscope python package. "
                "Please install it with `pip install dashscope`."
            )
        # Configure the shared dashscope client only after the import succeeds.
        dashscope.api_key = values["dashscope_api_key"]
        values["client"] = dashscope.TextEmbedding
        return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to DashScope's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(
self, input=texts, text_type="document", model=self.model
)
embedding_list = [item["embedding"] for item in embeddings]
return embedding_list
def embed_query(self, text: str) -> List[float]:
"""Call out to DashScope's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = embed_with_retry(
self, input=text, text_type="query", model=self.model
)[0]["embedding"]
return embedding
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~pipelineai.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class PipelineAI(LLM, BaseModel):
"""PipelineAI large language models.
To use, you should have the ``pipeline-ai`` python package installed,
and the environment variable ``PIPELINE_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import PipelineAI
pipeline = PipelineAI(pipeline_key="")
"""
pipeline_key: str = ""
"""The id or tag of the target pipeline"""
pipeline_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any pipeline parameters valid for `create` call not
explicitly specified."""
pipeline_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("pipeline_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to pipeline_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["pipeline_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
pipeline_api_key = get_from_dict_or_env(
values, "pipeline_api_key", "PIPELINE_API_KEY"
)
values["pipeline_api_key"] = pipeline_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"pipeline_key": self.pipeline_key},
**{"pipeline_kwargs": self.pipeline_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "pipeline_ai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Pipeline Cloud endpoint."""
try:
from pipeline import PipelineCloud
except ImportError:
raise ImportError(
"Could not import pipeline-ai python package. "
"Please install it with `pip install pipeline-ai`."
)
client = PipelineCloud(token=self.pipeline_api_key)
params = self.pipeline_kwargs or {}
params = {**params, **kwargs}
run = client.run_pipeline(self.pipeline_key, [prompt, params])
try:
text = run.result_preview[0][0]
except AttributeError:
raise AttributeError(
f"A pipeline run should have a `result_preview` attribute."
f"Run was: {run}"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the pipeline parameters
text = enforce_stop_tokens(text, stop)
return text
| [] |