date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~hologres.py | from __future__ import annotations
import json
import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_dict_or_env
ADA_TOKEN_COUNT = 1536
_LANGCHAIN_DEFAULT_TABLE_NAME = "langchain_pg_embedding"
class HologresWrapper:
"""`Hologres API` wrapper."""
def __init__(self, connection_string: str, ndims: int, table_name: str) -> None:
"""Initialize the wrapper.
Args:
connection_string: Hologres connection string.
ndims: Number of dimensions of the embedding output.
table_name: Name of the table to store embeddings and data.
"""
import psycopg2
self.table_name = table_name
self.conn = psycopg2.connect(connection_string)
self.cursor = self.conn.cursor()
self.conn.autocommit = False
self.ndims = ndims
def create_vector_extension(self) -> None:
self.cursor.execute("create extension if not exists proxima")
self.conn.commit()
def create_table(self, drop_if_exist: bool = True) -> None:
if drop_if_exist:
self.cursor.execute(f"drop table if exists {self.table_name}")
self.conn.commit()
self.cursor.execute(
f"""create table if not exists {self.table_name} (
id text,
embedding float4[] check(array_ndims(embedding) = 1 and \
array_length(embedding, 1) = {self.ndims}),
metadata json,
document text);"""
)
self.cursor.execute(
f"call set_table_property('{self.table_name}'"
+ """, 'proxima_vectors',
'{"embedding":{"algorithm":"Graph",
"distance_method":"SquaredEuclidean",
"build_params":{"min_flush_proxima_row_count" : 1,
"min_compaction_proxima_row_count" : 1,
"max_total_size_to_merge_mb" : 2000}}}');"""
)
self.conn.commit()
def get_by_id(self, id: str) -> List[Tuple]:
statement = (
f"select id, embedding, metadata, "
f"document from {self.table_name} where id = %s;"
)
self.cursor.execute(
statement,
(id,),
)
self.conn.commit()
return self.cursor.fetchall()
def insert(
self,
embedding: List[float],
metadata: dict,
document: str,
id: Optional[str] = None,
) -> None:
self.cursor.execute(
f'insert into "{self.table_name}" '
f"values (%s, array{json.dumps(embedding)}::float4[], %s, %s)",
(id if id is not None else "null", json.dumps(metadata), document),
)
self.conn.commit()
def query_nearest_neighbours(
self, embedding: List[float], k: int, filter: Optional[Dict[str, str]] = None
) -> List[Tuple[str, str, float]]:
params = []
filter_clause = ""
if filter is not None:
conjuncts = []
for key, val in filter.items():
conjuncts.append("metadata->>%s=%s")
params.append(key)
params.append(val)
filter_clause = "where " + " and ".join(conjuncts)
sql = (
f"select document, metadata::text, "
f"pm_approx_squared_euclidean_distance(array{json.dumps(embedding)}"
f"::float4[], embedding) as distance from"
f" {self.table_name} {filter_clause} order by distance asc limit {k};"
)
self.cursor.execute(sql, tuple(params))
self.conn.commit()
return self.cursor.fetchall()
class Hologres(VectorStore):
"""`Hologres API` vector store.
- `connection_string` is a Hologres connection string.
- `embedding_function` any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
- `ndims` is the number of dimensions of the embedding output.
- `table_name` is the name of the table to store embeddings and data.
(default: langchain_pg_embedding)
- NOTE: The table will be created when initializing the store (if it does not exist),
so make sure the user has the right permissions to create tables.
- `pre_delete_table` if True, will delete the table if it exists.
(default: False)
- Useful for testing.
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
ndims: int = ADA_TOKEN_COUNT,
table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
pre_delete_table: bool = False,
logger: Optional[logging.Logger] = None,
) -> None:
self.connection_string = connection_string
self.ndims = ndims
self.table_name = table_name
self.embedding_function = embedding_function
self.pre_delete_table = pre_delete_table
self.logger = logger or logging.getLogger(__name__)
self.__post_init__()
def __post_init__(
self,
) -> None:
"""
Initialize the store.
"""
self.storage = HologresWrapper(
self.connection_string, self.ndims, self.table_name
)
self.create_vector_extension()
self.create_table()
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def create_vector_extension(self) -> None:
try:
self.storage.create_vector_extension()
except Exception as e:
self.logger.exception(e)
raise e
def create_table(self) -> None:
self.storage.create_table(self.pre_delete_table)
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding_function: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
ndims: int = ADA_TOKEN_COUNT,
table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
pre_delete_table: bool = False,
**kwargs: Any,
) -> Hologres:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
embedding_function=embedding_function,
ndims=ndims,
table_name=table_name,
pre_delete_table=pre_delete_table,
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: List[dict],
ids: List[str],
**kwargs: Any,
) -> None:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
ids: List of ids for the texts.
kwargs: vectorstore specific parameters
"""
try:
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
self.storage.insert(embedding, metadata, text, id)
except Exception as e:
self.logger.exception(e)
self.storage.conn.commit()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
self.add_embeddings(texts, embeddings, metadatas, ids, **kwargs)
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with Hologres with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
results: List[Tuple[str, str, float]] = self.storage.query_nearest_neighbours(
embedding, k, filter
)
docs = [
(
Document(
page_content=result[0],
metadata=json.loads(result[1]),
),
result[2],
)
for result in results
]
return docs
@classmethod
def from_texts(
cls: Type[Hologres],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ndims: int = ADA_TOKEN_COUNT,
table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
ids: Optional[List[str]] = None,
pre_delete_table: bool = False,
**kwargs: Any,
) -> Hologres:
"""
Return VectorStore initialized from texts and embeddings.
Postgres connection string is required.
Either pass it as a parameter
or set the HOLOGRES_CONNECTION_STRING environment variable.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
ndims=ndims,
table_name=table_name,
pre_delete_table=pre_delete_table,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ndims: int = ADA_TOKEN_COUNT,
table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
ids: Optional[List[str]] = None,
pre_delete_table: bool = False,
**kwargs: Any,
) -> Hologres:
"""Construct Hologres wrapper from raw documents and pre-
generated embeddings.
Return VectorStore initialized from documents and embeddings.
Postgres connection string is required.
Either pass it as a parameter
or set the HOLOGRES_CONNECTION_STRING environment variable.
Example:
.. code-block:: python
from langchain.vectorstores import Hologres
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
hologres = Hologres.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
ndims=ndims,
table_name=table_name,
pre_delete_table=pre_delete_table,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[Hologres],
embedding: Embeddings,
ndims: int = ADA_TOKEN_COUNT,
table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
pre_delete_table: bool = False,
**kwargs: Any,
) -> Hologres:
"""
Get an instance of an existing Hologres store. This method will
return the instance of the store without inserting any new
embeddings.
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
ndims=ndims,
table_name=table_name,
embedding_function=embedding,
pre_delete_table=pre_delete_table,
)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="HOLOGRES_CONNECTION_STRING",
)
if not connection_string:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the HOLOGRES_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls: Type[Hologres],
documents: List[Document],
embedding: Embeddings,
ndims: int = ADA_TOKEN_COUNT,
table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> Hologres:
"""
Return VectorStore initialized from documents and embeddings.
Postgres connection string is required.
Either pass it as a parameter
or set the HOLOGRES_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
metadatas=metadatas,
ids=ids,
ndims=ndims,
table_name=table_name,
**kwargs,
)
@classmethod
def connection_string_from_db_params(
cls,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return (
f"dbname={database} user={user} password={password} host={host} port={port}"
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~memory~chat_message_histories~momento.py | from __future__ import annotations
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict
from langchain.utils import get_from_env
if TYPE_CHECKING:
import momento
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
class MomentoChatMessageHistory(BaseChatMessageHistory):
"""Chat message history cache that uses Momento as a backend.
See https://gomomento.com/"""
def __init__(
self,
session_id: str,
cache_client: momento.CacheClient,
cache_name: str,
*,
key_prefix: str = "message_store:",
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a chat message history cache that uses Momento as a backend.
Note: to instantiate the cache client passed to MomentoChatMessageHistory,
you must have a Momento account at https://gomomento.com/.
Args:
session_id (str): The session ID to use for this chat session.
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the messages.
key_prefix (str, optional): The prefix to apply to the cache key.
Defaults to "message_store:".
ttl (Optional[timedelta], optional): The TTL to use for the messages.
Defaults to None, i.e. the default TTL of the cache will be used.
ensure_cache_exists (bool, optional): Create the cache if it doesn't exist.
Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
"""
try:
from momento import CacheClient
from momento.requests import CollectionTtl
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.key = key_prefix + session_id
self.cache_client = cache_client
self.cache_name = cache_name
if ttl is not None:
self.ttl = CollectionTtl.of(ttl)
else:
self.ttl = CollectionTtl.from_cache_ttl()
@classmethod
def from_client_params(
cls,
session_id: str,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
api_key: Optional[str] = None,
auth_token: Optional[str] = None, # for backwards compatibility
**kwargs: Any,
) -> MomentoChatMessageHistory:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
# Try checking `MOMENTO_AUTH_TOKEN` first for backwards compatibility
try:
api_key = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
except ValueError:
api_key = api_key or get_from_env("api_key", "MOMENTO_API_KEY")
credentials = CredentialProvider.from_string(api_key)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(session_id, cache_client, cache_name, ttl=ttl, **kwargs)
@property
def messages(self) -> list[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from Momento.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
Returns:
list[BaseMessage]: List of cached messages
"""
from momento.responses import CacheListFetch
fetch_response = self.cache_client.list_fetch(self.cache_name, self.key)
if isinstance(fetch_response, CacheListFetch.Hit):
items = [json.loads(m) for m in fetch_response.value_list_string]
return messages_from_dict(items)
elif isinstance(fetch_response, CacheListFetch.Miss):
return []
elif isinstance(fetch_response, CacheListFetch.Error):
raise fetch_response.inner_exception
else:
raise Exception(f"Unexpected response: {fetch_response}")
def add_message(self, message: BaseMessage) -> None:
"""Store a message in the cache.
Args:
message (BaseMessage): The message object to store.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheListPushBack
item = json.dumps(_message_to_dict(message), ensure_ascii=False)
push_response = self.cache_client.list_push_back(
self.cache_name, self.key, item, ttl=self.ttl
)
if isinstance(push_response, CacheListPushBack.Success):
return None
elif isinstance(push_response, CacheListPushBack.Error):
raise push_response.inner_exception
else:
raise Exception(f"Unexpected response: {push_response}")
def clear(self) -> None:
"""Remove the session's messages from the cache.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheDelete
delete_response = self.cache_client.delete(self.cache_name, self.key)
if isinstance(delete_response, CacheDelete.Success):
return None
elif isinstance(delete_response, CacheDelete.Error):
raise delete_response.inner_exception
else:
raise Exception(f"Unexpected response: {delete_response}")
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~self_hosted.py | from typing import Any, Callable, List
from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.pydantic_v1 import Extra
from langchain.schema.embeddings import Embeddings
def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a Hugging Face pipeline (or comparable callable) and
returns a list of embeddings for each document in the batch.
"""
return pipeline(*args, **kwargs)
class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
"""Custom embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example using a model load function:
.. code-block:: python
from langchain.embeddings import SelfHostedEmbeddings
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
def get_pipeline():
model_id = "facebook/bart-large"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
embeddings = SelfHostedEmbeddings(
model_load_fn=get_pipeline,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing in a pipeline path:
.. code-block:: python
from langchain.embeddings import SelfHostedHFEmbeddings
import runhouse as rh
from transformers import pipeline
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
rh.blob(pickle.dumps(pipeline),
path="models/pipeline.pkl").save().to(gpu, path="models")
embeddings = SelfHostedHFEmbeddings.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
"""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings on the remote hardware."""
inference_kwargs: Any = None
"""Any kwargs to pass to the model's inference function."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.client(self.pipeline_ref, texts)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embeddings = self.client(self.pipeline_ref, text)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~scann.py | from __future__ import annotations
import operator
import pickle
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import DistanceStrategy
def normalize(x: np.ndarray) -> np.ndarray:
"""Normalize vectors to unit length."""
x /= np.clip(np.linalg.norm(x, axis=-1, keepdims=True), 1e-12, None)
return x
def dependable_scann_import() -> Any:
"""
Import `scann` if available, otherwise raise error.
"""
try:
import scann
except ImportError:
raise ImportError(
"Could not import scann python package. "
"Please install it with `pip install scann` "
)
return scann
class ScaNN(VectorStore):
"""`ScaNN` vector store.
To use, you should have the ``scann`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import ScaNN
db = ScaNN.from_texts(
['foo', 'bar', 'barz', 'qux'],
HuggingFaceEmbeddings())
db.similarity_search('foo?', k=1)
"""
def __init__(
self,
embedding: Embeddings,
index: Any,
docstore: Docstore,
index_to_docstore_id: Dict[int, str],
relevance_score_fn: Optional[Callable[[float], float]] = None,
normalize_L2: bool = False,
distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE,
scann_config: Optional[str] = None,
):
"""Initialize with necessary components."""
self.embedding = embedding
self.index = index
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
self.distance_strategy = distance_strategy
self.override_relevance_score_fn = relevance_score_fn
self._normalize_L2 = normalize_L2
self._scann_config = scann_config
def __add(
self,
texts: Iterable[str],
embeddings: Iterable[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
if not isinstance(self.docstore, AddableMixin):
raise ValueError(
"If trying to add texts, the underlying docstore should support "
f"adding items, which {self.docstore} does not"
)
raise NotImplementedError("Updates are not available in ScaNN, yet.")
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents.
embeddings = self.embedding.embed_documents(list(texts))
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs)
def add_embeddings(
self,
text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if not isinstance(self.docstore, AddableMixin):
raise ValueError(
"If trying to add texts, the underlying docstore should support "
f"adding items, which {self.docstore} does not"
)
# Embed and create the documents.
texts, embeddings = zip(*text_embeddings)
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
raise NotImplementedError("Deletions are not available in ScaNN, yet.")
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of documents most similar to the query text and L2 distance
in float for each. Lower score represents more similarity.
"""
vector = np.array([embedding], dtype=np.float32)
if self._normalize_L2:
vector = normalize(vector)
indices, scores = self.index.search_batched(
vector, k if filter is None else fetch_k
)
docs = []
for j, i in enumerate(indices[0]):
if i == -1:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
if filter is not None:
filter = {
key: [value] if not isinstance(value, list) else value
for key, value in filter.items()
}
if all(doc.metadata.get(key) in value for key, value in filter.items()):
docs.append((doc, scores[0][j]))
else:
docs.append((doc, scores[0][j]))
score_threshold = kwargs.get("score_threshold")
if score_threshold is not None:
cmp = (
operator.ge
if self.distance_strategy
in (DistanceStrategy.MAX_INNER_PRODUCT, DistanceStrategy.JACCARD)
else operator.le
)
docs = [
(doc, similarity)
for doc, similarity in docs
if cmp(similarity, score_threshold)
]
return docs[:k]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of documents most similar to the query text with
L2 distance in float. Lower score represents more similarity.
"""
embedding = self.embedding.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
fetch_k=fetch_k,
**kwargs,
)
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
fetch_k=fetch_k,
**kwargs,
)
return [doc for doc, _ in docs_and_scores]
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(
query, k, filter=filter, fetch_k=fetch_k, **kwargs
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
normalize_L2: bool = False,
**kwargs: Any,
) -> ScaNN:
scann = dependable_scann_import()
distance_strategy = kwargs.get(
"distance_strategy", DistanceStrategy.EUCLIDEAN_DISTANCE
)
scann_config = kwargs.get("scann_config", None)
vector = np.array(embeddings, dtype=np.float32)
if normalize_L2:
vector = normalize(vector)
if scann_config is not None:
index = scann.scann_ops_pybind.create_searcher(vector, scann_config)
else:
if distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
index = (
scann.scann_ops_pybind.builder(vector, 1, "dot_product")
.score_brute_force()
.build()
)
else:
# Default to L2, currently other metric types not initialized.
index = (
scann.scann_ops_pybind.builder(vector, 1, "squared_l2")
.score_brute_force()
.build()
)
documents = []
if ids is None:
ids = [str(uuid.uuid4()) for _ in texts]
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = dict(enumerate(ids))
if len(index_to_id) != len(documents):
raise Exception(
f"{len(index_to_id)} ids provided for {len(documents)} documents."
" Each document should have an id."
)
docstore = InMemoryDocstore(dict(zip(index_to_id.values(), documents)))
return cls(
embedding,
index,
docstore,
index_to_id,
normalize_L2=normalize_L2,
**kwargs,
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> ScaNN:
"""Construct ScaNN wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the ScaNN database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import ScaNN
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
scann = ScaNN.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> ScaNN:
"""Construct ScaNN wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the ScaNN database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import ScaNN
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
scann = ScaNN.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
**kwargs,
)
def save_local(self, folder_path: str, index_name: str = "index") -> None:
"""Save ScaNN index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to save index, docstore,
and index_to_docstore_id to.
"""
path = Path(folder_path)
scann_path = path / "{index_name}.scann".format(index_name=index_name)
scann_path.mkdir(exist_ok=True, parents=True)
# save index separately since it is not picklable
self.index.serialize(str(scann_path))
# save docstore and index_to_docstore_id
with open(path / "{index_name}.pkl".format(index_name=index_name), "wb") as f:
pickle.dump((self.docstore, self.index_to_docstore_id), f)
@classmethod
def load_local(
cls,
folder_path: str,
embedding: Embeddings,
index_name: str = "index",
**kwargs: Any,
) -> ScaNN:
"""Load ScaNN index, docstore, and index_to_docstore_id from disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries
index_name: for saving with a specific index file name
"""
path = Path(folder_path)
scann_path = path / "{index_name}.scann".format(index_name=index_name)
scann_path.mkdir(exist_ok=True, parents=True)
# load index separately since it is not picklable
scann = dependable_scann_import()
index = scann.scann_ops_pybind.load_searcher(str(scann_path))
# load docstore and index_to_docstore_id
with open(path / "{index_name}.pkl".format(index_name=index_name), "rb") as f:
docstore, index_to_docstore_id = pickle.load(f)
return cls(embedding, index, docstore, index_to_docstore_id, **kwargs)
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided in
# vectorstore constructor
if self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
# Default behavior is to use euclidean distance relevancy
return self._euclidean_relevance_score_fn
else:
raise ValueError(
"Unknown distance strategy, must be cosine, max_inner_product,"
" or euclidean"
)
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and their similarity scores on a scale from 0 to 1."""
# Pop score threshold so that only relevancy scores, not raw scores, are
# filtered.
score_threshold = kwargs.pop("score_threshold", None)
relevance_score_fn = self._select_relevance_score_fn()
if relevance_score_fn is None:
raise ValueError(
"normalize_score_fn must be provided to"
" ScaNN constructor to normalize scores"
)
docs_and_scores = self.similarity_search_with_score(
query,
k=k,
filter=filter,
fetch_k=fetch_k,
**kwargs,
)
docs_and_rel_scores = [
(doc, relevance_score_fn(score)) for doc, score in docs_and_scores
]
if score_threshold is not None:
docs_and_rel_scores = [
(doc, similarity)
for doc, similarity in docs_and_rel_scores
if similarity >= score_threshold
]
return docs_and_rel_scores
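# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal save/load round trip, assuming the `scann` package and the HuggingFace
# embeddings integration are installed; the folder path and texts are placeholders.
if __name__ == "__main__":
    from langchain.embeddings import HuggingFaceEmbeddings

    embeddings = HuggingFaceEmbeddings()
    db = ScaNN.from_texts(["foo", "bar", "baz"], embeddings)
    db.save_local("scann_index")
    restored = ScaNN.load_local("scann_index", embeddings)
    print(restored.similarity_search("foo", k=1))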
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_transformers~doctran_text_extract.py | from typing import Any, List, Optional, Sequence
from langchain.schema import BaseDocumentTransformer, Document
from langchain.utils import get_from_env
class DoctranPropertyExtractor(BaseDocumentTransformer):
"""Extract properties from text documents using doctran.
Arguments:
properties: A list of the properties to extract.
openai_api_key: OpenAI API key. Can also be specified via environment variable
``OPENAI_API_KEY``.
Example:
.. code-block:: python
from langchain.document_transformers import DoctranPropertyExtractor
properties = [
{
"name": "category",
"description": "What type of email this is.",
"type": "string",
"enum": ["update", "action_item", "customer_feedback", "announcement", "other"],
"required": True,
},
{
"name": "mentions",
"description": "A list of all people mentioned in this email.",
"type": "array",
"items": {
"name": "full_name",
"description": "The full name of the person mentioned.",
"type": "string",
},
"required": True,
},
{
"name": "eli5",
"description": "Explain this email to me like I'm 5 years old.",
"type": "string",
"required": True,
},
]
# Pass in openai_api_key or set env var OPENAI_API_KEY
property_extractor = DoctranPropertyExtractor(properties)
transformed_document = await property_extractor.atransform_documents(documents)
""" # noqa: E501
def __init__(
self,
properties: List[dict],
openai_api_key: Optional[str] = None,
openai_api_model: Optional[str] = None,
) -> None:
self.properties = properties
self.openai_api_key = openai_api_key or get_from_env(
"openai_api_key", "OPENAI_API_KEY"
)
self.openai_api_model = openai_api_model or get_from_env(
"openai_api_model", "OPENAI_API_MODEL"
)
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Extracts properties from text documents using doctran."""
try:
from doctran import Doctran, ExtractProperty
doctran = Doctran(
openai_api_key=self.openai_api_key, openai_model=self.openai_api_model
)
except ImportError:
raise ImportError(
"Install doctran to use this parser. (pip install doctran)"
)
properties = [ExtractProperty(**property) for property in self.properties]
for d in documents:
doctran_doc = (
await doctran.parse(content=d.page_content)
.extract(properties=properties)
.execute()
)
d.metadata["extracted_properties"] = doctran_doc.extracted_properties
return documents
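# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal async sketch, assuming OPENAI_API_KEY and OPENAI_API_MODEL are set and
# the `doctran` package is installed; the property schema is a placeholder.
if __name__ == "__main__":
    import asyncio

    extractor = DoctranPropertyExtractor(
        properties=[
            {
                "name": "category",
                "description": "What type of text this is.",
                "type": "string",
                "required": True,
            }
        ]
    )
    docs = [Document(page_content="Quarterly results were announced today.")]
    transformed = asyncio.run(extractor.atransform_documents(docs))
    print(transformed[0].metadata["extracted_properties"])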
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~pupmed.py | from langchain.retrievers.pubmed import PubMedRetriever
__all__ = [
"PubMedRetriever",
]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~openai_functions~tagging.py | from typing import Any, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import _convert_schema, get_llm_kwargs
from langchain.output_parsers.openai_functions import (
JsonOutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain.prompts import ChatPromptTemplate
from langchain.schema.language_model import BaseLanguageModel
def _get_tagging_function(schema: dict) -> dict:
return {
"name": "information_extraction",
"description": "Extracts the relevant information from the passage.",
"parameters": _convert_schema(schema),
}
_TAGGING_TEMPLATE = """Extract the desired information from the following passage.
Only extract the properties mentioned in the 'information_extraction' function.
Passage:
{input}
"""
def create_tagging_chain(
schema: dict,
llm: BaseLanguageModel,
prompt: Optional[ChatPromptTemplate] = None,
**kwargs: Any
) -> Chain:
"""Creates a chain that extracts information from a passage
based on a schema.
Args:
schema: The schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract information from a passage.
"""
function = _get_tagging_function(schema)
prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
output_parser = JsonOutputFunctionsParser()
llm_kwargs = get_llm_kwargs(function)
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=output_parser,
**kwargs,
)
return chain
def create_tagging_chain_pydantic(
pydantic_schema: Any,
llm: BaseLanguageModel,
prompt: Optional[ChatPromptTemplate] = None,
**kwargs: Any
) -> Chain:
"""Creates a chain that extracts information from a passage
based on a pydantic schema.
Args:
pydantic_schema: The pydantic schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract information from a passage.
"""
openai_schema = pydantic_schema.schema()
function = _get_tagging_function(openai_schema)
prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
llm_kwargs = get_llm_kwargs(function)
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=output_parser,
**kwargs,
)
return chain
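# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch, assuming an OpenAI function-calling chat model is available via
# OPENAI_API_KEY; the schema and input text are placeholders.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI

    schema = {
        "properties": {
            "sentiment": {"type": "string", "enum": ["positive", "neutral", "negative"]},
            "language": {"type": "string"},
        },
        "required": ["sentiment", "language"],
    }
    llm = ChatOpenAI(temperature=0)
    chain = create_tagging_chain(schema, llm)
    print(chain.run("Este producto es excelente, lo recomiendo."))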
| [
"information_extraction",
"Extract the desired information from the following passage.\n\nOnly extract the properties mentioned in the 'information_extraction' function.\n\nPassage:\n{input}\n"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~alpha_vantage.py | """Util that calls AlphaVantage for Currency Exchange Rate."""
from typing import Any, Dict, List, Optional
import requests
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class AlphaVantageAPIWrapper(BaseModel):
"""Wrapper for AlphaVantage API for Currency Exchange Rate.
Docs for using:
1. Go to AlphaVantage and sign up for an API key
2. Save your API key in the ALPHAVANTAGE_API_KEY environment variable
"""
alphavantage_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["alphavantage_api_key"] = get_from_dict_or_env(
values, "alphavantage_api_key", "ALPHAVANTAGE_API_KEY"
)
return values
def _get_exchange_rate(
self, from_currency: str, to_currency: str
) -> Dict[str, Any]:
"""Make a request to the AlphaVantage API to get the exchange rate."""
response = requests.get(
"https://www.alphavantage.co/query/",
params={
"function": "CURRENCY_EXCHANGE_RATE",
"from_currency": from_currency,
"to_currency": to_currency,
"apikey": self.alphavantage_api_key,
},
)
response.raise_for_status()
data = response.json()
if "Error Message" in data:
raise ValueError(f"API Error: {data['Error Message']}")
return data
@property
def standard_currencies(self) -> List[str]:
return ["USD", "EUR", "GBP", "JPY", "CHF", "CAD", "AUD", "NZD"]
def run(self, from_currency: str, to_currency: str) -> str:
"""Get the current exchange rate for a specified currency pair."""
if to_currency not in self.standard_currencies:
from_currency, to_currency = to_currency, from_currency
data = self._get_exchange_rate(from_currency, to_currency)
return data["Realtime Currency Exchange Rate"]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~awa.py | from typing import Any, Dict, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.embeddings import Embeddings
class AwaEmbeddings(BaseModel, Embeddings):
"""Embedding documents and queries with Awa DB.
Attributes:
client: The AwaEmbedding client.
model: The name of the model used for embedding.
Default is "all-mpnet-base-v2".
"""
client: Any #: :meta private:
model: str = "all-mpnet-base-v2"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that awadb library is installed."""
try:
from awadb import AwaEmbedding
except ImportError as exc:
raise ImportError(
"Could not import awadb library. "
"Please install it with `pip install awadb`"
) from exc
values["client"] = AwaEmbedding()
return values
def set_model(self, model_name: str) -> None:
"""Set the model used for embedding.
The default model used is all-mpnet-base-v2
Args:
model_name: A string which represents the name of model.
"""
self.model = model_name
self.client.model_name = model_name
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using AwaEmbedding.
Args:
texts: The list of texts to be embedded.
Returns:
List of embeddings, one for each text.
"""
return self.client.EmbeddingBatch(texts)
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using AwaEmbedding.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.client.Embedding(text)
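# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch, assuming the `awadb` package is installed; the model name and
# texts are placeholders.
if __name__ == "__main__":
    embedder = AwaEmbeddings()
    embedder.set_model("all-mpnet-base-v2")
    doc_vectors = embedder.embed_documents(["hello world", "goodbye world"])
    query_vector = embedder.embed_query("hello")
    print(len(doc_vectors), len(query_vector))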
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~notiondb.py | from typing import Any, Dict, List, Optional
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"
class NotionDBLoader(BaseLoader):
"""Load from `Notion DB`.
Reads content from pages within a Notion Database.
Args:
integration_token (str): Notion integration token.
database_id (str): Notion database id.
request_timeout_sec (int): Timeout for Notion requests in seconds.
Defaults to 10.
"""
def __init__(
self,
integration_token: str,
database_id: str,
request_timeout_sec: Optional[int] = 10,
) -> None:
"""Initialize with parameters."""
if not integration_token:
raise ValueError("integration_token must be provided")
if not database_id:
raise ValueError("database_id must be provided")
self.token = integration_token
self.database_id = database_id
self.headers = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
self.request_timeout_sec = request_timeout_sec
def load(self) -> List[Document]:
"""Load documents from the Notion database.
Returns:
List[Document]: List of documents.
"""
page_summaries = self._retrieve_page_summaries()
return list(self.load_page(page_summary) for page_summary in page_summaries)
def _retrieve_page_summaries(
self, query_dict: Optional[Dict[str, Any]] = None
) -> List[Dict[str, Any]]:
"""Get all the pages from a Notion database."""
query_dict = dict(query_dict) if query_dict else {"page_size": 100}
pages: List[Dict[str, Any]] = []
while True:
data = self._request(
DATABASE_URL.format(database_id=self.database_id),
method="POST",
query_dict=query_dict,
)
pages.extend(data.get("results"))
if not data.get("has_more"):
break
query_dict["start_cursor"] = data.get("next_cursor")
return pages
def load_page(self, page_summary: Dict[str, Any]) -> Document:
"""Read a page.
Args:
page_summary: Page summary from Notion API.
"""
page_id = page_summary["id"]
# load properties as metadata
metadata: Dict[str, Any] = {}
for prop_name, prop_data in page_summary["properties"].items():
prop_type = prop_data["type"]
if prop_type == "rich_text":
value = (
prop_data["rich_text"][0]["plain_text"]
if prop_data["rich_text"]
else None
)
elif prop_type == "title":
value = (
prop_data["title"][0]["plain_text"] if prop_data["title"] else None
)
elif prop_type == "multi_select":
value = (
[item["name"] for item in prop_data["multi_select"]]
if prop_data["multi_select"]
else []
)
elif prop_type == "url":
value = prop_data["url"]
elif prop_type == "unique_id":
value = (
f'{prop_data["unique_id"]["prefix"]}-{prop_data["unique_id"]["number"]}'
if prop_data["unique_id"]
else None
)
elif prop_type == "status":
value = prop_data["status"]["name"] if prop_data["status"] else None
elif prop_type == "people":
value = (
[item["name"] for item in prop_data["people"]]
if prop_data["people"]
else []
)
elif prop_type == "date":
value = prop_data["date"] if prop_data["date"] else None
elif prop_type == "last_edited_time":
value = (
prop_data["last_edited_time"]
if prop_data["last_edited_time"]
else None
)
elif prop_type == "created_time":
value = prop_data["created_time"] if prop_data["created_time"] else None
else:
value = None
metadata[prop_name.lower()] = value
metadata["id"] = page_id
return Document(page_content=self._load_blocks(page_id), metadata=metadata)
def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
"""Read a block and its children."""
result_lines_arr: List[str] = []
cur_block_id: str = block_id
while cur_block_id:
data = self._request(BLOCK_URL.format(block_id=cur_block_id))
for result in data["results"]:
result_obj = result[result["type"]]
if "rich_text" not in result_obj:
continue
cur_result_text_arr: List[str] = []
for rich_text in result_obj["rich_text"]:
if "text" in rich_text:
cur_result_text_arr.append(
"\t" * num_tabs + rich_text["text"]["content"]
)
if result["has_children"]:
children_text = self._load_blocks(
result["id"], num_tabs=num_tabs + 1
)
cur_result_text_arr.append(children_text)
result_lines_arr.append("\n".join(cur_result_text_arr))
cur_block_id = data.get("next_cursor")
return "\n".join(result_lines_arr)
def _request(
self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {}
) -> Any:
res = requests.request(
method,
url,
headers=self.headers,
json=query_dict,
timeout=self.request_timeout_sec,
)
res.raise_for_status()
return res.json()
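# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch; the environment variable names below are placeholders, and the
# token/database id must come from your own Notion workspace.
if __name__ == "__main__":
    import os

    loader = NotionDBLoader(
        integration_token=os.environ["NOTION_INTEGRATION_TOKEN"],
        database_id=os.environ["NOTION_DATABASE_ID"],
        request_timeout_sec=30,
    )
    documents = loader.load()
    print(f"Loaded {len(documents)} documents")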
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~document_loaders~test_unstructured.py | import os
from contextlib import ExitStack
from pathlib import Path
from langchain.document_loaders import (
UnstructuredAPIFileIOLoader,
UnstructuredAPIFileLoader,
UnstructuredFileLoader,
)
EXAMPLE_DOCS_DIRECTORY = str(Path(__file__).parent.parent / "examples/")
def test_unstructured_loader_with_post_processor() -> None:
def add_the_end(text: str) -> str:
return text + "THE END!"
file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf")
loader = UnstructuredFileLoader(
file_path=file_path,
post_processors=[add_the_end],
strategy="fast",
mode="elements",
)
docs = loader.load()
assert len(docs) > 1
assert docs[0].page_content.endswith("THE END!")
def test_unstructured_api_file_loader() -> None:
"""Test unstructured loader."""
file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf")
loader = UnstructuredAPIFileLoader(
file_path=file_path,
api_key="FAKE_API_KEY",
strategy="fast",
mode="elements",
)
docs = loader.load()
assert len(docs) > 1
def test_unstructured_api_file_loader_multiple_files() -> None:
"""Test unstructured loader."""
file_paths = [
os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf"),
os.path.join(EXAMPLE_DOCS_DIRECTORY, "whatsapp_chat.txt"),
]
loader = UnstructuredAPIFileLoader(
file_path=file_paths,
api_key="FAKE_API_KEY",
strategy="fast",
mode="elements",
)
docs = loader.load()
assert len(docs) > 1
def test_unstructured_api_file_io_loader() -> None:
"""Test unstructured loader."""
file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf")
with open(file_path, "rb") as f:
loader = UnstructuredAPIFileIOLoader(
file=f,
api_key="FAKE_API_KEY",
strategy="fast",
mode="elements",
file_filename=file_path,
)
docs = loader.load()
assert len(docs) > 1
def test_unstructured_api_file_loader_io_multiple_files() -> None:
"""Test unstructured loader."""
file_paths = [
os.path.join(EXAMPLE_DOCS_DIRECTORY, "layout-parser-paper.pdf"),
os.path.join(EXAMPLE_DOCS_DIRECTORY, "whatsapp_chat.txt"),
]
with ExitStack() as stack:
files = [stack.enter_context(open(file_path, "rb")) for file_path in file_paths]
loader = UnstructuredAPIFileIOLoader(
file=files, # type: ignore
api_key="FAKE_API_KEY",
strategy="fast",
mode="elements",
file_filenames=file_paths,
)
docs = loader.load()
assert len(docs) > 1
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~tools~file_management~test_utils.py | """Test the File Management utils."""
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from langchain.tools.file_management.utils import (
FileValidationError,
get_validated_relative_path,
)
def test_get_validated_relative_path_errs_on_absolute() -> None:
"""Safely resolve a path."""
root = Path(__file__).parent
user_path = "/bin/bash"
matches = f"Path {user_path} is outside of the allowed directory {root}"
with pytest.raises(FileValidationError, match=matches):
get_validated_relative_path(root, user_path)
def test_get_validated_relative_path_errs_on_parent_dir() -> None:
"""Safely resolve a path."""
root = Path(__file__).parent
user_path = "data/sub/../../../sibling"
matches = f"Path {user_path} is outside of the allowed directory {root}"
with pytest.raises(FileValidationError, match=matches):
get_validated_relative_path(root, user_path)
def test_get_validated_relative_path() -> None:
"""Safely resolve a path."""
root = Path(__file__).parent
user_path = "data/sub/file.txt"
expected = root / user_path
result = get_validated_relative_path(root, user_path)
assert result == expected
def test_get_validated_relative_path_errs_for_symlink_outside_root() -> None:
"""Test that symlink pointing outside of root directory is not allowed."""
with TemporaryDirectory() as temp_dir:
root = Path(temp_dir)
user_path = "symlink_outside_root"
outside_path = Path("/bin/bash")
symlink_path = root / user_path
symlink_path.symlink_to(outside_path)
matches = (
f"Path {user_path} is outside of the allowed directory {root.resolve()}"
)
with pytest.raises(FileValidationError, match=matches):
get_validated_relative_path(root, user_path)
symlink_path.unlink()
def test_get_validated_relative_path_for_symlink_inside_root() -> None:
"""Test that symlink pointing inside the root directory is allowed."""
with TemporaryDirectory() as temp_dir:
root = Path(temp_dir)
user_path = "symlink_inside_root"
target_path = "data/sub/file.txt"
symlink_path = root / user_path
target_path_ = root / target_path
symlink_path.symlink_to(target_path_)
expected = target_path_.resolve()
result = get_validated_relative_path(root, user_path)
assert result == expected
symlink_path.unlink()
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~opensearch_vector_search.py | from __future__ import annotations
import uuid
import warnings
from typing import Any, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.schema import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.utils import maximal_marginal_relevance
IMPORT_OPENSEARCH_PY_ERROR = (
"Could not import OpenSearch. Please install it with `pip install opensearch-py`."
)
SCRIPT_SCORING_SEARCH = "script_scoring"
PAINLESS_SCRIPTING_SEARCH = "painless_scripting"
MATCH_ALL_QUERY = {"match_all": {}} # type: Dict
def _import_opensearch() -> Any:
"""Import OpenSearch if available, otherwise raise error."""
try:
from opensearchpy import OpenSearch
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return OpenSearch
def _import_bulk() -> Any:
"""Import bulk if available, otherwise raise error."""
try:
from opensearchpy.helpers import bulk
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return bulk
def _import_not_found_error() -> Any:
"""Import not found error if available, otherwise raise error."""
try:
from opensearchpy.exceptions import NotFoundError
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return NotFoundError
def _get_opensearch_client(opensearch_url: str, **kwargs: Any) -> Any:
"""Get OpenSearch client from the opensearch_url, otherwise raise error."""
try:
opensearch = _import_opensearch()
client = opensearch(opensearch_url, **kwargs)
except ValueError as e:
raise ImportError(
f"OpenSearch client string provided is not in proper format. "
f"Got error: {e} "
)
return client
def _validate_embeddings_and_bulk_size(embeddings_length: int, bulk_size: int) -> None:
"""Validate Embeddings Length and Bulk Size."""
if embeddings_length == 0:
raise RuntimeError("Embeddings size is zero")
if bulk_size < embeddings_length:
raise RuntimeError(
f"The embeddings count, {embeddings_length} is more than the "
f"[bulk_size], {bulk_size}. Increase the value of [bulk_size]."
)
def _validate_aoss_with_engines(is_aoss: bool, engine: str) -> None:
"""Validate AOSS with the engine."""
if is_aoss and engine != "nmslib" and engine != "faiss":
raise ValueError(
"Amazon OpenSearch Service Serverless only "
"supports `nmslib` or `faiss` engines"
)
def _is_aoss_enabled(http_auth: Any) -> bool:
"""Check if the service is http_auth is set as `aoss`."""
if (
http_auth is not None
and hasattr(http_auth, "service")
and http_auth.service == "aoss"
):
return True
return False
def _bulk_ingest_embeddings(
client: Any,
index_name: str,
embeddings: List[List[float]],
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
vector_field: str = "vector_field",
text_field: str = "text",
mapping: Optional[Dict] = None,
max_chunk_bytes: Optional[int] = 1 * 1024 * 1024,
is_aoss: bool = False,
) -> List[str]:
"""Bulk Ingest Embeddings into given index."""
if not mapping:
mapping = dict()
bulk = _import_bulk()
not_found_error = _import_not_found_error()
requests = []
return_ids = []
try:
client.indices.get(index=index_name)
except not_found_error:
client.indices.create(index=index_name, body=mapping)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": index_name,
vector_field: embeddings[i],
text_field: text,
"metadata": metadata,
}
if is_aoss:
request["id"] = _id
else:
request["_id"] = _id
requests.append(request)
return_ids.append(_id)
bulk(client, requests, max_chunk_bytes=max_chunk_bytes)
if not is_aoss:
client.indices.refresh(index=index_name)
return return_ids
def _default_scripting_text_mapping(
dim: int,
vector_field: str = "vector_field",
) -> Dict:
"""For Painless Scripting or Script Scoring,the default mapping to create index."""
return {
"mappings": {
"properties": {
vector_field: {"type": "knn_vector", "dimension": dim},
}
}
}
def _default_text_mapping(
dim: int,
engine: str = "nmslib",
space_type: str = "l2",
ef_search: int = 512,
ef_construction: int = 512,
m: int = 16,
vector_field: str = "vector_field",
) -> Dict:
"""For Approximate k-NN Search, this is the default mapping to create index."""
return {
"settings": {"index": {"knn": True, "knn.algo_param.ef_search": ef_search}},
"mappings": {
"properties": {
vector_field: {
"type": "knn_vector",
"dimension": dim,
"method": {
"name": "hnsw",
"space_type": space_type,
"engine": engine,
"parameters": {"ef_construction": ef_construction, "m": m},
},
}
}
},
}
def _default_approximate_search_query(
query_vector: List[float],
k: int = 4,
vector_field: str = "vector_field",
) -> Dict:
"""For Approximate k-NN Search, this is the default query."""
return {
"size": k,
"query": {"knn": {vector_field: {"vector": query_vector, "k": k}}},
}
def _approximate_search_query_with_boolean_filter(
query_vector: List[float],
boolean_filter: Dict,
k: int = 4,
vector_field: str = "vector_field",
subquery_clause: str = "must",
) -> Dict:
"""For Approximate k-NN Search, with Boolean Filter."""
return {
"size": k,
"query": {
"bool": {
"filter": boolean_filter,
subquery_clause: [
{"knn": {vector_field: {"vector": query_vector, "k": k}}}
],
}
},
}
def _approximate_search_query_with_efficient_filter(
query_vector: List[float],
efficient_filter: Dict,
k: int = 4,
vector_field: str = "vector_field",
) -> Dict:
"""For Approximate k-NN Search, with Efficient Filter for Lucene and
Faiss Engines."""
search_query = _default_approximate_search_query(
query_vector, k=k, vector_field=vector_field
)
search_query["query"]["knn"][vector_field]["filter"] = efficient_filter
return search_query
def _default_script_query(
query_vector: List[float],
k: int = 4,
space_type: str = "l2",
pre_filter: Optional[Dict] = None,
vector_field: str = "vector_field",
) -> Dict:
"""For Script Scoring Search, this is the default query."""
if not pre_filter:
pre_filter = MATCH_ALL_QUERY
return {
"size": k,
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": "knn_score",
"lang": "knn",
"params": {
"field": vector_field,
"query_value": query_vector,
"space_type": space_type,
},
},
}
},
}
def __get_painless_scripting_source(
space_type: str, vector_field: str = "vector_field"
) -> str:
"""For Painless Scripting, it returns the script source based on space type."""
source_value = (
"(1.0 + " + space_type + "(params.query_value, doc['" + vector_field + "']))"
)
if space_type == "cosineSimilarity":
return source_value
else:
return "1/" + source_value
def _default_painless_scripting_query(
query_vector: List[float],
k: int = 4,
space_type: str = "l2Squared",
pre_filter: Optional[Dict] = None,
vector_field: str = "vector_field",
) -> Dict:
"""For Painless Scripting Search, this is the default query."""
if not pre_filter:
pre_filter = MATCH_ALL_QUERY
source = __get_painless_scripting_source(space_type, vector_field=vector_field)
return {
"size": k,
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": source,
"params": {
"field": vector_field,
"query_value": query_vector,
},
},
}
},
}
def _get_kwargs_value(kwargs: Any, key: str, default_value: Any) -> Any:
"""Get the value of the key if present. Else get the default_value."""
if key in kwargs:
return kwargs.get(key)
return default_value
class OpenSearchVectorSearch(VectorStore):
"""`Amazon OpenSearch Vector Engine` vector store.
Example:
.. code-block:: python
from langchain.vectorstores import OpenSearchVectorSearch
opensearch_vector_search = OpenSearchVectorSearch(
"http://localhost:9200",
"embeddings",
embedding_function
)
"""
def __init__(
self,
opensearch_url: str,
index_name: str,
embedding_function: Embeddings,
**kwargs: Any,
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index_name = index_name
http_auth = _get_kwargs_value(kwargs, "http_auth", None)
self.is_aoss = _is_aoss_enabled(http_auth=http_auth)
self.client = _get_opensearch_client(opensearch_url, **kwargs)
self.engine = _get_kwargs_value(kwargs, "engine", None)
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def __add(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> List[str]:
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
index_name = _get_kwargs_value(kwargs, "index_name", self.index_name)
text_field = _get_kwargs_value(kwargs, "text_field", "text")
dim = len(embeddings[0])
engine = _get_kwargs_value(kwargs, "engine", "nmslib")
space_type = _get_kwargs_value(kwargs, "space_type", "l2")
ef_search = _get_kwargs_value(kwargs, "ef_search", 512)
ef_construction = _get_kwargs_value(kwargs, "ef_construction", 512)
m = _get_kwargs_value(kwargs, "m", 16)
vector_field = _get_kwargs_value(kwargs, "vector_field", "vector_field")
max_chunk_bytes = _get_kwargs_value(kwargs, "max_chunk_bytes", 1 * 1024 * 1024)
_validate_aoss_with_engines(self.is_aoss, engine)
mapping = _default_text_mapping(
dim, engine, space_type, ef_search, ef_construction, m, vector_field
)
return _bulk_ingest_embeddings(
self.client,
index_name,
embeddings,
texts,
metadatas=metadatas,
ids=ids,
vector_field=vector_field,
text_field=text_field,
mapping=mapping,
max_chunk_bytes=max_chunk_bytes,
is_aoss=self.is_aoss,
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
bulk_size: Bulk API request count; Default: 500
Returns:
List of ids from adding the texts into the vectorstore.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
"""
embeddings = self.embedding_function.embed_documents(list(texts))
return self.__add(
texts,
embeddings,
metadatas=metadatas,
ids=ids,
bulk_size=bulk_size,
            **kwargs,
)
def add_embeddings(
self,
text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Add the given texts and embeddings to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
bulk_size: Bulk API request count; Default: 500
Returns:
List of ids from adding the texts into the vectorstore.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
"""
texts, embeddings = zip(*text_embeddings)
return self.__add(
list(texts),
list(embeddings),
metadatas=metadatas,
ids=ids,
bulk_size=bulk_size,
            **kwargs,
)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
metadata_field: Document field that metadata is stored in. Defaults to
"metadata".
Can be set to a special value "*" to include the entire document.
Optional Args for Approximate Search:
search_type: "approximate_search"; default: "approximate_search"
            boolean_filter: A Boolean filter is a post filter that consists of a
                Boolean query containing a k-NN query and a filter.
subquery_clause: Query clause on the knn vector field; default: "must"
lucene_filter: the Lucene algorithm decides whether to perform an exact
k-NN search with pre-filtering or an approximate search with modified
post-filtering. (deprecated, use `efficient_filter`)
efficient_filter: the Lucene Engine or Faiss Engine decides whether to
perform an exact k-NN search with pre-filtering or an approximate search
with modified post-filtering.
Optional Args for Script Scoring Search:
search_type: "script_scoring"; default: "approximate_search"
space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct",
"hammingbit"; default: "l2"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
Optional Args for Painless Scripting Search:
search_type: "painless_scripting"; default: "approximate_search"
space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
"""
docs_with_scores = self.similarity_search_with_score(query, k, **kwargs)
return [doc[0] for doc in docs_with_scores]
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return docs and it's scores most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
            List of Documents along with their scores, most similar to the query.
Optional Args:
same as `similarity_search`
"""
text_field = _get_kwargs_value(kwargs, "text_field", "text")
metadata_field = _get_kwargs_value(kwargs, "metadata_field", "metadata")
hits = self._raw_similarity_search_with_score(query=query, k=k, **kwargs)
documents_with_scores = [
(
Document(
page_content=hit["_source"][text_field],
metadata=hit["_source"]
if metadata_field == "*" or metadata_field not in hit["_source"]
else hit["_source"][metadata_field],
),
hit["_score"],
)
for hit in hits
]
return documents_with_scores
def _raw_similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[dict]:
"""Return raw opensearch documents (dict) including vectors,
scores most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of dict with its scores most similar to the query.
Optional Args:
same as `similarity_search`
"""
embedding = self.embedding_function.embed_query(query)
search_type = _get_kwargs_value(kwargs, "search_type", "approximate_search")
vector_field = _get_kwargs_value(kwargs, "vector_field", "vector_field")
index_name = _get_kwargs_value(kwargs, "index_name", self.index_name)
filter = _get_kwargs_value(kwargs, "filter", {})
if (
self.is_aoss
and search_type != "approximate_search"
and search_type != SCRIPT_SCORING_SEARCH
):
raise ValueError(
"Amazon OpenSearch Service Serverless only "
"supports `approximate_search` and `script_scoring`"
)
if search_type == "approximate_search":
boolean_filter = _get_kwargs_value(kwargs, "boolean_filter", {})
subquery_clause = _get_kwargs_value(kwargs, "subquery_clause", "must")
efficient_filter = _get_kwargs_value(kwargs, "efficient_filter", {})
# `lucene_filter` is deprecated, added for Backwards Compatibility
lucene_filter = _get_kwargs_value(kwargs, "lucene_filter", {})
if boolean_filter != {} and efficient_filter != {}:
raise ValueError(
"Both `boolean_filter` and `efficient_filter` are provided which "
"is invalid"
)
if lucene_filter != {} and efficient_filter != {}:
raise ValueError(
"Both `lucene_filter` and `efficient_filter` are provided which "
"is invalid. `lucene_filter` is deprecated"
)
if lucene_filter != {} and boolean_filter != {}:
raise ValueError(
"Both `lucene_filter` and `boolean_filter` are provided which "
"is invalid. `lucene_filter` is deprecated"
)
if (
efficient_filter == {}
and boolean_filter == {}
and lucene_filter == {}
and filter != {}
):
if self.engine in ["faiss", "lucene"]:
efficient_filter = filter
else:
boolean_filter = filter
if boolean_filter != {}:
search_query = _approximate_search_query_with_boolean_filter(
embedding,
boolean_filter,
k=k,
vector_field=vector_field,
subquery_clause=subquery_clause,
)
elif efficient_filter != {}:
search_query = _approximate_search_query_with_efficient_filter(
embedding, efficient_filter, k=k, vector_field=vector_field
)
elif lucene_filter != {}:
warnings.warn(
"`lucene_filter` is deprecated. Please use the keyword argument"
" `efficient_filter`"
)
search_query = _approximate_search_query_with_efficient_filter(
embedding, lucene_filter, k=k, vector_field=vector_field
)
else:
search_query = _default_approximate_search_query(
embedding, k=k, vector_field=vector_field
)
elif search_type == SCRIPT_SCORING_SEARCH:
space_type = _get_kwargs_value(kwargs, "space_type", "l2")
pre_filter = _get_kwargs_value(kwargs, "pre_filter", MATCH_ALL_QUERY)
search_query = _default_script_query(
embedding, k, space_type, pre_filter, vector_field
)
elif search_type == PAINLESS_SCRIPTING_SEARCH:
space_type = _get_kwargs_value(kwargs, "space_type", "l2Squared")
pre_filter = _get_kwargs_value(kwargs, "pre_filter", MATCH_ALL_QUERY)
search_query = _default_painless_scripting_query(
embedding, k, space_type, pre_filter, vector_field
)
else:
raise ValueError("Invalid `search_type` provided as an argument")
response = self.client.search(index=index_name, body=search_query)
return [hit for hit in response["hits"]["hits"]]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
    ) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
vector_field = _get_kwargs_value(kwargs, "vector_field", "vector_field")
text_field = _get_kwargs_value(kwargs, "text_field", "text")
metadata_field = _get_kwargs_value(kwargs, "metadata_field", "metadata")
# Get embedding of the user query
embedding = self.embedding_function.embed_query(query)
# Do ANN/KNN search to get top fetch_k results where fetch_k >= k
results = self._raw_similarity_search_with_score(query, fetch_k, **kwargs)
embeddings = [result["_source"][vector_field] for result in results]
# Rerank top k results using MMR, (mmr_selected is a list of indices)
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
Document(
page_content=results[i]["_source"][text_field],
metadata=results[i]["_source"][metadata_field],
)
for i in mmr_selected
]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
bulk_size: int = 500,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> OpenSearchVectorSearch:
"""Construct OpenSearchVectorSearch wrapper from raw texts.
Example:
.. code-block:: python
from langchain.vectorstores import OpenSearchVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
opensearch_vector_search = OpenSearchVectorSearch.from_texts(
texts,
embeddings,
opensearch_url="http://localhost:9200"
)
OpenSearch by default supports Approximate Search powered by nmslib, faiss
and lucene engines recommended for large datasets. Also supports brute force
search through Script Scoring and Painless Scripting.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "lucene"; default: "nmslib"
space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512
ef_construction: Size of the dynamic list used during k-NN graph creation.
Higher values lead to more accurate graph but slower indexing speed;
default: 512
m: Number of bidirectional links created for each new element. Large impact
on memory consumption. Between 2 and 100; default: 16
Keyword Args for Script Scoring or Painless Scripting:
is_appx_search: False
"""
embeddings = embedding.embed_documents(texts)
return cls.from_embeddings(
embeddings,
texts,
embedding,
metadatas=metadatas,
bulk_size=bulk_size,
ids=ids,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
embeddings: List[List[float]],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
bulk_size: int = 500,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> OpenSearchVectorSearch:
"""Construct OpenSearchVectorSearch wrapper from pre-vectorized embeddings.
Example:
.. code-block:: python
from langchain.vectorstores import OpenSearchVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedder = OpenAIEmbeddings()
embeddings = embedder.embed_documents(["foo", "bar"])
opensearch_vector_search = OpenSearchVectorSearch.from_embeddings(
embeddings,
texts,
embedder,
opensearch_url="http://localhost:9200"
)
OpenSearch by default supports Approximate Search powered by nmslib, faiss
and lucene engines recommended for large datasets. Also supports brute force
search through Script Scoring and Painless Scripting.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "lucene"; default: "nmslib"
space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512
ef_construction: Size of the dynamic list used during k-NN graph creation.
Higher values lead to more accurate graph but slower indexing speed;
default: 512
m: Number of bidirectional links created for each new element. Large impact
on memory consumption. Between 2 and 100; default: 16
Keyword Args for Script Scoring or Painless Scripting:
is_appx_search: False
"""
opensearch_url = get_from_dict_or_env(
kwargs, "opensearch_url", "OPENSEARCH_URL"
)
        # List of arguments that need to be removed from kwargs
        # before passing kwargs to get the OpenSearch client
keys_list = [
"opensearch_url",
"index_name",
"is_appx_search",
"vector_field",
"text_field",
"engine",
"space_type",
"ef_search",
"ef_construction",
"m",
"max_chunk_bytes",
"is_aoss",
]
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
dim = len(embeddings[0])
        # Get the index name either from kwargs or from an environment variable
        # before falling back to random generation
index_name = get_from_dict_or_env(
kwargs, "index_name", "OPENSEARCH_INDEX_NAME", default=uuid.uuid4().hex
)
is_appx_search = _get_kwargs_value(kwargs, "is_appx_search", True)
vector_field = _get_kwargs_value(kwargs, "vector_field", "vector_field")
text_field = _get_kwargs_value(kwargs, "text_field", "text")
max_chunk_bytes = _get_kwargs_value(kwargs, "max_chunk_bytes", 1 * 1024 * 1024)
http_auth = _get_kwargs_value(kwargs, "http_auth", None)
is_aoss = _is_aoss_enabled(http_auth=http_auth)
engine = None
if is_aoss and not is_appx_search:
raise ValueError(
"Amazon OpenSearch Service Serverless only "
"supports `approximate_search`"
)
if is_appx_search:
engine = _get_kwargs_value(kwargs, "engine", "nmslib")
space_type = _get_kwargs_value(kwargs, "space_type", "l2")
ef_search = _get_kwargs_value(kwargs, "ef_search", 512)
ef_construction = _get_kwargs_value(kwargs, "ef_construction", 512)
m = _get_kwargs_value(kwargs, "m", 16)
_validate_aoss_with_engines(is_aoss, engine)
mapping = _default_text_mapping(
dim, engine, space_type, ef_search, ef_construction, m, vector_field
)
else:
mapping = _default_scripting_text_mapping(dim)
        for key in keys_list:
            kwargs.pop(key, None)
client = _get_opensearch_client(opensearch_url, **kwargs)
_bulk_ingest_embeddings(
client,
index_name,
embeddings,
texts,
ids=ids,
metadatas=metadatas,
vector_field=vector_field,
text_field=text_field,
mapping=mapping,
max_chunk_bytes=max_chunk_bytes,
is_aoss=is_aoss,
)
kwargs["engine"] = engine
return cls(opensearch_url, index_name, embedding, **kwargs)
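# Hedged usage sketch (not part of the library): shows the optional search
# arguments documented above. The endpoint, index contents, metadata filter and
# embedding model are assumptions for illustration only; a running OpenSearch
# instance is required for this to execute.
def _example_opensearch_searches() -> None:
    from langchain.embeddings import OpenAIEmbeddings  # assumed available
    docsearch = OpenSearchVectorSearch.from_texts(
        ["foo", "bar", "baz"],
        OpenAIEmbeddings(),
        metadatas=[{"source": "demo"}] * 3,
        opensearch_url="http://localhost:9200",
        engine="faiss",
    )
    # Approximate k-NN with an efficient (pre-)filter on metadata.
    docs = docsearch.similarity_search(
        "foo",
        k=2,
        efficient_filter={"term": {"metadata.source": "demo"}},
    )
    # Brute-force script scoring with a pre-filter.
    docs_scored = docsearch.similarity_search_with_score(
        "foo",
        k=2,
        search_type="script_scoring",
        space_type="cosinesimil",
        pre_filter={"match_all": {}},
    )
    print(docs, docs_scored)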
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~schema~runnable~branch.py | from typing import (
Any,
Awaitable,
Callable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from langchain.load.dump import dumpd
from langchain.pydantic_v1 import BaseModel
from langchain.schema.runnable.base import (
Runnable,
RunnableLike,
RunnableSerializable,
coerce_to_runnable,
)
from langchain.schema.runnable.config import (
RunnableConfig,
ensure_config,
get_callback_manager_for_config,
patch_config,
)
from langchain.schema.runnable.utils import (
ConfigurableFieldSpec,
Input,
Output,
get_unique_config_specs,
)
class RunnableBranch(RunnableSerializable[Input, Output]):
"""A Runnable that selects which branch to run based on a condition.
The runnable is initialized with a list of (condition, runnable) pairs and
a default branch.
When operating on an input, the first condition that evaluates to True is
selected, and the corresponding runnable is run on the input.
If no condition evaluates to True, the default branch is run on the input.
Examples:
.. code-block:: python
from langchain.schema.runnable import RunnableBranch
branch = RunnableBranch(
(lambda x: isinstance(x, str), lambda x: x.upper()),
(lambda x: isinstance(x, int), lambda x: x + 1),
(lambda x: isinstance(x, float), lambda x: x * 2),
lambda x: "goodbye",
)
branch.invoke("hello") # "HELLO"
branch.invoke(None) # "goodbye"
"""
branches: Sequence[Tuple[Runnable[Input, bool], Runnable[Input, Output]]]
default: Runnable[Input, Output]
def __init__(
self,
*branches: Union[
Tuple[
Union[
Runnable[Input, bool],
Callable[[Input], bool],
Callable[[Input], Awaitable[bool]],
],
RunnableLike,
],
RunnableLike, # To accommodate the default branch
],
) -> None:
"""A Runnable that runs one of two branches based on a condition."""
if len(branches) < 2:
raise ValueError("RunnableBranch requires at least two branches")
default = branches[-1]
if not isinstance(
default, (Runnable, Callable, Mapping) # type: ignore[arg-type]
):
raise TypeError(
"RunnableBranch default must be runnable, callable or mapping."
)
default_ = cast(
Runnable[Input, Output], coerce_to_runnable(cast(RunnableLike, default))
)
_branches = []
for branch in branches[:-1]:
if not isinstance(branch, (tuple, list)): # type: ignore[arg-type]
raise TypeError(
f"RunnableBranch branches must be "
f"tuples or lists, not {type(branch)}"
)
if not len(branch) == 2:
raise ValueError(
f"RunnableBranch branches must be "
f"tuples or lists of length 2, not {len(branch)}"
)
condition, runnable = branch
condition = cast(Runnable[Input, bool], coerce_to_runnable(condition))
runnable = coerce_to_runnable(runnable)
_branches.append((condition, runnable))
super().__init__(branches=_branches, default=default_)
class Config:
arbitrary_types_allowed = True
@classmethod
def is_lc_serializable(cls) -> bool:
"""RunnableBranch is serializable if all its branches are serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""The namespace of a RunnableBranch is the namespace of its default branch."""
return cls.__module__.split(".")[:-1]
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
runnables = (
[self.default]
+ [r for _, r in self.branches]
+ [r for r, _ in self.branches]
)
for runnable in runnables:
if runnable.get_input_schema(config).schema().get("type") is not None:
return runnable.get_input_schema(config)
return super().get_input_schema(config)
@property
def config_specs(self) -> Sequence[ConfigurableFieldSpec]:
return get_unique_config_specs(
spec
for step in (
[self.default]
+ [r for _, r in self.branches]
+ [r for r, _ in self.branches]
)
for spec in step.config_specs
)
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
"""First evaluates the condition, then delegate to true or false branch."""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
input,
name=config.get("run_name"),
)
try:
for idx, branch in enumerate(self.branches):
condition, runnable = branch
expression_value = condition.invoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
),
)
if expression_value:
output = runnable.invoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
),
**kwargs,
)
break
else:
output = self.default.invoke(
input,
config=patch_config(
config, callbacks=run_manager.get_child(tag="branch:default")
),
**kwargs,
)
except Exception as e:
run_manager.on_chain_error(e)
raise
run_manager.on_chain_end(dumpd(output))
return output
async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
"""Async version of invoke."""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
input,
name=config.get("run_name"),
)
try:
for idx, branch in enumerate(self.branches):
condition, runnable = branch
expression_value = await condition.ainvoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
),
)
if expression_value:
output = await runnable.ainvoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
),
**kwargs,
)
break
else:
output = await self.default.ainvoke(
input,
config=patch_config(
config, callbacks=run_manager.get_child(tag="branch:default")
),
**kwargs,
)
except Exception as e:
run_manager.on_chain_error(e)
raise
run_manager.on_chain_end(dumpd(output))
return output
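# Hedged usage sketch (illustrative only; the topic key and branch outputs are
# assumptions): route an input dict to one of several lambdas based on its
# "topic" key. Plain callables are coerced to Runnables automatically.
def _example_runnable_branch() -> None:
    from langchain.schema.runnable import RunnableLambda
    branch = RunnableBranch(
        (lambda x: "math" in x["topic"], RunnableLambda(lambda x: "math branch")),
        (lambda x: "code" in x["topic"], RunnableLambda(lambda x: "code branch")),
        RunnableLambda(lambda x: "general branch"),
    )
    assert branch.invoke({"topic": "math homework"}) == "math branch"
    assert branch.invoke({"topic": "anything else"}) == "general branch"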
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~embeddings~test_qianfan_endpoint.py | """Test Baidu Qianfan Embedding Endpoint."""
from langchain.embeddings.baidu_qianfan_endpoint import QianfanEmbeddingsEndpoint
def test_embedding_multiple_documents() -> None:
documents = ["foo", "bar"]
embedding = QianfanEmbeddingsEndpoint()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 384
assert len(output[1]) == 384
def test_embedding_query() -> None:
query = "foo"
embedding = QianfanEmbeddingsEndpoint()
output = embedding.embed_query(query)
assert len(output) == 384
def test_model() -> None:
documents = ["hi", "qianfan"]
embedding = QianfanEmbeddingsEndpoint(model="Embedding-V1")
output = embedding.embed_documents(documents)
assert len(output) == 2
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~tot~checker.py | from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain_experimental.tot.thought import ThoughtValidity
class ToTChecker(Chain, ABC):
"""
Tree of Thought (ToT) checker.
This is an abstract ToT checker that must be implemented by the user. You
can implement a simple rule-based checker or a more sophisticated
neural network based classifier.
"""
output_key: str = "validity" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""The checker input keys.
:meta private:
"""
return ["problem_description", "thoughts"]
@property
def output_keys(self) -> List[str]:
"""The checker output keys.
:meta private:
"""
return [self.output_key]
@abstractmethod
def evaluate(
self,
problem_description: str,
thoughts: Tuple[str, ...] = (),
) -> ThoughtValidity:
"""
Evaluate the response to the problem description and return the solution type.
"""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, ThoughtValidity]:
return {self.output_key: self.evaluate(**inputs)}
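# A minimal rule-based checker sketch (illustrative; the expected answer "42"
# and the heuristics are assumptions): the last thought is VALID_FINAL when it
# contains the expected answer, VALID_INTERMEDIATE when it at least mentions
# the first word of the problem, and INVALID otherwise.
class SimpleRuleBasedChecker(ToTChecker):
    def evaluate(
        self,
        problem_description: str,
        thoughts: Tuple[str, ...] = (),
    ) -> ThoughtValidity:
        last_thought = thoughts[-1] if thoughts else ""
        if "42" in last_thought:
            return ThoughtValidity.VALID_FINAL
        if problem_description.split() and (
            problem_description.split()[0].lower() in last_thought.lower()
        ):
            return ThoughtValidity.VALID_INTERMEDIATE
        return ThoughtValidity.INVALID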
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~writer.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
class Writer(LLM):
"""Writer large language models.
To use, you should have the environment variable ``WRITER_API_KEY`` and
``WRITER_ORG_ID`` set with your API key and organization ID respectively.
Example:
.. code-block:: python
from langchain.llms import Writer
writer = Writer(model_id="palmyra-base")
"""
writer_org_id: Optional[str] = None
"""Writer organization ID."""
model_id: str = "palmyra-instruct"
"""Model name to use."""
min_tokens: Optional[int] = None
"""Minimum number of tokens to generate."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
temperature: Optional[float] = None
"""What sampling temperature to use."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
stop: Optional[List[str]] = None
"""Sequences when completion generation will stop."""
presence_penalty: Optional[float] = None
"""Penalizes repeated tokens regardless of frequency."""
repetition_penalty: Optional[float] = None
"""Penalizes repeated tokens according to frequency."""
best_of: Optional[int] = None
"""Generates this many completions server-side and returns the "best"."""
logprobs: bool = False
"""Whether to return log probabilities."""
n: Optional[int] = None
"""How many completions to generate."""
writer_api_key: Optional[str] = None
"""Writer API key."""
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and organization id exist in environment."""
writer_api_key = get_from_dict_or_env(
values, "writer_api_key", "WRITER_API_KEY"
)
values["writer_api_key"] = writer_api_key
writer_org_id = get_from_dict_or_env(values, "writer_org_id", "WRITER_ORG_ID")
values["writer_org_id"] = writer_org_id
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Writer API."""
return {
"minTokens": self.min_tokens,
"maxTokens": self.max_tokens,
"temperature": self.temperature,
"topP": self.top_p,
"stop": self.stop,
"presencePenalty": self.presence_penalty,
"repetitionPenalty": self.repetition_penalty,
"bestOf": self.best_of,
"logprobs": self.logprobs,
"n": self.n,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id, "writer_org_id": self.writer_org_id},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "writer"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Writer's completions endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
                writer = Writer(model_id="palmyra-base")
                response = writer("Tell me a joke.")
"""
if self.base_url is not None:
base_url = self.base_url
else:
base_url = (
"https://enterprise-api.writer.com/llm"
f"/organization/{self.writer_org_id}"
f"/model/{self.model_id}/completions"
)
params = {**self._default_params, **kwargs}
response = requests.post(
url=base_url,
headers={
"Authorization": f"{self.writer_api_key}",
"Content-Type": "application/json",
"Accept": "application/json",
},
json={"prompt": prompt, **params},
)
text = response.text
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
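# Hedged usage sketch (model id, sampling settings and prompt are assumptions;
# requires WRITER_API_KEY and WRITER_ORG_ID to be set in the environment):
def _example_writer_call() -> None:
    llm = Writer(model_id="palmyra-instruct", max_tokens=64, temperature=0.7)
    print(llm("Write a one-line tagline for a note-taking app."))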
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~notion.py | from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class NotionDirectoryLoader(BaseLoader):
"""Load `Notion directory` dump."""
def __init__(self, path: str, *, encoding: str = "utf-8") -> None:
"""Initialize with a file path."""
self.file_path = path
self.encoding = encoding
def load(self) -> List[Document]:
"""Load documents."""
paths = list(Path(self.file_path).glob("**/*.md"))
docs = []
for p in paths:
with open(p, encoding=self.encoding) as f:
text = f.read()
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
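# Hedged usage sketch (the export directory name is an assumption): load a
# Notion markdown export that was unzipped into "Notion_DB/".
def _example_notion_directory_loader() -> None:
    loader = NotionDirectoryLoader("Notion_DB")
    docs = loader.load()
    print(len(docs), docs[0].metadata["source"] if docs else "no docs")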
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~blockchain.py | import os
import re
import time
from enum import Enum
from typing import List, Optional
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BlockchainType(Enum):
"""Enumerator of the supported blockchains."""
ETH_MAINNET = "eth-mainnet"
ETH_GOERLI = "eth-goerli"
POLYGON_MAINNET = "polygon-mainnet"
POLYGON_MUMBAI = "polygon-mumbai"
class BlockchainDocumentLoader(BaseLoader):
"""Load elements from a blockchain smart contract.
The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet,
Polygon mainnet, and Polygon Mumbai testnet.
If no BlockchainType is specified, the default is Ethereum mainnet.
The Loader uses the Alchemy API to interact with the blockchain.
ALCHEMY_API_KEY environment variable must be set to use this loader.
The API returns 100 NFTs per request and can be paginated using the
startToken parameter.
If get_all_tokens is set to True, the loader will get all tokens
on the contract. Note that for contracts with a large number of tokens,
this may take a long time (e.g. 10k tokens is 100 requests).
Default value is false for this reason.
The max_execution_time (sec) can be set to limit the execution time
of the loader.
Future versions of this loader can:
- Support additional Alchemy APIs (e.g. getTransactions, etc.)
    - Support additional blockchain APIs (e.g. Infura, Opensea, etc.)
"""
def __init__(
self,
contract_address: str,
blockchainType: BlockchainType = BlockchainType.ETH_MAINNET,
api_key: str = "docs-demo",
startToken: str = "",
get_all_tokens: bool = False,
max_execution_time: Optional[int] = None,
):
"""
Args:
contract_address: The address of the smart contract.
blockchainType: The blockchain type.
api_key: The Alchemy API key.
startToken: The start token for pagination.
get_all_tokens: Whether to get all tokens on the contract.
max_execution_time: The maximum execution time (sec).
"""
self.contract_address = contract_address
self.blockchainType = blockchainType.value
self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key
self.startToken = startToken
self.get_all_tokens = get_all_tokens
self.max_execution_time = max_execution_time
if not self.api_key:
raise ValueError("Alchemy API key not provided.")
if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address):
raise ValueError(f"Invalid contract address {self.contract_address}")
def load(self) -> List[Document]:
result = []
current_start_token = self.startToken
start_time = time.time()
while True:
url = (
f"https://{self.blockchainType}.g.alchemy.com/nft/v2/"
f"{self.api_key}/getNFTsForCollection?withMetadata="
f"True&contractAddress={self.contract_address}"
f"&startToken={current_start_token}"
)
response = requests.get(url)
if response.status_code != 200:
raise ValueError(
f"Request failed with status code {response.status_code}"
)
items = response.json()["nfts"]
if not items:
break
for item in items:
content = str(item)
tokenId = item["id"]["tokenId"]
metadata = {
"source": self.contract_address,
"blockchain": self.blockchainType,
"tokenId": tokenId,
}
result.append(Document(page_content=content, metadata=metadata))
# exit after the first API call if get_all_tokens is False
if not self.get_all_tokens:
break
# get the start token for the next API call from the last item in array
current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"])
if (
self.max_execution_time is not None
and (time.time() - start_time) > self.max_execution_time
):
raise RuntimeError("Execution time exceeded the allowed time limit.")
if not result:
raise ValueError(
f"No NFTs found for contract address {self.contract_address}"
)
return result
# add one to the tokenId, ensuring the correct tokenId format is used
def _get_next_tokenId(self, tokenId: str) -> str:
value_type = self._detect_value_type(tokenId)
if value_type == "hex_0x":
value_int = int(tokenId, 16)
elif value_type == "hex_0xbf":
value_int = int(tokenId[2:], 16)
else:
value_int = int(tokenId)
result = value_int + 1
if value_type == "hex_0x":
return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x")
elif value_type == "hex_0xbf":
return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x")
else:
return str(result)
# A smart contract can use different formats for the tokenId
@staticmethod
def _detect_value_type(tokenId: str) -> str:
if isinstance(tokenId, int):
return "int"
elif tokenId.startswith("0x"):
return "hex_0x"
elif tokenId.startswith("0xbf"):
return "hex_0xbf"
else:
return "hex_0xbf"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~output_parsers~datetime.py | import random
from datetime import datetime, timedelta
from typing import List
from langchain.schema import BaseOutputParser, OutputParserException
from langchain.utils import comma_list
def _generate_random_datetime_strings(
pattern: str,
n: int = 3,
start_date: datetime = datetime(1, 1, 1),
end_date: datetime = datetime.now() + timedelta(days=3650),
) -> List[str]:
"""Generates n random datetime strings conforming to the
given pattern within the specified date range.
Pattern should be a string containing the desired format codes.
start_date and end_date should be datetime objects representing
the start and end of the date range.
"""
examples = []
delta = end_date - start_date
    for _ in range(n):
random_delta = random.uniform(0, delta.total_seconds())
dt = start_date + timedelta(seconds=random_delta)
date_string = dt.strftime(pattern)
examples.append(date_string)
return examples
class DatetimeOutputParser(BaseOutputParser[datetime]):
"""Parse the output of an LLM call to a datetime."""
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
"""The string value that used as the datetime format."""
def get_format_instructions(self) -> str:
examples = comma_list(_generate_random_datetime_strings(self.format))
return f"""Напиши только дату в следующем формате: \
"{self.format}". Например: {examples}. Не пиши ничего больше."""
def parse(self, response: str) -> datetime:
try:
return datetime.strptime(response.strip(), self.format)
except ValueError as e:
raise OutputParserException(
f"Could not parse datetime string: {response}"
) from e
@property
def _type(self) -> str:
return "datetime"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~graphs~graph_store.py | from abc import abstractmethod
from typing import Any, Dict, List
from langchain.graphs.graph_document import GraphDocument
class GraphStore:
"""An abstract class wrapper for graph operations."""
@property
@abstractmethod
def get_schema(self) -> str:
"""Returns the schema of the Graph database"""
pass
@property
@abstractmethod
def get_structured_schema(self) -> Dict[str, Any]:
"""Returns the schema of the Graph database"""
pass
@abstractmethod
def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
"""Query the graph."""
pass
@abstractmethod
def refresh_schema(self) -> None:
"""Refreshes the graph schema information."""
pass
@abstractmethod
def add_graph_documents(
self, graph_documents: List[GraphDocument], include_source: bool = False
) -> None:
"""Take GraphDocument as input as uses it to construct a graph."""
pass
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~readthedocs.py | from pathlib import Path
from typing import Any, List, Optional, Sequence, Tuple, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class ReadTheDocsLoader(BaseLoader):
"""Load `ReadTheDocs` documentation directory."""
def __init__(
self,
path: Union[str, Path],
encoding: Optional[str] = None,
errors: Optional[str] = None,
custom_html_tag: Optional[Tuple[str, dict]] = None,
patterns: Sequence[str] = ("*.htm", "*.html"),
**kwargs: Optional[Any]
):
"""
Initialize ReadTheDocsLoader
The loader loops over all files under `path` and extracts the actual content of
the files by retrieving main html tags. Default main html tags include
`<main id="main-content>`, <`div role="main>`, and `<article role="main">`. You
can also define your own html tags by passing custom_html_tag, e.g.
`("div", "class=main")`. The loader iterates html tags with the order of
custom html tags (if exists) and default html tags. If any of the tags is not
empty, the loop will break and retrieve the content out of that tag.
Args:
path: The location of pulled readthedocs folder.
encoding: The encoding with which to open the documents.
errors: Specify how encoding and decoding errors are to be handled—this
cannot be used in binary mode.
custom_html_tag: Optional custom html tag to retrieve the content from
files.
patterns: The file patterns to load, passed to `glob.rglob`.
kwargs: named arguments passed to `bs4.BeautifulSoup`.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"Could not import python packages. "
"Please install it with `pip install beautifulsoup4`. "
)
try:
_ = BeautifulSoup(
"<html><body>Parser builder library test.</body></html>", **kwargs
)
except Exception as e:
raise ValueError("Parsing kwargs do not appear valid") from e
self.file_path = Path(path)
self.encoding = encoding
self.errors = errors
self.custom_html_tag = custom_html_tag
self.patterns = patterns
self.bs_kwargs = kwargs
def load(self) -> List[Document]:
"""Load documents."""
docs = []
for file_pattern in self.patterns:
for p in self.file_path.rglob(file_pattern):
if p.is_dir():
continue
with open(p, encoding=self.encoding, errors=self.errors) as f:
text = self._clean_data(f.read())
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
def _clean_data(self, data: str) -> str:
from bs4 import BeautifulSoup
soup = BeautifulSoup(data, **self.bs_kwargs)
# default tags
html_tags = [
("div", {"role": "main"}),
("main", {"id": "main-content"}),
]
if self.custom_html_tag is not None:
html_tags.append(self.custom_html_tag)
text = None
# reversed order. check the custom one first
for tag, attrs in html_tags[::-1]:
text = soup.find(tag, attrs)
# if found, break
if text is not None:
break
if text is not None:
text = text.get_text()
else:
text = ""
# trim empty lines
return "\n".join([t for t in text.split("\n") if t])
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~output_parsers~xml.py | import re
import xml.etree.ElementTree as ET
from typing import Any, Dict, List, Optional
from langchain.output_parsers.format_instructions import XML_FORMAT_INSTRUCTIONS
from langchain.schema import BaseOutputParser
class XMLOutputParser(BaseOutputParser):
"""Parse an output using xml format."""
tags: Optional[List[str]] = None
encoding_matcher: re.Pattern = re.compile(
r"<([^>]*encoding[^>]*)>\n(.*)", re.MULTILINE | re.DOTALL
)
def get_format_instructions(self) -> str:
return XML_FORMAT_INSTRUCTIONS.format(tags=self.tags)
def parse(self, text: str) -> Dict[str, List[Any]]:
text = text.strip("`").strip("xml")
encoding_match = self.encoding_matcher.search(text)
if encoding_match:
text = encoding_match.group(2)
if (text.startswith("<") or text.startswith("\n<")) and (
text.endswith(">") or text.endswith(">\n")
):
root = ET.fromstring(text)
return self._root_to_dict(root)
else:
raise ValueError(f"Could not parse output: {text}")
def _root_to_dict(self, root: ET.Element) -> Dict[str, List[Any]]:
"""Converts xml tree to python dictionary."""
result: Dict[str, List[Any]] = {root.tag: []}
for child in root:
if len(child) == 0:
result[root.tag].append({child.tag: child.text})
else:
result[root.tag].append(self._root_to_dict(child))
return result
@property
def _type(self) -> str:
return "xml"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~office365~send_message.py | from typing import List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.office365.base import O365BaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
body: str = Field(
...,
description="The message body to be sent.",
)
to: List[str] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[List[str]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[List[str]] = Field(
None,
description="The list of BCC recipients.",
)
class O365SendMessage(O365BaseTool):
"""Tool for sending an email in Office 365."""
name: str = "send_email"
description: str = (
"Use this tool to send an email with the provided message fields."
)
args_schema: Type[SendMessageSchema] = SendMessageSchema
def _run(
self,
body: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get mailbox object
mailbox = self.account.mailbox()
message = mailbox.new_message()
# Assign message values
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
            message.bcc.add(bcc)
message.send()
output = "Message sent: " + str(message)
return output
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~ainetwork~transfer.py | import json
from typing import Optional, Type
from langchain.callbacks.manager import AsyncCallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.ainetwork.base import AINBaseTool
class TransferSchema(BaseModel):
"""Schema for transfer operations."""
address: str = Field(..., description="Address to transfer AIN to")
amount: int = Field(..., description="Amount of AIN to transfer")
class AINTransfer(AINBaseTool):
"""Tool for transfer operations."""
name: str = "AINtransfer"
description: str = "Transfers AIN to a specified address"
args_schema: Type[TransferSchema] = TransferSchema
async def _arun(
self,
address: str,
amount: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
try:
res = await self.interface.wallet.transfer(address, amount, nonce=-1)
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{type(e).__name__}: {str(e)}"
| [
"Transfers AIN to a specified address"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~mapreduce.py | """Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.pydantic_v1 import Extra
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: Dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
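# Hedged usage sketch (model, prompt and splitter settings are assumptions):
# summarize a long text by mapping a prompt over chunks and reducing the
# partial summaries into one.
def _example_map_reduce_chain() -> None:
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate
    from langchain.text_splitter import CharacterTextSplitter
    prompt = PromptTemplate.from_template(
        "Write a concise summary of the following:\n\n{text}"
    )
    chain = MapReduceChain.from_params(
        llm=OpenAI(temperature=0),
        prompt=prompt,
        text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0),
    )
    print(chain.run("<a very long document goes here>"))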
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~gmail~get_thread.py | from typing import Dict, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.gmail.base import GmailBaseTool
class GetThreadSchema(BaseModel):
"""Input for GetMessageTool."""
# From https://support.google.com/mail/answer/7190?hl=en
thread_id: str = Field(
...,
description="The thread ID.",
)
class GmailGetThread(GmailBaseTool):
"""Tool that gets a thread by ID from Gmail."""
name: str = "get_gmail_thread"
description: str = (
"Use this tool to search for email messages."
" The input must be a valid Gmail query."
" The output is a JSON list of messages."
)
args_schema: Type[GetThreadSchema] = GetThreadSchema
def _run(
self,
thread_id: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Dict:
"""Run the tool."""
query = self.api_resource.users().threads().get(userId="me", id=thread_id)
thread_data = query.execute()
if not isinstance(thread_data, dict):
raise ValueError("The output of the query must be a list.")
messages = thread_data["messages"]
thread_data["messages"] = []
keys_to_keep = ["id", "snippet", "snippet"]
# TODO: Parse body.
for message in messages:
thread_data["messages"].append(
{k: message[k] for k in keys_to_keep if k in message}
)
return thread_data
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~autonomous_agents~hugginggpt~task_planner.py | import json
import re
from abc import abstractmethod
from typing import Any, Dict, List, Optional, Union
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import Callbacks
from langchain.chains import LLMChain
from langchain.prompts.chat import (
AIMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.tools.base import BaseTool
from langchain_experimental.pydantic_v1 import BaseModel
DEMONSTRATIONS = [
{
"role": "user",
"content": "покажи мне видео и изображение на основе текста 'мальчик бежит' и озвучь это", # noqa: E501
},
{
"role": "assistant",
"content": '[{{"task": "video_generator", "id": 0, "dep": [-1], "args": {{"prompt": "a boy is running" }}}}, {{"task": "text_reader", "id": 1, "dep": [-1], "args": {{"text": "a boy is running" }}}}, {{"task": "image_generator", "id": 2, "dep": [-1], "args": {{"prompt": "a boy is running" }}}}]', # noqa: E501
},
{
"role": "user",
"content": "У тебя есть несколько картинок e1.jpg, e2.png, e3.jpg, помоги мне посчитать количество овец?", # noqa: E501
},
{
"role": "assistant",
"content": '[ {{"task": "image_qa", "id": 0, "dep": [-1], "args": {{"image": "e1.jpg", "question": "How many sheep in the picture"}}}}, {{"task": "image_qa", "id": 1, "dep": [-1], "args": {{"image": "e2.jpg", "question": "How many sheep in the picture"}}}}, {{"task": "image_qa", "id": 2, "dep": [-1], "args": {{"image": "e3.jpg", "question": "How many sheep in the picture"}}}}]', # noqa: E501
},
]
class TaskPlaningChain(LLMChain):
"""Chain to execute tasks."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
demos: List[Dict] = DEMONSTRATIONS,
verbose: bool = True,
) -> LLMChain:
"""Get the response parser."""
system_template = """#1 Этап планирования задач: AI-ассистент может разбить ввод пользователя на несколько задач: [{{"task": задача, "id": id_задачи, "dep": id_зависимой_задачи, "args": {{"input name": текст может содержать <resource-dep_id>}}}}]. Специальный тег "dep_id" относится к сгенерированному тексту/изображению/аудио в зависимой задаче (Пожалуйста, учтите, генерирует ли зависимая задача ресурсы этого типа.) и "dep_id" должен быть в списке "dep". Поле "dep" обозначает id предыдущих обязательных задач, которые генерируют новый ресурс, на котором зависит текущая задача. Задача ДОЛЖНА быть выбрана из следующих инструментов (вместе с описанием инструмента, именем ввода и типом вывода): {tools}. Может быть несколько задач одного типа. Подумай шаг за шагом обо всех задачах, необходимых для решения запроса пользователя. Выделите как можно меньше задач, обеспечивая при этом возможность решения запроса пользователя. Обратите внимание на зависимости и порядок задач. Если ввод пользователя не может быть разобран, вам нужно ответить пустым JSON [].""" # noqa: E501
human_template = """Теперь я ввожу: {input}."""
system_message_prompt = SystemMessagePromptTemplate.from_template(
system_template
)
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
demo_messages: List[
Union[HumanMessagePromptTemplate, AIMessagePromptTemplate]
] = []
for demo in demos:
if demo["role"] == "user":
demo_messages.append(
HumanMessagePromptTemplate.from_template(demo["content"])
)
else:
demo_messages.append(
AIMessagePromptTemplate.from_template(demo["content"])
)
# demo_messages.append(message)
prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, *demo_messages, human_message_prompt]
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class Step:
def __init__(
self, task: str, id: int, dep: List[int], args: Dict[str, str], tool: BaseTool
):
self.task = task
self.id = id
self.dep = dep
self.args = args
self.tool = tool
class Plan:
def __init__(self, steps: List[Step]):
self.steps = steps
def __str__(self) -> str:
return str([str(step) for step in self.steps])
def __repr__(self) -> str:
return str(self)
class BasePlanner(BaseModel):
@abstractmethod
def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
"""Given input, decide what to do."""
@abstractmethod
async def aplan(
self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
) -> Plan:
"""Given input, decide what to do."""
class PlanningOutputParser(BaseModel):
def parse(self, text: str, hf_tools: List[BaseTool]) -> Plan:
steps = []
for v in json.loads(re.findall(r"\[.*\]", text)[0]):
choose_tool = None
for tool in hf_tools:
if tool.name == v["task"]:
choose_tool = tool
break
if choose_tool:
                steps.append(Step(v["task"], v["id"], v["dep"], v["args"], choose_tool))
return Plan(steps=steps)
class TaskPlanner(BasePlanner):
llm_chain: LLMChain
output_parser: PlanningOutputParser
stop: Optional[List] = None
    def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
        """Given input, decide what to do."""
inputs["tools"] = [
f"{tool.name}: {tool.description}" for tool in inputs["hf_tools"]
]
llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks)
return self.output_parser.parse(llm_response, inputs["hf_tools"])
    async def aplan(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> Plan:
        """Given input, decide what to do."""
        inputs["tools"] = [
            f"{tool.name}: {tool.description}" for tool in inputs["hf_tools"]
        ]
llm_response = await self.llm_chain.arun(
**inputs, stop=self.stop, callbacks=callbacks
)
return self.output_parser.parse(llm_response, inputs["hf_tools"])
def load_chat_planner(llm: BaseLanguageModel) -> TaskPlanner:
llm_chain = TaskPlaningChain.from_llm(llm)
return TaskPlanner(llm_chain=llm_chain, output_parser=PlanningOutputParser())
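# Hypothetical usage sketch (not part of the original module): assumes an
# OpenAI-compatible chat model is available; `hf_tools` is normally the list of
# BaseTool instances supplied by the HuggingGPT agent, left empty here purely
# for illustration, so the resulting plan will also be empty.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI
    planner = load_chat_planner(ChatOpenAI(temperature=0))
    plan = planner.plan(
        inputs={"input": "draw a picture of a running boy", "hf_tools": []}
    )
    print(plan)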
| [
"У тебя есть несколько картинок e1.jpg, e2.png, e3.jpg, помоги мне посчитать количество овец?",
"Теперь я ввожу: {input}.",
"[{{\"task\": \"video_generator\", \"id\": 0, \"dep\": [-1], \"args\": {{\"prompt\": \"a boy is running\" }}}}, {{\"task\": \"text_reader\", \"id\": 1, \"dep\": [-1], \"args\": {{\"text\": \"a boy is running\" }}}}, {{\"task\": \"image_generator\", \"id\": 2, \"dep\": [-1], \"args\": {{\"prompt\": \"a boy is running\" }}}}]",
"[ {{\"task\": \"image_qa\", \"id\": 0, \"dep\": [-1], \"args\": {{\"image\": \"e1.jpg\", \"question\": \"How many sheep in the picture\"}}}}, {{\"task\": \"image_qa\", \"id\": 1, \"dep\": [-1], \"args\": {{\"image\": \"e2.jpg\", \"question\": \"How many sheep in the picture\"}}}}, {{\"task\": \"image_qa\", \"id\": 2, \"dep\": [-1], \"args\": {{\"image\": \"e3.jpg\", \"question\": \"How many sheep in the picture\"}}}}]",
"content",
"покажи мне видео и изображение на основе текста 'мальчик бежит' и озвучь это",
"#1 Этап планирования задач: AI-ассистент может разбить ввод пользователя на несколько задач: [{{\"task\": задача, \"id\": id_задачи, \"dep\": id_зависимой_задачи, \"args\": {{\"input name\": текст может содержать <resource-dep_id>}}}}]. Специальный тег \"dep_id\" относится к сгенерированному тексту/изображению/аудио в зависимой задаче (Пожалуйста, учтите, генерирует ли зависимая задача ресурсы этого типа.) и \"dep_id\" должен быть в списке \"dep\". Поле \"dep\" обозначает id предыдущих обязательных задач, которые генерируют новый ресурс, на котором зависит текущая задача. Задача ДОЛЖНА быть выбрана из следующих инструментов (вместе с описанием инструмента, именем ввода и типом вывода): {tools}. Может быть несколько задач одного типа. Подумай шаг за шагом обо всех задачах, необходимых для решения запроса пользователя. Выделите как можно меньше задач, обеспечивая при этом возможность решения запроса пользователя. Обратите внимание на зависимости и порядок задач. Если ввод пользователя не может быть разобран, вам нужно ответить пустым JSON []."
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~obsidian.py | import logging
import re
from pathlib import Path
from typing import List
import yaml
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class ObsidianLoader(BaseLoader):
"""Load `Obsidian` files from directory."""
FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.DOTALL)
TAG_REGEX = re.compile(r"[^\S\/]#([a-zA-Z_]+[-_/\w]*)")
DATAVIEW_LINE_REGEX = re.compile(r"^\s*(\w+)::\s*(.*)$", re.MULTILINE)
DATAVIEW_INLINE_BRACKET_REGEX = re.compile(r"\[(\w+)::\s*(.*)\]", re.MULTILINE)
DATAVIEW_INLINE_PAREN_REGEX = re.compile(r"\((\w+)::\s*(.*)\)", re.MULTILINE)
def __init__(
self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
):
"""Initialize with a path.
Args:
path: Path to the directory containing the Obsidian files.
encoding: Charset encoding, defaults to "UTF-8"
collect_metadata: Whether to collect metadata from the front matter.
Defaults to True.
"""
self.file_path = path
self.encoding = encoding
self.collect_metadata = collect_metadata
def _parse_front_matter(self, content: str) -> dict:
"""Parse front matter metadata from the content and return it as a dict."""
if not self.collect_metadata:
return {}
match = self.FRONT_MATTER_REGEX.search(content)
if not match:
return {}
try:
front_matter = yaml.safe_load(match.group(1))
# If tags are a string, split them into a list
if "tags" in front_matter and isinstance(front_matter["tags"], str):
front_matter["tags"] = front_matter["tags"].split(", ")
return front_matter
except yaml.parser.ParserError:
logger.warning("Encountered non-yaml frontmatter")
return {}
    def _to_langchain_compatible_metadata(self, metadata: dict) -> dict:
        """Convert a dictionary to a format compatible with langchain metadata."""
result = {}
for key, value in metadata.items():
if type(value) in {str, int, float}:
result[key] = value
else:
result[key] = str(value)
return result
    def _parse_document_tags(self, content: str) -> set:
        """Return a set of all tags within the document."""
if not self.collect_metadata:
return set()
match = self.TAG_REGEX.findall(content)
if not match:
return set()
return {tag for tag in match}
    def _parse_dataview_fields(self, content: str) -> dict:
        """Parse Obsidian Dataview plugin fields from the content and return them
        as a dict."""
if not self.collect_metadata:
return {}
return {
**{
match[0]: match[1]
for match in self.DATAVIEW_LINE_REGEX.findall(content)
},
**{
match[0]: match[1]
for match in self.DATAVIEW_INLINE_PAREN_REGEX.findall(content)
},
**{
match[0]: match[1]
for match in self.DATAVIEW_INLINE_BRACKET_REGEX.findall(content)
},
}
def _remove_front_matter(self, content: str) -> str:
"""Remove front matter metadata from the given content."""
if not self.collect_metadata:
return content
return self.FRONT_MATTER_REGEX.sub("", content)
def load(self) -> List[Document]:
"""Load documents."""
paths = list(Path(self.file_path).glob("**/*.md"))
docs = []
for path in paths:
with open(path, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
tags = self._parse_document_tags(text)
dataview_fields = self._parse_dataview_fields(text)
text = self._remove_front_matter(text)
metadata = {
"source": str(path.name),
"path": str(path),
"created": path.stat().st_ctime,
"last_modified": path.stat().st_mtime,
"last_accessed": path.stat().st_atime,
**self._to_langchain_compatible_metadata(front_matter),
**dataview_fields,
}
if tags or front_matter.get("tags"):
metadata["tags"] = ",".join(tags | set(front_matter.get("tags", [])))
docs.append(Document(page_content=text, metadata=metadata))
return docs
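# Minimal usage sketch (not part of the original module); "path/to/vault" is a
# placeholder for a real Obsidian vault directory containing Markdown notes.
if __name__ == "__main__":
    loader = ObsidianLoader("path/to/vault", collect_metadata=True)
    documents = loader.load()
    for doc in documents:
        print(doc.metadata.get("source"), doc.metadata.get("tags", ""))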
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~office365~send_event.py | """Util that sends calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.office365.base import O365BaseTool
class SendEventSchema(BaseModel):
"""Input for CreateEvent Tool."""
body: str = Field(
...,
description="The message body to include in the event.",
)
attendees: List[str] = Field(
...,
description="The list of attendees for the event.",
)
subject: str = Field(
...,
description="The subject of the event.",
)
start_datetime: str = Field(
description=" The start datetime for the event in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
end_datetime: str = Field(
description=" The end datetime for the event in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
class O365SendEvent(O365BaseTool):
"""Tool for sending calendar events in Office 365."""
name: str = "send_event"
description: str = (
"Use this tool to create and send an event with the provided event fields."
)
args_schema: Type[SendEventSchema] = SendEventSchema
def _run(
self,
body: str,
attendees: List[str],
subject: str,
start_datetime: str,
end_datetime: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get calendar object
schedule = self.account.schedule()
calendar = schedule.get_default_calendar()
event = calendar.new_event()
event.body = body
event.subject = subject
event.start = dt.strptime(start_datetime, "%Y-%m-%dT%H:%M:%S%z")
event.end = dt.strptime(end_datetime, "%Y-%m-%dT%H:%M:%S%z")
for attendee in attendees:
event.attendees.add(attendee)
# TO-DO: Look into PytzUsageWarning
event.save()
output = "Event sent: " + str(event)
return output
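# Hypothetical usage sketch (not part of the original module): assumes Office 365
# credentials are configured per the link in the module docstring; the attendee
# address and datetimes below are placeholders.
if __name__ == "__main__":
    tool = O365SendEvent()
    print(
        tool.run(
            {
                "body": "Quarterly planning discussion.",
                "attendees": ["[email protected]"],
                "subject": "Planning meeting",
                "start_datetime": "2023-06-09T10:30:00+03:00",
                "end_datetime": "2023-06-09T11:30:00+03:00",
            }
        )
    )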
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~spacy_embeddings.py | import importlib.util
from typing import Any, Dict, List
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
class SpacyEmbeddings(BaseModel, Embeddings):
"""Embeddings by SpaCy models.
It only supports the 'en_core_web_sm' model.
Attributes:
nlp (Any): The Spacy model loaded into memory.
Methods:
embed_documents(texts: List[str]) -> List[List[float]]:
Generates embeddings for a list of documents.
embed_query(text: str) -> List[float]:
Generates an embedding for a single piece of text.
"""
nlp: Any # The Spacy model loaded into memory
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid # Forbid extra attributes during model initialization
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""
Validates that the Spacy package and the 'en_core_web_sm' model are installed.
Args:
values (Dict): The values provided to the class constructor.
Returns:
The validated values.
Raises:
ValueError: If the Spacy package or the 'en_core_web_sm'
model are not installed.
"""
# Check if the Spacy package is installed
if importlib.util.find_spec("spacy") is None:
raise ValueError(
"Spacy package not found. "
"Please install it with `pip install spacy`."
)
try:
# Try to load the 'en_core_web_sm' Spacy model
import spacy
values["nlp"] = spacy.load("en_core_web_sm")
except OSError:
# If the model is not found, raise a ValueError
raise ValueError(
"Spacy model 'en_core_web_sm' not found. "
"Please install it with"
" `python -m spacy download en_core_web_sm`."
)
return values # Return the validated values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generates embeddings for a list of documents.
Args:
texts (List[str]): The documents to generate embeddings for.
Returns:
A list of embeddings, one for each document.
"""
return [self.nlp(text).vector.tolist() for text in texts]
def embed_query(self, text: str) -> List[float]:
"""
Generates an embedding for a single piece of text.
Args:
text (str): The text to generate an embedding for.
Returns:
The embedding for the text.
"""
return self.nlp(text).vector.tolist()
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Asynchronously generates embeddings for a list of documents.
This method is not implemented and raises a NotImplementedError.
Args:
texts (List[str]): The documents to generate embeddings for.
Raises:
NotImplementedError: This method is not implemented.
"""
raise NotImplementedError("Asynchronous embedding generation is not supported.")
async def aembed_query(self, text: str) -> List[float]:
"""
Asynchronously generates an embedding for a single piece of text.
This method is not implemented and raises a NotImplementedError.
Args:
text (str): The text to generate an embedding for.
Raises:
NotImplementedError: This method is not implemented.
"""
raise NotImplementedError("Asynchronous embedding generation is not supported.")
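# Minimal usage sketch (not part of the original module); requires the `spacy`
# package and the `en_core_web_sm` model installed as described above.
if __name__ == "__main__":
    embedder = SpacyEmbeddings()
    doc_vectors = embedder.embed_documents(["hello world", "goodbye world"])
    query_vector = embedder.embed_query("hello")
    print(len(doc_vectors), len(query_vector))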
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~pgembedding.py | from __future__ import annotations
import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type
import sqlalchemy
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import JSON, UUID
from sqlalchemy.orm import Session, relationship
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_dict_or_env
Base = declarative_base() # type: Any
ADA_TOKEN_COUNT = 1536
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
"""Base model for all SQL stores."""
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
class CollectionStore(BaseModel):
"""Collection store."""
__tablename__ = "langchain_pg_collection"
name = sqlalchemy.Column(sqlalchemy.String)
cmetadata = sqlalchemy.Column(JSON)
embeddings = relationship(
"EmbeddingStore",
back_populates="collection",
passive_deletes=True,
)
@classmethod
def get_by_name(cls, session: Session, name: str) -> Optional["CollectionStore"]:
return session.query(cls).filter(cls.name == name).first() # type: ignore
@classmethod
def get_or_create(
cls,
session: Session,
name: str,
cmetadata: Optional[dict] = None,
) -> Tuple["CollectionStore", bool]:
"""
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
"""
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
class EmbeddingStore(BaseModel):
"""Embedding store."""
__tablename__ = "langchain_pg_embedding"
collection_id = sqlalchemy.Column(
UUID(as_uuid=True),
sqlalchemy.ForeignKey(
f"{CollectionStore.__tablename__}.uuid",
ondelete="CASCADE",
),
)
collection = relationship(CollectionStore, back_populates="embeddings")
embedding = sqlalchemy.Column(sqlalchemy.ARRAY(sqlalchemy.REAL)) # type: ignore
document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
cmetadata = sqlalchemy.Column(JSON, nullable=True)
# custom_id : any user defined id
custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
class QueryResult:
"""Result from a query."""
EmbeddingStore: EmbeddingStore
distance: float
class PGEmbedding(VectorStore):
"""`Postgres` with the `pg_embedding` extension as a vector store.
pg_embedding uses sequential scan by default. but you can create a HNSW index
using the create_hnsw_index method.
- `connection_string` is a postgres connection string.
- `embedding_function` any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
- `collection_name` is the name of the collection to use. (default: langchain)
- NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
- `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN)
- `EUCLIDEAN` is the euclidean distance.
- `pre_delete_collection` if True, will delete the collection if it exists.
(default: False)
- Useful for testing.
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
collection_metadata: Optional[dict] = None,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.collection_metadata = collection_metadata
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.__post_init__()
def __post_init__(
self,
) -> None:
self._conn = self.connect()
self.create_hnsw_extension()
self.create_tables_if_not_exists()
self.create_collection()
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def connect(self) -> sqlalchemy.engine.Connection:
engine = sqlalchemy.create_engine(self.connection_string)
conn = engine.connect()
return conn
def create_hnsw_extension(self) -> None:
try:
with Session(self._conn) as session:
statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS embedding")
session.execute(statement)
session.commit()
except Exception as e:
self.logger.exception(e)
def create_tables_if_not_exists(self) -> None:
with self._conn.begin():
Base.metadata.create_all(self._conn)
def drop_tables(self) -> None:
with self._conn.begin():
Base.metadata.drop_all(self._conn)
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with Session(self._conn) as session:
CollectionStore.get_or_create(
session, self.collection_name, cmetadata=self.collection_metadata
)
def create_hnsw_index(
self,
max_elements: int = 10000,
dims: int = ADA_TOKEN_COUNT,
m: int = 8,
ef_construction: int = 16,
ef_search: int = 16,
) -> None:
create_index_query = sqlalchemy.text(
"CREATE INDEX IF NOT EXISTS langchain_pg_embedding_idx "
"ON langchain_pg_embedding USING hnsw (embedding) "
"WITH ("
"maxelements = {}, "
"dims = {}, "
"m = {}, "
"efconstruction = {}, "
"efsearch = {}"
");".format(max_elements, dims, m, ef_construction, ef_search)
)
# Execute the queries
try:
with Session(self._conn) as session:
# Create the HNSW index
session.execute(create_index_query)
session.commit()
            print("HNSW index created successfully.")
        except Exception as e:
            print(f"Failed to create HNSW index: {e}")
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
self.logger.warning("Collection not found")
return
session.delete(collection)
session.commit()
def get_collection(self, session: Session) -> Optional["CollectionStore"]:
return CollectionStore.get_by_name(session, self.collection_name)
@classmethod
def _initialize_from_embeddings(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
pre_delete_collection=pre_delete_collection,
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: List[str],
embeddings: List[List[float]],
metadatas: List[dict],
ids: List[str],
**kwargs: Any,
) -> None:
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
with Session(self._conn) as session:
collection = self.get_collection(session)
set_enable_seqscan_stmt = sqlalchemy.text("SET enable_seqscan = off")
session.execute(set_enable_seqscan_stmt)
if not collection:
raise ValueError("Collection not found")
filter_by = EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
for key, value in filter.items():
IN = "in"
if isinstance(value, dict) and IN in map(str.lower, value):
value_case_insensitive = {
k.lower(): v for k, v in value.items()
}
filter_by_metadata = EmbeddingStore.cmetadata[key].astext.in_(
value_case_insensitive[IN]
)
filter_clauses.append(filter_by_metadata)
elif isinstance(value, dict) and "substring" in map(
str.lower, value
):
filter_by_metadata = EmbeddingStore.cmetadata[key].astext.ilike(
f"%{value['substring']}%"
)
filter_clauses.append(filter_by_metadata)
else:
filter_by_metadata = EmbeddingStore.cmetadata[
key
].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
results: List[QueryResult] = (
session.query(
EmbeddingStore,
func.abs(EmbeddingStore.embedding.op("<->")(embedding)).label(
"distance"
),
) # Specify the columns you need here, e.g., EmbeddingStore.embedding
.filter(filter_by)
.order_by(
func.abs(EmbeddingStore.embedding.op("<->")(embedding)).asc()
) # Using PostgreSQL specific operator with the correct column name
.limit(k)
.all()
)
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls: Type[PGEmbedding],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
embeddings = embedding.embed_documents(list(texts))
return cls._initialize_from_embeddings(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls._initialize_from_embeddings(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[PGEmbedding],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
pre_delete_collection=pre_delete_collection,
)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="POSTGRES_CONNECTION_STRING",
)
if not connection_string:
            raise ValueError(
                "Postgres connection string is required. "
                "Either pass it as a parameter "
                "or set the POSTGRES_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls: Type[PGEmbedding],
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
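# Hypothetical usage sketch (not part of the original module): assumes a Postgres
# instance with the pg_embedding extension and an OPENAI_API_KEY; the connection
# string below is a placeholder.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    store = PGEmbedding.from_texts(
        texts=["harrison worked at kensho"],
        embedding=OpenAIEmbeddings(),
        collection_name="demo_collection",
        connection_string="postgresql+psycopg2://user:password@localhost:5432/db",
    )
    store.create_hnsw_index(dims=ADA_TOKEN_COUNT)
    print(store.similarity_search("Where did harrison work?", k=1))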
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~milvus.py | from __future__ import annotations
import logging
from typing import Any, Iterable, List, Optional, Tuple, Union
from uuid import uuid4
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
DEFAULT_MILVUS_CONNECTION = {
"host": "localhost",
"port": "19530",
"user": "",
"password": "",
"secure": False,
}
class Milvus(VectorStore):
"""`Milvus` vector store.
You need to install `pymilvus` and run Milvus.
See the following documentation for how to run a Milvus instance:
https://milvus.io/docs/install_standalone-docker.md
If looking for a hosted Milvus, take a look at this documentation:
https://zilliz.com/cloud and make use of the Zilliz vectorstore found in
this project.
IF USING L2/IP metric, IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA.
Args:
embedding_function (Embeddings): Function used to embed the text.
collection_name (str): Which Milvus collection to use. Defaults to
"LangChainCollection".
        connection_args (Optional[dict[str, any]]): The connection args used for
            this class come in the form of a dict.
consistency_level (str): The consistency level to use for a collection.
Defaults to "Session".
index_params (Optional[dict]): Which index params to use. Defaults to
HNSW/AUTOINDEX depending on service.
search_params (Optional[dict]): Which search params to use. Defaults to
default of index.
drop_old (Optional[bool]): Whether to drop the current collection. Defaults
to False.
primary_field (str): Name of the primary key field. Defaults to "pk".
text_field (str): Name of the text field. Defaults to "text".
vector_field (str): Name of the vector field. Defaults to "vector".
    The connection args used for this class come in the form of a dict;
    here are a few of the options:
address (str): The actual address of Milvus
instance. Example address: "localhost:19530"
uri (str): The uri of Milvus instance. Example uri:
"http://randomwebsite:19530",
"tcp:foobarsite:19530",
"https://ok.s3.south.com:19530".
host (str): The host of Milvus instance. Default at "localhost",
PyMilvus will fill in the default host if only port is provided.
port (str/int): The port of Milvus instance. Default at 19530, PyMilvus
will fill in the default port if only host is provided.
        user (str): The user to connect to the Milvus instance as. If user and
            password are provided, we will add the related header in every RPC call.
password (str): Required when user is provided. The password
corresponding to the user.
secure (bool): Default is false. If set to true, tls will be enabled.
        client_key_path (str): If using tls two-way authentication, the
            client.key path is required.
        client_pem_path (str): If using tls two-way authentication, the
            client.pem path is required.
        ca_pem_path (str): If using tls two-way authentication, the ca.pem
            path is required.
        server_pem_path (str): If using tls one-way authentication, the
            server.pem path is required.
        server_name (str): If using tls, the common name is required.
Example:
.. code-block:: python
from langchain.vectorstores import Milvus
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
# Connect to a milvus instance on localhost
milvus_store = Milvus(
embedding_function = Embeddings,
collection_name = "LangChainCollection",
drop_old = True,
)
Raises:
ValueError: If the pymilvus python package is not installed.
"""
def __init__(
self,
embedding_function: Embeddings,
collection_name: str = "LangChainCollection",
connection_args: Optional[dict[str, Any]] = None,
consistency_level: str = "Session",
index_params: Optional[dict] = None,
search_params: Optional[dict] = None,
drop_old: Optional[bool] = False,
*,
primary_field: str = "pk",
text_field: str = "text",
vector_field: str = "vector",
):
"""Initialize the Milvus vector store."""
try:
from pymilvus import Collection, utility
except ImportError:
raise ValueError(
"Could not import pymilvus python package. "
"Please install it with `pip install pymilvus`."
)
# Default search params when one is not provided.
self.default_search_params = {
"IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}},
"IVF_SQ8": {"metric_type": "L2", "params": {"nprobe": 10}},
"IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}},
"HNSW": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_FLAT": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}},
"IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"metric_type": "L2", "params": {"search_k": 10}},
"AUTOINDEX": {"metric_type": "L2", "params": {}},
}
self.embedding_func = embedding_function
self.collection_name = collection_name
self.index_params = index_params
self.search_params = search_params
self.consistency_level = consistency_level
        # In order for a collection to be compatible, pk needs to be auto-id and int
self._primary_field = primary_field
# In order for compatibility, the text field will need to be called "text"
self._text_field = text_field
# In order for compatibility, the vector field needs to be called "vector"
self._vector_field = vector_field
self.fields: list[str] = []
# Create the connection to the server
if connection_args is None:
connection_args = DEFAULT_MILVUS_CONNECTION
self.alias = self._create_connection_alias(connection_args)
self.col: Optional[Collection] = None
# Grab the existing collection if it exists
if utility.has_collection(self.collection_name, using=self.alias):
self.col = Collection(
self.collection_name,
using=self.alias,
)
# If need to drop old, drop it
if drop_old and isinstance(self.col, Collection):
self.col.drop()
self.col = None
# Initialize the vector store
self._init()
@property
def embeddings(self) -> Embeddings:
return self.embedding_func
def _create_connection_alias(self, connection_args: dict) -> str:
"""Create the connection to the Milvus server."""
from pymilvus import MilvusException, connections
# Grab the connection arguments that are used for checking existing connection
host: str = connection_args.get("host", None)
port: Union[str, int] = connection_args.get("port", None)
address: str = connection_args.get("address", None)
uri: str = connection_args.get("uri", None)
user = connection_args.get("user", None)
# Order of use is host/port, uri, address
if host is not None and port is not None:
given_address = str(host) + ":" + str(port)
elif uri is not None:
given_address = uri.split("https://")[1]
elif address is not None:
given_address = address
else:
given_address = None
logger.debug("Missing standard address type for reuse attempt")
# User defaults to empty string when getting connection info
if user is not None:
tmp_user = user
else:
tmp_user = ""
# If a valid address was given, then check if a connection exists
if given_address is not None:
for con in connections.list_connections():
addr = connections.get_connection_addr(con[0])
if (
con[1]
and ("address" in addr)
and (addr["address"] == given_address)
and ("user" in addr)
and (addr["user"] == tmp_user)
):
logger.debug("Using previous connection: %s", con[0])
return con[0]
# Generate a new connection if one doesn't exist
alias = uuid4().hex
try:
connections.connect(alias=alias, **connection_args)
logger.debug("Created new connection using: %s", alias)
return alias
except MilvusException as e:
logger.error("Failed to create new connection using: %s", alias)
raise e
def _init(
self, embeddings: Optional[list] = None, metadatas: Optional[list[dict]] = None
) -> None:
if embeddings is not None:
self._create_collection(embeddings, metadatas)
self._extract_fields()
self._create_index()
self._create_search_params()
self._load()
def _create_collection(
self, embeddings: list, metadatas: Optional[list[dict]] = None
) -> None:
from pymilvus import (
Collection,
CollectionSchema,
DataType,
FieldSchema,
MilvusException,
)
from pymilvus.orm.types import infer_dtype_bydata
# Determine embedding dim
dim = len(embeddings[0])
fields = []
# Determine metadata schema
if metadatas:
# Create FieldSchema for each entry in metadata.
for key, value in metadatas[0].items():
# Infer the corresponding datatype of the metadata
dtype = infer_dtype_bydata(value)
# Datatype isn't compatible
if dtype == DataType.UNKNOWN or dtype == DataType.NONE:
logger.error(
"Failure to create collection, unrecognized dtype for key: %s",
key,
)
raise ValueError(f"Unrecognized datatype for {key}.")
# Dataype is a string/varchar equivalent
elif dtype == DataType.VARCHAR:
fields.append(FieldSchema(key, DataType.VARCHAR, max_length=65_535))
else:
fields.append(FieldSchema(key, dtype))
# Create the text field
fields.append(
FieldSchema(self._text_field, DataType.VARCHAR, max_length=65_535)
)
# Create the primary key field
fields.append(
FieldSchema(
self._primary_field, DataType.INT64, is_primary=True, auto_id=True
)
)
# Create the vector field, supports binary or float vectors
fields.append(
FieldSchema(self._vector_field, infer_dtype_bydata(embeddings[0]), dim=dim)
)
# Create the schema for the collection
schema = CollectionSchema(fields)
# Create the collection
try:
self.col = Collection(
name=self.collection_name,
schema=schema,
consistency_level=self.consistency_level,
using=self.alias,
)
except MilvusException as e:
logger.error(
"Failed to create collection: %s error: %s", self.collection_name, e
)
raise e
def _extract_fields(self) -> None:
"""Grab the existing fields from the Collection"""
from pymilvus import Collection
if isinstance(self.col, Collection):
schema = self.col.schema
for x in schema.fields:
self.fields.append(x.name)
# Since primary field is auto-id, no need to track it
self.fields.remove(self._primary_field)
def _get_index(self) -> Optional[dict[str, Any]]:
"""Return the vector index information if it exists"""
from pymilvus import Collection
if isinstance(self.col, Collection):
for x in self.col.indexes:
if x.field_name == self._vector_field:
return x.to_dict()
return None
    def _create_index(self) -> None:
        """Create an index on the collection."""
from pymilvus import Collection, MilvusException
if isinstance(self.col, Collection) and self._get_index() is None:
try:
# If no index params, use a default HNSW based one
if self.index_params is None:
self.index_params = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
try:
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
# If default did not work, most likely on Zilliz Cloud
except MilvusException:
# Use AUTOINDEX based index
self.index_params = {
"metric_type": "L2",
"index_type": "AUTOINDEX",
"params": {},
}
self.col.create_index(
self._vector_field,
index_params=self.index_params,
using=self.alias,
)
logger.debug(
"Successfully created an index on collection: %s",
self.collection_name,
)
except MilvusException as e:
logger.error(
"Failed to create an index on collection: %s", self.collection_name
)
raise e
def _create_search_params(self) -> None:
"""Generate search params based on the current index type"""
from pymilvus import Collection
if isinstance(self.col, Collection) and self.search_params is None:
index = self._get_index()
if index is not None:
index_type: str = index["index_param"]["index_type"]
metric_type: str = index["index_param"]["metric_type"]
self.search_params = self.default_search_params[index_type]
self.search_params["metric_type"] = metric_type
def _load(self) -> None:
"""Load the collection if available."""
from pymilvus import Collection
if isinstance(self.col, Collection) and self._get_index() is not None:
self.col.load()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
timeout: Optional[int] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Insert text data into Milvus.
        Inserting data when the collection has not been made yet will result
in creating a new Collection. The data of the first entity decides
the schema of the new collection, the dim is extracted from the first
embedding and the columns are decided by the first metadata dict.
        Metadata keys will need to be present for all inserted values. At
the moment there is no None equivalent in Milvus.
Args:
texts (Iterable[str]): The texts to embed, it is assumed
that they all fit in memory.
metadatas (Optional[List[dict]]): Metadata dicts attached to each of
the texts. Defaults to None.
timeout (Optional[int]): Timeout for each batch insert. Defaults
to None.
batch_size (int, optional): Batch size to use for insertion.
Defaults to 1000.
Raises:
MilvusException: Failure to add texts
Returns:
List[str]: The resulting keys for each inserted element.
"""
from pymilvus import Collection, MilvusException
texts = list(texts)
try:
embeddings = self.embedding_func.embed_documents(texts)
except NotImplementedError:
embeddings = [self.embedding_func.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug("Nothing to insert, skipping.")
return []
# If the collection hasn't been initialized yet, perform all steps to do so
if not isinstance(self.col, Collection):
self._init(embeddings, metadatas)
# Dict to hold all insert columns
insert_dict: dict[str, list] = {
self._text_field: texts,
self._vector_field: embeddings,
}
# Collect the metadata into the insert dict.
if metadatas is not None:
for d in metadatas:
for key, value in d.items():
if key in self.fields:
insert_dict.setdefault(key, []).append(value)
# Total insert count
vectors: list = insert_dict[self._vector_field]
total_count = len(vectors)
pks: list[str] = []
assert isinstance(self.col, Collection)
for i in range(0, total_count, batch_size):
# Grab end index
end = min(i + batch_size, total_count)
# Convert dict to list of lists batch for insertion
insert_list = [insert_dict[x][i:end] for x in self.fields]
# Insert into the collection.
try:
res: Collection
res = self.col.insert(insert_list, timeout=timeout, **kwargs)
pks.extend(res.primary_keys)
except MilvusException as e:
logger.error(
"Failed to insert batch starting at entity: %s/%s", i, total_count
)
raise e
return pks
def similarity_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string.
Args:
query (str): The text to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
res = self.similarity_search_with_score(
query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string.
Args:
embedding (List[float]): The embedding vector to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score.
For more information about the search parameters, take a look at the pymilvus
documentation found here:
https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md
Args:
query (str): The text being searched.
k (int, optional): The amount of results to return. Defaults to 4.
param (dict): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
            List[Tuple[Document, float]]: Result doc and score.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
# Embed the query text.
embedding = self.embedding_func.embed_query(query)
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return res
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score.
For more information about the search parameters, take a look at the pymilvus
documentation found here:
https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md
Args:
embedding (List[float]): The embedding vector being searched.
k (int, optional): The amount of results to return. Defaults to 4.
param (dict): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Tuple[Document, float]]: Result doc and score.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
if param is None:
param = self.search_params
# Determine result metadata fields.
output_fields = self.fields[:]
output_fields.remove(self._vector_field)
# Perform the search.
res = self.col.search(
data=[embedding],
anns_field=self._vector_field,
param=param,
limit=k,
expr=expr,
output_fields=output_fields,
timeout=timeout,
**kwargs,
)
# Organize results.
ret = []
for result in res[0]:
meta = {x: result.entity.get(x) for x in output_fields}
doc = Document(page_content=meta.pop(self._text_field), metadata=meta)
pair = (doc, result.score)
ret.append(pair)
return ret
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR.
Args:
query (str): The text being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
embedding = self.embedding_func.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
param=param,
expr=expr,
timeout=timeout,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR.
Args:
            embedding (list[float]): The embedding vector being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug("No existing collection to search.")
return []
if param is None:
param = self.search_params
# Determine result metadata fields.
output_fields = self.fields[:]
output_fields.remove(self._vector_field)
# Perform the search.
res = self.col.search(
data=[embedding],
anns_field=self._vector_field,
param=param,
limit=fetch_k,
expr=expr,
output_fields=output_fields,
timeout=timeout,
**kwargs,
)
# Organize results.
ids = []
documents = []
scores = []
for result in res[0]:
meta = {x: result.entity.get(x) for x in output_fields}
doc = Document(page_content=meta.pop(self._text_field), metadata=meta)
documents.append(doc)
scores.append(result.score)
ids.append(result.id)
vectors = self.col.query(
expr=f"{self._primary_field} in {ids}",
output_fields=[self._primary_field, self._vector_field],
timeout=timeout,
)
# Reorganize the results from query to match search order.
vectors = {x[self._primary_field]: x[self._vector_field] for x in vectors}
ordered_result_embeddings = [vectors[x] for x in ids]
# Get the new order of results.
new_ordering = maximal_marginal_relevance(
np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult
)
# Reorder the values and return.
ret = []
for x in new_ordering:
# Function can return -1 index
if x == -1:
break
else:
ret.append(documents[x])
return ret
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = "LangChainCollection",
connection_args: dict[str, Any] = DEFAULT_MILVUS_CONNECTION,
consistency_level: str = "Session",
index_params: Optional[dict] = None,
search_params: Optional[dict] = None,
drop_old: bool = False,
**kwargs: Any,
) -> Milvus:
"""Create a Milvus collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
collection_name (str, optional): Collection name to use. Defaults to
"LangChainCollection".
connection_args (dict[str, Any], optional): Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional): Which consistency level to use. Defaults
to "Session".
index_params (Optional[dict], optional): Which index_params to use. Defaults
to None.
search_params (Optional[dict], optional): Which search params to use.
Defaults to None.
drop_old (Optional[bool], optional): Whether to drop the collection with
that name if it exists. Defaults to False.
Returns:
Milvus: Milvus Vector Store
"""
vector_db = cls(
embedding_function=embedding,
collection_name=collection_name,
connection_args=connection_args,
consistency_level=consistency_level,
index_params=index_params,
search_params=search_params,
drop_old=drop_old,
**kwargs,
)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
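# Hypothetical usage sketch (not part of the original module): assumes a Milvus
# instance reachable at localhost:19530 and an OPENAI_API_KEY for the example
# embedding function; any other Embeddings implementation can be substituted.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    vector_store = Milvus.from_texts(
        texts=["harrison worked at kensho", "bears like to eat honey"],
        embedding=OpenAIEmbeddings(),
        connection_args={"host": "localhost", "port": "19530"},
        drop_old=True,
    )
    print(vector_store.similarity_search("Where did harrison work?", k=1))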
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~llms~anthropic_functions.py | import json
from collections import defaultdict
from html.parser import HTMLParser
from typing import Any, DefaultDict, Dict, List, Optional
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
)
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
ChatGeneration,
ChatResult,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
SystemMessage,
)
from langchain_experimental.pydantic_v1 import root_validator
prompt = """Помимо ответов, ты можешь использовать инструменты. \
У тебя есть доступ к следующим инструментам.
{tools}
Чтобы использовать инструмент, ты можешь использовать \
теги <tool></tool> для указания имени, \
и теги <tool_input></tool_input> для указания параметров. \
Каждый параметр должен передаваться в виде <$param_name>$value</$param_name>, \
где $param_name - это имя конкретного параметра, а $value \
- это значение этого параметра.
Затем ты получишь ответ в виде тега <observation></observation>
Например, если у тебя есть инструмент под названием 'search', который принимает один \
параметр 'query', который может выполнять поиск в Google, чтобы найти \
погоду в Сан-Франциско, ты должен ответить:
<tool>search</tool><tool_input><query>weather in SF</query></tool_input>
<observation>64 degrees</observation>"""
class TagParser(HTMLParser):
def __init__(self) -> None:
"""A heavy-handed solution, but it's fast for prototyping.
        Might be re-implemented later to restrict scope to the limited grammar and
        improve efficiency.
Uses an HTML parser to parse a limited grammar that allows
for syntax of the form:
INPUT -> JUNK? VALUE*
JUNK -> JUNK_CHARACTER+
JUNK_CHARACTER -> whitespace | ,
VALUE -> <IDENTIFIER>DATA</IDENTIFIER> | OBJECT
OBJECT -> <IDENTIFIER>VALUE+</IDENTIFIER>
IDENTIFIER -> [a-Z][a-Z0-9_]*
DATA -> .*
Interprets the data to allow repetition of tags and recursion
to support representation of complex types.
^ Just another approximately wrong grammar specification.
"""
super().__init__()
self.parse_data: DefaultDict[str, List[Any]] = defaultdict(list)
self.stack: List[DefaultDict[str, List[str]]] = [self.parse_data]
self.success = True
self.depth = 0
self.data: Optional[str] = None
def handle_starttag(self, tag: str, attrs: Any) -> None:
"""Hook when a new tag is encountered."""
self.depth += 1
self.stack.append(defaultdict(list))
self.data = None
def handle_endtag(self, tag: str) -> None:
"""Hook when a tag is closed."""
self.depth -= 1
top_of_stack = dict(self.stack.pop(-1)) # Pop the dictionary we don't need it
        # If a leaf node
is_leaf = self.data is not None
# Annoying to type here, code is tested, hopefully OK
value = self.data if is_leaf else top_of_stack
# Difficult to type this correctly with mypy (maybe impossible?)
# Can be nested indefinitely, so requires self referencing type
self.stack[-1][tag].append(value) # type: ignore
        # Reset the data so that if we encounter a sequence of end tags, we
# don't confuse an outer end tag for belonging to a leaf node.
self.data = None
def handle_data(self, data: str) -> None:
"""Hook when handling data."""
stripped_data = data.strip()
# The only data that's allowed is whitespace or a comma surrounded by whitespace
if self.depth == 0 and stripped_data not in (",", ""):
# If this is triggered the parse should be considered invalid.
self.success = False
if stripped_data: # ignore whitespace-only strings
self.data = stripped_data
def _destrip(tool_input: Any) -> Any:
if isinstance(tool_input, dict):
return {k: _destrip(v) for k, v in tool_input.items()}
elif isinstance(tool_input, list):
if isinstance(tool_input[0], str):
if len(tool_input) == 1:
return tool_input[0]
else:
raise ValueError
elif isinstance(tool_input[0], dict):
return [_destrip(v) for v in tool_input]
else:
raise ValueError
else:
raise ValueError
class AnthropicFunctions(BaseChatModel):
llm: BaseChatModel
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
values["llm"] = values.get("llm") or ChatAnthropic(**values)
return values
@property
def model(self) -> BaseChatModel:
"""For backwards compatibility."""
return self.llm
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
forced = False
function_call = ""
if "functions" in kwargs:
content = prompt.format(tools=json.dumps(kwargs["functions"], indent=2))
system = SystemMessage(content=content)
messages = [system] + messages
del kwargs["functions"]
if stop is None:
stop = ["</tool_input>"]
else:
stop.append("</tool_input>")
if "function_call" in kwargs:
forced = True
function_call = kwargs["function_call"]["name"]
AIMessage(content=f"<tool>{function_call}</tool>")
del kwargs["function_call"]
else:
if "function_call" in kwargs:
raise ValueError(
"if `function_call` provided, `functions` must also be"
)
response = self.model.predict_messages(
messages, stop=stop, callbacks=run_manager, **kwargs
)
completion = response.content
if forced:
tag_parser = TagParser()
tag_parser.feed(completion.strip() + "</tool_input>")
v1 = tag_parser.parse_data["tool_input"][0]
kwargs = {
"function_call": {
"name": function_call,
"arguments": json.dumps(_destrip(v1)),
}
}
message = AIMessage(content="", additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
elif "<tool>" in completion:
tag_parser = TagParser()
tag_parser.feed(completion.strip() + "</tool_input>")
msg = completion.split("<tool>")[0]
v1 = tag_parser.parse_data["tool_input"][0]
kwargs = {
"function_call": {
"name": tag_parser.parse_data["tool"][0],
"arguments": json.dumps(_destrip(v1)),
}
}
message = AIMessage(content=msg, additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
return ChatResult(generations=[ChatGeneration(message=response)])
@property
def _llm_type(self) -> str:
return "anthropic_functions"
| [
"Помимо ответов, ты можешь использовать инструменты. У тебя есть доступ к следующим инструментам.\n\n{tools}\n\nЧтобы использовать инструмент, ты можешь использовать теги <tool></tool> для указания имени, и теги <tool_input></tool_input> для указания параметров. Каждый параметр должен передаваться в виде <$param_name>$value</$param_name>, где $param_name - это имя конкретного параметра, а $value - это значение этого параметра.\n\nЗатем ты получишь ответ в виде тега <observation></observation>\nНапример, если у тебя есть инструмент под названием 'search', который принимает один параметр 'query', который может выполнять поиск в Google, чтобы найти погоду в Сан-Франциско, ты должен ответить:\n\n<tool>search</tool><tool_input><query>weather in SF</query></tool_input>\n<observation>64 degrees</observation>",
"<tool></tool>"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~requests.py | """Lightweight wrapper around requests library, with async support."""
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Dict, Optional
import aiohttp
import requests
from langchain.pydantic_v1 import BaseModel, Extra
class Requests(BaseModel):
"""Wrapper around requests to handle auth and async.
The main purpose of this wrapper is to handle authentication (by saving
headers) and enable easy async methods on the same base object.
"""
headers: Optional[Dict[str, str]] = None
aiosession: Optional[aiohttp.ClientSession] = None
auth: Optional[Any] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def get(self, url: str, **kwargs: Any) -> requests.Response:
"""GET the URL and return the text."""
return requests.get(url, headers=self.headers, auth=self.auth, **kwargs)
def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
"""POST to the URL and return the text."""
return requests.post(
url, json=data, headers=self.headers, auth=self.auth, **kwargs
)
def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
"""PATCH the URL and return the text."""
return requests.patch(
url, json=data, headers=self.headers, auth=self.auth, **kwargs
)
def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
"""PUT the URL and return the text."""
return requests.put(
url, json=data, headers=self.headers, auth=self.auth, **kwargs
)
def delete(self, url: str, **kwargs: Any) -> requests.Response:
"""DELETE the URL and return the text."""
return requests.delete(url, headers=self.headers, auth=self.auth, **kwargs)
@asynccontextmanager
async def _arequest(
self, method: str, url: str, **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""Make an async request."""
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.request(
method, url, headers=self.headers, auth=self.auth, **kwargs
) as response:
yield response
else:
async with self.aiosession.request(
method, url, headers=self.headers, auth=self.auth, **kwargs
) as response:
yield response
@asynccontextmanager
async def aget(
self, url: str, **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""GET the URL and return the text asynchronously."""
async with self._arequest("GET", url, **kwargs) as response:
yield response
@asynccontextmanager
async def apost(
self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""POST to the URL and return the text asynchronously."""
async with self._arequest("POST", url, json=data, **kwargs) as response:
yield response
@asynccontextmanager
async def apatch(
self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""PATCH the URL and return the text asynchronously."""
async with self._arequest("PATCH", url, json=data, **kwargs) as response:
yield response
@asynccontextmanager
async def aput(
self, url: str, data: Dict[str, Any], **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""PUT the URL and return the text asynchronously."""
async with self._arequest("PUT", url, json=data, **kwargs) as response:
yield response
@asynccontextmanager
async def adelete(
self, url: str, **kwargs: Any
) -> AsyncGenerator[aiohttp.ClientResponse, None]:
"""DELETE the URL and return the text asynchronously."""
async with self._arequest("DELETE", url, **kwargs) as response:
yield response
class TextRequestsWrapper(BaseModel):
"""Lightweight wrapper around requests library.
The main purpose of this wrapper is to always return a text output.
"""
headers: Optional[Dict[str, str]] = None
aiosession: Optional[aiohttp.ClientSession] = None
auth: Optional[Any] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def requests(self) -> Requests:
return Requests(
headers=self.headers, aiosession=self.aiosession, auth=self.auth
)
def get(self, url: str, **kwargs: Any) -> str:
"""GET the URL and return the text."""
return self.requests.get(url, **kwargs).text
def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""POST to the URL and return the text."""
return self.requests.post(url, data, **kwargs).text
def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PATCH the URL and return the text."""
return self.requests.patch(url, data, **kwargs).text
def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PUT the URL and return the text."""
return self.requests.put(url, data, **kwargs).text
def delete(self, url: str, **kwargs: Any) -> str:
"""DELETE the URL and return the text."""
return self.requests.delete(url, **kwargs).text
async def aget(self, url: str, **kwargs: Any) -> str:
"""GET the URL and return the text asynchronously."""
async with self.requests.aget(url, **kwargs) as response:
return await response.text()
async def apost(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""POST to the URL and return the text asynchronously."""
async with self.requests.apost(url, data, **kwargs) as response:
return await response.text()
async def apatch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PATCH the URL and return the text asynchronously."""
async with self.requests.apatch(url, data, **kwargs) as response:
return await response.text()
async def aput(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PUT the URL and return the text asynchronously."""
async with self.requests.aput(url, data, **kwargs) as response:
return await response.text()
async def adelete(self, url: str, **kwargs: Any) -> str:
"""DELETE the URL and return the text asynchronously."""
async with self.requests.adelete(url, **kwargs) as response:
return await response.text()
# For backwards compatibility
RequestsWrapper = TextRequestsWrapper
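# --- Illustrative sketch (not part of the original module) ----------------------
# A small usage example for the sync and async wrappers. The httpbin.org URL is
# an arbitrary choice; any HTTP endpoint would do.
def _requests_wrapper_demo() -> None:  # pragma: no cover
    wrapper = TextRequestsWrapper(headers={"Accept": "application/json"})
    print(wrapper.get("https://httpbin.org/get"))
async def _requests_wrapper_async_demo() -> None:  # pragma: no cover
    wrapper = TextRequestsWrapper()
    text = await wrapper.aget("https://httpbin.org/get")
    print(text)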
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~retrievers~self_query~test_weaviate.py | from datetime import date, datetime
from typing import Dict, Tuple
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
)
from langchain.retrievers.self_query.weaviate import WeaviateTranslator
DEFAULT_TRANSLATOR = WeaviateTranslator()
def test_visit_comparison() -> None:
comp = Comparison(comparator=Comparator.EQ, attribute="foo", value="1")
expected = {"operator": "Equal", "path": ["foo"], "valueText": "1"}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
def test_visit_comparison_integer() -> None:
comp = Comparison(comparator=Comparator.GTE, attribute="foo", value=1)
expected = {"operator": "GreaterThanEqual", "path": ["foo"], "valueInt": 1}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
def test_visit_comparison_number() -> None:
comp = Comparison(comparator=Comparator.GT, attribute="foo", value=1.4)
expected = {"operator": "GreaterThan", "path": ["foo"], "valueNumber": 1.4}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
def test_visit_comparison_boolean() -> None:
comp = Comparison(comparator=Comparator.NE, attribute="foo", value=False)
expected = {"operator": "NotEqual", "path": ["foo"], "valueBoolean": False}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
def test_visit_comparison_datetime() -> None:
comp = Comparison(
comparator=Comparator.LTE,
attribute="foo",
value=datetime(2023, 9, 13, 4, 20, 0),
)
expected = {
"operator": "LessThanEqual",
"path": ["foo"],
"valueDate": "2023-09-13T04:20:00Z",
}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
def test_visit_comparison_date() -> None:
comp = Comparison(
comparator=Comparator.LT, attribute="foo", value=date(2023, 9, 13)
)
expected = {
"operator": "LessThan",
"path": ["foo"],
"valueDate": "2023-09-13T00:00:00Z",
}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
def test_visit_operation() -> None:
op = Operation(
operator=Operator.AND,
arguments=[
Comparison(comparator=Comparator.EQ, attribute="foo", value="hello"),
Comparison(
comparator=Comparator.GTE, attribute="bar", value=date(2023, 9, 13)
),
Comparison(comparator=Comparator.LTE, attribute="abc", value=1.4),
],
)
expected = {
"operands": [
{"operator": "Equal", "path": ["foo"], "valueText": "hello"},
{
"operator": "GreaterThanEqual",
"path": ["bar"],
"valueDate": "2023-09-13T00:00:00Z",
},
{"operator": "LessThanEqual", "path": ["abc"], "valueNumber": 1.4},
],
"operator": "And",
}
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
def test_visit_structured_query() -> None:
query = "What is the capital of France?"
structured_query = StructuredQuery(
query=query,
filter=None,
)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
comp = Comparison(comparator=Comparator.EQ, attribute="foo", value="1")
structured_query = StructuredQuery(
query=query,
filter=comp,
)
expected = (
query,
{"where_filter": {"path": ["foo"], "operator": "Equal", "valueText": "1"}},
)
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
op = Operation(
operator=Operator.AND,
arguments=[
Comparison(comparator=Comparator.EQ, attribute="foo", value=2),
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
],
)
structured_query = StructuredQuery(
query=query,
filter=op,
)
expected = (
query,
{
"where_filter": {
"operator": "And",
"operands": [
{"path": ["foo"], "operator": "Equal", "valueInt": 2},
{"path": ["bar"], "operator": "Equal", "valueText": "baz"},
],
}
},
)
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~modal.py | import logging
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
logger = logging.getLogger(__name__)
class Modal(LLM):
"""Modal large language models.
To use, you should have the ``modal-client`` python package installed.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import Modal
modal = Modal(endpoint_url="")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "modal"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Modal endpoint."""
params = self.model_kwargs or {}
params = {**params, **kwargs}
response = requests.post(
url=self.endpoint_url,
headers={
"Content-Type": "application/json",
},
json={"prompt": prompt, **params},
)
        try:
            response_json = response.json()
            if prompt not in response_json["prompt"]:
                raise ValueError("The prompt was not echoed back in the response.")
        except KeyError:
            raise KeyError("LangChain requires 'prompt' key in response.")
        text = response_json["prompt"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
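# --- Illustrative sketch (not part of the original module) ----------------------
# The endpoint URL below is a placeholder; a deployed Modal web endpoint that
# accepts a JSON body like {"prompt": ...} and returns JSON containing a
# "prompt" key is assumed. Extra kwargs such as temperature are collected into
# model_kwargs by the validator above.
def _modal_demo() -> None:  # pragma: no cover
    llm = Modal(endpoint_url="https://example--text-gen.modal.run", temperature=0.2)
    print(llm("Write a haiku about static typing."))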
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~knn.py | """KNN Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, List, Optional
import numpy as np
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from langchain.schema.embeddings import Embeddings
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
"""
Create an index of embeddings for a list of contexts.
Args:
contexts: List of contexts to embed.
embeddings: Embeddings model to use.
Returns:
Index of embeddings.
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
return np.array(list(executor.map(embeddings.embed_query, contexts)))
class KNNRetriever(BaseRetriever):
"""`KNN` retriever."""
embeddings: Embeddings
"""Embeddings model to use."""
index: Any
"""Index of embeddings."""
texts: List[str]
"""List of texts to index."""
k: int = 4
"""Number of results to return."""
relevancy_threshold: Optional[float] = None
"""Threshold for relevancy."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@classmethod
def from_texts(
cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
) -> KNNRetriever:
index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
query_embeds = np.array(self.embeddings.embed_query(query))
        # normalize embeddings to unit L2 norm so the dot product is cosine similarity
index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True))
query_embeds = query_embeds / np.sqrt((query_embeds**2).sum())
similarities = index_embeds.dot(query_embeds)
sorted_ix = np.argsort(-similarities)
denominator = np.max(similarities) - np.min(similarities) + 1e-6
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = [
Document(page_content=self.texts[row])
for row in sorted_ix[0 : self.k]
if (
self.relevancy_threshold is None
or normalized_similarities[row] >= self.relevancy_threshold
)
]
return top_k_results
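# --- Illustrative sketch (not part of the original module) ----------------------
# A self-contained example with a toy embedding model so the retriever can be
# exercised without any external service; the three crude character-count
# features are an arbitrary assumption standing in for a real embedding model.
class _ToyEmbeddings(Embeddings):
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self.embed_query(t) for t in texts]
    def embed_query(self, text: str) -> List[float]:
        return [float(len(text)), float(text.count("a")), float(text.count("e"))]
def _knn_retriever_demo() -> None:  # pragma: no cover
    retriever = KNNRetriever.from_texts(
        ["foo", "bar", "a sentence about cats and embeddings"],
        _ToyEmbeddings(),
        k=2,
    )
    print(retriever.get_relevant_documents("cats"))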
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~graph_qa~sparql.py | """
Question answering over an RDF or OWL graph using SPARQL.
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
SPARQL_GENERATION_SELECT_PROMPT,
SPARQL_GENERATION_UPDATE_PROMPT,
SPARQL_INTENT_PROMPT,
SPARQL_QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.graphs.rdf_graph import RdfGraph
from langchain.prompts.base import BasePromptTemplate
from langchain.pydantic_v1 import Field
from langchain.schema.language_model import BaseLanguageModel
class GraphSparqlQAChain(Chain):
"""Question-answering against an RDF or OWL graph by generating SPARQL statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: RdfGraph = Field(exclude=True)
sparql_generation_select_chain: LLMChain
sparql_generation_update_chain: LLMChain
sparql_intent_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
_output_keys = [self.output_key]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = SPARQL_QA_PROMPT,
sparql_select_prompt: BasePromptTemplate = SPARQL_GENERATION_SELECT_PROMPT,
sparql_update_prompt: BasePromptTemplate = SPARQL_GENERATION_UPDATE_PROMPT,
sparql_intent_prompt: BasePromptTemplate = SPARQL_INTENT_PROMPT,
**kwargs: Any,
) -> GraphSparqlQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
sparql_generation_select_chain = LLMChain(llm=llm, prompt=sparql_select_prompt)
sparql_generation_update_chain = LLMChain(llm=llm, prompt=sparql_update_prompt)
sparql_intent_chain = LLMChain(llm=llm, prompt=sparql_intent_prompt)
return cls(
qa_chain=qa_chain,
sparql_generation_select_chain=sparql_generation_select_chain,
sparql_generation_update_chain=sparql_generation_update_chain,
sparql_intent_chain=sparql_intent_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""
Generate SPARQL query, use it to retrieve a response from the gdb and answer
the question.
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
prompt = inputs[self.input_key]
_intent = self.sparql_intent_chain.run({"prompt": prompt}, callbacks=callbacks)
intent = _intent.strip()
if "SELECT" in intent and "UPDATE" not in intent:
sparql_generation_chain = self.sparql_generation_select_chain
intent = "SELECT"
elif "UPDATE" in intent and "SELECT" not in intent:
sparql_generation_chain = self.sparql_generation_update_chain
intent = "UPDATE"
else:
raise ValueError(
"I am sorry, but this prompt seems to fit none of the currently "
"supported SPARQL query types, i.e., SELECT and UPDATE."
)
_run_manager.on_text("Identified intent:", end="\n", verbose=self.verbose)
_run_manager.on_text(intent, color="green", end="\n", verbose=self.verbose)
generated_sparql = sparql_generation_chain.run(
{"prompt": prompt, "schema": self.graph.get_schema}, callbacks=callbacks
)
_run_manager.on_text("Generated SPARQL:", end="\n", verbose=self.verbose)
_run_manager.on_text(
generated_sparql, color="green", end="\n", verbose=self.verbose
)
if intent == "SELECT":
context = self.graph.query(generated_sparql)
_run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(context), color="green", end="\n", verbose=self.verbose
)
result = self.qa_chain(
{"prompt": prompt, "context": context},
callbacks=callbacks,
)
res = result[self.qa_chain.output_key]
elif intent == "UPDATE":
self.graph.update(generated_sparql)
res = "Successfully inserted triples into the graph."
else:
raise ValueError("Unsupported SPARQL query type.")
return {self.output_key: res}
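# --- Illustrative sketch (not part of the original module) ----------------------
# A hedged end-to-end example. The RDF source, the local copy filename and
# ChatOpenAI are assumptions made for illustration; any BaseLanguageModel and
# any RDF/OWL source accepted by RdfGraph would work, and an OpenAI API key is
# assumed to be configured.
def _graph_sparql_qa_demo() -> None:  # pragma: no cover
    from langchain.chat_models import ChatOpenAI
    graph = RdfGraph(
        source_file="http://www.w3.org/People/Berners-Lee/card",
        standard="rdf",
        local_copy="berners_lee_card.ttl",
    )
    chain = GraphSparqlQAChain.from_llm(
        ChatOpenAI(temperature=0), graph=graph, verbose=True
    )
    print(chain.run("What is Tim Berners-Lee's work homepage?"))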
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~office365~events_search.py | """Util that Searches calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import Any, Dict, List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Extra, Field
from langchain.tools.office365.base import O365BaseTool
from langchain.tools.office365.utils import clean_body
class SearchEventsInput(BaseModel):
"""Input for SearchEmails Tool."""
"""From https://learn.microsoft.com/en-us/graph/search-query-parameter"""
start_datetime: str = Field(
description=(
" The start datetime for the search query in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC)."
)
)
end_datetime: str = Field(
description=(
" The end datetime for the search query in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC)."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
"Whether the event's body is truncated to meet token number limits. Set to "
"False for searches that will retrieve very few results, otherwise, set to "
"True."
),
)
class O365SearchEvents(O365BaseTool):
"""Class for searching calendar events in Office 365
Free, but setup is required
"""
name: str = "events_search"
args_schema: Type[BaseModel] = SearchEventsInput
description: str = (
" Use this tool to search for the user's calendar events."
" The input must be the start and end datetimes for the search query."
" The output is a JSON list of all the events in the user's calendar"
" between the start and end times. You can assume that the user can "
" not schedule any meeting over existing meetings, and that the user "
"is busy during meetings. Any times without events are free for the user. "
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _run(
self,
start_datetime: str,
end_datetime: str,
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
TRUNCATE_LIMIT = 150
# Get calendar object
schedule = self.account.schedule()
calendar = schedule.get_default_calendar()
# Process the date range parameters
start_datetime_query = dt.strptime(start_datetime, "%Y-%m-%dT%H:%M:%S%z")
end_datetime_query = dt.strptime(end_datetime, "%Y-%m-%dT%H:%M:%S%z")
# Run the query
q = calendar.new_query("start").greater_equal(start_datetime_query)
q.chain("and").on_attribute("end").less_equal(end_datetime_query)
events = calendar.get_events(query=q, include_recurring=True, limit=max_results)
# Generate output dict
output_events = []
for event in events:
output_event = {}
output_event["organizer"] = event.organizer
output_event["subject"] = event.subject
if truncate:
output_event["body"] = clean_body(event.body)[:TRUNCATE_LIMIT]
else:
output_event["body"] = clean_body(event.body)
# Get the time zone from the search parameters
time_zone = start_datetime_query.tzinfo
# Assign the datetimes in the search time zone
output_event["start_datetime"] = event.start.astimezone(time_zone).strftime(
"%Y-%m-%dT%H:%M:%S%z"
)
output_event["end_datetime"] = event.end.astimezone(time_zone).strftime(
"%Y-%m-%dT%H:%M:%S%z"
)
output_event["modified_date"] = event.modified.astimezone(
time_zone
).strftime("%Y-%m-%dT%H:%M:%S%z")
output_events.append(output_event)
return output_events
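# --- Illustrative sketch (not part of the original module) ----------------------
# A hedged usage example. It assumes the Office 365 credentials required by
# O365BaseTool are configured (see the module docstring); the date range is an
# arbitrary assumption.
def _o365_search_events_demo() -> None:  # pragma: no cover
    tool = O365SearchEvents()
    events = tool.run(
        {
            "start_datetime": "2023-06-01T00:00:00+00:00",
            "end_datetime": "2023-06-08T00:00:00+00:00",
            "max_results": 5,
        }
    )
    print(events)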
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~roam.py | from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class RoamLoader(BaseLoader):
"""Load `Roam` files from a directory."""
def __init__(self, path: str):
"""Initialize with a path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
ps = list(Path(self.file_path).glob("**/*.md"))
docs = []
for p in ps:
with open(p) as f:
text = f.read()
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
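# --- Illustrative sketch (not part of the original module) ----------------------
# The export directory below is an assumption; point it at a folder containing
# the markdown files exported from Roam.
def _roam_loader_demo() -> None:  # pragma: no cover
    loader = RoamLoader("./roam_export")
    for doc in loader.load():
        print(doc.metadata["source"], len(doc.page_content))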
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~deeplake.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
try:
import deeplake
from deeplake.core.fast_forwarding import version_compare
from deeplake.core.vectorstore import DeepLakeVectorStore
_DEEPLAKE_INSTALLED = True
except ImportError:
_DEEPLAKE_INSTALLED = False
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class DeepLake(VectorStore):
"""`Activeloop Deep Lake` vector store.
We integrated deeplake's similarity search and filtering for fast prototyping.
Now, it supports Tensor Query Language (TQL) for production use cases
over billion rows.
Why Deep Lake?
- Not only stores embeddings, but also the original data with version control.
- Serverless, doesn't require another service and can be used with major
cloud providers (S3, GCS, etc.)
- More than just a multi-modal vector store. You can use the dataset
to fine-tune your own LLM models.
To use, you should have the ``deeplake`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import DeepLake
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = DeepLake("langchain_store", embeddings.embed_query)
"""
_LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "./deeplake/"
def __init__(
self,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
token: Optional[str] = None,
embedding: Optional[Embeddings] = None,
embedding_function: Optional[Embeddings] = None,
read_only: bool = False,
ingestion_batch_size: int = 1000,
num_workers: int = 0,
verbose: bool = True,
exec_option: Optional[str] = None,
runtime: Optional[Dict] = None,
**kwargs: Any,
) -> None:
"""Creates an empty DeepLakeVectorStore or loads an existing one.
The DeepLakeVectorStore is located at the specified ``path``.
Examples:
>>> # Create a vector store with default tensors
>>> deeplake_vectorstore = DeepLake(
... path = <path_for_storing_Data>,
... )
>>>
>>> # Create a vector store in the Deep Lake Managed Tensor Database
>>> data = DeepLake(
... path = "hub://org_id/dataset_name",
... runtime = {"tensor_db": True},
... )
Args:
dataset_path (str): Path to existing dataset or where to create
a new one. Defaults to _LANGCHAIN_DEFAULT_DEEPLAKE_PATH.
token (str, optional): Activeloop token, for fetching credentials
to the dataset at path if it is a Deep Lake dataset.
Tokens are normally autogenerated. Optional.
embedding (Embeddings, optional): Function to convert
either documents or query. Optional.
embedding_function (Embeddings, optional): Function to convert
either documents or query. Optional. Deprecated: keeping this
parameter for backwards compatibility.
read_only (bool): Open dataset in read-only mode. Default is False.
ingestion_batch_size (int): During data ingestion, data is divided
into batches. Batch size is the size of each batch.
Default is 1000.
num_workers (int): Number of workers to use during data ingestion.
Default is 0.
verbose (bool): Print dataset summary after each operation.
Default is True.
exec_option (str, optional): DeepLakeVectorStore supports 3 ways to perform
searching - "python", "compute_engine", "tensor_db" and auto.
Default is None.
- ``auto``- Selects the best execution method based on the storage
location of the Vector Store. It is the default option.
- ``python`` - Pure-python implementation that runs on the client.
WARNING: using this with big datasets can lead to memory
issues. Data can be stored anywhere.
- ``compute_engine`` - C++ implementation of the Deep Lake Compute
Engine that runs on the client. Can be used for any data stored in
or connected to Deep Lake. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database that is
responsible for storage and query execution. Only for data stored in
the Deep Lake Managed Database. Use runtime = {"db_engine": True}
during dataset creation.
runtime (Dict, optional): Parameters for creating the Vector Store in
Deep Lake's Managed Tensor Database. Not applicable when loading an
existing Vector Store. To create a Vector Store in the Managed Tensor
Database, set `runtime = {"tensor_db": True}`.
**kwargs: Other optional keyword arguments.
Raises:
            ImportError: If the ``deeplake`` package is missing, or if the
                installed version is too old for the requested configuration.
"""
self.ingestion_batch_size = ingestion_batch_size
self.num_workers = num_workers
self.verbose = verbose
if _DEEPLAKE_INSTALLED is False:
raise ImportError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake[enterprise]`."
)
if (
runtime == {"tensor_db": True}
and version_compare(deeplake.__version__, "3.6.7") == -1
):
raise ImportError(
"To use tensor_db option you need to update deeplake to `3.6.7` or "
"higher. "
f"Currently installed deeplake version is {deeplake.__version__}. "
)
self.dataset_path = dataset_path
if embedding_function:
logger.warning(
"Using embedding function is deprecated and will be removed "
"in the future. Please use embedding instead."
)
self.vectorstore = DeepLakeVectorStore(
path=self.dataset_path,
embedding_function=embedding_function or embedding,
read_only=read_only,
token=token,
exec_option=exec_option,
verbose=verbose,
runtime=runtime,
**kwargs,
)
self._embedding_function = embedding_function or embedding
self._id_tensor_name = "ids" if "ids" in self.vectorstore.tensors() else "id"
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding_function
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Examples:
>>> ids = deeplake_vectorstore.add_texts(
... texts = <list_of_texts>,
... metadatas = <list_of_metadata_jsons>,
... ids = <list_of_ids>,
... )
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
embedding_function (Optional[Embeddings], optional): Embedding function
to use to convert the text into embeddings.
**kwargs (Any): Any additional keyword arguments passed is not supported
by this method.
Returns:
List[str]: List of IDs of the added texts.
"""
if kwargs:
unsupported_items = "`, `".join(set(kwargs.keys()))
raise TypeError(
f"`{unsupported_items}` is/are not a valid argument to add_text method"
)
kwargs = {}
if ids:
if self._id_tensor_name == "ids": # for backwards compatibility
kwargs["ids"] = ids
else:
kwargs["id"] = ids
        if texts is None:
            raise ValueError("`texts` parameter shouldn't be None.")
        if not isinstance(texts, list):
            texts = list(texts)
        if len(texts) == 0:
            raise ValueError("`texts` parameter shouldn't be empty.")
        if metadatas is None:
            metadatas = [{}] * len(texts)
return self.vectorstore.add(
text=texts,
metadata=metadatas,
embedding_data=texts,
embedding_tensor="embedding",
embedding_function=self._embedding_function.embed_documents, # type: ignore
return_ids=True,
**kwargs,
)
def _search_tql(
self,
tql: Optional[str],
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Function for performing tql_search.
Args:
tql (str): TQL Query string for direct evaluation.
Available only for `compute_engine` and `tensor_db`.
exec_option (str, optional): Supports 3 ways to search.
Could be "python", "compute_engine" or "tensor_db". Default is "python".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets due to potential memory
issues.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
return_score (bool): Return score with document. Default is False.
        Returns:
            List[Document]: A list of documents matching the TQL query.
        Raises:
            ValueError: If any unsupported keyword argument (for example
                ``return_score=True``) is passed, since extra options are not
                supported with TQL search.
"""
result = self.vectorstore.search(
query=tql,
exec_option=exec_option,
)
metadatas = result["metadata"]
texts = result["text"]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if kwargs:
unsupported_argument = next(iter(kwargs))
if kwargs[unsupported_argument] is not False:
raise ValueError(
f"specifying {unsupported_argument} is "
"not supported with tql search."
)
return docs
def _search(
self,
query: Optional[str] = None,
embedding: Optional[Union[List[float], np.ndarray]] = None,
embedding_function: Optional[Callable] = None,
k: int = 4,
distance_metric: str = "L2",
use_maximal_marginal_relevance: bool = False,
fetch_k: Optional[int] = 20,
filter: Optional[Union[Dict, Callable]] = None,
return_score: bool = False,
exec_option: Optional[str] = None,
**kwargs: Any,
    ) -> Union[List[Document], List[Tuple[Document, float]]]:
"""
Return docs similar to query.
Args:
query (str, optional): Text to look up similar docs.
embedding (Union[List[float], np.ndarray], optional): Query's embedding.
embedding_function (Callable, optional): Function to convert `query`
into embedding.
k (int): Number of Documents to return.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear, `max`
for L-infinity distance, `cos` for cosine similarity, 'dot' for dot
product.
filter (Union[Dict, Callable], optional): Additional filter prior
to the embedding search.
- ``Dict`` - Key-value search on tensors of htype json, on an
AND basis (a sample must satisfy all key-value filters to be True)
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with `deeplake.filter`.
use_maximal_marginal_relevance (bool): Use maximal marginal relevance.
fetch_k (int): Number of Documents for MMR algorithm.
return_score (bool): Return the score.
exec_option (str, optional): Supports 3 ways to perform searching.
Could be "python", "compute_engine" or "tensor_db".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
**kwargs: Additional keyword arguments.
Returns:
List of Documents by the specified distance metric,
if return_score True, return a tuple of (Document, score)
Raises:
ValueError: if both `embedding` and `embedding_function` are not specified.
"""
if kwargs.get("tql"):
return self._search_tql(
tql=kwargs["tql"],
exec_option=exec_option,
return_score=return_score,
embedding=embedding,
embedding_function=embedding_function,
distance_metric=distance_metric,
use_maximal_marginal_relevance=use_maximal_marginal_relevance,
filter=filter,
)
if embedding_function:
if isinstance(embedding_function, Embeddings):
_embedding_function = embedding_function.embed_query
else:
_embedding_function = embedding_function
elif self._embedding_function:
_embedding_function = self._embedding_function.embed_query
else:
_embedding_function = None
if embedding is None:
if _embedding_function is None:
raise ValueError(
"Either `embedding` or `embedding_function` needs to be"
" specified."
)
embedding = _embedding_function(query) if query else None
if isinstance(embedding, list):
embedding = np.array(embedding, dtype=np.float32)
if len(embedding.shape) > 1:
embedding = embedding[0]
result = self.vectorstore.search(
embedding=embedding,
k=fetch_k if use_maximal_marginal_relevance else k,
distance_metric=distance_metric,
filter=filter,
exec_option=exec_option,
return_tensors=["embedding", "metadata", "text"],
)
scores = result["score"]
embeddings = result["embedding"]
metadatas = result["metadata"]
texts = result["text"]
if use_maximal_marginal_relevance:
lambda_mult = kwargs.get("lambda_mult", 0.5)
indices = maximal_marginal_relevance( # type: ignore
embedding, # type: ignore
embeddings,
k=min(k, len(texts)),
lambda_mult=lambda_mult,
)
scores = [scores[i] for i in indices]
texts = [texts[i] for i in indices]
metadatas = [metadatas[i] for i in indices]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if return_score:
return [(doc, score) for doc, score in zip(docs, scores)]
return docs
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to query.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search(
... query=<your_query>,
... k=<num_items>,
... exec_option=<preferred_exec_option>,
... )
>>> # Run tql search:
>>> data = vector_store.similarity_search(
... query=None,
... tql="SELECT * WHERE id == <id>",
... exec_option="compute_engine",
... )
Args:
k (int): Number of Documents to return. Defaults to 4.
query (str): Text to look up similar documents.
**kwargs: Additional keyword arguments include:
embedding (Callable): Embedding function to use. Defaults to None.
distance_metric (str): 'L2' for Euclidean, 'L1' for Nuclear, 'max'
for L-infinity, 'cos' for cosine, 'dot' for dot product.
Defaults to 'L2'.
filter (Union[Dict, Callable], optional): Additional filter
before embedding search.
- Dict: Key-value search on tensors of htype json,
(sample must satisfy all key-value filters)
Dict = {"tensor_1": {"key": value}, "tensor_2": {"key": value}}
- Function: Compatible with `deeplake.filter`.
Defaults to None.
exec_option (str): Supports 3 ways to perform searching.
'python', 'compute_engine', or 'tensor_db'. Defaults to 'python'.
- 'python': Pure-python implementation for the client.
WARNING: not recommended for big datasets.
- 'compute_engine': C++ implementation of the Compute Engine for
the client. Not for in-memory or local datasets.
- 'tensor_db': Managed Tensor Database for storage and query.
Only for data in Deep Lake Managed Database.
Use `runtime = {"db_engine": True}` during dataset creation.
Returns:
List[Document]: List of Documents most similar to the query vector.
"""
return self._search(
query=query,
k=k,
use_maximal_marginal_relevance=False,
return_score=False,
**kwargs,
)
def similarity_search_by_vector(
self,
embedding: Union[List[float], np.ndarray],
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to embedding vector.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search_by_vector(
... embedding=<your_embedding>,
... k=<num_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding (Union[List[float], np.ndarray]):
Embedding to find similar docs.
k (int): Number of Documents to return. Defaults to 4.
**kwargs: Additional keyword arguments including:
filter (Union[Dict, Callable], optional):
Additional filter before embedding search.
- ``Dict`` - Key-value search on tensors of htype json. True
if all key-value filters are satisfied.
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with
`deeplake.filter`.
Defaults to None.
exec_option (str): Options for search execution include
"python", "compute_engine", or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database.
To store datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear,
`max` for L-infinity distance, `cos` for cosine similarity,
'dot' for dot product. Defaults to `L2`.
Returns:
List[Document]: List of Documents most similar to the query vector.
"""
return self._search(
embedding=embedding,
k=k,
use_maximal_marginal_relevance=False,
return_score=False,
**kwargs,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Run similarity search with Deep Lake with distance returned.
Examples:
>>> data = vector_store.similarity_search_with_score(
... query=<your_query>,
... embedding=<your_embedding_function>
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
**kwargs: Additional keyword arguments. Some of these arguments are:
                distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` for
                    L-infinity distance, `cos` for cosine similarity, 'dot' for
                    dot product.
Defaults to `L2`.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
embedding_function (Callable): Embedding function to use. Defaults
to None.
exec_option (str): DeepLakeVectorStore supports 3 ways to perform
searching. It could be either "python", "compute_engine" or
"tensor_db". Defaults to "python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float."""
return self._search(
query=query,
k=k,
return_score=True,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""
Return docs selected using the maximal marginal relevance. Maximal marginal
relevance optimizes for similarity to query AND diversity among selected docs.
Examples:
>>> data = vector_store.max_marginal_relevance_search_by_vector(
... embedding=<your_embedding>,
... fetch_k=<elements_to_fetch_before_mmr_search>,
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch for MMR algorithm.
lambda_mult: Number between 0 and 1 determining the degree of diversity.
0 corresponds to max diversity and 1 to min diversity. Defaults to 0.5.
exec_option (str): DeepLakeVectorStore supports 3 ways for searching.
Could be "python", "compute_engine" or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
**kwargs: Additional keyword arguments.
Returns:
List[Documents] - A list of documents.
"""
return self._search(
embedding=embedding,
k=k,
fetch_k=fetch_k,
use_maximal_marginal_relevance=True,
lambda_mult=lambda_mult,
exec_option=exec_option,
**kwargs,
)
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Examples:
>>> # Search using an embedding
>>> data = vector_store.max_marginal_relevance_search(
... query = <query_to_search>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents for MMR algorithm.
lambda_mult: Value between 0 and 1. 0 corresponds
to maximum diversity and 1 to minimum.
Defaults to 0.5.
exec_option (str): Supports 3 ways to perform searching.
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database. To store
datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
**kwargs: Additional keyword arguments
Returns:
List of Documents selected by maximal marginal relevance.
Raises:
            ValueError: when MMR search is on but embedding function is
not specified.
"""
embedding_function = kwargs.get("embedding") or self._embedding_function
if embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on"
" `creation` or during add call."
)
return self._search(
query=query,
k=k,
fetch_k=fetch_k,
use_maximal_marginal_relevance=True,
lambda_mult=lambda_mult,
exec_option=exec_option,
embedding_function=embedding_function, # type: ignore
**kwargs,
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
**kwargs: Any,
) -> DeepLake:
"""Create a Deep Lake dataset from a raw documents.
If a dataset_path is specified, the dataset will be persisted in that location,
otherwise by default at `./deeplake`
Examples:
>>> # Search using an embedding
>>> vector_store = DeepLake.from_texts(
... texts = <the_texts_that_you_want_to_embed>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
dataset_path (str): - The full path to the dataset. Can be:
- Deep Lake cloud path of the form ``hub://username/dataset_name``.
To write to Deep Lake cloud datasets,
ensure that you are logged in to Deep Lake
(use 'activeloop login' from command line)
- AWS S3 path of the form ``s3://bucketname/path/to/dataset``.
Credentials are required in either the environment
- Google Cloud Storage path of the form
``gcs://bucketname/path/to/dataset`` Credentials are required
in either the environment
- Local file system path of the form ``./path/to/dataset`` or
``~/path/to/dataset`` or ``path/to/dataset``.
- In-memory path of the form ``mem://path/to/dataset`` which doesn't
save the dataset, but keeps it in memory instead.
Should be used only for testing as it does not persist.
texts (List[Document]): List of documents to add.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
Note, in other places, it is called embedding_function.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
DeepLake: Deep Lake dataset.
"""
deeplake_dataset = cls(dataset_path=dataset_path, embedding=embedding, **kwargs)
deeplake_dataset.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
)
return deeplake_dataset
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool:
"""Delete the entities in the dataset.
Args:
ids (Optional[List[str]], optional): The document_ids to delete.
Defaults to None.
**kwargs: Other keyword arguments that subclasses might use.
- filter (Optional[Dict[str, str]], optional): The filter to delete by.
- delete_all (Optional[bool], optional): Whether to drop the dataset.
Returns:
bool: Whether the delete operation was successful.
"""
filter = kwargs.get("filter")
delete_all = kwargs.get("delete_all")
self.vectorstore.delete(ids=ids, filter=filter, delete_all=delete_all)
return True
@classmethod
def force_delete_by_path(cls, path: str) -> None:
"""Force delete dataset by path.
Args:
path (str): path of the dataset to delete.
Raises:
ValueError: if deeplake is not installed.
"""
try:
import deeplake
except ImportError:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake`."
)
deeplake.delete(path, large_ok=True, force=True)
def delete_dataset(self) -> None:
"""Delete the collection."""
self.delete(delete_all=True)
def ds(self) -> Any:
logger.warning(
"this method is deprecated and will be removed, "
"better to use `db.vectorstore.dataset` instead."
)
return self.vectorstore.dataset
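# --- Illustrative sketch (not part of the original module) ----------------------
# A hedged end-to-end example. OpenAIEmbeddings and the local dataset path are
# assumptions made for illustration; any Embeddings implementation works, and
# the `deeplake` package plus an OpenAI API key are assumed to be available.
def _deeplake_demo() -> None:  # pragma: no cover
    from langchain.embeddings.openai import OpenAIEmbeddings
    store = DeepLake.from_texts(
        [
            "Deep Lake stores embeddings with version control",
            "Vector stores power retrieval augmented generation",
        ],
        embedding=OpenAIEmbeddings(),
        dataset_path="./deeplake_demo/",
    )
    for doc, score in store.similarity_search_with_score("what powers RAG?", k=1):
        print(score, doc.page_content)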
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~output_parsers~test_xml_parser.py | """Test XMLOutputParser"""
import pytest
from langchain.output_parsers.xml import XMLOutputParser
DEF_RESULT_ENCODING = """<?xml version="1.0" encoding="UTF-8"?>
<foo>
<bar>
<baz></baz>
<baz>slim.shady</baz>
</bar>
<baz>tag</baz>
</foo>"""
DEF_RESULT_EXPECTED = {
"foo": [
{"bar": [{"baz": None}, {"baz": "slim.shady"}]},
{"baz": "tag"},
],
}
@pytest.mark.parametrize(
"result",
[DEF_RESULT_ENCODING, DEF_RESULT_ENCODING[DEF_RESULT_ENCODING.find("\n") :]],
)
def test_xml_output_parser(result: str) -> None:
"""Test XMLOutputParser."""
xml_parser = XMLOutputParser()
xml_result = xml_parser.parse(result)
assert DEF_RESULT_EXPECTED == xml_result
@pytest.mark.parametrize("result", ["foo></foo>", "<foo></foo", "foo></foo", "foofoo"])
def test_xml_output_parser_fail(result: str) -> None:
"""Test XMLOutputParser where complete output is not in XML format."""
xml_parser = XMLOutputParser()
with pytest.raises(ValueError) as e:
xml_parser.parse(result)
assert "Could not parse output" in str(e)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~graph_qa~arangodb.py | """Question answering over a graph."""
from __future__ import annotations
import re
from typing import Any, Dict, List, Optional
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
AQL_FIX_PROMPT,
AQL_GENERATION_PROMPT,
AQL_QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.graphs.arangodb_graph import ArangoGraph
from langchain.pydantic_v1 import Field
from langchain.schema import BasePromptTemplate
class ArangoGraphQAChain(Chain):
"""Chain for question-answering against a graph by generating AQL statements.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
graph: ArangoGraph = Field(exclude=True)
aql_generation_chain: LLMChain
aql_fix_chain: LLMChain
qa_chain: LLMChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
# Specifies the maximum number of AQL Query Results to return
top_k: int = 10
# Specifies the set of AQL Query Examples that promote few-shot-learning
aql_examples: str = ""
# Specify whether to return the AQL Query in the output dictionary
return_aql_query: bool = False
# Specify whether to return the AQL JSON Result in the output dictionary
return_aql_result: bool = False
# Specify the maximum amount of AQL Generation attempts that should be made
max_aql_generation_attempts: int = 3
@property
def input_keys(self) -> List[str]:
return [self.input_key]
@property
def output_keys(self) -> List[str]:
return [self.output_key]
@property
def _chain_type(self) -> str:
return "graph_aql_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
qa_prompt: BasePromptTemplate = AQL_QA_PROMPT,
aql_generation_prompt: BasePromptTemplate = AQL_GENERATION_PROMPT,
aql_fix_prompt: BasePromptTemplate = AQL_FIX_PROMPT,
**kwargs: Any,
) -> ArangoGraphQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
aql_generation_chain = LLMChain(llm=llm, prompt=aql_generation_prompt)
aql_fix_chain = LLMChain(llm=llm, prompt=aql_fix_prompt)
return cls(
qa_chain=qa_chain,
aql_generation_chain=aql_generation_chain,
aql_fix_chain=aql_fix_chain,
**kwargs,
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""
Generate an AQL statement from user input, use it to retrieve a response
from an ArangoDB Database instance, and respond to the user input
in natural language.
Users can modify the following ArangoGraphQAChain Class Variables:
:var top_k: The maximum number of AQL Query Results to return
:type top_k: int
:var aql_examples: A set of AQL Query Examples that are passed to
the AQL Generation Prompt Template to promote few-shot-learning.
Defaults to an empty string.
:type aql_examples: str
:var return_aql_query: Whether to return the AQL Query in the
output dictionary. Defaults to False.
:type return_aql_query: bool
:var return_aql_result: Whether to return the AQL JSON Result in the
output dictionary. Defaults to False.
:type return_aql_result: bool
:var max_aql_generation_attempts: The maximum amount of AQL
Generation attempts to be made prior to raising the last
AQL Query Execution Error. Defaults to 3.
:type max_aql_generation_attempts: int
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
user_input = inputs[self.input_key]
#########################
# Generate AQL Query #
aql_generation_output = self.aql_generation_chain.run(
{
"adb_schema": self.graph.schema,
"aql_examples": self.aql_examples,
"user_input": user_input,
},
callbacks=callbacks,
)
#########################
aql_query = ""
aql_error = ""
aql_result = None
aql_generation_attempt = 1
while (
aql_result is None
and aql_generation_attempt < self.max_aql_generation_attempts + 1
):
#####################
# Extract AQL Query #
pattern = r"```(?i:aql)?(.*?)```"
matches = re.findall(pattern, aql_generation_output, re.DOTALL)
if not matches:
_run_manager.on_text(
"Invalid Response: ", end="\n", verbose=self.verbose
)
_run_manager.on_text(
aql_generation_output, color="red", end="\n", verbose=self.verbose
)
raise ValueError(f"Response is Invalid: {aql_generation_output}")
aql_query = matches[0]
#####################
_run_manager.on_text(
f"AQL Query ({aql_generation_attempt}):", verbose=self.verbose
)
_run_manager.on_text(
aql_query, color="green", end="\n", verbose=self.verbose
)
#####################
# Execute AQL Query #
from arango import AQLQueryExecuteError
try:
aql_result = self.graph.query(aql_query, self.top_k)
except AQLQueryExecuteError as e:
aql_error = e.error_message
_run_manager.on_text(
"AQL Query Execution Error: ", end="\n", verbose=self.verbose
)
_run_manager.on_text(
aql_error, color="yellow", end="\n\n", verbose=self.verbose
)
########################
# Retry AQL Generation #
aql_generation_output = self.aql_fix_chain.run(
{
"adb_schema": self.graph.schema,
"aql_query": aql_query,
"aql_error": aql_error,
},
callbacks=callbacks,
)
########################
#####################
aql_generation_attempt += 1
if aql_result is None:
m = f"""
Maximum amount of AQL Query Generation attempts reached.
Unable to execute the AQL Query due to the following error:
{aql_error}
"""
raise ValueError(m)
_run_manager.on_text("AQL Result:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(aql_result), color="green", end="\n", verbose=self.verbose
)
########################
# Interpret AQL Result #
result = self.qa_chain(
{
"adb_schema": self.graph.schema,
"user_input": user_input,
"aql_query": aql_query,
"aql_result": aql_result,
},
callbacks=callbacks,
)
########################
# Return results #
result = {self.output_key: result[self.qa_chain.output_key]}
if self.return_aql_query:
result["aql_query"] = aql_query
if self.return_aql_result:
result["aql_result"] = aql_result
return result
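# Illustrative usage sketch: assumes a reachable ArangoDB instance and valid
# OpenAI credentials. The host, database name, user, password, and the example
# question below are placeholders and depend on your own data.
if __name__ == "__main__":
    from arango import ArangoClient
    from langchain.chat_models import ChatOpenAI
    # Connect to a local ArangoDB database (placeholder credentials).
    db = ArangoClient(hosts="http://localhost:8529").db(
        "example_db", username="root", password="example-password", verify=True
    )
    graph = ArangoGraph(db)
    # Build the QA chain; extra kwargs such as `graph` are forwarded to the chain.
    chain = ArangoGraphQAChain.from_llm(
        ChatOpenAI(temperature=0), graph=graph, verbose=True
    )
    print(chain.run("How many documents are stored in the graph?"))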
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~memory~test_xata.py | """Test Xata chat memory store functionality.
Before running this test, please create a Xata database.
"""
import json
import os
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import XataChatMessageHistory
from langchain.schema.messages import _message_to_dict
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_xata_chat_memory(self) -> None:
message_history = XataChatMessageHistory(
api_key=os.getenv("XATA_API_KEY", ""),
db_url=os.getenv("XATA_DB_URL", ""),
session_id="integration-test-session",
)
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([_message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# remove the record from Xata, so the next test run won't pick it up
memory.chat_memory.clear()
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~memory~chat_message_histories~cassandra.py | """Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import typing
from typing import List
if typing.TYPE_CHECKING:
from cassandra.cluster import Session
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict
DEFAULT_TABLE_NAME = "message_store"
DEFAULT_TTL_SECONDS = None
class CassandraChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in Cassandra.
Args:
session_id: arbitrary key that is used to store the messages
of a single chat session.
session: a Cassandra `Session` object (an open DB connection)
keyspace: name of the keyspace to use.
table_name: name of the table to use.
ttl_seconds: time-to-live (seconds) for automatic expiration
of stored entries. None (default) for no expiration.
"""
def __init__(
self,
session_id: str,
session: Session,
keyspace: str,
table_name: str = DEFAULT_TABLE_NAME,
ttl_seconds: int | None = DEFAULT_TTL_SECONDS,
) -> None:
try:
from cassio.history import StoredBlobHistory
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session_id = session_id
self.ttl_seconds = ttl_seconds
self.blob_history = StoredBlobHistory(session, keyspace, table_name)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve all session messages from DB"""
message_blobs = self.blob_history.retrieve(
self.session_id,
)
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Write a message to the table"""
self.blob_history.store(
self.session_id, json.dumps(_message_to_dict(message)), self.ttl_seconds
)
def clear(self) -> None:
"""Clear session memory from DB"""
self.blob_history.clear_session_id(self.session_id)
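# Illustrative usage sketch: assumes a reachable Cassandra cluster with an
# existing keyspace; the contact point and keyspace name below are placeholders.
if __name__ == "__main__":
    from cassandra.cluster import Cluster
    # Open a session against a local node (placeholder contact point).
    session = Cluster(contact_points=["127.0.0.1"]).connect()
    history = CassandraChatMessageHistory(
        session_id="example-session",
        session=session,
        keyspace="example_keyspace",
        ttl_seconds=3600,
    )
    history.add_user_message("Hello, how are you?")
    history.add_ai_message("I'm doing well, thanks for asking.")
    print(history.messages)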
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~schema~chat_history.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List
from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage
class BaseChatMessageHistory(ABC):
"""Abstract base class for storing chat message history.
See `ChatMessageHistory` for default implementation.
Example:
.. code-block:: python
class FileChatMessageHistory(BaseChatMessageHistory):
storage_path: str
session_id: str
@property
def messages(self):
with open(os.path.join(storage_path, session_id), 'r', encoding='utf-8') as f:
messages = json.loads(f.read())
return messages_from_dict(messages)
def add_message(self, message: BaseMessage) -> None:
messages = [_message_to_dict(msg) for msg in self.messages + [message]]
with open(os.path.join(storage_path, session_id), 'w') as f:
    json.dump(messages, f)
def clear(self):
with open(os.path.join(storage_path, session_id), 'w') as f:
f.write("[]")
"""
messages: List[BaseMessage]
"""A list of Messages stored in-memory."""
def add_user_message(self, message: str) -> None:
"""Convenience method for adding a human message string to the store.
Args:
message: The string contents of a human message.
"""
self.add_message(HumanMessage(content=message))
def add_ai_message(self, message: str) -> None:
"""Convenience method for adding an AI message string to the store.
Args:
message: The string contents of an AI message.
"""
self.add_message(AIMessage(content=message))
@abstractmethod
def add_message(self, message: BaseMessage) -> None:
"""Add a Message object to the store.
Args:
message: A BaseMessage object to store.
"""
raise NotImplementedError()
@abstractmethod
def clear(self) -> None:
"""Remove all messages from the store"""
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~smith~evaluation~string_run_evaluator.py | """Run evaluator wrapper for string evaluators."""
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from langsmith import EvaluationResult, RunEvaluator
from langsmith.schemas import DataType, Example, Run
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.evaluation.schema import StringEvaluator
from langchain.load.dump import dumpd
from langchain.load.load import load
from langchain.load.serializable import Serializable
from langchain.schema import RUN_KEY, messages_from_dict
from langchain.schema.messages import BaseMessage, get_buffer_string
def _get_messages_from_run_dict(messages: List[dict]) -> List[BaseMessage]:
if not messages:
return []
first_message = messages[0]
if "lc" in first_message:
return [load(dumpd(message)) for message in messages]
else:
return messages_from_dict(messages)
class StringRunMapper(Serializable):
"""Extract items to evaluate from the run object."""
@property
def output_keys(self) -> List[str]:
"""The keys to extract from the run."""
return ["prediction", "input"]
@abstractmethod
def map(self, run: Run) -> Dict[str, str]:
"""Maps the Run to a dictionary."""
def __call__(self, run: Run) -> Dict[str, str]:
"""Maps the Run to a dictionary."""
if not run.outputs:
raise ValueError(f"Run {run.id} has no outputs to evaluate.")
return self.map(run)
class LLMStringRunMapper(StringRunMapper):
"""Extract items to evaluate from the run object."""
def serialize_chat_messages(self, messages: List[Dict]) -> str:
"""Extract the input messages from the run."""
if isinstance(messages, list) and messages:
if isinstance(messages[0], dict):
chat_messages = _get_messages_from_run_dict(messages)
elif isinstance(messages[0], list):
# Runs from Tracer have messages as a list of lists of dicts
chat_messages = _get_messages_from_run_dict(messages[0])
else:
raise ValueError(f"Could not extract messages to evaluate {messages}")
return get_buffer_string(chat_messages)
raise ValueError(f"Could not extract messages to evaluate {messages}")
def serialize_inputs(self, inputs: Dict) -> str:
if "prompts" in inputs: # Should we even accept this?
input_ = "\n\n".join(inputs["prompts"])
elif "prompt" in inputs:
input_ = inputs["prompt"]
elif "messages" in inputs:
input_ = self.serialize_chat_messages(inputs["messages"])
else:
raise ValueError("LLM Run must have either messages or prompts as inputs.")
return input_
def serialize_outputs(self, outputs: Dict) -> str:
if not outputs.get("generations"):
raise ValueError("Cannot evaluate LLM Run without generations.")
generations: List[Dict] = outputs["generations"]
if not generations:
raise ValueError("Cannot evaluate LLM run with empty generations.")
first_generation: Dict = generations[0]
if isinstance(first_generation, list):
# Runs from Tracer have generations as a list of lists of dicts
# Whereas Runs from the API have a list of dicts
first_generation = first_generation[0]
if "message" in first_generation:
output_ = self.serialize_chat_messages([first_generation["message"]])
else:
output_ = first_generation["text"]
return output_
def map(self, run: Run) -> Dict[str, str]:
"""Maps the Run to a dictionary."""
if run.run_type != "llm":
raise ValueError("LLM RunMapper only supports LLM runs.")
elif not run.outputs:
if run.error:
raise ValueError(
f"Cannot evaluate errored LLM run {run.id}: {run.error}"
)
else:
raise ValueError(
f"Run {run.id} has no outputs. Cannot evaluate this run."
)
else:
try:
inputs = self.serialize_inputs(run.inputs)
except Exception as e:
raise ValueError(
f"Could not parse LM input from run inputs {run.inputs}"
) from e
try:
output_ = self.serialize_outputs(run.outputs)
except Exception as e:
raise ValueError(
f"Could not parse LM prediction from run outputs {run.outputs}"
) from e
return {"input": inputs, "prediction": output_}
class ChainStringRunMapper(StringRunMapper):
"""Extract items to evaluate from the run object from a chain."""
input_key: Optional[str] = None
"""The key from the model Run's inputs to use as the eval input.
If not provided, will use the only input key or raise an
error if there are multiple."""
prediction_key: Optional[str] = None
"""The key from the model Run's outputs to use as the eval prediction.
If not provided, will use the only output key or raise an error
if there are multiple."""
def _get_key(self, source: Dict, key: Optional[str], which: str) -> str:
if key is not None:
return source[key]
elif len(source) == 1:
return next(iter(source.values()))
else:
raise ValueError(
f"Could not map run {which} with multiple keys: "
f"{source}\nPlease manually specify a {which}_key"
)
def map(self, run: Run) -> Dict[str, str]:
"""Maps the Run to a dictionary."""
if not run.outputs:
raise ValueError(
f"Run with ID {run.id} lacks outputs required for evaluation."
" Ensure the Run has valid outputs."
)
if self.input_key is not None and self.input_key not in run.inputs:
raise ValueError(
f"Run with ID {run.id} is missing the expected input key"
f" '{self.input_key}'.\nAvailable input keys in this Run"
f" are: {run.inputs.keys()}.\nAdjust the evaluator's"
f" input_key or ensure your input data includes key"
f" '{self.input_key}'."
)
elif self.prediction_key is not None and self.prediction_key not in run.outputs:
available_keys = ", ".join(run.outputs.keys())
raise ValueError(
f"Run with ID {run.id} doesn't have the expected prediction key"
f" '{self.prediction_key}'. Available prediction keys in this Run are:"
f" {available_keys}. Adjust the evaluator's prediction_key or"
" ensure the Run object's outputs the expected key."
)
else:
input_ = self._get_key(run.inputs, self.input_key, "input")
prediction = self._get_key(run.outputs, self.prediction_key, "prediction")
return {
"input": input_,
"prediction": prediction,
}
class ToolStringRunMapper(StringRunMapper):
"""Map an input to the tool."""
def map(self, run: Run) -> Dict[str, str]:
if not run.outputs:
raise ValueError(f"Run {run.id} has no outputs to evaluate.")
return {"input": run.inputs["input"], "prediction": run.outputs["output"]}
class StringExampleMapper(Serializable):
"""Map an example, or row in the dataset, to the inputs of an evaluation."""
reference_key: Optional[str] = None
@property
def output_keys(self) -> List[str]:
"""The keys to extract from the run."""
return ["reference"]
def serialize_chat_messages(self, messages: List[Dict]) -> str:
"""Extract the input messages from the run."""
chat_messages = _get_messages_from_run_dict(messages)
return get_buffer_string(chat_messages)
def map(self, example: Example) -> Dict[str, str]:
"""Maps the Example, or dataset row to a dictionary."""
if not example.outputs:
raise ValueError(
f"Example {example.id} has no outputs to use as a reference."
)
if self.reference_key is None:
if len(example.outputs) > 1:
raise ValueError(
f"Example {example.id} has multiple outputs, so you must"
" specify a reference_key."
)
else:
output = list(example.outputs.values())[0]
elif self.reference_key not in example.outputs:
raise ValueError(
f"Example {example.id} does not have reference key"
f" {self.reference_key}."
)
else:
output = example.outputs[self.reference_key]
return {
"reference": self.serialize_chat_messages([output])
if isinstance(output, dict) and output.get("type") and output.get("data")
else str(output)
}
def __call__(self, example: Example) -> Dict[str, str]:
"""Maps the Run and Example to a dictionary."""
if not example.outputs:
raise ValueError(
f"Example {example.id} has no outputs to use as areference label."
)
return self.map(example)
class StringRunEvaluatorChain(Chain, RunEvaluator):
"""Evaluate Run and optional examples."""
run_mapper: StringRunMapper
"""Maps the Run to a dictionary with 'input' and 'prediction' strings."""
example_mapper: Optional[StringExampleMapper] = None
"""Maps the Example (dataset row) to a dictionary
with a 'reference' string."""
name: str
"""The name of the evaluation metric."""
string_evaluator: StringEvaluator
"""The evaluation chain."""
@property
def input_keys(self) -> List[str]:
return ["run", "example"]
@property
def output_keys(self) -> List[str]:
return ["feedback"]
def _prepare_input(self, inputs: Dict[str, Any]) -> Dict[str, str]:
run: Run = inputs["run"]
example: Optional[Example] = inputs.get("example")
evaluate_strings_inputs = self.run_mapper(run)
if not self.string_evaluator.requires_input:
# Hide warning about unused input
evaluate_strings_inputs.pop("input", None)
if example and self.example_mapper and self.string_evaluator.requires_reference:
evaluate_strings_inputs.update(self.example_mapper(example))
elif self.string_evaluator.requires_reference:
raise ValueError(
f"Evaluator {self.name} requires an reference"
" example from the dataset,"
f" but none was provided for run {run.id}."
)
return evaluate_strings_inputs
def _prepare_output(self, output: Dict[str, Any]) -> Dict[str, Any]:
evaluation_result = EvaluationResult(
key=self.name, comment=output.get("reasoning"), **output
)
if RUN_KEY in output:
# TODO: Not currently surfaced. Update
evaluation_result.evaluator_info[RUN_KEY] = output[RUN_KEY]
return {"feedback": evaluation_result}
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Call the evaluation chain."""
evaluate_strings_inputs = self._prepare_input(inputs)
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
chain_output = self.string_evaluator.evaluate_strings(
**evaluate_strings_inputs,
callbacks=callbacks,
include_run_info=True,
)
return self._prepare_output(chain_output)
async def _acall(
self,
inputs: Dict[str, str],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> Dict[str, Any]:
"""Call the evaluation chain."""
evaluate_strings_inputs = self._prepare_input(inputs)
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
chain_output = await self.string_evaluator.aevaluate_strings(
**evaluate_strings_inputs,
callbacks=callbacks,
include_run_info=True,
)
return self._prepare_output(chain_output)
def _prepare_evaluator_output(self, output: Dict[str, Any]) -> EvaluationResult:
feedback: EvaluationResult = output["feedback"]
if RUN_KEY not in feedback.evaluator_info:
feedback.evaluator_info[RUN_KEY] = output[RUN_KEY]
return feedback
def evaluate_run(
self, run: Run, example: Optional[Example] = None
) -> EvaluationResult:
"""Evaluate an example."""
try:
result = self({"run": run, "example": example}, include_run_info=True)
return self._prepare_evaluator_output(result)
except Exception as e:
return EvaluationResult(
key=self.string_evaluator.evaluation_name,
comment=f"Error evaluating run {run.id}: {e}",
# TODO: Add run ID once we can declare it via callbacks
)
async def aevaluate_run(
self, run: Run, example: Optional[Example] = None
) -> EvaluationResult:
"""Evaluate an example."""
try:
result = await self.acall(
{"run": run, "example": example}, include_run_info=True
)
return self._prepare_evaluator_output(result)
except Exception as e:
return EvaluationResult(
key=self.string_evaluator.evaluation_name,
comment=f"Error evaluating run {run.id}: {e}",
)
@classmethod
def from_run_and_data_type(
cls,
evaluator: StringEvaluator,
run_type: str,
data_type: DataType,
input_key: Optional[str] = None,
prediction_key: Optional[str] = None,
reference_key: Optional[str] = None,
tags: Optional[List[str]] = None,
) -> StringRunEvaluatorChain:
"""
Create a StringRunEvaluatorChain from an evaluator and the run and dataset types.
This method provides an easy way to instantiate a StringRunEvaluatorChain, by
taking an evaluator and information about the type of run and the data.
The method supports LLM and chain runs.
Args:
evaluator (StringEvaluator): The string evaluator to use.
run_type (str): The type of run being evaluated.
Supported types are LLM and Chain.
data_type (DataType): The type of dataset used in the run.
input_key (str, optional): The key used to map the input from the run.
prediction_key (str, optional): The key used to map the prediction from the run.
reference_key (str, optional): The key used to map the reference from the dataset.
tags (List[str], optional): List of tags to attach to the evaluation chain.
Returns:
StringRunEvaluatorChain: The instantiated evaluation chain.
Raises:
ValueError: If the run type is not supported, or if the evaluator requires a
reference from the dataset but the reference key is not provided.
""" # noqa: E501
# Configure how run inputs/predictions are passed to the evaluator
if run_type == "llm":
run_mapper: StringRunMapper = LLMStringRunMapper()
elif run_type == "chain":
run_mapper = ChainStringRunMapper(
input_key=input_key, prediction_key=prediction_key
)
else:
raise ValueError(
f"Unsupported run type {run_type}. Expected one of 'llm' or 'chain'."
)
# Configure how example rows are fed as a reference string to the evaluator
if (
reference_key is not None
or data_type in (DataType.llm, DataType.chat)
or evaluator.requires_reference
):
example_mapper = StringExampleMapper(reference_key=reference_key)
elif evaluator.requires_reference:
raise ValueError(
f"Evaluator {evaluator.evaluation_name} requires a reference"
" example from the dataset. Please specify the reference key from"
" amongst the dataset outputs keys."
)
else:
example_mapper = None
return cls(
name=evaluator.evaluation_name,
run_mapper=run_mapper,
example_mapper=example_mapper,
string_evaluator=evaluator,
tags=tags,
)
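# Illustrative usage sketch: wires the built-in "qa" string evaluator into a run
# evaluator for chain-type runs over a key-value dataset. The "question",
# "answer", and "reference" keys are placeholder names and must match your own
# chain inputs/outputs and dataset columns.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI
    from langchain.evaluation import load_evaluator
    qa_evaluator = load_evaluator("qa", llm=ChatOpenAI(temperature=0))
    run_evaluator = StringRunEvaluatorChain.from_run_and_data_type(
        evaluator=qa_evaluator,
        run_type="chain",
        data_type=DataType.kv,
        input_key="question",
        prediction_key="answer",
        reference_key="reference",
    )
    # `run_evaluator.evaluate_run(run, example)` can then score a LangSmith run
    # against its dataset example.
    print(run_evaluator.name)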
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~labelstudio_callback.py | import os
import warnings
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import UUID
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
ChatMessage,
Generation,
LLMResult,
)
class LabelStudioMode(Enum):
"""Label Studio mode enumerator."""
PROMPT = "prompt"
CHAT = "chat"
def get_default_label_configs(
mode: Union[str, LabelStudioMode]
) -> Tuple[str, LabelStudioMode]:
"""Get default Label Studio configs for the given mode.
Parameters:
mode: Label Studio mode ("prompt" or "chat")
Returns: Tuple of Label Studio config and mode
"""
_default_label_configs = {
LabelStudioMode.PROMPT.value: """
<View>
<Style>
.prompt-box {
background-color: white;
border-radius: 10px;
box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
padding: 20px;
}
</Style>
<View className="root">
<View className="prompt-box">
<Text name="prompt" value="$prompt"/>
</View>
<TextArea name="response" toName="prompt"
maxSubmissions="1" editable="true"
required="true"/>
</View>
<Header value="Rate the response:"/>
<Rating name="rating" toName="prompt"/>
</View>""",
LabelStudioMode.CHAT.value: """
<View>
<View className="root">
<Paragraphs name="dialogue"
value="$prompt"
layout="dialogue"
textKey="content"
nameKey="role"
granularity="sentence"/>
<Header value="Final response:"/>
<TextArea name="response" toName="dialogue"
maxSubmissions="1" editable="true"
required="true"/>
</View>
<Header value="Rate the response:"/>
<Rating name="rating" toName="dialogue"/>
</View>""",
}
if isinstance(mode, str):
mode = LabelStudioMode(mode)
return _default_label_configs[mode.value], mode
class LabelStudioCallbackHandler(BaseCallbackHandler):
"""Label Studio callback handler.
Provides the ability to send predictions to Label Studio
for human evaluation, feedback and annotation.
Parameters:
api_key: Label Studio API key
url: Label Studio URL
project_id: Label Studio project ID
project_name: Label Studio project name
project_config: Label Studio project config (XML)
mode: Label Studio mode ("prompt" or "chat")
Examples:
>>> from langchain.llms import OpenAI
>>> from langchain.callbacks import LabelStudioCallbackHandler
>>> handler = LabelStudioCallbackHandler(
... api_key='<your_key_here>',
... url='http://localhost:8080',
... project_name='LangChain-%Y-%m-%d',
... mode='prompt'
... )
>>> llm = OpenAI(callbacks=[handler])
>>> llm.predict('Tell me a story about a dog.')
"""
DEFAULT_PROJECT_NAME: str = "LangChain-%Y-%m-%d"
def __init__(
self,
api_key: Optional[str] = None,
url: Optional[str] = None,
project_id: Optional[int] = None,
project_name: str = DEFAULT_PROJECT_NAME,
project_config: Optional[str] = None,
mode: Union[str, LabelStudioMode] = LabelStudioMode.PROMPT,
):
super().__init__()
# Import LabelStudio SDK
try:
import label_studio_sdk as ls
except ImportError:
raise ImportError(
f"You're using {self.__class__.__name__} in your code,"
f" but you don't have the LabelStudio SDK "
f"Python package installed or upgraded to the latest version. "
f"Please run `pip install -U label-studio-sdk`"
f" before using this callback."
)
# Check if Label Studio API key is provided
if not api_key:
if os.getenv("LABEL_STUDIO_API_KEY"):
api_key = str(os.getenv("LABEL_STUDIO_API_KEY"))
else:
raise ValueError(
f"You're using {self.__class__.__name__} in your code,"
f" Label Studio API key is not provided. "
f"Please provide Label Studio API key: "
f"go to the Label Studio instance, navigate to "
f"Account & Settings -> Access Token and copy the key. "
f"Use the key as a parameter for the callback: "
f"{self.__class__.__name__}"
f"(label_studio_api_key='<your_key_here>', ...) or "
f"set the environment variable LABEL_STUDIO_API_KEY=<your_key_here>"
)
self.api_key = api_key
if not url:
if os.getenv("LABEL_STUDIO_URL"):
url = os.getenv("LABEL_STUDIO_URL")
else:
warnings.warn(
f"Label Studio URL is not provided, "
f"using default URL: {ls.LABEL_STUDIO_DEFAULT_URL}"
f"If you want to provide your own URL, use the parameter: "
f"{self.__class__.__name__}"
f"(label_studio_url='<your_url_here>', ...) "
f"or set the environment variable LABEL_STUDIO_URL=<your_url_here>"
)
url = ls.LABEL_STUDIO_DEFAULT_URL
self.url = url
# Maps run_id to prompts
self.payload: Dict[str, Dict] = {}
self.ls_client = ls.Client(url=self.url, api_key=self.api_key)
self.project_name = project_name
if project_config:
self.project_config = project_config
self.mode = None
else:
self.project_config, self.mode = get_default_label_configs(mode)
self.project_id = project_id or os.getenv("LABEL_STUDIO_PROJECT_ID")
if self.project_id is not None:
self.ls_project = self.ls_client.get_project(int(self.project_id))
else:
project_title = datetime.today().strftime(self.project_name)
existing_projects = self.ls_client.get_projects(title=project_title)
if existing_projects:
self.ls_project = existing_projects[0]
self.project_id = self.ls_project.id
else:
self.ls_project = self.ls_client.create_project(
title=project_title, label_config=self.project_config
)
self.project_id = self.ls_project.id
self.parsed_label_config = self.ls_project.parsed_label_config
# Find the first TextArea tag
# "from_name", "to_name", "value" will be used to create predictions
self.from_name, self.to_name, self.value, self.input_type = (
None,
None,
None,
None,
)
for tag_name, tag_info in self.parsed_label_config.items():
if tag_info["type"] == "TextArea":
self.from_name = tag_name
self.to_name = tag_info["to_name"][0]
self.value = tag_info["inputs"][0]["value"]
self.input_type = tag_info["inputs"][0]["type"]
break
if not self.from_name:
error_message = (
f'Label Studio project "{self.project_name}" '
f"does not have a TextArea tag. "
f"Please add a TextArea tag to the project."
)
if self.mode == LabelStudioMode.PROMPT:
error_message += (
"\nHINT: go to project Settings -> "
"Labeling Interface -> Browse Templates"
' and select "Generative AI -> '
'Supervised Language Model Fine-tuning" template.'
)
else:
error_message += (
"\nHINT: go to project Settings -> "
"Labeling Interface -> Browse Templates"
" and check available templates under "
'"Generative AI" section.'
)
raise ValueError(error_message)
def add_prompts_generations(
self, run_id: str, generations: List[List[Generation]]
) -> None:
# Create tasks in Label Studio
tasks = []
prompts = self.payload[run_id]["prompts"]
model_version = (
self.payload[run_id]["kwargs"]
.get("invocation_params", {})
.get("model_name")
)
for prompt, generation in zip(prompts, generations):
tasks.append(
{
"data": {
self.value: prompt,
"run_id": run_id,
},
"predictions": [
{
"result": [
{
"from_name": self.from_name,
"to_name": self.to_name,
"type": "textarea",
"value": {"text": [g.text for g in generation]},
}
],
"model_version": model_version,
}
],
}
)
self.ls_project.import_tasks(tasks)
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> None:
"""Save the prompts in memory when an LLM starts."""
if self.input_type != "Text":
raise ValueError(
f'\nLabel Studio project "{self.project_name}" '
f"has an input type <{self.input_type}>. "
f'To make it work with the mode="prompt", '
f"the input type should be <Text>.\n"
f"Read more here https://labelstud.io/tags/text"
)
run_id = str(kwargs["run_id"])
self.payload[run_id] = {"prompts": prompts, "kwargs": kwargs}
def _get_message_role(self, message: BaseMessage) -> str:
"""Get the role of the message."""
if isinstance(message, ChatMessage):
return message.role
else:
return message.__class__.__name__
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Save the prompts in memory when an LLM starts."""
if self.input_type != "Paragraphs":
raise ValueError(
f'\nLabel Studio project "{self.project_name}" '
f"has an input type <{self.input_type}>. "
f'To make it work with the mode="chat", '
f"the input type should be <Paragraphs>.\n"
f"Read more here https://labelstud.io/tags/paragraphs"
)
prompts = []
for message_list in messages:
dialog = []
for message in message_list:
dialog.append(
{
"role": self._get_message_role(message),
"content": message.content,
}
)
prompts.append(dialog)
self.payload[str(run_id)] = {
"prompts": prompts,
"tags": tags,
"metadata": metadata,
"run_id": run_id,
"parent_run_id": parent_run_id,
"kwargs": kwargs,
}
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing when a new token is generated."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Create a new Label Studio task for each prompt and generation."""
run_id = str(kwargs["run_id"])
# Submit results to Label Studio
self.add_prompts_generations(run_id, response.generations)
# Pop current run from `self.runs`
self.payload.pop(run_id)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM outputs an error."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
pass
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM chain outputs an error."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
pass
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing"""
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing"""
pass
| [
"[]",
"prompt"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~agent_toolkits~powerbi~chat_base.py | """Power BI agent."""
from typing import Any, Dict, List, Optional
from langchain.agents import AgentExecutor
from langchain.agents.agent import AgentOutputParser
from langchain.agents.agent_toolkits.powerbi.prompt import (
POWERBI_CHAT_PREFIX,
POWERBI_CHAT_SUFFIX,
)
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_memory import BaseChatMemory
from langchain.utilities.powerbi import PowerBIDataset
def create_pbi_chat_agent(
llm: BaseChatModel,
toolkit: Optional[PowerBIToolkit] = None,
powerbi: Optional[PowerBIDataset] = None,
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
prefix: str = POWERBI_CHAT_PREFIX,
suffix: str = POWERBI_CHAT_SUFFIX,
examples: Optional[str] = None,
input_variables: Optional[List[str]] = None,
memory: Optional[BaseChatMemory] = None,
top_k: int = 10,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct a Power BI agent from a Chat LLM and tools.
If you supply only a toolkit and no Power BI dataset, the same LLM is used for both.
"""
if toolkit is None:
if powerbi is None:
raise ValueError("Must provide either a toolkit or powerbi dataset")
toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
tools = toolkit.get_tools()
tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names
agent = ConversationalChatAgent.from_llm_and_tools(
llm=llm,
tools=tools,
system_message=prefix.format(top_k=top_k).format(tables=tables),
human_message=suffix,
input_variables=input_variables,
callback_manager=callback_manager,
output_parser=output_parser,
verbose=verbose,
**kwargs,
)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
memory=memory
or ConversationBufferMemory(memory_key="chat_history", return_messages=True),
verbose=verbose,
**(agent_executor_kwargs or {}),
)
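# Illustrative usage sketch: assumes an Azure AD identity with access to the
# Power BI dataset; the dataset id and table name below are placeholders.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from langchain.chat_models import ChatOpenAI
    powerbi = PowerBIDataset(
        dataset_id="<dataset-id>",
        table_names=["Sales"],
        credential=DefaultAzureCredential(),
    )
    agent = create_pbi_chat_agent(
        llm=ChatOpenAI(temperature=0), powerbi=powerbi, verbose=True
    )
    agent.run("How many rows are in the Sales table?")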
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~facebook_chat.py | import datetime
import json
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used.
Args:
row: dictionary containing message information.
"""
sender = row["sender_name"]
text = row["content"]
date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime(
"%Y-%m-%d %H:%M:%S"
)
return f"{sender} on {date}: {text}\n\n"
class FacebookChatLoader(BaseLoader):
"""Load `Facebook Chat` messages directory dump."""
def __init__(self, path: str):
"""Initialize with a path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
text = "".join(
concatenate_rows(message)
for message in d["messages"]
if message.get("content") and isinstance(message["content"], str)
)
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
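# Illustrative usage sketch: the path below is a placeholder for a single
# conversation file from a Facebook chat JSON export.
if __name__ == "__main__":
    loader = FacebookChatLoader("path/to/your_chat/message_1.json")
    docs = loader.load()
    print(docs[0].page_content[:200])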
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_transformers~doctran_text_qa.py | from typing import Any, Optional, Sequence
from langchain.schema import BaseDocumentTransformer, Document
from langchain.utils import get_from_env
class DoctranQATransformer(BaseDocumentTransformer):
"""Extract QA from text documents using doctran.
Arguments:
openai_api_key: OpenAI API key. Can also be specified via environment variable
``OPENAI_API_KEY``.
Example:
.. code-block:: python
from langchain.document_transformers import DoctranQATransformer
# Pass in openai_api_key or set env var OPENAI_API_KEY
qa_transformer = DoctranQATransformer()
transformed_document = await qa_transformer.atransform_documents(documents)
"""
def __init__(
self,
openai_api_key: Optional[str] = None,
openai_api_model: Optional[str] = None,
) -> None:
self.openai_api_key = openai_api_key or get_from_env(
"openai_api_key", "OPENAI_API_KEY"
)
self.openai_api_model = openai_api_model or get_from_env(
"openai_api_model", "OPENAI_API_MODEL"
)
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Extracts QA from text documents using doctran."""
try:
from doctran import Doctran
doctran = Doctran(
openai_api_key=self.openai_api_key, openai_model=self.openai_api_model
)
except ImportError:
raise ImportError(
"Install doctran to use this parser. (pip install doctran)"
)
for d in documents:
doctran_doc = (
await doctran.parse(content=d.page_content).interrogate().execute()
)
questions_and_answers = doctran_doc.extracted_properties.get(
"questions_and_answers"
)
d.metadata["questions_and_answers"] = questions_and_answers
return documents
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~gcs_directory.py | from typing import Callable, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.gcs_file import GCSFileLoader
from langchain.utilities.vertexai import get_client_info
class GCSDirectoryLoader(BaseLoader):
"""Load from GCS directory."""
def __init__(
self,
project_name: str,
bucket: str,
prefix: str = "",
loader_func: Optional[Callable[[str], BaseLoader]] = None,
):
"""Initialize with bucket and key name.
Args:
project_name: The ID of the project for the GCS bucket.
bucket: The name of the GCS bucket.
prefix: The prefix of the GCS bucket.
loader_func: A loader function that instantiates a loader based on a
file_path argument. If nothing is provided, the GCSFileLoader
would use its default loader.
"""
self.project_name = project_name
self.bucket = bucket
self.prefix = prefix
self._loader_func = loader_func
def load(self) -> List[Document]:
"""Load documents."""
try:
from google.cloud import storage
except ImportError:
raise ImportError(
"Could not import google-cloud-storage python package. "
"Please install it with `pip install google-cloud-storage`."
)
client = storage.Client(
project=self.project_name,
client_info=get_client_info(module="google-cloud-storage"),
)
docs = []
for blob in client.list_blobs(self.bucket, prefix=self.prefix):
# we shall just skip directories since GCSFileLoader creates
# intermediate directories on the fly
if blob.name.endswith("/"):
continue
loader = GCSFileLoader(
self.project_name, self.bucket, blob.name, loader_func=self._loader_func
)
docs.extend(loader.load())
return docs
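# Illustrative usage sketch: assumes Google Cloud credentials are configured
# locally; the project, bucket, and prefix names below are placeholders.
if __name__ == "__main__":
    loader = GCSDirectoryLoader(
        project_name="my-gcp-project", bucket="my-bucket", prefix="docs/"
    )
    docs = loader.load()
    print(f"Loaded {len(docs)} documents from GCS")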
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~docarray~hnsw.py | from __future__ import annotations
from typing import Any, List, Literal, Optional
from langchain.schema.embeddings import Embeddings
from langchain.vectorstores.docarray.base import (
DocArrayIndex,
_check_docarray_import,
)
class DocArrayHnswSearch(DocArrayIndex):
"""`HnswLib` storage using `DocArray` package.
To use it, you should have the ``docarray`` package with version >=0.32.0 installed.
You can install it with `pip install "langchain[docarray]"`.
"""
@classmethod
def from_params(
cls,
embedding: Embeddings,
work_dir: str,
n_dim: int,
dist_metric: Literal["cosine", "ip", "l2"] = "cosine",
max_elements: int = 1024,
index: bool = True,
ef_construction: int = 200,
ef: int = 10,
M: int = 16,
allow_replace_deleted: bool = True,
num_threads: int = 1,
**kwargs: Any,
) -> DocArrayHnswSearch:
"""Initialize DocArrayHnswSearch store.
Args:
embedding (Embeddings): Embedding function.
work_dir (str): path to the location where all the data will be stored.
n_dim (int): dimension of an embedding.
dist_metric (str): Distance metric for DocArrayHnswSearch can be one of:
"cosine", "ip", and "l2". Defaults to "cosine".
max_elements (int): Maximum number of vectors that can be stored.
Defaults to 1024.
index (bool): Whether an index should be built for this field.
Defaults to True.
ef_construction (int): defines a construction time/accuracy trade-off.
Defaults to 200.
ef (int): parameter controlling query time/accuracy trade-off.
Defaults to 10.
M (int): parameter that defines the maximum number of outgoing
connections in the graph. Defaults to 16.
allow_replace_deleted (bool): Enables replacing of deleted elements
with new added ones. Defaults to True.
num_threads (int): Sets the number of cpu threads to use. Defaults to 1.
**kwargs: Other keyword arguments to be passed to the get_doc_cls method.
"""
_check_docarray_import()
from docarray.index import HnswDocumentIndex
doc_cls = cls._get_doc_cls(
dim=n_dim,
space=dist_metric,
max_elements=max_elements,
index=index,
ef_construction=ef_construction,
ef=ef,
M=M,
allow_replace_deleted=allow_replace_deleted,
num_threads=num_threads,
**kwargs,
)
doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir) # type: ignore
return cls(doc_index, embedding)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
work_dir: Optional[str] = None,
n_dim: Optional[int] = None,
**kwargs: Any,
) -> DocArrayHnswSearch:
"""Create an DocArrayHnswSearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
work_dir (str): path to the location where all the data will be stored.
n_dim (int): dimension of an embedding.
**kwargs: Other keyword arguments to be passed to the __init__ method.
Returns:
DocArrayHnswSearch Vector Store
"""
if work_dir is None:
raise ValueError("`work_dir` parameter has not been set.")
if n_dim is None:
raise ValueError("`n_dim` parameter has not been set.")
store = cls.from_params(embedding, work_dir, n_dim, **kwargs)
store.add_texts(texts=texts, metadatas=metadatas)
return store
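# Illustrative usage sketch: assumes OPENAI_API_KEY is set and that n_dim matches
# the embedding dimensionality (1536 for OpenAI's text-embedding-ada-002); the
# work_dir path and sample texts below are placeholders.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    store = DocArrayHnswSearch.from_texts(
        texts=["harrison worked at kensho", "bears like to eat honey"],
        embedding=OpenAIEmbeddings(),
        work_dir="hnswlib_store/",
        n_dim=1536,
    )
    print(store.similarity_search("What did harrison do?", k=1))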
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~image_captions.py | from typing import Any, List, Tuple, Union
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class ImageCaptionLoader(BaseLoader):
"""Load image captions.
By default, the loader utilizes the pre-trained
Salesforce BLIP image captioning model.
https://huggingface.co/Salesforce/blip-image-captioning-base
"""
def __init__(
self,
path_images: Union[str, List[str]],
blip_processor: str = "Salesforce/blip-image-captioning-base",
blip_model: str = "Salesforce/blip-image-captioning-base",
):
"""
Initialize with a list of image paths
Args:
path_images: A list of image paths.
blip_processor: The name of the pre-trained BLIP processor.
blip_model: The name of the pre-trained BLIP model.
"""
if isinstance(path_images, str):
self.image_paths = [path_images]
else:
self.image_paths = path_images
self.blip_processor = blip_processor
self.blip_model = blip_model
def load(self) -> List[Document]:
"""
Load from a list of image files
"""
try:
from transformers import BlipForConditionalGeneration, BlipProcessor
except ImportError:
raise ImportError(
"`transformers` package not found, please install with "
"`pip install transformers`."
)
processor = BlipProcessor.from_pretrained(self.blip_processor)
model = BlipForConditionalGeneration.from_pretrained(self.blip_model)
results = []
for path_image in self.image_paths:
caption, metadata = self._get_captions_and_metadata(
model=model, processor=processor, path_image=path_image
)
doc = Document(page_content=caption, metadata=metadata)
results.append(doc)
return results
def _get_captions_and_metadata(
self, model: Any, processor: Any, path_image: str
) -> Tuple[str, dict]:
"""
Helper function for getting the captions and metadata of an image
"""
try:
from PIL import Image
except ImportError:
raise ImportError(
"`PIL` package not found, please install with `pip install pillow`"
)
try:
if path_image.startswith("http://") or path_image.startswith("https://"):
image = Image.open(requests.get(path_image, stream=True).raw).convert(
"RGB"
)
else:
image = Image.open(path_image).convert("RGB")
except Exception:
raise ValueError(f"Could not get image data for {path_image}")
inputs = processor(image, "an image of", return_tensors="pt")
output = model.generate(**inputs)
caption: str = processor.decode(output[0])
metadata: dict = {"image_path": path_image}
return caption, metadata
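# Illustrative usage sketch: requires the `transformers` and `pillow` packages;
# the image URL below is a placeholder.
if __name__ == "__main__":
    loader = ImageCaptionLoader(["https://example.com/path/to/some-image.jpg"])
    docs = loader.load()
    print(docs[0].page_content, docs[0].metadata)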
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~baichuan.py | import hashlib
import json
import logging
import time
from typing import Any, Dict, Iterator, List, Mapping, Optional, Type, Union
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel, _generate_from_stream
from langchain.pydantic_v1 import Field, SecretStr, root_validator
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
)
from langchain.schema.messages import (
AIMessageChunk,
BaseMessageChunk,
ChatMessageChunk,
HumanMessageChunk,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://api.baichuan-ai.com/v1"
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict.get("content", "") or "")
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
def _to_secret(value: Union[SecretStr, str]) -> SecretStr:
"""Convert a string to a SecretStr if needed."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)
# signature generation
def _signature(secret_key: SecretStr, payload: Dict[str, Any], timestamp: int) -> str:
input_str = secret_key.get_secret_value() + json.dumps(payload) + str(timestamp)
md5 = hashlib.md5()
md5.update(input_str.encode("utf-8"))
return md5.hexdigest()
class ChatBaichuan(BaseChatModel):
"""Baichuan chat models API by Baichuan Intelligent Technology.
For more information, see https://platform.baichuan-ai.com/docs/api
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"baichuan_api_key": "BAICHUAN_API_KEY",
"baichuan_secret_key": "BAICHUAN_SECRET_KEY",
}
@property
def lc_serializable(self) -> bool:
return True
baichuan_api_base: str = Field(default=DEFAULT_API_BASE)
"""Baichuan custom endpoints"""
baichuan_api_key: Optional[str] = None
"""Baichuan API Key"""
baichuan_secret_key: Optional[SecretStr] = None
"""Baichuan Secret Key"""
streaming: bool = False
"""Whether to stream the results or not."""
request_timeout: int = 60
"""request timeout for chat http requests"""
model = "Baichuan2-53B"
"""model name of Baichuan, default is `Baichuan2-53B`."""
temperature: float = 0.3
"""What sampling temperature to use."""
top_k: int = 5
"""What search sampling control to use."""
top_p: float = 0.85
"""What probability mass to use."""
with_search_enhance: bool = False
"""Whether to use search enhance, default is False."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for API call not explicitly specified."""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["baichuan_api_base"] = get_from_dict_or_env(
values,
"baichuan_api_base",
"BAICHUAN_API_BASE",
DEFAULT_API_BASE,
)
values["baichuan_api_key"] = get_from_dict_or_env(
values,
"baichuan_api_key",
"BAICHUAN_API_KEY",
)
values["baichuan_secret_key"] = _to_secret(
get_from_dict_or_env(
values,
"baichuan_secret_key",
"BAICHUAN_SECRET_KEY",
)
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Baichuan API."""
normal_params = {
"model": self.model,
"top_p": self.top_p,
"top_k": self.top_k,
"with_search_enhance": self.with_search_enhance,
}
return {**normal_params, **self.model_kwargs}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
response = res.json()
if response.get("code") != 0:
raise ValueError(f"Error from Baichuan api response: {response}")
return self._create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
res = self._chat(messages, **kwargs)
default_chunk_class = AIMessageChunk
for chunk in res.iter_lines():
response = json.loads(chunk)
if response.get("code") != 0:
raise ValueError(f"Error from Baichuan api response: {response}")
data = response.get("data")
for m in data.get("messages"):
chunk = _convert_delta_to_message_chunk(m, default_chunk_class)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content)
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
if self.baichuan_secret_key is None:
raise ValueError("Baichuan secret key is not set.")
parameters = {**self._default_params, **kwargs}
model = parameters.pop("model")
headers = parameters.pop("headers", {})
payload = {
"model": model,
"messages": [_convert_message_to_dict(m) for m in messages],
"parameters": parameters,
}
timestamp = int(time.time())
url = self.baichuan_api_base
if self.streaming:
url = f"{url}/stream"
url = f"{url}/chat"
res = requests.post(
url=url,
timeout=self.request_timeout,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {self.baichuan_api_key}",
"X-BC-Timestamp": str(timestamp),
"X-BC-Signature": _signature(
secret_key=self.baichuan_secret_key,
payload=payload,
timestamp=timestamp,
),
"X-BC-Sign-Algo": "MD5",
**headers,
},
json=payload,
stream=self.streaming,
)
return res
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for m in response["data"]["messages"]:
message = _convert_dict_to_message(m)
gen = ChatGeneration(message=message)
generations.append(gen)
token_usage = response["usage"]
llm_output = {"token_usage": token_usage, "model": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _llm_type(self) -> str:
return "baichuan-chat"
| [
"content"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~memory~test_upstash_redis.py | import json
import pytest
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories.upstash_redis import (
UpstashRedisChatMessageHistory,
)
from langchain.schema.messages import _message_to_dict
URL = "<UPSTASH_REDIS_REST_URL>"
TOKEN = "<UPSTASH_REDIS_REST_TOKEN>"
@pytest.mark.requires("upstash_redis")
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# setup Upstash Redis as a message store
message_history = UpstashRedisChatMessageHistory(
url=URL, token=TOKEN, ttl=10, session_id="my-test-session"
)
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([_message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# remove the record from Redis, so the next test run won't pick it up
memory.chat_memory.clear()
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~dashvector.py | from __future__ import annotations
import logging
import uuid
from typing import (
Any,
Iterable,
List,
Optional,
Tuple,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_env
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class DashVector(VectorStore):
"""`DashVector` vector store.
To use, you should have the ``dashvector`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import DashVector
from langchain.embeddings.openai import OpenAIEmbeddings
import dashvector
client = dashvector.Client.init(api_key="***")
client.create("langchain")
collection = client.get("langchain")
embeddings = OpenAIEmbeddings()
            vectorstore = DashVector(collection, embeddings, "text")
"""
def __init__(
self,
collection: Any,
embedding: Embeddings,
text_field: str,
):
"""Initialize with DashVector collection."""
try:
import dashvector
except ImportError:
raise ValueError(
"Could not import dashvector python package. "
"Please install it with `pip install dashvector`."
)
if not isinstance(collection, dashvector.Collection):
raise ValueError(
f"collection should be an instance of dashvector.Collection, "
f"bug got {type(collection)}"
)
self._collection = collection
self._embedding = embedding
self._text_field = text_field
def _similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[str] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query vector, along with scores"""
# query by vector
ret = self._collection.query(embedding, topk=k, filter=filter)
if not ret:
raise ValueError(
f"Fail to query docs by vector, error {self._collection.message}"
)
docs = []
for doc in ret:
metadata = doc.fields
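            # The raw text is stored as an ordinary field; pop it back out so the
            # remaining fields become the Document metadata.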
text = metadata.pop(self._text_field)
score = doc.score
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
batch_size: int = 25,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids associated with the texts.
batch_size: Optional batch size to upsert docs.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
ids = ids or [str(uuid.uuid4().hex) for _ in texts]
text_list = list(texts)
for i in range(0, len(text_list), batch_size):
# batch end
end = min(i + batch_size, len(text_list))
batch_texts = text_list[i:end]
batch_ids = ids[i:end]
batch_embeddings = self._embedding.embed_documents(list(batch_texts))
# batch metadatas
if metadatas:
batch_metadatas = metadatas[i:end]
else:
batch_metadatas = [{} for _ in range(i, end)]
for metadata, text in zip(batch_metadatas, batch_texts):
metadata[self._text_field] = text
# batch upsert to collection
docs = list(zip(batch_ids, batch_embeddings, batch_metadatas))
ret = self._collection.upsert(docs)
if not ret:
raise ValueError(
f"Fail to upsert docs to dashvector vector database,"
f"Error: {ret.message}"
)
return ids
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool:
"""Delete by vector ID.
Args:
ids: List of ids to delete.
Returns:
True if deletion is successful,
False otherwise.
"""
return bool(self._collection.delete(ids))
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to search documents similar to.
k: Number of documents to return. Default to 4.
filter: Doc fields filter conditions that meet the SQL where clause
specification.
Returns:
List of Documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_relevance_scores(query, k, filter)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
filter: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query text , alone with relevance scores.
Less is more similar, more is more dissimilar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
filter: Doc fields filter conditions that meet the SQL where clause
specification.
Returns:
List of Tuples of (doc, similarity_score)
"""
embedding = self._embedding.embed_query(query)
return self._similarity_search_with_score_by_vector(
embedding, k=k, filter=filter
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Doc fields filter conditions that meet the SQL where clause
specification.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self._similarity_search_with_score_by_vector(
embedding, k, filter
)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Doc fields filter conditions that meet the SQL where clause
specification.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult, filter
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Doc fields filter conditions that meet the SQL where clause
specification.
Returns:
List of Documents selected by maximal marginal relevance.
"""
# query by vector
ret = self._collection.query(
embedding, topk=fetch_k, filter=filter, include_vector=True
)
if not ret:
raise ValueError(
f"Fail to query docs by vector, error {self._collection.message}"
)
candidate_embeddings = [doc.vector for doc in ret]
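        # Re-rank the fetch_k candidates with maximal marginal relevance, trading off
        # similarity to the query against diversity among the selected documents.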
mmr_selected = maximal_marginal_relevance(
np.array(embedding), candidate_embeddings, lambda_mult, k
)
metadatas = [ret.output[i].fields for i in mmr_selected]
return [
Document(page_content=metadata.pop(self._text_field), metadata=metadata)
for metadata in metadatas
]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
dashvector_api_key: Optional[str] = None,
collection_name: str = "langchain",
text_field: str = "text",
batch_size: int = 25,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> DashVector:
"""Return DashVector VectorStore initialized from texts and embeddings.
This is the quick way to get started with dashvector vector store.
Example:
.. code-block:: python
from langchain.vectorstores import DashVector
from langchain.embeddings import OpenAIEmbeddings
import dashvector
embeddings = OpenAIEmbeddings()
dashvector = DashVector.from_documents(
docs,
embeddings,
dashvector_api_key="{DASHVECTOR_API_KEY}"
)
"""
try:
import dashvector
except ImportError:
raise ValueError(
"Could not import dashvector python package. "
"Please install it with `pip install dashvector`."
)
dashvector_api_key = dashvector_api_key or get_from_env(
"dashvector_api_key", "DASHVECTOR_API_KEY"
)
dashvector_client = dashvector.Client(api_key=dashvector_api_key)
dashvector_client.delete(collection_name)
collection = dashvector_client.get(collection_name)
if not collection:
dim = len(embedding.embed_query(texts[0]))
# create collection if not existed
resp = dashvector_client.create(collection_name, dimension=dim)
if resp:
collection = dashvector_client.get(collection_name)
else:
raise ValueError(
"Fail to create collection. " f"Error: {resp.message}."
)
dashvector_vector_db = cls(collection, embedding, text_field)
dashvector_vector_db.add_texts(texts, metadatas, ids, batch_size)
return dashvector_vector_db
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~azure_cognitive_services~image_analysis.py | from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import root_validator
from langchain.tools.azure_cognitive_services.utils import detect_file_src_type
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class AzureCogsImageAnalysisTool(BaseTool):
"""Tool that queries the Azure Cognitive Services Image Analysis API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40
"""
azure_cogs_key: str = "" #: :meta private:
azure_cogs_endpoint: str = "" #: :meta private:
vision_service: Any #: :meta private:
analysis_options: Any #: :meta private:
name: str = "azure_cognitive_services_image_analysis"
description: str = (
"A wrapper around Azure Cognitive Services Image Analysis. "
"Useful for when you need to analyze images. "
"Input should be a url to an image."
)
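    # Minimal usage sketch (illustrative only; assumes AZURE_COGS_KEY and
    # AZURE_COGS_ENDPOINT are set and azure-ai-vision is installed):
    #     tool = AzureCogsImageAnalysisTool()
    #     print(tool.run("https://example.com/some-image.jpg"))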
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(
values, "azure_cogs_key", "AZURE_COGS_KEY"
)
azure_cogs_endpoint = get_from_dict_or_env(
values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
)
try:
import azure.ai.vision as sdk
values["vision_service"] = sdk.VisionServiceOptions(
endpoint=azure_cogs_endpoint, key=azure_cogs_key
)
values["analysis_options"] = sdk.ImageAnalysisOptions()
values["analysis_options"].features = (
sdk.ImageAnalysisFeature.CAPTION
| sdk.ImageAnalysisFeature.OBJECTS
| sdk.ImageAnalysisFeature.TAGS
| sdk.ImageAnalysisFeature.TEXT
)
except ImportError:
raise ImportError(
"azure-ai-vision is not installed. "
"Run `pip install azure-ai-vision` to install."
)
return values
def _image_analysis(self, image_path: str) -> Dict:
try:
import azure.ai.vision as sdk
except ImportError:
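            # Unreachable in practice: validate_environment already raised an
            # ImportError at construction time if azure-ai-vision is missing.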
pass
image_src_type = detect_file_src_type(image_path)
if image_src_type == "local":
vision_source = sdk.VisionSource(filename=image_path)
elif image_src_type == "remote":
vision_source = sdk.VisionSource(url=image_path)
else:
raise ValueError(f"Invalid image path: {image_path}")
image_analyzer = sdk.ImageAnalyzer(
self.vision_service, vision_source, self.analysis_options
)
result = image_analyzer.analyze()
res_dict = {}
if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
if result.caption is not None:
res_dict["caption"] = result.caption.content
if result.objects is not None:
res_dict["objects"] = [obj.name for obj in result.objects]
if result.tags is not None:
res_dict["tags"] = [tag.name for tag in result.tags]
if result.text is not None:
res_dict["text"] = [line.content for line in result.text.lines]
else:
error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
raise RuntimeError(
f"Image analysis failed.\n"
f"Reason: {error_details.reason}\n"
f"Details: {error_details.message}"
)
return res_dict
def _format_image_analysis_result(self, image_analysis_result: Dict) -> str:
formatted_result = []
if "caption" in image_analysis_result:
formatted_result.append("Caption: " + image_analysis_result["caption"])
if (
"objects" in image_analysis_result
and len(image_analysis_result["objects"]) > 0
):
formatted_result.append(
"Objects: " + ", ".join(image_analysis_result["objects"])
)
if "tags" in image_analysis_result and len(image_analysis_result["tags"]) > 0:
formatted_result.append("Tags: " + ", ".join(image_analysis_result["tags"]))
if "text" in image_analysis_result and len(image_analysis_result["text"]) > 0:
formatted_result.append("Text: " + ", ".join(image_analysis_result["text"]))
return "\n".join(formatted_result)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
image_analysis_result = self._image_analysis(query)
if not image_analysis_result:
return "No good image analysis result was found"
return self._format_image_analysis_result(image_analysis_result)
except Exception as e:
raise RuntimeError(f"Error while running AzureCogsImageAnalysisTool: {e}")
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~bm25.py | from __future__ import annotations
from typing import Any, Callable, Dict, Iterable, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
def default_preprocessing_func(text: str) -> List[str]:
return text.split()
class BM25Retriever(BaseRetriever):
"""`BM25` retriever without Elasticsearch."""
vectorizer: Any
""" BM25 vectorizer."""
docs: List[Document]
""" List of documents."""
k: int = 4
""" Number of documents to return."""
preprocess_func: Callable[[str], List[str]] = default_preprocessing_func
""" Preprocessing function to use on the text before BM25 vectorization."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@classmethod
def from_texts(
cls,
texts: Iterable[str],
metadatas: Optional[Iterable[dict]] = None,
bm25_params: Optional[Dict[str, Any]] = None,
preprocess_func: Callable[[str], List[str]] = default_preprocessing_func,
**kwargs: Any,
) -> BM25Retriever:
"""
Create a BM25Retriever from a list of texts.
Args:
texts: A list of texts to vectorize.
metadatas: A list of metadata dicts to associate with each text.
bm25_params: Parameters to pass to the BM25 vectorizer.
preprocess_func: A function to preprocess each text before vectorization.
**kwargs: Any other arguments to pass to the retriever.
Returns:
A BM25Retriever instance.
"""
try:
from rank_bm25 import BM25Okapi
except ImportError:
raise ImportError(
"Could not import rank_bm25, please install with `pip install "
"rank_bm25`."
)
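        # rank_bm25 expects pre-tokenized documents, so each text is run through
        # preprocess_func (a plain whitespace split by default) before indexing.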
texts_processed = [preprocess_func(t) for t in texts]
bm25_params = bm25_params or {}
vectorizer = BM25Okapi(texts_processed, **bm25_params)
metadatas = metadatas or ({} for _ in texts)
docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
return cls(
vectorizer=vectorizer, docs=docs, preprocess_func=preprocess_func, **kwargs
)
@classmethod
def from_documents(
cls,
documents: Iterable[Document],
*,
bm25_params: Optional[Dict[str, Any]] = None,
preprocess_func: Callable[[str], List[str]] = default_preprocessing_func,
**kwargs: Any,
) -> BM25Retriever:
"""
Create a BM25Retriever from a list of Documents.
Args:
documents: A list of Documents to vectorize.
bm25_params: Parameters to pass to the BM25 vectorizer.
preprocess_func: A function to preprocess each text before vectorization.
**kwargs: Any other arguments to pass to the retriever.
Returns:
A BM25Retriever instance.
"""
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
return cls.from_texts(
texts=texts,
bm25_params=bm25_params,
metadatas=metadatas,
preprocess_func=preprocess_func,
**kwargs,
)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
processed_query = self.preprocess_func(query)
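        # BM25Okapi.get_top_n scores every stored document against the tokenized
        # query and returns the k best-matching Documents.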
return_docs = self.vectorizer.get_top_n(processed_query, self.docs, n=self.k)
return return_docs
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~schema~runnable~test_runnable.py | import sys
from functools import partial
from operator import itemgetter
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Sequence,
Union,
cast,
)
from uuid import UUID
import pytest
from freezegun import freeze_time
from pytest_mock import MockerFixture
from syrupy import SnapshotAssertion
from langchain.callbacks.manager import Callbacks, collect_runs
from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.log_stream import RunLog, RunLogPatch
from langchain.callbacks.tracers.schemas import Run
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models.fake import FakeListChatModel
from langchain.llms.fake import FakeListLLM, FakeStreamingListLLM
from langchain.load.dump import dumpd, dumps
from langchain.output_parsers.list import CommaSeparatedListOutputParser
from langchain.prompts import PromptTemplate
from langchain.prompts.base import StringPromptValue
from langchain.prompts.chat import (
ChatPromptTemplate,
ChatPromptValue,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain.pydantic_v1 import BaseModel
from langchain.schema.document import Document
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
HumanMessage,
SystemMessage,
)
from langchain.schema.output_parser import BaseOutputParser, StrOutputParser
from langchain.schema.retriever import BaseRetriever
from langchain.schema.runnable import (
RouterRunnable,
Runnable,
RunnableBranch,
RunnableConfig,
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
RunnableSequence,
RunnableWithFallbacks,
)
from langchain.schema.runnable.base import ConfigurableField, RunnableGenerator
from langchain.schema.runnable.utils import (
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
add,
)
from langchain.tools.base import BaseTool, tool
from langchain.tools.json.tool import JsonListKeysTool, JsonSpec
class FakeTracer(BaseTracer):
"""Fake tracer that records LangChain execution.
It replaces run ids with deterministic UUIDs for snapshotting."""
def __init__(self) -> None:
"""Initialize the tracer."""
super().__init__()
self.runs: List[Run] = []
self.uuids_map: Dict[UUID, UUID] = {}
self.uuids_generator = (
UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
)
def _replace_uuid(self, uuid: UUID) -> UUID:
if uuid not in self.uuids_map:
self.uuids_map[uuid] = next(self.uuids_generator)
return self.uuids_map[uuid]
def _copy_run(self, run: Run) -> Run:
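        # Recursively copy the run tree, replacing real UUIDs with deterministic ones
        # so that snapshot comparisons are stable across test runs.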
return run.copy(
update={
"id": self._replace_uuid(run.id),
"parent_run_id": self.uuids_map[run.parent_run_id]
if run.parent_run_id
else None,
"child_runs": [self._copy_run(child) for child in run.child_runs],
"execution_order": None,
"child_execution_order": None,
}
)
def _persist_run(self, run: Run) -> None:
"""Persist a run."""
self.runs.append(self._copy_run(run))
class FakeRunnable(Runnable[str, int]):
def invoke(
self,
input: str,
config: Optional[RunnableConfig] = None,
) -> int:
return len(input)
class FakeRetriever(BaseRetriever):
def _get_relevant_documents(
self,
query: str,
*,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
return [Document(page_content="foo"), Document(page_content="bar")]
async def _aget_relevant_documents(
self,
query: str,
*,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
return [Document(page_content="foo"), Document(page_content="bar")]
def test_schemas(snapshot: SnapshotAssertion) -> None:
fake = FakeRunnable() # str -> int
assert fake.input_schema.schema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert fake.output_schema.schema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
assert fake.config_schema(include=["tags", "metadata", "run_name"]).schema() == {
"title": "FakeRunnableConfig",
"type": "object",
"properties": {
"metadata": {"title": "Metadata", "type": "object"},
"run_name": {"title": "Run Name", "type": "string"},
"tags": {"items": {"type": "string"}, "title": "Tags", "type": "array"},
},
}
fake_bound = FakeRunnable().bind(a="b") # str -> int
assert fake_bound.input_schema.schema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert fake_bound.output_schema.schema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
fake_w_fallbacks = FakeRunnable().with_fallbacks((fake,)) # str -> int
assert fake_w_fallbacks.input_schema.schema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert fake_w_fallbacks.output_schema.schema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
def typed_lambda_impl(x: str) -> int:
return len(x)
typed_lambda = RunnableLambda(typed_lambda_impl) # str -> int
assert typed_lambda.input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "string",
}
assert typed_lambda.output_schema.schema() == {
"title": "RunnableLambdaOutput",
"type": "integer",
}
async def typed_async_lambda_impl(x: str) -> int:
return len(x)
typed_async_lambda: Runnable = RunnableLambda(typed_async_lambda_impl) # str -> int
assert typed_async_lambda.input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "string",
}
assert typed_async_lambda.output_schema.schema() == {
"title": "RunnableLambdaOutput",
"type": "integer",
}
fake_ret = FakeRetriever() # str -> List[Document]
assert fake_ret.input_schema.schema() == {
"title": "FakeRetrieverInput",
"type": "string",
}
assert fake_ret.output_schema.schema() == {
"title": "FakeRetrieverOutput",
"type": "array",
"items": {"$ref": "#/definitions/Document"},
"definitions": {
"Document": {
"title": "Document",
"description": "Class for storing a piece of text and associated metadata.", # noqa: E501
"type": "object",
"properties": {
"page_content": {"title": "Page Content", "type": "string"},
"metadata": {"title": "Metadata", "type": "object"},
"type": {
"title": "Type",
"enum": ["Document"],
"default": "Document",
"type": "string",
},
},
"required": ["page_content"],
}
},
}
fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]]
assert fake_llm.input_schema.schema() == snapshot
assert fake_llm.output_schema.schema() == {
"title": "FakeListLLMOutput",
"type": "string",
}
fake_chat = FakeListChatModel(responses=["a"]) # str -> List[List[str]]
assert fake_chat.input_schema.schema() == snapshot
assert fake_chat.output_schema.schema() == snapshot
chat_prompt = ChatPromptTemplate.from_messages(
[
MessagesPlaceholder(variable_name="history"),
("human", "Hello, how are you?"),
]
)
assert chat_prompt.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"history": {
"title": "History",
"type": "array",
"items": {
"anyOf": [
{"$ref": "#/definitions/AIMessage"},
{"$ref": "#/definitions/HumanMessage"},
{"$ref": "#/definitions/ChatMessage"},
{"$ref": "#/definitions/SystemMessage"},
{"$ref": "#/definitions/FunctionMessage"},
]
},
}
},
"definitions": {
"AIMessage": {
"title": "AIMessage",
"description": "A Message from an AI.",
"type": "object",
"properties": {
"content": {"title": "Content", "type": "string"},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "ai",
"enum": ["ai"],
"type": "string",
},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
},
"required": ["content"],
},
"HumanMessage": {
"title": "HumanMessage",
"description": "A Message from a human.",
"type": "object",
"properties": {
"content": {"title": "Content", "type": "string"},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "human",
"enum": ["human"],
"type": "string",
},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
},
"required": ["content"],
},
"ChatMessage": {
"title": "ChatMessage",
"description": "A Message that can be assigned an arbitrary speaker (i.e. role).", # noqa: E501
"type": "object",
"properties": {
"content": {"title": "Content", "type": "string"},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "chat",
"enum": ["chat"],
"type": "string",
},
"role": {"title": "Role", "type": "string"},
},
"required": ["content", "role"],
},
"SystemMessage": {
"title": "SystemMessage",
"description": "A Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.", # noqa: E501
"type": "object",
"properties": {
"content": {"title": "Content", "type": "string"},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "system",
"enum": ["system"],
"type": "string",
},
},
"required": ["content"],
},
"FunctionMessage": {
"title": "FunctionMessage",
"description": "A Message for passing the result of executing a function back to a model.", # noqa: E501
"type": "object",
"properties": {
"content": {"title": "Content", "type": "string"},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "function",
"enum": ["function"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
},
"required": ["content", "name"],
},
},
}
assert chat_prompt.output_schema.schema() == snapshot
prompt = PromptTemplate.from_template("Hello, {name}!")
assert prompt.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
}
assert prompt.output_schema.schema() == snapshot
prompt_mapper = PromptTemplate.from_template("Hello, {name}!").map()
assert prompt_mapper.input_schema.schema() == {
"definitions": {
"PromptInput": {
"properties": {"name": {"title": "Name", "type": "string"}},
"title": "PromptInput",
"type": "object",
}
},
"items": {"$ref": "#/definitions/PromptInput"},
"type": "array",
"title": "RunnableEachInput",
}
assert prompt_mapper.output_schema.schema() == snapshot
list_parser = CommaSeparatedListOutputParser()
assert list_parser.input_schema.schema() == snapshot
assert list_parser.output_schema.schema() == {
"title": "CommaSeparatedListOutputParserOutput",
"type": "array",
"items": {"type": "string"},
}
seq = prompt | fake_llm | list_parser
assert seq.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
}
assert seq.output_schema.schema() == {
"type": "array",
"items": {"type": "string"},
"title": "CommaSeparatedListOutputParserOutput",
}
router: Runnable = RouterRunnable({})
assert router.input_schema.schema() == {
"title": "RouterRunnableInput",
"$ref": "#/definitions/RouterInput",
"definitions": {
"RouterInput": {
"title": "RouterInput",
"type": "object",
"properties": {
"key": {"title": "Key", "type": "string"},
"input": {"title": "Input"},
},
"required": ["key", "input"],
}
},
}
assert router.output_schema.schema() == {"title": "RouterRunnableOutput"}
seq_w_map: Runnable = (
prompt
| fake_llm
| {
"original": RunnablePassthrough(input_type=str),
"as_list": list_parser,
"length": typed_lambda_impl,
}
)
assert seq_w_map.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
}
assert seq_w_map.output_schema.schema() == {
"title": "RunnableParallelOutput",
"type": "object",
"properties": {
"original": {"title": "Original", "type": "string"},
"length": {"title": "Length", "type": "integer"},
"as_list": {
"title": "As List",
"type": "array",
"items": {"type": "string"},
},
},
}
json_list_keys_tool = JsonListKeysTool(spec=JsonSpec(dict_={}))
assert json_list_keys_tool.input_schema.schema() == {
"title": "json_spec_list_keysSchema",
"type": "object",
"properties": {"tool_input": {"title": "Tool Input", "type": "string"}},
"required": ["tool_input"],
}
assert json_list_keys_tool.output_schema.schema() == {
"title": "JsonListKeysToolOutput"
}
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run."
)
def test_lambda_schemas() -> None:
first_lambda = lambda x: x["hello"] # noqa: E731
assert RunnableLambda(first_lambda).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"hello": {"title": "Hello"}},
}
second_lambda = lambda x, y: (x["hello"], x["bye"], y["bah"]) # noqa: E731
assert RunnableLambda(
second_lambda, # type: ignore[arg-type]
).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"hello": {"title": "Hello"}, "bye": {"title": "Bye"}},
}
def get_value(input): # type: ignore[no-untyped-def]
return input["variable_name"]
assert RunnableLambda(get_value).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"variable_name": {"title": "Variable Name"}},
}
async def aget_value(input): # type: ignore[no-untyped-def]
return (input["variable_name"], input.get("another"))
assert RunnableLambda(aget_value).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {
"another": {"title": "Another"},
"variable_name": {"title": "Variable Name"},
},
}
async def aget_values(input): # type: ignore[no-untyped-def]
return {
"hello": input["variable_name"],
"bye": input["variable_name"],
"byebye": input["yo"],
}
assert RunnableLambda(aget_values).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {
"variable_name": {"title": "Variable Name"},
"yo": {"title": "Yo"},
},
}
def test_schema_complex_seq() -> None:
prompt1 = ChatPromptTemplate.from_template("what is the city {person} is from?")
prompt2 = ChatPromptTemplate.from_template(
"what country is the city {city} in? respond in {language}"
)
model = FakeListChatModel(responses=[""])
chain1 = prompt1 | model | StrOutputParser()
chain2: Runnable = (
{"city": chain1, "language": itemgetter("language")}
| prompt2
| model
| StrOutputParser()
)
assert chain2.input_schema.schema() == {
"title": "RunnableParallelInput",
"type": "object",
"properties": {
"person": {"title": "Person", "type": "string"},
"language": {"title": "Language"},
},
}
assert chain2.output_schema.schema() == {
"title": "StrOutputParserOutput",
"type": "string",
}
assert chain2.with_types(input_type=str).input_schema.schema() == {
"title": "RunnableBindingInput",
"type": "string",
}
assert chain2.with_types(input_type=int).output_schema.schema() == {
"title": "StrOutputParserOutput",
"type": "string",
}
class InputType(BaseModel):
person: str
assert chain2.with_types(input_type=InputType).input_schema.schema() == {
"title": "InputType",
"type": "object",
"properties": {"person": {"title": "Person", "type": "string"}},
"required": ["person"],
}
def test_schema_chains() -> None:
model = FakeListChatModel(responses=[""])
stuff_chain = load_summarize_chain(model)
assert stuff_chain.input_schema.schema() == {
"title": "CombineDocumentsInput",
"type": "object",
"properties": {
"input_documents": {
"title": "Input Documents",
"type": "array",
"items": {"$ref": "#/definitions/Document"},
}
},
"definitions": {
"Document": {
"title": "Document",
"description": "Class for storing a piece of text and associated metadata.", # noqa: E501
"type": "object",
"properties": {
"page_content": {"title": "Page Content", "type": "string"},
"metadata": {"title": "Metadata", "type": "object"},
"type": {
"title": "Type",
"type": "string",
"enum": ["Document"],
"default": "Document",
},
},
"required": ["page_content"],
}
},
}
assert stuff_chain.output_schema.schema() == {
"title": "CombineDocumentsOutput",
"type": "object",
"properties": {"output_text": {"title": "Output Text", "type": "string"}},
}
mapreduce_chain = load_summarize_chain(
model, "map_reduce", return_intermediate_steps=True
)
assert mapreduce_chain.input_schema.schema() == {
"title": "CombineDocumentsInput",
"type": "object",
"properties": {
"input_documents": {
"title": "Input Documents",
"type": "array",
"items": {"$ref": "#/definitions/Document"},
}
},
"definitions": {
"Document": {
"title": "Document",
"description": "Class for storing a piece of text and associated metadata.", # noqa: E501
"type": "object",
"properties": {
"page_content": {"title": "Page Content", "type": "string"},
"metadata": {"title": "Metadata", "type": "object"},
"type": {
"title": "Type",
"type": "string",
"enum": ["Document"],
"default": "Document",
},
},
"required": ["page_content"],
}
},
}
assert mapreduce_chain.output_schema.schema() == {
"title": "MapReduceDocumentsOutput",
"type": "object",
"properties": {
"output_text": {"title": "Output Text", "type": "string"},
"intermediate_steps": {
"title": "Intermediate Steps",
"type": "array",
"items": {"type": "string"},
},
},
}
maprerank_chain = load_qa_chain(model, "map_rerank", metadata_keys=["hello"])
assert maprerank_chain.input_schema.schema() == {
"title": "CombineDocumentsInput",
"type": "object",
"properties": {
"input_documents": {
"title": "Input Documents",
"type": "array",
"items": {"$ref": "#/definitions/Document"},
}
},
"definitions": {
"Document": {
"title": "Document",
"description": "Class for storing a piece of text and associated metadata.", # noqa: E501
"type": "object",
"properties": {
"page_content": {"title": "Page Content", "type": "string"},
"metadata": {"title": "Metadata", "type": "object"},
"type": {
"title": "Type",
"type": "string",
"enum": ["Document"],
"default": "Document",
},
},
"required": ["page_content"],
}
},
}
assert maprerank_chain.output_schema.schema() == {
"title": "MapRerankOutput",
"type": "object",
"properties": {
"output_text": {"title": "Output Text", "type": "string"},
"hello": {"title": "Hello"},
},
}
def test_configurable_fields() -> None:
fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]]
assert fake_llm.invoke("...") == "a"
fake_llm_configurable = fake_llm.configurable_fields(
responses=ConfigurableField(
id="llm_responses",
name="LLM Responses",
description="A list of fake responses for this LLM",
)
)
assert fake_llm_configurable.invoke("...") == "a"
assert fake_llm_configurable.config_schema(include=["configurable"]).schema() == {
"title": "RunnableConfigurableFieldsConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"llm_responses": {
"title": "LLM Responses",
"description": "A list of fake responses for this LLM",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
}
},
}
},
}
fake_llm_configured = fake_llm_configurable.with_config(
configurable={"llm_responses": ["b"]}
)
assert fake_llm_configured.invoke("...") == "b"
prompt = PromptTemplate.from_template("Hello, {name}!")
assert prompt.invoke({"name": "John"}) == StringPromptValue(text="Hello, John!")
prompt_configurable = prompt.configurable_fields(
template=ConfigurableField(
id="prompt_template",
name="Prompt Template",
description="The prompt template for this chain",
)
)
assert prompt_configurable.invoke({"name": "John"}) == StringPromptValue(
text="Hello, John!"
)
assert prompt_configurable.config_schema(include=["configurable"]).schema() == {
"title": "RunnableConfigurableFieldsConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"prompt_template": {
"title": "Prompt Template",
"description": "The prompt template for this chain",
"default": "Hello, {name}!",
"type": "string",
}
},
}
},
}
prompt_configured = prompt_configurable.with_config(
configurable={"prompt_template": "Hello, {name}! {name}!"}
)
assert prompt_configured.invoke({"name": "John"}) == StringPromptValue(
text="Hello, John! John!"
)
assert prompt_configurable.with_config(
configurable={"prompt_template": "Hello {name} in {lang}"}
).input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"lang": {"title": "Lang", "type": "string"},
"name": {"title": "Name", "type": "string"},
},
}
chain_configurable = prompt_configurable | fake_llm_configurable | StrOutputParser()
assert chain_configurable.invoke({"name": "John"}) == "a"
assert chain_configurable.config_schema(include=["configurable"]).schema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"llm_responses": {
"title": "LLM Responses",
"description": "A list of fake responses for this LLM",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
},
"prompt_template": {
"title": "Prompt Template",
"description": "The prompt template for this chain",
"default": "Hello, {name}!",
"type": "string",
},
},
}
},
}
assert (
chain_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name} {lang}!",
"llm_responses": ["c"],
}
).invoke({"name": "John", "lang": "en"})
== "c"
)
assert chain_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name} {lang}!",
"llm_responses": ["c"],
}
).input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"lang": {"title": "Lang", "type": "string"},
"name": {"title": "Name", "type": "string"},
},
}
chain_with_map_configurable: Runnable = prompt_configurable | {
"llm1": fake_llm_configurable | StrOutputParser(),
"llm2": fake_llm_configurable | StrOutputParser(),
"llm3": fake_llm.configurable_fields(
responses=ConfigurableField("other_responses")
)
| StrOutputParser(),
}
assert chain_with_map_configurable.invoke({"name": "John"}) == {
"llm1": "a",
"llm2": "a",
"llm3": "a",
}
assert chain_with_map_configurable.config_schema(
include=["configurable"]
).schema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"llm_responses": {
"title": "LLM Responses",
"description": "A list of fake responses for this LLM",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
},
"other_responses": {
"title": "Other Responses",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
},
"prompt_template": {
"title": "Prompt Template",
"description": "The prompt template for this chain",
"default": "Hello, {name}!",
"type": "string",
},
},
}
},
}
assert chain_with_map_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name}!",
"llm_responses": ["c"],
"other_responses": ["d"],
}
).invoke({"name": "John"}) == {"llm1": "c", "llm2": "c", "llm3": "d"}
def test_configurable_alts_factory() -> None:
fake_llm = FakeListLLM(responses=["a"]).configurable_alternatives(
ConfigurableField(id="llm", name="LLM"),
chat=partial(FakeListLLM, responses=["b"]),
)
assert fake_llm.invoke("...") == "a"
assert fake_llm.with_config(configurable={"llm": "chat"}).invoke("...") == "b"
def test_configurable_fields_example() -> None:
fake_chat = FakeListChatModel(responses=["b"]).configurable_fields(
responses=ConfigurableFieldMultiOption(
id="chat_responses",
name="Chat Responses",
options={
"hello": "A good morning to you!",
"bye": "See you later!",
"helpful": "How can I help you?",
},
default=["hello", "bye"],
)
)
fake_llm = (
FakeListLLM(responses=["a"])
.configurable_fields(
responses=ConfigurableField(
id="llm_responses",
name="LLM Responses",
description="A list of fake responses for this LLM",
)
)
.configurable_alternatives(
ConfigurableField(id="llm", name="LLM"),
chat=fake_chat | StrOutputParser(),
)
)
prompt = PromptTemplate.from_template("Hello, {name}!").configurable_fields(
template=ConfigurableFieldSingleOption(
id="prompt_template",
name="Prompt Template",
description="The prompt template for this chain",
options={
"hello": "Hello, {name}!",
"good_morning": "A very good morning to you, {name}!",
},
default="hello",
)
)
# deduplication of configurable fields
chain_configurable = prompt | fake_llm | (lambda x: {"name": x}) | prompt | fake_llm
assert chain_configurable.invoke({"name": "John"}) == "a"
assert chain_configurable.config_schema(include=["configurable"]).schema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"LLM": {
"title": "LLM",
"description": "An enumeration.",
"enum": ["chat", "default"],
"type": "string",
},
"Chat_Responses": {
"description": "An enumeration.",
"enum": ["hello", "bye", "helpful"],
"title": "Chat Responses",
"type": "string",
},
"Prompt_Template": {
"description": "An enumeration.",
"enum": ["hello", "good_morning"],
"title": "Prompt Template",
"type": "string",
},
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"chat_responses": {
"default": ["hello", "bye"],
"items": {"$ref": "#/definitions/Chat_Responses"},
"title": "Chat Responses",
"type": "array",
},
"llm": {
"title": "LLM",
"default": "default",
"allOf": [{"$ref": "#/definitions/LLM"}],
},
"llm_responses": {
"title": "LLM Responses",
"description": "A list of fake responses for this LLM",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
},
"prompt_template": {
"title": "Prompt Template",
"description": "The prompt template for this chain",
"default": "hello",
"allOf": [{"$ref": "#/definitions/Prompt_Template"}],
},
},
},
},
}
with pytest.raises(ValueError):
chain_configurable.with_config(configurable={"llm123": "chat"})
assert (
chain_configurable.with_config(configurable={"llm": "chat"}).invoke(
{"name": "John"}
)
== "A good morning to you!"
)
assert (
chain_configurable.with_config(
configurable={"llm": "chat", "chat_responses": ["helpful"]}
).invoke({"name": "John"})
== "How can I help you?"
)
@pytest.mark.asyncio
async def test_passthrough_tap_async(mocker: MockerFixture) -> None:
fake = FakeRunnable()
mock = mocker.Mock()
seq: Runnable = fake | RunnablePassthrough(mock)
assert await seq.ainvoke("hello") == 5
assert mock.call_args_list == [mocker.call(5)]
mock.reset_mock()
assert [
part async for part in seq.astream("hello", dict(metadata={"key": "value"}))
] == [5]
assert mock.call_args_list == [mocker.call(5)]
mock.reset_mock()
assert seq.invoke("hello") == 5
assert mock.call_args_list == [mocker.call(5)]
mock.reset_mock()
assert [part for part in seq.stream("hello", dict(metadata={"key": "value"}))] == [
5
]
assert mock.call_args_list == [mocker.call(5)]
mock.reset_mock()
@pytest.mark.asyncio
async def test_with_config(mocker: MockerFixture) -> None:
fake = FakeRunnable()
spy = mocker.spy(fake, "invoke")
assert fake.with_config(tags=["a-tag"]).invoke("hello") == 5
assert spy.call_args_list == [
mocker.call("hello", dict(tags=["a-tag"])),
]
spy.reset_mock()
fake_1: Runnable = RunnablePassthrough()
fake_2: Runnable = RunnablePassthrough()
spy_seq_step = mocker.spy(fake_1.__class__, "invoke")
sequence = fake_1.with_config(tags=["a-tag"]) | fake_2.with_config(
tags=["b-tag"], max_concurrency=5
)
assert sequence.invoke("hello") == "hello"
assert len(spy_seq_step.call_args_list) == 2
for i, call in enumerate(spy_seq_step.call_args_list):
assert call.args[1] == "hello"
if i == 0:
assert call.args[2].get("tags") == ["a-tag"]
assert call.args[2].get("max_concurrency") is None
else:
assert call.args[2].get("tags") == ["b-tag"]
assert call.args[2].get("max_concurrency") == 5
mocker.stop(spy_seq_step)
assert [
*fake.with_config(tags=["a-tag"]).stream(
"hello", dict(metadata={"key": "value"})
)
] == [5]
assert spy.call_args_list == [
mocker.call("hello", dict(tags=["a-tag"], metadata={"key": "value"})),
]
spy.reset_mock()
assert fake.with_config(recursion_limit=5).batch(
["hello", "wooorld"], [dict(tags=["a-tag"]), dict(metadata={"key": "value"})]
) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(
sorted(spy.call_args_list, key=lambda x: 0 if x.args[0] == "hello" else 1)
):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
if i == 0:
assert call.args[1].get("recursion_limit") == 5
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {}
else:
assert call.args[1].get("recursion_limit") == 5
assert call.args[1].get("tags") == []
assert call.args[1].get("metadata") == {"key": "value"}
spy.reset_mock()
assert fake.with_config(metadata={"a": "b"}).batch(
["hello", "wooorld"], dict(tags=["a-tag"])
) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(spy.call_args_list):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {"a": "b"}
spy.reset_mock()
handler = ConsoleCallbackHandler()
assert (
await fake.with_config(metadata={"a": "b"}).ainvoke(
"hello", config={"callbacks": [handler]}
)
== 5
)
assert spy.call_args_list == [
mocker.call("hello", dict(callbacks=[handler], metadata={"a": "b"})),
]
spy.reset_mock()
assert [
part async for part in fake.with_config(metadata={"a": "b"}).astream("hello")
] == [5]
assert spy.call_args_list == [
mocker.call("hello", dict(metadata={"a": "b"})),
]
spy.reset_mock()
assert await fake.with_config(recursion_limit=5, tags=["c"]).abatch(
["hello", "wooorld"], dict(metadata={"key": "value"})
) == [
5,
7,
]
assert spy.call_args_list == [
mocker.call(
"hello",
dict(
metadata={"key": "value"},
tags=["c"],
callbacks=None,
recursion_limit=5,
),
),
mocker.call(
"wooorld",
dict(
metadata={"key": "value"},
tags=["c"],
callbacks=None,
recursion_limit=5,
),
),
]
@pytest.mark.asyncio
async def test_default_method_implementations(mocker: MockerFixture) -> None:
fake = FakeRunnable()
spy = mocker.spy(fake, "invoke")
assert fake.invoke("hello", dict(tags=["a-tag"])) == 5
assert spy.call_args_list == [
mocker.call("hello", dict(tags=["a-tag"])),
]
spy.reset_mock()
assert [*fake.stream("hello", dict(metadata={"key": "value"}))] == [5]
assert spy.call_args_list == [
mocker.call("hello", dict(metadata={"key": "value"})),
]
spy.reset_mock()
assert fake.batch(
["hello", "wooorld"], [dict(tags=["a-tag"]), dict(metadata={"key": "value"})]
) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(spy.call_args_list):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
if i == 0:
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {}
else:
assert call.args[1].get("tags") == []
assert call.args[1].get("metadata") == {"key": "value"}
spy.reset_mock()
assert fake.batch(["hello", "wooorld"], dict(tags=["a-tag"])) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(spy.call_args_list):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {}
spy.reset_mock()
assert await fake.ainvoke("hello", config={"callbacks": []}) == 5
assert spy.call_args_list == [
mocker.call("hello", dict(callbacks=[])),
]
spy.reset_mock()
assert [part async for part in fake.astream("hello")] == [5]
assert spy.call_args_list == [
mocker.call("hello", None),
]
spy.reset_mock()
assert await fake.abatch(["hello", "wooorld"], dict(metadata={"key": "value"})) == [
5,
7,
]
assert spy.call_args_list == [
mocker.call(
"hello",
dict(
metadata={"key": "value"},
tags=[],
callbacks=None,
recursion_limit=25,
),
),
mocker.call(
"wooorld",
dict(
metadata={"key": "value"},
tags=[],
callbacks=None,
recursion_limit=25,
),
),
]
@pytest.mark.asyncio
async def test_prompt() -> None:
prompt = ChatPromptTemplate.from_messages(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessagePromptTemplate.from_template("{question}"),
]
)
expected = ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert prompt.invoke({"question": "What is your name?"}) == expected
assert prompt.batch(
[
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
]
) == [
expected,
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your favorite color?"),
]
),
]
assert [*prompt.stream({"question": "What is your name?"})] == [expected]
assert await prompt.ainvoke({"question": "What is your name?"}) == expected
assert await prompt.abatch(
[
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
]
) == [
expected,
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your favorite color?"),
]
),
]
assert [
part async for part in prompt.astream({"question": "What is your name?"})
] == [expected]
stream_log = [
part async for part in prompt.astream_log({"question": "What is your name?"})
]
assert len(stream_log[0].ops) == 1
assert stream_log[0].ops[0]["op"] == "replace"
assert stream_log[0].ops[0]["path"] == ""
assert stream_log[0].ops[0]["value"]["logs"] == {}
assert stream_log[0].ops[0]["value"]["final_output"] is None
assert stream_log[0].ops[0]["value"]["streamed_output"] == []
assert isinstance(stream_log[0].ops[0]["value"]["id"], str)
assert stream_log[1:] == [
RunLogPatch(
{
"op": "replace",
"path": "/final_output",
"value": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
}
),
RunLogPatch({"op": "add", "path": "/streamed_output/-", "value": expected}),
]
stream_log_state = [
part
async for part in prompt.astream_log(
{"question": "What is your name?"}, diff=False
)
]
# remove random id
stream_log[0].ops[0]["value"]["id"] = "00000000-0000-0000-0000-000000000000"
stream_log_state[-1].ops[0]["value"]["id"] = "00000000-0000-0000-0000-000000000000"
stream_log_state[-1].state["id"] = "00000000-0000-0000-0000-000000000000"
# assert output with diff=False matches output with diff=True
assert stream_log_state[-1].ops == [op for chunk in stream_log for op in chunk.ops]
assert stream_log_state[-1] == RunLog(
*[op for chunk in stream_log for op in chunk.ops],
state={
"final_output": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
"id": "00000000-0000-0000-0000-000000000000",
"logs": {},
"streamed_output": [
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
],
},
)
def test_prompt_template_params() -> None:
prompt = ChatPromptTemplate.from_template(
"Respond to the following question: {question}"
)
result = prompt.invoke(
{
"question": "test",
"topic": "test",
}
)
assert result == ChatPromptValue(
messages=[HumanMessage(content="Respond to the following question: test")]
)
with pytest.raises(KeyError):
prompt.invoke({})
@pytest.mark.asyncio
@freeze_time("2023-01-01")
async def test_prompt_with_chat_model(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["foo"])
chain = prompt | chat
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == []
assert chain.last == chat
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == AIMessage(content="foo")
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
# Test batch
prompt_spy = mocker.spy(prompt.__class__, "batch")
chat_spy = mocker.spy(chat.__class__, "batch")
tracer = FakeTracer()
assert chain.batch(
[
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
],
dict(callbacks=[tracer]),
) == [
AIMessage(content="foo"),
AIMessage(content="foo"),
]
assert prompt_spy.call_args.args[1] == [
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
]
assert chat_spy.call_args.args[1] == [
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your favorite color?"),
]
),
]
assert (
len(
[
r
for r in tracer.runs
if r.parent_run_id is None and len(r.child_runs) == 2
]
)
== 2
), "Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)"
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
# Test stream
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "stream")
tracer = FakeTracer()
assert [
*chain.stream({"question": "What is your name?"}, dict(callbacks=[tracer]))
] == [
AIMessageChunk(content="f"),
AIMessageChunk(content="o"),
AIMessageChunk(content="o"),
]
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
@pytest.mark.asyncio
@freeze_time("2023-01-01")
async def test_prompt_with_llm(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeListLLM(responses=["foo", "bar"])
chain = prompt | llm
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == []
assert chain.last == llm
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
llm_spy = mocker.spy(llm.__class__, "ainvoke")
tracer = FakeTracer()
assert (
await chain.ainvoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
)
== "foo"
)
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(llm_spy)
# Test batch
prompt_spy = mocker.spy(prompt.__class__, "abatch")
llm_spy = mocker.spy(llm.__class__, "abatch")
tracer = FakeTracer()
assert await chain.abatch(
[
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
],
dict(callbacks=[tracer]),
) == ["bar", "foo"]
assert prompt_spy.call_args.args[1] == [
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
]
assert llm_spy.call_args.args[1] == [
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your favorite color?"),
]
),
]
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(llm_spy)
# Test stream
prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
llm_spy = mocker.spy(llm.__class__, "astream")
tracer = FakeTracer()
assert [
token
async for token in chain.astream(
{"question": "What is your name?"}, dict(callbacks=[tracer])
)
] == ["bar"]
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
prompt_spy.reset_mock()
llm_spy.reset_mock()
stream_log = [
part async for part in chain.astream_log({"question": "What is your name?"})
]
# remove ids from logs
for part in stream_log:
for op in part.ops:
if (
isinstance(op["value"], dict)
and "id" in op["value"]
and not isinstance(op["value"]["id"], list) # serialized lc id
):
del op["value"]["id"]
assert stream_log == [
RunLogPatch(
{
"op": "replace",
"path": "",
"value": {
"logs": {},
"final_output": None,
"streamed_output": [],
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "ChatPromptTemplate",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:1"],
"type": "prompt",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate/final_output",
"value": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
},
{
"op": "add",
"path": "/logs/ChatPromptTemplate/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/FakeListLLM",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "FakeListLLM",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:2"],
"type": "llm",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/FakeListLLM/final_output",
"value": {
"generations": [
[{"generation_info": None, "text": "foo", "type": "Generation"}]
],
"llm_output": None,
"run": None,
},
},
{
"op": "add",
"path": "/logs/FakeListLLM/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch({"op": "add", "path": "/streamed_output/-", "value": "foo"}),
RunLogPatch(
{"op": "replace", "path": "/final_output", "value": {"output": "foo"}}
),
]
@pytest.mark.asyncio
@freeze_time("2023-01-01")
async def test_stream_log_retriever() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{documents}"
+ "{question}"
)
llm = FakeListLLM(responses=["foo", "bar"])
chain: Runnable = (
{"documents": FakeRetriever(), "question": itemgetter("question")}
| prompt
| {"one": llm, "two": llm}
)
stream_log = [
part async for part in chain.astream_log({"question": "What is your name?"})
]
# remove ids from logs
for part in stream_log:
for op in part.ops:
if (
isinstance(op["value"], dict)
and "id" in op["value"]
and not isinstance(op["value"]["id"], list) # serialized lc id
):
del op["value"]["id"]
assert stream_log[:-9] in [
[
RunLogPatch(
{
"op": "replace",
"path": "",
"value": {
"logs": {},
"final_output": None,
"streamed_output": [],
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableParallel",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "RunnableParallel",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:1"],
"type": "chain",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableLambda",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "RunnableLambda",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["map:key:question"],
"type": "chain",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableLambda/final_output",
"value": {"output": "What is your name?"},
},
{
"op": "add",
"path": "/logs/RunnableLambda/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/Retriever",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "Retriever",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["map:key:documents"],
"type": "retriever",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/Retriever/final_output",
"value": {
"documents": [
Document(page_content="foo"),
Document(page_content="bar"),
]
},
},
{
"op": "add",
"path": "/logs/Retriever/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableParallel/final_output",
"value": {
"documents": [
Document(page_content="foo"),
Document(page_content="bar"),
],
"question": "What is your name?",
},
},
{
"op": "add",
"path": "/logs/RunnableParallel/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "ChatPromptTemplate",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:2"],
"type": "prompt",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate/final_output",
"value": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(
content="[Document(page_content='foo'), Document(page_content='bar')]" # noqa: E501
),
HumanMessage(content="What is your name?"),
]
),
},
{
"op": "add",
"path": "/logs/ChatPromptTemplate/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
],
[
RunLogPatch(
{
"op": "replace",
"path": "",
"value": {"final_output": None, "logs": {}, "streamed_output": []},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableParallel",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "RunnableParallel",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:1"],
"type": "chain",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/Retriever",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "Retriever",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["map:key:documents"],
"type": "retriever",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableLambda",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "RunnableLambda",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["map:key:question"],
"type": "chain",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableLambda/final_output",
"value": {"output": "What is your name?"},
},
{
"op": "add",
"path": "/logs/RunnableLambda/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/Retriever/final_output",
"value": {
"documents": [
Document(page_content="foo"),
Document(page_content="bar"),
]
},
},
{
"op": "add",
"path": "/logs/Retriever/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableParallel/final_output",
"value": {
"documents": [
Document(page_content="foo"),
Document(page_content="bar"),
],
"question": "What is your name?",
},
},
{
"op": "add",
"path": "/logs/RunnableParallel/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "ChatPromptTemplate",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:2"],
"type": "prompt",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate/final_output",
"value": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(
content="[Document(page_content='foo'), Document(page_content='bar')]" # noqa: E501
),
HumanMessage(content="What is your name?"),
]
),
},
{
"op": "add",
"path": "/logs/ChatPromptTemplate/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
],
]
assert sorted(cast(RunLog, add(stream_log)).state["logs"]) == [
"ChatPromptTemplate",
"FakeListLLM",
"FakeListLLM:2",
"Retriever",
"RunnableLambda",
"RunnableParallel",
"RunnableParallel:2",
]
@pytest.mark.asyncio
@freeze_time("2023-01-01")
async def test_prompt_with_llm_and_async_lambda(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeListLLM(responses=["foo", "bar"])
async def passthrough(input: Any) -> Any:
return input
chain = prompt | llm | passthrough
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [llm]
assert chain.last == RunnableLambda(func=passthrough)
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
llm_spy = mocker.spy(llm.__class__, "ainvoke")
tracer = FakeTracer()
assert (
await chain.ainvoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
)
== "foo"
)
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(llm_spy)
@freeze_time("2023-01-01")
def test_prompt_with_chat_model_and_parser(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["foo, bar"])
parser = CommaSeparatedListOutputParser()
chain = prompt | chat | parser
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [chat]
assert chain.last == parser
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
parser_spy = mocker.spy(parser.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == ["foo", "bar"]
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert parser_spy.call_args.args[1] == AIMessage(content="foo, bar")
assert tracer.runs == snapshot
@freeze_time("2023-01-01")
def test_combining_sequences(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["foo, bar"])
parser = CommaSeparatedListOutputParser()
chain = prompt | chat | parser
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [chat]
assert chain.last == parser
if sys.version_info >= (3, 9):
assert dumps(chain, pretty=True) == snapshot
prompt2 = (
SystemMessagePromptTemplate.from_template("You are a nicer assistant.")
+ "{question}"
)
chat2 = FakeListChatModel(responses=["baz, qux"])
parser2 = CommaSeparatedListOutputParser()
input_formatter: RunnableLambda[List[str], Dict[str, Any]] = RunnableLambda(
lambda x: {"question": x[0] + x[1]}
)
chain2 = cast(RunnableSequence, input_formatter | prompt2 | chat2 | parser2)
assert isinstance(chain, RunnableSequence)
assert chain2.first == input_formatter
assert chain2.middle == [prompt2, chat2]
assert chain2.last == parser2
if sys.version_info >= (3, 9):
assert dumps(chain2, pretty=True) == snapshot
combined_chain = cast(RunnableSequence, chain | chain2)
assert combined_chain.first == prompt
assert combined_chain.middle == [
chat,
parser,
input_formatter,
prompt2,
chat2,
]
assert combined_chain.last == parser2
if sys.version_info >= (3, 9):
assert dumps(combined_chain, pretty=True) == snapshot
# Test invoke
tracer = FakeTracer()
assert combined_chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == ["baz", "qux"]
if sys.version_info >= (3, 9):
assert tracer.runs == snapshot
@freeze_time("2023-01-01")
def test_seq_dict_prompt_llm(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
passthrough = mocker.Mock(side_effect=lambda x: x)
retriever = FakeRetriever()
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ """Context:
{documents}
Question:
{question}"""
)
chat = FakeListChatModel(responses=["foo, bar"])
parser = CommaSeparatedListOutputParser()
chain: Runnable = (
{
"question": RunnablePassthrough[str]() | passthrough,
"documents": passthrough | retriever,
"just_to_test_lambda": passthrough,
}
| prompt
| chat
| parser
)
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert isinstance(chain.first, RunnableParallel)
assert chain.middle == [prompt, chat]
assert chain.last == parser
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
parser_spy = mocker.spy(parser.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke("What is your name?", dict(callbacks=[tracer])) == [
"foo",
"bar",
]
assert prompt_spy.call_args.args[1] == {
"documents": [Document(page_content="foo"), Document(page_content="bar")],
"question": "What is your name?",
"just_to_test_lambda": "What is your name?",
}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(
content="""Context:
[Document(page_content='foo'), Document(page_content='bar')]
Question:
What is your name?"""
),
]
)
assert parser_spy.call_args.args[1] == AIMessage(content="foo, bar")
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 4
map_run = parent_run.child_runs[0]
assert map_run.name == "RunnableParallel"
assert len(map_run.child_runs) == 3
@freeze_time("2023-01-01")
def test_seq_prompt_dict(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None:
passthrough = mocker.Mock(side_effect=lambda x: x)
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["i'm a chatbot"])
llm = FakeListLLM(responses=["i'm a textbot"])
chain = (
prompt
| passthrough
| {
"chat": chat,
"llm": llm,
}
)
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [RunnableLambda(passthrough)]
assert isinstance(chain.last, RunnableParallel)
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
llm_spy = mocker.spy(llm.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == {
"chat": AIMessage(content="i'm a chatbot"),
"llm": "i'm a textbot",
}
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 3
map_run = parent_run.child_runs[2]
assert map_run.name == "RunnableParallel"
assert len(map_run.child_runs) == 2
@pytest.mark.asyncio
@freeze_time("2023-01-01")
async def test_router_runnable(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
chain1 = ChatPromptTemplate.from_template(
"You are a math genius. Answer the question: {question}"
) | FakeListLLM(responses=["4"])
chain2 = ChatPromptTemplate.from_template(
"You are an english major. Answer the question: {question}"
) | FakeListLLM(responses=["2"])
router = RouterRunnable({"math": chain1, "english": chain2})
chain: Runnable = {
"key": lambda x: x["key"],
"input": {"question": lambda x: x["question"]},
} | router
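    # the dict is coerced into a RunnableParallel that builds the {"key", "input"} payload fed to the router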
assert dumps(chain, pretty=True) == snapshot
result = chain.invoke({"key": "math", "question": "2 + 2"})
assert result == "4"
result2 = chain.batch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
)
assert result2 == ["4", "2"]
result = await chain.ainvoke({"key": "math", "question": "2 + 2"})
assert result == "4"
result2 = await chain.abatch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
)
assert result2 == ["4", "2"]
# Test invoke
router_spy = mocker.spy(router.__class__, "invoke")
tracer = FakeTracer()
assert (
chain.invoke({"key": "math", "question": "2 + 2"}, dict(callbacks=[tracer]))
== "4"
)
assert router_spy.call_args.args[1] == {
"key": "math",
"input": {"question": "2 + 2"},
}
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 2
router_run = parent_run.child_runs[1]
assert router_run.name == "RunnableSequence" # TODO: should be RunnableRouter
assert len(router_run.child_runs) == 2
@pytest.mark.asyncio
@freeze_time("2023-01-01")
async def test_higher_order_lambda_runnable(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
math_chain = ChatPromptTemplate.from_template(
"You are a math genius. Answer the question: {question}"
) | FakeListLLM(responses=["4"])
english_chain = ChatPromptTemplate.from_template(
"You are an english major. Answer the question: {question}"
) | FakeListLLM(responses=["2"])
input_map: Runnable = RunnableParallel(
key=lambda x: x["key"],
input={"question": lambda x: x["question"]},
)
def router(input: Dict[str, Any]) -> Runnable:
if input["key"] == "math":
return itemgetter("input") | math_chain
elif input["key"] == "english":
return itemgetter("input") | english_chain
else:
raise ValueError(f"Unknown key: {input['key']}")
chain: Runnable = input_map | router
if sys.version_info >= (3, 9):
assert dumps(chain, pretty=True) == snapshot
result = chain.invoke({"key": "math", "question": "2 + 2"})
assert result == "4"
result2 = chain.batch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
)
assert result2 == ["4", "2"]
result = await chain.ainvoke({"key": "math", "question": "2 + 2"})
assert result == "4"
result2 = await chain.abatch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
)
assert result2 == ["4", "2"]
# Test invoke
math_spy = mocker.spy(math_chain.__class__, "invoke")
tracer = FakeTracer()
assert (
chain.invoke({"key": "math", "question": "2 + 2"}, dict(callbacks=[tracer]))
== "4"
)
assert math_spy.call_args.args[1] == {
"key": "math",
"input": {"question": "2 + 2"},
}
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 2
router_run = parent_run.child_runs[1]
assert router_run.name == "router"
assert len(router_run.child_runs) == 1
math_run = router_run.child_runs[0]
assert math_run.name == "RunnableSequence"
assert len(math_run.child_runs) == 3
# Test ainvoke
async def arouter(input: Dict[str, Any]) -> Runnable:
if input["key"] == "math":
return itemgetter("input") | math_chain
elif input["key"] == "english":
return itemgetter("input") | english_chain
else:
raise ValueError(f"Unknown key: {input['key']}")
achain: Runnable = input_map | arouter
math_spy = mocker.spy(math_chain.__class__, "ainvoke")
tracer = FakeTracer()
assert (
await achain.ainvoke(
{"key": "math", "question": "2 + 2"}, dict(callbacks=[tracer])
)
== "4"
)
assert math_spy.call_args.args[1] == {
"key": "math",
"input": {"question": "2 + 2"},
}
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 2
router_run = parent_run.child_runs[1]
assert router_run.name == "arouter"
assert len(router_run.child_runs) == 1
math_run = router_run.child_runs[0]
assert math_run.name == "RunnableSequence"
assert len(math_run.child_runs) == 3
@freeze_time("2023-01-01")
def test_seq_prompt_map(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None:
passthrough = mocker.Mock(side_effect=lambda x: x)
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["i'm a chatbot"])
llm = FakeListLLM(responses=["i'm a textbot"])
chain = (
prompt
| passthrough
| {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": passthrough,
}
)
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [RunnableLambda(passthrough)]
assert isinstance(chain.last, RunnableParallel)
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
llm_spy = mocker.spy(llm.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == {
"chat": AIMessage(content="i'm a chatbot"),
"llm": "i'm a textbot",
"passthrough": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
}
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 3
map_run = parent_run.child_runs[2]
assert map_run.name == "RunnableParallel"
assert len(map_run.child_runs) == 3
def test_map_stream() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat_res = "i'm a chatbot"
# sleep to better simulate a real stream
chat = FakeListChatModel(responses=[chat_res], sleep=0.01)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
chain: Runnable = prompt | {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": RunnablePassthrough(),
}
stream = chain.stream({"question": "What is your name?"})
final_value = None
streamed_chunks = []
for chunk in stream:
streamed_chunks.append(chunk)
if final_value is None:
final_value = chunk
else:
final_value += chunk
assert streamed_chunks[0] in [
{"passthrough": prompt.invoke({"question": "What is your name?"})},
{"llm": "i"},
{"chat": AIMessageChunk(content="i")},
]
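    # one chunk per streamed character from each model, plus a single chunk for the prompt passthrough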
assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1
assert all(len(c.keys()) == 1 for c in streamed_chunks)
assert final_value is not None
assert final_value.get("chat").content == "i'm a chatbot"
assert final_value.get("llm") == "i'm a textbot"
assert final_value.get("passthrough") == prompt.invoke(
{"question": "What is your name?"}
)
def test_map_stream_iterator_input() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat_res = "i'm a chatbot"
# sleep to better simulate a real stream
chat = FakeListChatModel(responses=[chat_res], sleep=0.01)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
chain: Runnable = (
prompt
| llm
| {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": RunnablePassthrough(),
}
)
stream = chain.stream({"question": "What is your name?"})
final_value = None
streamed_chunks = []
for chunk in stream:
streamed_chunks.append(chunk)
if final_value is None:
final_value = chunk
else:
final_value += chunk
assert streamed_chunks[0] in [
{"passthrough": "i"},
{"llm": "i"},
{"chat": AIMessageChunk(content="i")},
]
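    # the passthrough re-streams the upstream llm output character by character, so llm_res is counted twice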
assert len(streamed_chunks) == len(chat_res) + len(llm_res) + len(llm_res)
assert all(len(c.keys()) == 1 for c in streamed_chunks)
assert final_value is not None
assert final_value.get("chat").content == "i'm a chatbot"
assert final_value.get("llm") == "i'm a textbot"
assert final_value.get("passthrough") == "i'm a textbot"
@pytest.mark.asyncio
async def test_map_astream() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat_res = "i'm a chatbot"
# sleep to better simulate a real stream
chat = FakeListChatModel(responses=[chat_res], sleep=0.01)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
chain: Runnable = prompt | {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": RunnablePassthrough(),
}
stream = chain.astream({"question": "What is your name?"})
final_value = None
streamed_chunks = []
async for chunk in stream:
streamed_chunks.append(chunk)
if final_value is None:
final_value = chunk
else:
final_value += chunk
assert streamed_chunks[0] in [
{"passthrough": prompt.invoke({"question": "What is your name?"})},
{"llm": "i"},
{"chat": AIMessageChunk(content="i")},
]
assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1
assert all(len(c.keys()) == 1 for c in streamed_chunks)
assert final_value is not None
assert final_value.get("chat").content == "i'm a chatbot"
assert final_value.get("llm") == "i'm a textbot"
assert final_value.get("passthrough") == prompt.invoke(
{"question": "What is your name?"}
)
# Test astream_log state accumulation
final_state = None
streamed_ops = []
async for chunk in chain.astream_log({"question": "What is your name?"}):
streamed_ops.extend(chunk.ops)
if final_state is None:
final_state = chunk
else:
final_state += chunk
final_state = cast(RunLog, final_state)
assert final_state.state["final_output"] == final_value
assert len(final_state.state["streamed_output"]) == len(streamed_chunks)
assert isinstance(final_state.state["id"], str)
assert len(final_state.ops) == len(streamed_ops)
assert len(final_state.state["logs"]) == 5
assert (
final_state.state["logs"]["ChatPromptTemplate"]["name"] == "ChatPromptTemplate"
)
assert final_state.state["logs"]["ChatPromptTemplate"][
"final_output"
] == prompt.invoke({"question": "What is your name?"})
assert final_state.state["logs"]["RunnableParallel"]["name"] == "RunnableParallel"
assert sorted(final_state.state["logs"]) == [
"ChatPromptTemplate",
"FakeListChatModel",
"FakeStreamingListLLM",
"RunnableParallel",
"RunnablePassthrough",
]
# Test astream_log with include filters
final_state = None
async for chunk in chain.astream_log(
{"question": "What is your name?"}, include_names=["FakeListChatModel"]
):
if final_state is None:
final_state = chunk
else:
final_state += chunk
final_state = cast(RunLog, final_state)
assert final_state.state["final_output"] == final_value
assert len(final_state.state["streamed_output"]) == len(streamed_chunks)
assert len(final_state.state["logs"]) == 1
assert final_state.state["logs"]["FakeListChatModel"]["name"] == "FakeListChatModel"
# Test astream_log with exclude filters
final_state = None
async for chunk in chain.astream_log(
{"question": "What is your name?"}, exclude_names=["FakeListChatModel"]
):
if final_state is None:
final_state = chunk
else:
final_state += chunk
final_state = cast(RunLog, final_state)
assert final_state.state["final_output"] == final_value
assert len(final_state.state["streamed_output"]) == len(streamed_chunks)
assert len(final_state.state["logs"]) == 4
assert (
final_state.state["logs"]["ChatPromptTemplate"]["name"] == "ChatPromptTemplate"
)
assert final_state.state["logs"]["ChatPromptTemplate"]["final_output"] == (
prompt.invoke({"question": "What is your name?"})
)
assert final_state.state["logs"]["RunnableParallel"]["name"] == "RunnableParallel"
assert sorted(final_state.state["logs"]) == [
"ChatPromptTemplate",
"FakeStreamingListLLM",
"RunnableParallel",
"RunnablePassthrough",
]
@pytest.mark.asyncio
async def test_map_astream_iterator_input() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat_res = "i'm a chatbot"
# sleep to better simulate a real stream
chat = FakeListChatModel(responses=[chat_res], sleep=0.01)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
chain: Runnable = (
prompt
| llm
| {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": RunnablePassthrough(),
}
)
stream = chain.astream({"question": "What is your name?"})
final_value = None
streamed_chunks = []
async for chunk in stream:
streamed_chunks.append(chunk)
if final_value is None:
final_value = chunk
else:
final_value += chunk
assert streamed_chunks[0] in [
{"passthrough": "i"},
{"llm": "i"},
{"chat": AIMessageChunk(content="i")},
]
assert len(streamed_chunks) == len(chat_res) + len(llm_res) + len(llm_res)
assert all(len(c.keys()) == 1 for c in streamed_chunks)
assert final_value is not None
assert final_value.get("chat").content == "i'm a chatbot"
assert final_value.get("llm") == "i'm a textbot"
assert final_value.get("passthrough") == llm_res
def test_with_config_with_config() -> None:
llm = FakeListLLM(responses=["i'm a textbot"])
assert dumpd(
llm.with_config({"metadata": {"a": "b"}}).with_config(tags=["a-tag"])
) == dumpd(llm.with_config({"metadata": {"a": "b"}, "tags": ["a-tag"]}))
def test_metadata_is_merged() -> None:
"""Test metadata and tags defined in with_config and at are merged/concatend."""
foo = RunnableLambda(lambda x: x).with_config({"metadata": {"my_key": "my_value"}})
expected_metadata = {
"my_key": "my_value",
"my_other_key": "my_other_value",
}
with collect_runs() as cb:
foo.invoke("hi", {"metadata": {"my_other_key": "my_other_value"}})
run = cb.traced_runs[0]
assert run.extra["metadata"] == expected_metadata
def test_tags_are_appended() -> None:
"""Test tags from with_config are concatenated with those in invocation."""
foo = RunnableLambda(lambda x: x).with_config({"tags": ["my_key"]})
with collect_runs() as cb:
foo.invoke("hi", {"tags": ["invoked_key"]})
run = cb.traced_runs[0]
assert isinstance(run.tags, list)
assert sorted(run.tags) == sorted(["my_key", "invoked_key"])
def test_bind_bind() -> None:
llm = FakeListLLM(responses=["i'm a textbot"])
assert dumpd(
llm.bind(stop=["Thought:"], one="two").bind(
stop=["Observation:"], hello="world"
)
) == dumpd(llm.bind(stop=["Observation:"], one="two", hello="world"))
def test_deep_stream() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain = prompt | llm | StrOutputParser()
stream = chain.stream({"question": "What up"})
chunks = []
for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
chunks = []
for chunk in (chain | RunnablePassthrough()).stream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
def test_deep_stream_assign() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain: Runnable = prompt | llm | {"str": StrOutputParser()}
stream = chain.stream({"question": "What up"})
chunks = []
for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert add(chunks) == {"str": "foo-lish"}
chain_with_assign = chain | RunnablePassthrough.assign(
hello=itemgetter("str") | llm
)
assert chain_with_assign.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
}
assert chain_with_assign.output_schema.schema() == {
"title": "RunnableAssignOutput",
"type": "object",
"properties": {
"str": {"title": "Str"},
"hello": {"title": "Hello", "type": "string"},
},
}
chunks = []
for chunk in chain_with_assign.stream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish") * 2
assert chunks == [
# first stream passthrough input chunks
{"str": "f"},
{"str": "o"},
{"str": "o"},
{"str": "-"},
{"str": "l"},
{"str": "i"},
{"str": "s"},
{"str": "h"},
# then stream assign output chunks
{"hello": "f"},
{"hello": "o"},
{"hello": "o"},
{"hello": "-"},
{"hello": "l"},
{"hello": "i"},
{"hello": "s"},
{"hello": "h"},
]
assert add(chunks) == {"str": "foo-lish", "hello": "foo-lish"}
assert chain_with_assign.invoke({"question": "What up"}) == {
"str": "foo-lish",
"hello": "foo-lish",
}
chain_with_assign_shadow = chain | RunnablePassthrough.assign(
str=lambda _: "shadow",
hello=itemgetter("str") | llm,
)
assert chain_with_assign_shadow.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
}
assert chain_with_assign_shadow.output_schema.schema() == {
"title": "RunnableAssignOutput",
"type": "object",
"properties": {
"str": {"title": "Str"},
"hello": {"title": "Hello", "type": "string"},
},
}
chunks = []
for chunk in chain_with_assign_shadow.stream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish") + 1
assert add(chunks) == {"str": "shadow", "hello": "foo-lish"}
assert chain_with_assign_shadow.invoke({"question": "What up"}) == {
"str": "shadow",
"hello": "foo-lish",
}
@pytest.mark.asyncio
async def test_deep_astream() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain = prompt | llm | StrOutputParser()
stream = chain.astream({"question": "What up"})
chunks = []
async for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
chunks = []
async for chunk in (chain | RunnablePassthrough()).astream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
@pytest.mark.asyncio
async def test_deep_astream_assign() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain: Runnable = prompt | llm | {"str": StrOutputParser()}
stream = chain.astream({"question": "What up"})
chunks = []
async for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert add(chunks) == {"str": "foo-lish"}
chain_with_assign = chain | RunnablePassthrough.assign(
hello=itemgetter("str") | llm,
)
assert chain_with_assign.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
}
assert chain_with_assign.output_schema.schema() == {
"title": "RunnableAssignOutput",
"type": "object",
"properties": {
"str": {"title": "Str"},
"hello": {"title": "Hello", "type": "string"},
},
}
chunks = []
async for chunk in chain_with_assign.astream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish") * 2
assert chunks == [
# first stream passthrough input chunks
{"str": "f"},
{"str": "o"},
{"str": "o"},
{"str": "-"},
{"str": "l"},
{"str": "i"},
{"str": "s"},
{"str": "h"},
# then stream assign output chunks
{"hello": "f"},
{"hello": "o"},
{"hello": "o"},
{"hello": "-"},
{"hello": "l"},
{"hello": "i"},
{"hello": "s"},
{"hello": "h"},
]
assert add(chunks) == {"str": "foo-lish", "hello": "foo-lish"}
assert await chain_with_assign.ainvoke({"question": "What up"}) == {
"str": "foo-lish",
"hello": "foo-lish",
}
chain_with_assign_shadow = chain | RunnablePassthrough.assign(
str=lambda _: "shadow",
hello=itemgetter("str") | llm,
)
assert chain_with_assign_shadow.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
}
assert chain_with_assign_shadow.output_schema.schema() == {
"title": "RunnableAssignOutput",
"type": "object",
"properties": {
"str": {"title": "Str"},
"hello": {"title": "Hello", "type": "string"},
},
}
chunks = []
async for chunk in chain_with_assign_shadow.astream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish") + 1
assert add(chunks) == {"str": "shadow", "hello": "foo-lish"}
assert await chain_with_assign_shadow.ainvoke({"question": "What up"}) == {
"str": "shadow",
"hello": "foo-lish",
}
def test_runnable_sequence_transform() -> None:
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain = llm | StrOutputParser()
stream = chain.transform(llm.stream("Hi there!"))
chunks = []
for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
@pytest.mark.asyncio
async def test_runnable_sequence_atransform() -> None:
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain = llm | StrOutputParser()
stream = chain.atransform(llm.astream("Hi there!"))
chunks = []
async for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
@pytest.fixture()
def llm_with_fallbacks() -> RunnableWithFallbacks:
error_llm = FakeListLLM(responses=["foo"], i=1)
pass_llm = FakeListLLM(responses=["bar"])
return error_llm.with_fallbacks([pass_llm])
@pytest.fixture()
def llm_with_multi_fallbacks() -> RunnableWithFallbacks:
error_llm = FakeListLLM(responses=["foo"], i=1)
error_llm_2 = FakeListLLM(responses=["baz"], i=1)
pass_llm = FakeListLLM(responses=["bar"])
return error_llm.with_fallbacks([error_llm_2, pass_llm])
@pytest.fixture()
def llm_chain_with_fallbacks() -> Runnable:
error_llm = FakeListLLM(responses=["foo"], i=1)
pass_llm = FakeListLLM(responses=["bar"])
prompt = PromptTemplate.from_template("what did baz say to {buz}")
return RunnableParallel({"buz": lambda x: x}) | (prompt | error_llm).with_fallbacks(
[prompt | pass_llm]
)
@pytest.mark.parametrize(
"runnable",
["llm_with_fallbacks", "llm_with_multi_fallbacks", "llm_chain_with_fallbacks"],
)
@pytest.mark.asyncio
async def test_llm_with_fallbacks(
runnable: RunnableWithFallbacks, request: Any, snapshot: SnapshotAssertion
) -> None:
runnable = request.getfixturevalue(runnable)
assert runnable.invoke("hello") == "bar"
assert runnable.batch(["hi", "hey", "bye"]) == ["bar"] * 3
assert list(runnable.stream("hello")) == ["bar"]
assert await runnable.ainvoke("hello") == "bar"
assert await runnable.abatch(["hi", "hey", "bye"]) == ["bar"] * 3
assert list(await runnable.ainvoke("hello")) == list("bar")
if sys.version_info >= (3, 9):
assert dumps(runnable, pretty=True) == snapshot
class FakeSplitIntoListParser(BaseOutputParser[List[str]]):
"""Parse the output of an LLM call to a comma-separated list."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return True
def get_format_instructions(self) -> str:
return (
"Your response should be a list of comma separated values, "
"eg: `foo, bar, baz`"
)
def parse(self, text: str) -> List[str]:
"""Parse the output of an LLM call."""
return text.strip().split(", ")
def test_each_simple() -> None:
"""Test that each() works with a simple runnable."""
parser = FakeSplitIntoListParser()
assert parser.invoke("first item, second item") == ["first item", "second item"]
assert parser.map().invoke(["a, b", "c"]) == [["a", "b"], ["c"]]
assert parser.map().map().invoke([["a, b", "c"], ["c, e"]]) == [
[["a", "b"], ["c"]],
[["c", "e"]],
]
def test_each(snapshot: SnapshotAssertion) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
first_llm = FakeStreamingListLLM(responses=["first item, second item, third item"])
parser = FakeSplitIntoListParser()
second_llm = FakeStreamingListLLM(responses=["this", "is", "a", "test"])
chain = prompt | first_llm | parser | second_llm.map()
assert dumps(chain, pretty=True) == snapshot
output = chain.invoke({"question": "What up"})
assert output == ["this", "is", "a"]
assert (parser | second_llm.map()).invoke("first item, second item") == [
"test",
"this",
]
def test_recursive_lambda() -> None:
def _simple_recursion(x: int) -> Union[int, Runnable]:
if x < 10:
return RunnableLambda(lambda *args: _simple_recursion(x + 1))
else:
return x
runnable = RunnableLambda(_simple_recursion)
assert runnable.invoke(5) == 10
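    # reaching 10 from 0 requires more nested invocations than the recursion_limit of 9 allows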
with pytest.raises(RecursionError):
runnable.invoke(0, {"recursion_limit": 9})
def test_retrying(mocker: MockerFixture) -> None:
def _lambda(x: int) -> Union[int, Runnable]:
if x == 1:
raise ValueError("x is 1")
elif x == 2:
raise RuntimeError("x is 2")
else:
return x
_lambda_mock = mocker.Mock(side_effect=_lambda)
runnable = RunnableLambda(_lambda_mock)
with pytest.raises(ValueError):
runnable.invoke(1)
assert _lambda_mock.call_count == 1
_lambda_mock.reset_mock()
with pytest.raises(ValueError):
runnable.with_retry(
stop_after_attempt=2,
retry_if_exception_type=(ValueError,),
).invoke(1)
assert _lambda_mock.call_count == 2 # retried
_lambda_mock.reset_mock()
with pytest.raises(RuntimeError):
runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).invoke(2)
assert _lambda_mock.call_count == 1 # did not retry
_lambda_mock.reset_mock()
with pytest.raises(ValueError):
runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).batch([1, 2, 0])
# 3rd input isn't retried because it succeeded
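    # 3 first-pass calls plus one retry for each of the two failing inputs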
assert _lambda_mock.call_count == 3 + 2
_lambda_mock.reset_mock()
output = runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).batch([1, 2, 0], return_exceptions=True)
# 3rd input isn't retried because it succeeded
assert _lambda_mock.call_count == 3 + 2
assert len(output) == 3
assert isinstance(output[0], ValueError)
assert isinstance(output[1], RuntimeError)
assert output[2] == 0
_lambda_mock.reset_mock()
@pytest.mark.asyncio
async def test_async_retrying(mocker: MockerFixture) -> None:
def _lambda(x: int) -> Union[int, Runnable]:
if x == 1:
raise ValueError("x is 1")
elif x == 2:
raise RuntimeError("x is 2")
else:
return x
_lambda_mock = mocker.Mock(side_effect=_lambda)
runnable = RunnableLambda(_lambda_mock)
with pytest.raises(ValueError):
await runnable.ainvoke(1)
assert _lambda_mock.call_count == 1
_lambda_mock.reset_mock()
with pytest.raises(ValueError):
await runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError, KeyError),
).ainvoke(1)
assert _lambda_mock.call_count == 2 # retried
_lambda_mock.reset_mock()
with pytest.raises(RuntimeError):
await runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).ainvoke(2)
assert _lambda_mock.call_count == 1 # did not retry
_lambda_mock.reset_mock()
with pytest.raises(ValueError):
await runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).abatch([1, 2, 0])
# 3rd input isn't retried because it succeeded
assert _lambda_mock.call_count == 3 + 2
_lambda_mock.reset_mock()
output = await runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).abatch([1, 2, 0], return_exceptions=True)
# 3rd input isn't retried because it succeeded
assert _lambda_mock.call_count == 3 + 2
assert len(output) == 3
assert isinstance(output[0], ValueError)
assert isinstance(output[1], RuntimeError)
assert output[2] == 0
_lambda_mock.reset_mock()
@freeze_time("2023-01-01")
def test_seq_batch_return_exceptions(mocker: MockerFixture) -> None:
class ControlledExceptionRunnable(Runnable[str, str]):
def __init__(self, fail_starts_with: str) -> None:
self.fail_starts_with = fail_starts_with
def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> Any:
raise NotImplementedError()
def _batch(
self,
inputs: List[str],
) -> List:
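            # return exception instances (rather than raising) for inputs matching the failure prefix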
outputs: List[Any] = []
for input in inputs:
if input.startswith(self.fail_starts_with):
outputs.append(ValueError())
else:
outputs.append(input + "a")
return outputs
def batch(
self,
inputs: List[str],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> List[str]:
return self._batch_with_config(
self._batch,
inputs,
config,
return_exceptions=return_exceptions,
**kwargs,
)
chain = (
ControlledExceptionRunnable("bux")
| ControlledExceptionRunnable("bar")
| ControlledExceptionRunnable("baz")
| ControlledExceptionRunnable("foo")
)
assert isinstance(chain, RunnableSequence)
# Test batch
with pytest.raises(ValueError):
chain.batch(["foo", "bar", "baz", "qux"])
spy = mocker.spy(ControlledExceptionRunnable, "batch")
tracer = FakeTracer()
inputs = ["foo", "bar", "baz", "qux"]
outputs = chain.batch(inputs, dict(callbacks=[tracer]), return_exceptions=True)
assert len(outputs) == 4
assert isinstance(outputs[0], ValueError)
assert isinstance(outputs[1], ValueError)
assert isinstance(outputs[2], ValueError)
assert outputs[3] == "quxaaaa"
assert spy.call_count == 4
inputs_to_batch = [c[0][1] for c in spy.call_args_list]
assert inputs_to_batch == [
# inputs to sequence step 0
# same as inputs to sequence.batch()
["foo", "bar", "baz", "qux"],
# inputs to sequence step 1
# == outputs of sequence step 0 as no exceptions were raised
["fooa", "bara", "baza", "quxa"],
# inputs to sequence step 2
# 'bar' was dropped as it raised an exception in step 1
["fooaa", "bazaa", "quxaa"],
# inputs to sequence step 3
# 'baz' was dropped as it raised an exception in step 2
["fooaaa", "quxaaa"],
]
parent_runs = sorted(
(r for r in tracer.runs if r.parent_run_id is None),
key=lambda run: inputs.index(run.inputs["input"]),
)
assert len(parent_runs) == 4
parent_run_foo = parent_runs[0]
assert parent_run_foo.inputs["input"] == "foo"
assert parent_run_foo.error == repr(ValueError())
assert len(parent_run_foo.child_runs) == 4
assert [r.error for r in parent_run_foo.child_runs] == [
None,
None,
None,
repr(ValueError()),
]
parent_run_bar = parent_runs[1]
assert parent_run_bar.inputs["input"] == "bar"
assert parent_run_bar.error == repr(ValueError())
assert len(parent_run_bar.child_runs) == 2
assert [r.error for r in parent_run_bar.child_runs] == [
None,
repr(ValueError()),
]
parent_run_baz = parent_runs[2]
assert parent_run_baz.inputs["input"] == "baz"
assert parent_run_baz.error == repr(ValueError())
assert len(parent_run_baz.child_runs) == 3
assert [r.error for r in parent_run_baz.child_runs] == [
None,
None,
repr(ValueError()),
]
parent_run_qux = parent_runs[3]
assert parent_run_qux.inputs["input"] == "qux"
assert parent_run_qux.error is None
assert parent_run_qux.outputs["output"] == "quxaaaa"
assert len(parent_run_qux.child_runs) == 4
assert [r.error for r in parent_run_qux.child_runs] == [None, None, None, None]
@pytest.mark.asyncio
@freeze_time("2023-01-01")
async def test_seq_abatch_return_exceptions(mocker: MockerFixture) -> None:
class ControlledExceptionRunnable(Runnable[str, str]):
def __init__(self, fail_starts_with: str) -> None:
self.fail_starts_with = fail_starts_with
def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> Any:
raise NotImplementedError()
async def _abatch(
self,
inputs: List[str],
) -> List:
outputs: List[Any] = []
for input in inputs:
if input.startswith(self.fail_starts_with):
outputs.append(ValueError())
else:
outputs.append(input + "a")
return outputs
async def abatch(
self,
inputs: List[str],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> List[str]:
return await self._abatch_with_config(
self._abatch,
inputs,
config,
return_exceptions=return_exceptions,
**kwargs,
)
chain = (
ControlledExceptionRunnable("bux")
| ControlledExceptionRunnable("bar")
| ControlledExceptionRunnable("baz")
| ControlledExceptionRunnable("foo")
)
assert isinstance(chain, RunnableSequence)
# Test abatch
with pytest.raises(ValueError):
await chain.abatch(["foo", "bar", "baz", "qux"])
spy = mocker.spy(ControlledExceptionRunnable, "abatch")
tracer = FakeTracer()
inputs = ["foo", "bar", "baz", "qux"]
outputs = await chain.abatch(
inputs, dict(callbacks=[tracer]), return_exceptions=True
)
assert len(outputs) == 4
assert isinstance(outputs[0], ValueError)
assert isinstance(outputs[1], ValueError)
assert isinstance(outputs[2], ValueError)
assert outputs[3] == "quxaaaa"
assert spy.call_count == 4
inputs_to_batch = [c[0][1] for c in spy.call_args_list]
assert inputs_to_batch == [
# inputs to sequence step 0
        # same as inputs to sequence.abatch()
["foo", "bar", "baz", "qux"],
# inputs to sequence step 1
# == outputs of sequence step 0 as no exceptions were raised
["fooa", "bara", "baza", "quxa"],
# inputs to sequence step 2
# 'bar' was dropped as it raised an exception in step 1
["fooaa", "bazaa", "quxaa"],
# inputs to sequence step 3
# 'baz' was dropped as it raised an exception in step 2
["fooaaa", "quxaaa"],
]
parent_runs = sorted(
(r for r in tracer.runs if r.parent_run_id is None),
key=lambda run: inputs.index(run.inputs["input"]),
)
assert len(parent_runs) == 4
parent_run_foo = parent_runs[0]
assert parent_run_foo.inputs["input"] == "foo"
assert parent_run_foo.error == repr(ValueError())
assert len(parent_run_foo.child_runs) == 4
assert [r.error for r in parent_run_foo.child_runs] == [
None,
None,
None,
repr(ValueError()),
]
parent_run_bar = parent_runs[1]
assert parent_run_bar.inputs["input"] == "bar"
assert parent_run_bar.error == repr(ValueError())
assert len(parent_run_bar.child_runs) == 2
assert [r.error for r in parent_run_bar.child_runs] == [
None,
repr(ValueError()),
]
parent_run_baz = parent_runs[2]
assert parent_run_baz.inputs["input"] == "baz"
assert parent_run_baz.error == repr(ValueError())
assert len(parent_run_baz.child_runs) == 3
assert [r.error for r in parent_run_baz.child_runs] == [
None,
None,
repr(ValueError()),
]
parent_run_qux = parent_runs[3]
assert parent_run_qux.inputs["input"] == "qux"
assert parent_run_qux.error is None
assert parent_run_qux.outputs["output"] == "quxaaaa"
assert len(parent_run_qux.child_runs) == 4
assert [r.error for r in parent_run_qux.child_runs] == [None, None, None, None]
def test_runnable_branch_init() -> None:
"""Verify that runnable branch gets initialized properly."""
add = RunnableLambda(lambda x: x + 1)
condition = RunnableLambda(lambda x: x > 0)
# Test failure with less than 2 branches
with pytest.raises(ValueError):
RunnableBranch((condition, add))
# Test failure with less than 2 branches
with pytest.raises(ValueError):
RunnableBranch(condition)
@pytest.mark.parametrize(
"branches",
[
[
(RunnableLambda(lambda x: x > 0), RunnableLambda(lambda x: x + 1)),
RunnableLambda(lambda x: x - 1),
],
[
(RunnableLambda(lambda x: x > 0), RunnableLambda(lambda x: x + 1)),
(RunnableLambda(lambda x: x > 5), RunnableLambda(lambda x: x + 1)),
RunnableLambda(lambda x: x - 1),
],
[
(lambda x: x > 0, lambda x: x + 1),
(lambda x: x > 5, lambda x: x + 1),
lambda x: x - 1,
],
],
)
def test_runnable_branch_init_coercion(branches: Sequence[Any]) -> None:
"""Verify that runnable branch gets initialized properly."""
runnable = RunnableBranch[int, int](*branches)
for branch in runnable.branches:
condition, body = branch
assert isinstance(condition, Runnable)
assert isinstance(body, Runnable)
assert isinstance(runnable.default, Runnable)
assert runnable.input_schema.schema() == {"title": "RunnableBranchInput"}
def test_runnable_branch_invoke_call_counts(mocker: MockerFixture) -> None:
"""Verify that runnables are invoked only when necessary."""
# Test with single branch
add = RunnableLambda(lambda x: x + 1)
sub = RunnableLambda(lambda x: x - 1)
condition = RunnableLambda(lambda x: x > 0)
spy = mocker.spy(condition, "invoke")
add_spy = mocker.spy(add, "invoke")
branch = RunnableBranch[int, int]((condition, add), (condition, add), sub)
assert spy.call_count == 0
assert add_spy.call_count == 0
assert branch.invoke(1) == 2
assert add_spy.call_count == 1
assert spy.call_count == 1
assert branch.invoke(2) == 3
assert spy.call_count == 2
assert add_spy.call_count == 2
assert branch.invoke(-3) == -4
# Should fall through to default branch with condition being evaluated twice!
assert spy.call_count == 4
# Add should not be invoked
assert add_spy.call_count == 2
def test_runnable_branch_invoke() -> None:
# Test with single branch
def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error),
# mypy cannot infer types from the lambda
(lambda x: x > 0 and x < 5, lambda x: x + 1), # type: ignore[misc]
(lambda x: x > 5, lambda x: x * 10),
lambda x: x - 1,
)
assert branch.invoke(1) == 2
assert branch.invoke(10) == 100
assert branch.invoke(0) == -1
# Should raise an exception
with pytest.raises(ValueError):
branch.invoke(1000)
def test_runnable_branch_batch() -> None:
"""Test batch variant."""
# Test with single branch
branch = RunnableBranch[int, int](
(lambda x: x > 0 and x < 5, lambda x: x + 1),
(lambda x: x > 5, lambda x: x * 10),
lambda x: x - 1,
)
assert branch.batch([1, 10, 0]) == [2, 100, -1]
@pytest.mark.asyncio
async def test_runnable_branch_ainvoke() -> None:
"""Test async variant of invoke."""
branch = RunnableBranch[int, int](
(lambda x: x > 0 and x < 5, lambda x: x + 1),
(lambda x: x > 5, lambda x: x * 10),
lambda x: x - 1,
)
assert await branch.ainvoke(1) == 2
assert await branch.ainvoke(10) == 100
assert await branch.ainvoke(0) == -1
# Verify that the async variant is used if available
async def condition(x: int) -> bool:
return x > 0
async def add(x: int) -> int:
return x + 1
async def sub(x: int) -> int:
return x - 1
branch = RunnableBranch[int, int]((condition, add), sub)
assert await branch.ainvoke(1) == 2
assert await branch.ainvoke(-10) == -11
def test_runnable_branch_invoke_callbacks() -> None:
"""Verify that callbacks are correctly used in invoke."""
tracer = FakeTracer()
def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error),
lambda x: x - 1,
)
assert branch.invoke(1, config={"callbacks": [tracer]}) == 0
assert len(tracer.runs) == 1
assert tracer.runs[0].error is None
assert tracer.runs[0].outputs == {"output": 0}
    # Check that the error is reported to the tracer instead of an end event
with pytest.raises(ValueError):
branch.invoke(1000, config={"callbacks": [tracer]})
assert len(tracer.runs) == 2
assert tracer.runs[1].error == "ValueError('x is too large')"
assert tracer.runs[1].outputs is None
@pytest.mark.asyncio
async def test_runnable_branch_ainvoke_callbacks() -> None:
"""Verify that callbacks are invoked correctly in ainvoke."""
tracer = FakeTracer()
async def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error),
lambda x: x - 1,
)
assert await branch.ainvoke(1, config={"callbacks": [tracer]}) == 0
assert len(tracer.runs) == 1
assert tracer.runs[0].error is None
assert tracer.runs[0].outputs == {"output": 0}
    # Check that the error is reported to the tracer instead of an end event
with pytest.raises(ValueError):
await branch.ainvoke(1000, config={"callbacks": [tracer]})
assert len(tracer.runs) == 2
assert tracer.runs[1].error == "ValueError('x is too large')"
assert tracer.runs[1].outputs is None
@pytest.mark.asyncio
async def test_runnable_branch_abatch() -> None:
"""Test async variant of invoke."""
branch = RunnableBranch[int, int](
(lambda x: x > 0 and x < 5, lambda x: x + 1),
(lambda x: x > 5, lambda x: x * 10),
lambda x: x - 1,
)
assert await branch.abatch([1, 10, 0]) == [2, 100, -1]
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run."
)
def test_representation_of_runnables() -> None:
"""Test representation of runnables."""
runnable = RunnableLambda(lambda x: x * 2)
assert repr(runnable) == "RunnableLambda(lambda x: x * 2)"
def f(x: int) -> int:
"""Return 2."""
return 2
assert repr(RunnableLambda(func=f)) == "RunnableLambda(...)"
async def af(x: int) -> int:
"""Return 2."""
return 2
assert repr(RunnableLambda(func=f, afunc=af)) == "RunnableLambda(...)"
assert repr(
RunnableLambda(lambda x: x + 2)
| {
"a": RunnableLambda(lambda x: x * 2),
"b": RunnableLambda(lambda x: x * 3),
}
) == (
"RunnableLambda(...)\n"
"| {\n"
" a: RunnableLambda(...),\n"
" b: RunnableLambda(...)\n"
" }"
), "repr where code string contains multiple lambdas gives up"
@pytest.mark.asyncio
async def test_tool_from_runnable() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain = prompt | llm | StrOutputParser()
chain_tool = tool("chain_tool", chain)
assert isinstance(chain_tool, BaseTool)
assert chain_tool.name == "chain_tool"
assert chain_tool.run({"question": "What up"}) == chain.invoke(
{"question": "What up"}
)
assert await chain_tool.arun({"question": "What up"}) == await chain.ainvoke(
{"question": "What up"}
)
assert chain_tool.description.endswith(repr(chain))
assert chain_tool.args_schema.schema() == chain.input_schema.schema()
assert chain_tool.args_schema.schema() == {
"properties": {"question": {"title": "Question", "type": "string"}},
"title": "PromptInput",
"type": "object",
}
@pytest.mark.asyncio
async def test_runnable_gen() -> None:
"""Test that a generator can be used as a runnable."""
def gen(input: Iterator[Any]) -> Iterator[int]:
yield 1
yield 2
yield 3
runnable = RunnableGenerator(gen)
assert runnable.input_schema.schema() == {"title": "RunnableGeneratorInput"}
assert runnable.output_schema.schema() == {
"title": "RunnableGeneratorOutput",
"type": "integer",
}
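    # invoke aggregates the streamed chunks (1 + 2 + 3 == 6)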
assert runnable.invoke(None) == 6
assert list(runnable.stream(None)) == [1, 2, 3]
assert runnable.batch([None, None]) == [6, 6]
async def agen(input: AsyncIterator[Any]) -> AsyncIterator[int]:
yield 1
yield 2
yield 3
arunnable = RunnableGenerator(agen)
assert await arunnable.ainvoke(None) == 6
assert [p async for p in arunnable.astream(None)] == [1, 2, 3]
assert await arunnable.abatch([None, None]) == [6, 6]
@pytest.mark.asyncio
async def test_runnable_gen_transform() -> None:
"""Test that a generator can be used as a runnable."""
def gen_indexes(length_iter: Iterator[int]) -> Iterator[int]:
for i in range(next(length_iter)):
yield i
async def agen_indexes(length_iter: AsyncIterator[int]) -> AsyncIterator[int]:
async for length in length_iter:
for i in range(length):
yield i
def plus_one(input: Iterator[int]) -> Iterator[int]:
for i in input:
yield i + 1
async def aplus_one(input: AsyncIterator[int]) -> AsyncIterator[int]:
async for i in input:
yield i + 1
chain: Runnable = RunnableGenerator(gen_indexes, agen_indexes) | plus_one
achain = RunnableGenerator(gen_indexes, agen_indexes) | aplus_one
assert chain.input_schema.schema() == {
"title": "RunnableGeneratorInput",
"type": "integer",
}
assert chain.output_schema.schema() == {
"title": "RunnableGeneratorOutput",
"type": "integer",
}
assert achain.input_schema.schema() == {
"title": "RunnableGeneratorInput",
"type": "integer",
}
assert achain.output_schema.schema() == {
"title": "RunnableGeneratorOutput",
"type": "integer",
}
assert list(chain.stream(3)) == [1, 2, 3]
assert [p async for p in achain.astream(4)] == [1, 2, 3, 4]
| [
"Hello, {name}!",
"Context:\n{documents}\n\nQuestion:\n{question}",
"human",
"Prompt Template",
"What is your favorite color?",
"what country is the city {city} in? respond in {language}",
"{question}",
"prompt_template",
"Respond to the following question: test",
"You are a nice assistant.",
"i'm a chatbot",
"Context:\n[Document(page_content='foo'), Document(page_content='bar')]\n\nQuestion:\nWhat is your name?",
"Hello, {name}! {name}!",
"What is your name?",
"foo",
"{documents}",
"You are an english major. Answer the question: {question}",
"Respond to the following question: {question}",
"{'title': 'Content', 'type': 'string'}",
"The prompt template for this chain",
"A very good morning to you, {name}!",
"{'title': 'Page Content', 'type': 'string'}",
"invoke",
"good_morning",
"what did baz say to {buz}",
"You are a nicer assistant.",
"ainvoke",
"[Document(page_content='foo'), Document(page_content='bar')]",
"what is the city {person} is from?",
"Hello, how are you?",
"foo, bar",
"You are a math genius. Answer the question: {question}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~openai_info.py | """Callback Handler that tracks OpenAI token usage and cost."""
from typing import Any, Dict, List
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult
MODEL_COST_PER_1K_TOKENS = {
# GPT-4 input
"gpt-4": 0.03,
"gpt-4-0314": 0.03,
"gpt-4-0613": 0.03,
"gpt-4-32k": 0.06,
"gpt-4-32k-0314": 0.06,
"gpt-4-32k-0613": 0.06,
# GPT-4 output
"gpt-4-completion": 0.06,
"gpt-4-0314-completion": 0.06,
"gpt-4-0613-completion": 0.06,
"gpt-4-32k-completion": 0.12,
"gpt-4-32k-0314-completion": 0.12,
"gpt-4-32k-0613-completion": 0.12,
# GPT-3.5 input
"gpt-3.5-turbo": 0.0015,
"gpt-3.5-turbo-0301": 0.0015,
"gpt-3.5-turbo-0613": 0.0015,
"gpt-3.5-turbo-instruct": 0.0015,
"gpt-3.5-turbo-16k": 0.003,
"gpt-3.5-turbo-16k-0613": 0.003,
# GPT-3.5 output
"gpt-3.5-turbo-completion": 0.002,
"gpt-3.5-turbo-0301-completion": 0.002,
"gpt-3.5-turbo-0613-completion": 0.002,
"gpt-3.5-turbo-instruct-completion": 0.002,
"gpt-3.5-turbo-16k-completion": 0.004,
"gpt-3.5-turbo-16k-0613-completion": 0.004,
# Azure GPT-35 input
"gpt-35-turbo": 0.0015, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0301": 0.0015, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0613": 0.0015,
"gpt-35-turbo-instruct": 0.0015,
"gpt-35-turbo-16k": 0.003,
"gpt-35-turbo-16k-0613": 0.003,
# Azure GPT-35 output
"gpt-35-turbo-completion": 0.002, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0301-completion": 0.002, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0613-completion": 0.002,
"gpt-35-turbo-instruct-completion": 0.002,
"gpt-35-turbo-16k-completion": 0.004,
"gpt-35-turbo-16k-0613-completion": 0.004,
# Others
"text-ada-001": 0.0004,
"ada": 0.0004,
"text-babbage-001": 0.0005,
"babbage": 0.0005,
"text-curie-001": 0.002,
"curie": 0.002,
"text-davinci-003": 0.02,
"text-davinci-002": 0.02,
"code-davinci-002": 0.02,
# Fine Tuned input
"babbage-002-finetuned": 0.0016,
"davinci-002-finetuned": 0.012,
"gpt-3.5-turbo-0613-finetuned": 0.012,
# Fine Tuned output
"babbage-002-finetuned-completion": 0.0016,
"davinci-002-finetuned-completion": 0.012,
"gpt-3.5-turbo-0613-finetuned-completion": 0.016,
# Legacy fine-tuned models
"ada-finetuned-legacy": 0.0016,
"babbage-finetuned-legacy": 0.0024,
"curie-finetuned-legacy": 0.012,
"davinci-finetuned-legacy": 0.12,
}
def standardize_model_name(
model_name: str,
is_completion: bool = False,
) -> str:
"""
Standardize the model name to a format that can be used in the OpenAI API.
Args:
model_name: Model name to standardize.
is_completion: Whether the model is used for completion or not.
Defaults to False.
Returns:
Standardized model name.
"""
model_name = model_name.lower()
if "ft-" in model_name:
return model_name.split(":")[0] + "-finetuned-legacy"
if "ft:" in model_name:
return model_name.split(":")[1] + "-finetuned"
elif is_completion and (
model_name.startswith("gpt-4")
or model_name.startswith("gpt-3.5")
or model_name.startswith("gpt-35")
or ("finetuned" in model_name and "legacy" not in model_name)
):
return model_name + "-completion"
else:
return model_name
def get_openai_token_cost_for_model(
model_name: str, num_tokens: int, is_completion: bool = False
) -> float:
"""
Get the cost in USD for a given model and number of tokens.
Args:
model_name: Name of the model
num_tokens: Number of tokens.
is_completion: Whether the model is used for completion or not.
Defaults to False.
Returns:
Cost in USD.
"""
model_name = standardize_model_name(model_name, is_completion=is_completion)
if model_name not in MODEL_COST_PER_1K_TOKENS:
raise ValueError(
f"Unknown model: {model_name}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(MODEL_COST_PER_1K_TOKENS.keys())
)
return MODEL_COST_PER_1K_TOKENS[model_name] * (num_tokens / 1000)
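# Illustrative sketch (not part of the original module): pricing a single request
# with the helpers above. The token counts are made-up example values.
#
#     prompt_cost = get_openai_token_cost_for_model("gpt-3.5-turbo", 1000)
#     completion_cost = get_openai_token_cost_for_model(
#         "gpt-3.5-turbo", 500, is_completion=True
#     )
#     total_cost = prompt_cost + completion_cost  # 0.0015 + 0.001 = 0.0025 USD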
class OpenAICallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks OpenAI info."""
total_tokens: int = 0
prompt_tokens: int = 0
completion_tokens: int = 0
successful_requests: int = 0
total_cost: float = 0.0
def __repr__(self) -> str:
return (
f"Tokens Used: {self.total_tokens}\n"
f"\tPrompt Tokens: {self.prompt_tokens}\n"
f"\tCompletion Tokens: {self.completion_tokens}\n"
f"Successful Requests: {self.successful_requests}\n"
f"Total Cost (USD): ${self.total_cost}"
)
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Print out the prompts."""
pass
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Print out the token."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
if response.llm_output is None:
return None
self.successful_requests += 1
if "token_usage" not in response.llm_output:
return None
token_usage = response.llm_output["token_usage"]
completion_tokens = token_usage.get("completion_tokens", 0)
prompt_tokens = token_usage.get("prompt_tokens", 0)
model_name = standardize_model_name(response.llm_output.get("model_name", ""))
if model_name in MODEL_COST_PER_1K_TOKENS:
completion_cost = get_openai_token_cost_for_model(
model_name, completion_tokens, is_completion=True
)
prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens)
self.total_cost += prompt_cost + completion_cost
self.total_tokens += token_usage.get("total_tokens", 0)
self.prompt_tokens += prompt_tokens
self.completion_tokens += completion_tokens
def __copy__(self) -> "OpenAICallbackHandler":
"""Return a copy of the callback handler."""
return self
def __deepcopy__(self, memo: Any) -> "OpenAICallbackHandler":
"""Return a deep copy of the callback handler."""
return self
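# Illustrative usage sketch (not part of the original module): the handler is
# normally attached via the `get_openai_callback` context manager exported from
# langchain.callbacks, which accumulates the counters defined above. The prompt
# below is a made-up example and assumes OPENAI_API_KEY is set.
#
#     from langchain.callbacks import get_openai_callback
#     from langchain.llms import OpenAI
#
#     llm = OpenAI()
#     with get_openai_callback() as cb:
#         llm.predict("Tell me a joke")
#     print(cb.total_tokens, cb.total_cost)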
| [
"0",
"prompt_tokens"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~test_dependencies.py | """A unit test meant to catch accidental introduction of non-optional dependencies."""
from pathlib import Path
from typing import Any, Dict, Mapping
import pytest
import toml
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture()
def poetry_conf() -> Dict[str, Any]:
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)["tool"]["poetry"]
def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
# Get the dependencies from the [tool.poetry.dependencies] section
dependencies = poetry_conf["dependencies"]
is_required = {
package_name: isinstance(requirements, str)
or not requirements.get("optional", False)
for package_name, requirements in dependencies.items()
}
required_dependencies = [
package_name for package_name, required in is_required.items() if required
]
assert sorted(required_dependencies) == [
"PyYAML",
"SQLAlchemy",
"aiohttp",
"anyio",
"async-timeout",
"dataclasses-json",
"gigachat",
"jsonpatch",
"langsmith",
"numpy",
"pydantic",
"python",
"requests",
"tenacity",
]
unrequired_dependencies = [
package_name for package_name, required in is_required.items() if not required
]
in_extras = [dep for group in poetry_conf["extras"].values() for dep in group]
assert set(unrequired_dependencies) == set(in_extras)
def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
test_group_deps = sorted(poetry_conf["group"]["test"]["dependencies"])
assert test_group_deps == [
"duckdb-engine",
"freezegun",
"lark",
"pandas",
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-dotenv",
"pytest-mock",
"pytest-socket",
"pytest-watcher",
"responses",
"syrupy",
]
def test_imports() -> None:
"""Test that you can import all top level things okay."""
from langchain.agents import OpenAIFunctionsAgent # noqa: F401
from langchain.callbacks import OpenAICallbackHandler # noqa: F401
from langchain.chains import LLMChain # noqa: F401
from langchain.chat_models import ChatOpenAI # noqa: F401
from langchain.document_loaders import BSHTMLLoader # noqa: F401
from langchain.embeddings import OpenAIEmbeddings # noqa: F401
from langchain.llms import OpenAI # noqa: F401
from langchain.retrievers import VespaRetriever # noqa: F401
from langchain.schema import BasePromptTemplate # noqa: F401
from langchain.tools import DuckDuckGoSearchResults # noqa: F401
from langchain.utilities import SerpAPIWrapper # noqa: F401
from langchain.vectorstores import FAISS # noqa: F401
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~metal.py | from typing import Any, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
class MetalRetriever(BaseRetriever):
"""`Metal API` retriever."""
client: Any
"""The Metal client to use."""
params: Optional[dict] = None
"""The parameters to pass to the Metal client."""
@root_validator(pre=True)
def validate_client(cls, values: dict) -> dict:
"""Validate that the client is of the correct type."""
from metal_sdk.metal import Metal
if "client" in values:
client = values["client"]
if not isinstance(client, Metal):
raise ValueError(
"Got unexpected client, should be of type metal_sdk.metal.Metal. "
f"Instead, got {type(client)}"
)
values["params"] = values.get("params", {})
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
results = self.client.search({"text": query}, **self.params)
final_results = []
for r in results["data"]:
metadata = {k: v for k, v in r.items() if k != "text"}
final_results.append(Document(page_content=r["text"], metadata=metadata))
return final_results
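# Illustrative usage sketch (not part of the original module): the positional
# arguments passed to the Metal client below are assumptions based on the
# metal_sdk documentation, and the key values are placeholders.
#
#     from metal_sdk.metal import Metal
#
#     metal = Metal("API_KEY", "CLIENT_ID", "INDEX_ID")
#     retriever = MetalRetriever(client=metal, params={"limit": 2})
#     docs = retriever.get_relevant_documents("What is the capital of France?")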
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~load~dump.py | import json
from typing import Any, Dict
from langchain.load.serializable import Serializable, to_json_not_implemented
def default(obj: Any) -> Any:
"""Return a default value for a Serializable object or
a SerializedNotImplemented object."""
if isinstance(obj, Serializable):
return obj.to_json()
else:
return to_json_not_implemented(obj)
def dumps(obj: Any, *, pretty: bool = False) -> str:
"""Return a json string representation of an object."""
if pretty:
return json.dumps(obj, default=default, indent=2, ensure_ascii=False)
else:
return json.dumps(obj, default=default, ensure_ascii=False)
def dumpd(obj: Any) -> Dict[str, Any]:
"""Return a json dict representation of an object."""
return json.loads(dumps(obj))
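# Illustrative sketch (not part of the original module): any Serializable object,
# e.g. a prompt template, can be serialized with dumps/dumpd.
#
#     from langchain.prompts import PromptTemplate
#
#     prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
#     as_json = dumps(prompt, pretty=True)   # JSON string
#     as_dict = dumpd(prompt)                # plain dict with the same structure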
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~opaqueprompts.py | import logging
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema.language_model import BaseLanguageModel
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class OpaquePrompts(LLM):
"""An LLM wrapper that uses OpaquePrompts to sanitize prompts.
    Wraps another LLM and sanitizes prompts before passing them to the LLM, then
de-sanitizes the response.
To use, you should have the ``opaqueprompts`` python package installed,
and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with
your API key, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import OpaquePrompts
from langchain.chat_models import ChatOpenAI
op_llm = OpaquePrompts(base_llm=ChatOpenAI())
"""
base_llm: BaseLanguageModel
"""The base LLM to use."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validates that the OpaquePrompts API key and the Python package exist."""
try:
import opaqueprompts as op
except ImportError:
raise ImportError(
"Could not import the `opaqueprompts` Python package, "
"please install it with `pip install opaqueprompts`."
)
if op.__package__ is None:
raise ValueError(
"Could not properly import `opaqueprompts`, "
"opaqueprompts.__package__ is None."
)
api_key = get_from_dict_or_env(
values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default=""
)
if not api_key:
raise ValueError(
"Could not find OPAQUEPROMPTS_API_KEY in the environment. "
"Please set it to your OpaquePrompts API key."
"You can get it by creating an account on the OpaquePrompts website: "
"https://opaqueprompts.opaque.co/ ."
)
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call base LLM with sanitization before and de-sanitization after.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = op_llm("Tell me a joke.")
"""
import opaqueprompts as op
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
# sanitize the prompt by replacing the sensitive information with a placeholder
sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]
# TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
# call the LLM with the sanitized prompt and get the response
llm_response = self.base_llm.predict(
sanitized_prompt_value_str,
stop=stop,
)
# desanitize the response by restoring the original sensitive information
desanitize_response: op.DesanitizeResponse = op.desanitize(
llm_response,
secure_context=sanitize_response.secure_context,
)
return desanitize_response.desanitized_text
@property
def _llm_type(self) -> str:
"""Return type of LLM.
This is an override of the base class method.
"""
return "opaqueprompts"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~org_mode.py | from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredOrgModeLoader(UnstructuredFileLoader):
"""Load `Org-Mode` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredOrgModeLoader
loader = UnstructuredOrgModeLoader(
"example.org", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-org
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Args:
file_path: The path to the file to load.
mode: The mode to load the file from. Default is "single".
**unstructured_kwargs: Any additional keyword arguments to pass
to the unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.7.9")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.org import partition_org
return partition_org(filename=self.file_path, **self.unstructured_kwargs)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~cerebriumai.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class CerebriumAI(LLM):
"""CerebriumAI large language models.
To use, you should have the ``cerebrium`` python package installed, and the
environment variable ``CEREBRIUMAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import CerebriumAI
cerebrium = CerebriumAI(endpoint_url="")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
cerebriumai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cerebriumai_api_key = get_from_dict_or_env(
values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY"
)
values["cerebriumai_api_key"] = cerebriumai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cerebriumai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to CerebriumAI endpoint."""
try:
from cerebrium import model_api_request
except ImportError:
raise ValueError(
"Could not import cerebrium python package. "
"Please install it with `pip install cerebrium`."
)
params = self.model_kwargs or {}
response = model_api_request(
self.endpoint_url,
{"prompt": prompt, **params, **kwargs},
self.cerebriumai_api_key,
)
text = response["data"]["result"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~question_answering~map_rerank_prompt.py | # flake8: noqa
from langchain.output_parsers.regex import RegexParser
from langchain.prompts import PromptTemplate
output_parser = RegexParser(
regex=r"(.*?)\nScore: (\d*)",
output_keys=["answer", "score"],
)
prompt_template = """Используй следующие части контекста, чтобы ответить на вопрос в конце. Если ты не знаешь ответа, просто скажи, что не знаешь, не пытайся придумать ответ.
В дополнение к ответу, также верни оценку того, насколько полно он отвечает на вопрос пользователя. Это должно быть в следующем формате:
Question: [вопрос здесь]
Полезный ответ: [ответ здесь]
Оценка: [оценка от 0 до 100]
Как определить оценку:
- Чем выше, тем лучше ответ
- Лучше отвечает полностью на заданный вопрос, с достаточным уровнем детализации
- Если ты не знаешь ответа на основе контекста, то это должна быть оценка 0
- Не будь чересчур уверенным!
Пример #1
Контекст:
---------
Яблоки красные
---------
Вопрос: какого цвета яблоки?
Полезный ответ: красные
Оценка: 100
Пример #2
Контекст:
---------
была ночь, и свидетель забыл свои очки. он не был уверен, была ли это спортивная машина или внедорожник
---------
Вопрос: какого типа была машина?
Полезный ответ: спортивная машина или внедорожник
Оценка: 60
Пример #3
Контекст:
---------
Груши бывают красные или оранжевые
---------
Question: какого цвета яблоки?
Полезный ответ: Этот документ не отвечает на вопрос
Оценка: 0
Начнем!
Контекст:
---------
{context}
---------
Question: {question}
Полезный ответ:"""
PROMPT = PromptTemplate(
template=prompt_template,
input_variables=["context", "question"],
output_parser=output_parser,
)
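# Illustrative sketch (not part of the original module): a model reply in the format
# requested above ("<ответ>\nОценка: <оценка>") is split by the output parser into
# the "answer" and "score" keys.
#
#     parsed = output_parser.parse("красные\nОценка: 100")
#     # -> {"answer": "красные", "score": "100"}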
| [
"Используй следующие части контекста, чтобы ответить на вопрос в конце. Если ты не знаешь ответа, просто скажи, что не знаешь, не пытайся придумать ответ.\n\nВ дополнение к ответу, также верни оценку того, насколько полно он отвечает на вопрос пользователя. Это должно быть в следующем формате:\n\nQuestion: [вопрос здесь]\nПолезный ответ: [ответ здесь]\nОценка: [оценка от 0 до 100]\n\nКак определить оценку:\n- Чем выше, тем лучше ответ\n- Лучше отвечает полностью на заданный вопрос, с достаточным уровнем детализации\n- Если ты не знаешь ответа на основе контекста, то это должна быть оценка 0\n- Не будь чересчур уверенным!\n\nПример #1\n\nКонтекст:\n---------\nЯблоки красные\n---------\nВопрос: какого цвета яблоки?\nПолезный ответ: красные\nОценка: 100\n\nПример #2\n\nКонтекст:\n---------\nбыла ночь, и свидетель забыл свои очки. он не был уверен, была ли это спортивная машина или внедорожник\n---------\nВопрос: какого типа была машина?\nПолезный ответ: спортивная машина или внедорожник\nОценка: 60\n\nПример #3\n\nКонтекст:\n---------\nГруши бывают красные или оранжевые\n---------\nQuestion: какого цвета яблоки?\nПолезный ответ: Этот документ не отвечает на вопрос\nОценка: 0\n\nНачнем!\n\nКонтекст:\n---------\n{context}\n---------\nQuestion: {question}\nПолезный ответ:",
"question",
"context"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~document_loaders~test_url.py | import pytest
from langchain.document_loaders import UnstructuredURLLoader
def test_continue_on_failure_true() -> None:
"""Test exception is not raised when continue_on_failure=True."""
loader = UnstructuredURLLoader(["badurl.foobar"])
loader.load()
def test_continue_on_failure_false() -> None:
"""Test exception is raised when continue_on_failure=False."""
loader = UnstructuredURLLoader(["badurl.foobar"], continue_on_failure=False)
with pytest.raises(Exception):
loader.load()
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~marqo.py | from __future__ import annotations
import json
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
if TYPE_CHECKING:
import marqo
class Marqo(VectorStore):
"""`Marqo` vector store.
Marqo indexes have their own models associated with them to generate your
    embeddings. This means that you can select from a range of different models
and also use CLIP models to create multimodal indexes
with images and text together.
    Marqo also supports more advanced queries with multiple weighted terms, see
https://docs.marqo.ai/latest/#searching-using-weights-in-queries.
This class can flexibly take strings or dictionaries for weighted queries
in its similarity search methods.
To use, you should have the `marqo` python package installed, you can do this with
`pip install marqo`.
Example:
.. code-block:: python
import marqo
from langchain.vectorstores import Marqo
client = marqo.Client(url=os.environ["MARQO_URL"], ...)
vectorstore = Marqo(client, index_name)
"""
def __init__(
self,
client: marqo.Client,
index_name: str,
add_documents_settings: Optional[Dict[str, Any]] = None,
searchable_attributes: Optional[List[str]] = None,
page_content_builder: Optional[Callable[[Dict[str, Any]], str]] = None,
):
"""Initialize with Marqo client."""
try:
import marqo
except ImportError:
raise ImportError(
"Could not import marqo python package. "
"Please install it with `pip install marqo`."
)
if not isinstance(client, marqo.Client):
raise ValueError(
f"client should be an instance of marqo.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._add_documents_settings = (
{} if add_documents_settings is None else add_documents_settings
)
self._searchable_attributes = searchable_attributes
self.page_content_builder = page_content_builder
self.tensor_fields = ["text"]
self._document_batch_size = 1024
@property
def embeddings(self) -> Optional[Embeddings]:
return None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Marqo.
You can either have marqo generate ids for each document or you can provide
your own by including a "_id" field in the metadata objects.
Args:
            texts (Iterable[str]): an iterator of texts - assumed to preserve an
order that matches the metadatas.
metadatas (Optional[List[dict]], optional): a list of metadatas.
Raises:
ValueError: if metadatas is provided and the number of metadatas differs
from the number of texts.
Returns:
List[str]: The list of ids that were added.
"""
if self._client.index(self._index_name).get_settings()["index_defaults"][
"treat_urls_and_pointers_as_images"
]:
raise ValueError(
"Marqo.add_texts is disabled for multimodal indexes. To add documents "
"with a multimodal index use the Python client for Marqo directly."
)
documents: List[Dict[str, str]] = []
num_docs = 0
for i, text in enumerate(texts):
doc = {
"text": text,
"metadata": json.dumps(metadatas[i]) if metadatas else json.dumps({}),
}
documents.append(doc)
num_docs += 1
ids = []
for i in range(0, num_docs, self._document_batch_size):
response = self._client.index(self._index_name).add_documents(
documents[i : i + self._document_batch_size],
tensor_fields=self.tensor_fields,
**self._add_documents_settings,
)
if response["errors"]:
err_msg = (
f"Error in upload for documents in index range [{i},"
f"{i + self._document_batch_size}], "
f"check Marqo logs."
)
raise RuntimeError(err_msg)
ids += [item["_id"] for item in response["items"]]
return ids
def similarity_search(
self,
query: Union[str, Dict[str, float]],
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Search the marqo index for the most similar documents.
Args:
query (Union[str, Dict[str, float]]): The query for the search, either
as a string or a weighted query.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
List[Document]: k documents ordered from best to worst match.
"""
results = self.marqo_similarity_search(query=query, k=k)
documents = self._construct_documents_from_results_without_score(results)
return documents
def similarity_search_with_score(
self,
query: Union[str, Dict[str, float]],
k: int = 4,
) -> List[Tuple[Document, float]]:
"""Return documents from Marqo that are similar to the query as well
as their scores.
Args:
query (str): The query to search with, either as a string or a weighted
query.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
List[Tuple[Document, float]]: The matching documents and their scores,
ordered by descending score.
"""
results = self.marqo_similarity_search(query=query, k=k)
scored_documents = self._construct_documents_from_results_with_score(results)
return scored_documents
def bulk_similarity_search(
self,
queries: Iterable[Union[str, Dict[str, float]]],
k: int = 4,
**kwargs: Any,
) -> List[List[Document]]:
"""Search the marqo index for the most similar documents in bulk with multiple
queries.
Args:
queries (Iterable[Union[str, Dict[str, float]]]): An iterable of queries to
execute in bulk, queries in the list can be strings or dictionaries of
weighted queries.
k (int, optional): The number of documents to return for each query.
Defaults to 4.
Returns:
List[List[Document]]: A list of results for each query.
"""
bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k)
bulk_documents: List[List[Document]] = []
for results in bulk_results["result"]:
documents = self._construct_documents_from_results_without_score(results)
bulk_documents.append(documents)
return bulk_documents
def bulk_similarity_search_with_score(
self,
queries: Iterable[Union[str, Dict[str, float]]],
k: int = 4,
**kwargs: Any,
) -> List[List[Tuple[Document, float]]]:
"""Return documents from Marqo that are similar to the query as well as
their scores using a batch of queries.
Args:
            queries (Iterable[Union[str, Dict[str, float]]]): An iterable of queries
to execute in bulk, queries in the list can be strings or dictionaries
of weighted queries.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
List[Tuple[Document, float]]: A list of lists of the matching
documents and their scores for each query
"""
bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k)
bulk_documents: List[List[Tuple[Document, float]]] = []
for results in bulk_results["result"]:
documents = self._construct_documents_from_results_with_score(results)
bulk_documents.append(documents)
return bulk_documents
def _construct_documents_from_results_with_score(
self, results: Dict[str, List[Dict[str, str]]]
) -> List[Tuple[Document, Any]]:
"""Helper to convert Marqo results into documents.
Args:
results (List[dict]): A marqo results object with the 'hits'.
include_scores (bool, optional): Include scores alongside documents.
Defaults to False.
Returns:
Union[List[Document], List[Tuple[Document, float]]]: The documents or
document score pairs if `include_scores` is true.
"""
documents: List[Tuple[Document, Any]] = []
for res in results["hits"]:
if self.page_content_builder is None:
text = res["text"]
else:
text = self.page_content_builder(res)
metadata = json.loads(res.get("metadata", "{}"))
documents.append(
(Document(page_content=text, metadata=metadata), res["_score"])
)
return documents
def _construct_documents_from_results_without_score(
self, results: Dict[str, List[Dict[str, str]]]
) -> List[Document]:
"""Helper to convert Marqo results into documents.
Args:
results (List[dict]): A marqo results object with the 'hits'.
include_scores (bool, optional): Include scores alongside documents.
Defaults to False.
Returns:
Union[List[Document], List[Tuple[Document, float]]]: The documents or
document score pairs if `include_scores` is true.
"""
documents: List[Document] = []
for res in results["hits"]:
if self.page_content_builder is None:
text = res["text"]
else:
text = self.page_content_builder(res)
metadata = json.loads(res.get("metadata", "{}"))
documents.append(Document(page_content=text, metadata=metadata))
return documents
def marqo_similarity_search(
self,
query: Union[str, Dict[str, float]],
k: int = 4,
) -> Dict[str, List[Dict[str, str]]]:
"""Return documents from Marqo exposing Marqo's output directly
Args:
query (str): The query to search with.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
            List[Dict[str, Any]]: The hits from marqo.
"""
results = self._client.index(self._index_name).search(
q=query, searchable_attributes=self._searchable_attributes, limit=k
)
return results
def marqo_bulk_similarity_search(
self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4
) -> Dict[str, List[Dict[str, List[Dict[str, str]]]]]:
"""Return documents from Marqo using a bulk search, exposes Marqo's
output directly
Args:
queries (Iterable[Union[str, Dict[str, float]]]): A list of queries.
k (int, optional): The number of documents to return for each query.
Defaults to 4.
Returns:
            Dict[str, List[Dict[str, List[Dict[str, str]]]]]: A bulk search results
object
"""
bulk_results = {
"result": [
self._client.index(self._index_name).search(
q=query, searchable_attributes=self._searchable_attributes, limit=k
)
for query in queries
]
}
return bulk_results
@classmethod
def from_documents(
cls: Type[Marqo],
documents: List[Document],
embedding: Union[Embeddings, None] = None,
**kwargs: Any,
) -> Marqo:
"""Return VectorStore initialized from documents. Note that Marqo does not
need embeddings, we retain the parameter to adhere to the Liskov substitution
principle.
Args:
documents (List[Document]): Input documents
embedding (Any, optional): Embeddings (not required). Defaults to None.
Returns:
VectorStore: A Marqo vectorstore
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts, metadatas=metadatas, **kwargs)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Any = None,
metadatas: Optional[List[dict]] = None,
index_name: str = "",
url: str = "http://localhost:8882",
api_key: str = "",
add_documents_settings: Optional[Dict[str, Any]] = None,
searchable_attributes: Optional[List[str]] = None,
page_content_builder: Optional[Callable[[Dict[str, str]], str]] = None,
index_settings: Optional[Dict[str, Any]] = None,
verbose: bool = True,
**kwargs: Any,
) -> Marqo:
"""Return Marqo initialized from texts. Note that Marqo does not need
embeddings, we retain the parameter to adhere to the Liskov
substitution principle.
This is a quick way to get started with marqo - simply provide your texts and
metadatas and this will create an instance of the data store and index the
provided data.
To know the ids of your documents with this approach you will need to include
them in under the key "_id" in your metadatas for each text
Example:
.. code-block:: python
from langchain.vectorstores import Marqo
datastore = Marqo(texts=['text'], index_name='my-first-index',
url='http://localhost:8882')
Args:
texts (List[str]): A list of texts to index into marqo upon creation.
embedding (Any, optional): Embeddings (not required). Defaults to None.
index_name (str, optional): The name of the index to use, if none is
provided then one will be created with a UUID. Defaults to None.
url (str, optional): The URL for Marqo. Defaults to "http://localhost:8882".
api_key (str, optional): The API key for Marqo. Defaults to "".
            metadatas (Optional[List[dict]], optional): A list of metadatas, to
                accompany the texts. Defaults to None.
add_documents_settings (Optional[Dict[str, Any]], optional): Settings
for adding documents, see
https://docs.marqo.ai/0.0.16/API-Reference/documents/#query-parameters.
Defaults to {}.
index_settings (Optional[Dict[str, Any]], optional): Index settings if
the index doesn't exist, see
https://docs.marqo.ai/0.0.16/API-Reference/indexes/#index-defaults-object.
Defaults to {}.
Returns:
Marqo: An instance of the Marqo vector store
"""
try:
import marqo
except ImportError:
raise ImportError(
"Could not import marqo python package. "
"Please install it with `pip install marqo`."
)
if not index_name:
index_name = str(uuid.uuid4())
client = marqo.Client(url=url, api_key=api_key)
try:
client.create_index(index_name, settings_dict=index_settings or {})
if verbose:
print(f"Created {index_name} successfully.")
except Exception:
if verbose:
print(f"Index {index_name} exists.")
instance: Marqo = cls(
client,
index_name,
searchable_attributes=searchable_attributes,
add_documents_settings=add_documents_settings or {},
page_content_builder=page_content_builder,
)
instance.add_texts(texts, metadatas)
return instance
def get_indexes(self) -> List[Dict[str, str]]:
"""Helper to see your available indexes in marqo, useful if the
from_texts method was used without an index name specified
Returns:
List[Dict[str, str]]: The list of indexes
"""
return self._client.get_indexes()["results"]
def get_number_of_documents(self) -> int:
"""Helper to see the number of documents in the index
Returns:
int: The number of documents
"""
return self._client.index(self._index_name).get_stats()["numberOfDocuments"]
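# Illustrative usage sketch (not part of the original module): Marqo queries can be
# plain strings or weighted-term dictionaries, as described in the docstrings above.
# The index name and documents are made-up examples and assume a local Marqo server.
#
#     docsearch = Marqo.from_texts(["Smartphone", "Telephone"], index_name="demo-index")
#     docsearch.similarity_search("communication device", k=1)
#     docsearch.similarity_search_with_score(
#         {"communications device": 1.0, "technology post 2000": -0.3}, k=1
#     )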
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~litellm.py | """Wrapper around LiteLLM's model I/O library."""
from __future__ import annotations
import logging
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import (
BaseChatModel,
_agenerate_from_stream,
_generate_from_stream,
)
from langchain.llms.base import create_base_retry_decorator
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import (
ChatGeneration,
ChatResult,
)
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class ChatLiteLLMException(Exception):
"""Error with the `LiteLLM I/O` library"""
def _create_retry_decorator(
llm: ChatLiteLLM,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import openai
errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
# Fix for azure
# Also OpenAI returns None for tool invocations
content = _dict.get("content", "") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
return ChatMessage(content=_dict["content"], role=role)
async def acompletion_with_retry(
llm: ChatLiteLLM,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
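# Illustrative sketch (not part of the original module): the two converters above
# are inverses for the common roles.
#
#     _convert_message_to_dict(HumanMessage(content="hi"))
#     # -> {"role": "user", "content": "hi"}
#     _convert_dict_to_message({"role": "assistant", "content": "hello"})
#     # -> AIMessage(content="hello")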
class ChatLiteLLM(BaseChatModel):
"""A chat model that uses the LiteLLM API."""
client: Any #: :meta private:
model: str = "gpt-3.5-turbo"
model_name: Optional[str] = None
"""Model name to use."""
openai_api_key: Optional[str] = None
azure_api_key: Optional[str] = None
anthropic_api_key: Optional[str] = None
replicate_api_key: Optional[str] = None
cohere_api_key: Optional[str] = None
openrouter_api_key: Optional[str] = None
streaming: bool = False
api_base: Optional[str] = None
organization: Optional[str] = None
custom_llm_provider: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    temperature: Optional[float] = 1
    """Run inference with this temperature. Must be in the closed
    interval [0.0, 1.0]."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
top_p: Optional[float] = None
"""Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
top_k: Optional[int] = None
"""Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positive."""
n: int = 1
"""Number of chat completions to generate for each prompt. Note that the API may
not return the full n completions if duplicates are generated."""
max_tokens: int = 256
max_retries: int = 6
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
return {
"model": set_model_value,
"force_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
"custom_llm_provider": self.custom_llm_provider,
**self.model_kwargs,
}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the openai client."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
self.client.api_base = self.api_base
self.client.organization = self.organization
creds: Dict[str, Any] = {
"model": set_model_value,
"force_timeout": self.request_timeout,
}
return {**self._default_params, **creds}
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.completion(**kwargs)
return _completion_with_retry(**kwargs)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate api key, python package exists, temperature, top_p, and top_k."""
try:
import litellm
except ImportError:
raise ChatLiteLLMException(
"Could not import google.generativeai python package. "
"Please install it with `pip install google-generativeai`"
)
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY", default=""
)
values["azure_api_key"] = get_from_dict_or_env(
values, "azure_api_key", "AZURE_API_KEY", default=""
)
values["anthropic_api_key"] = get_from_dict_or_env(
values, "anthropic_api_key", "ANTHROPIC_API_KEY", default=""
)
values["replicate_api_key"] = get_from_dict_or_env(
values, "replicate_api_key", "REPLICATE_API_KEY", default=""
)
values["openrouter_api_key"] = get_from_dict_or_env(
values, "openrouter_api_key", "OPENROUTER_API_KEY", default=""
)
values["cohere_api_key"] = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY", default=""
)
values["huggingface_api_key"] = get_from_dict_or_env(
values, "huggingface_api_key", "HUGGINGFACE_API_KEY", default=""
)
values["together_ai_api_key"] = get_from_dict_or_env(
values, "together_ai_api_key", "TOGETHERAI_API_KEY", default=""
)
values["client"] = litellm
if values["temperature"] is not None and not 0 <= values["temperature"] <= 1:
raise ValueError("temperature must be in the range [0.0, 1.0]")
if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive")
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
token_usage = response.get("usage", {})
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
llm_output = {"token_usage": token_usage, "model": set_model_value}
return ChatResult(generations=generations, llm_output=llm_output)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
if len(chunk["choices"]) == 0:
continue
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
async for chunk in await acompletion_with_retry(
self, messages=message_dicts, run_manager=run_manager, **params
):
if len(chunk["choices"]) == 0:
continue
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
await run_manager.on_llm_new_token(chunk.content)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return await _agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = await acompletion_with_retry(
self, messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
set_model_value = self.model
if self.model_name is not None:
set_model_value = self.model_name
return {
"model": set_model_value,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"n": self.n,
}
@property
def _llm_type(self) -> str:
return "litellm-chat"
| [
"content"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~arthur_callback.py | """ArthurAI's Callback Handler."""
from __future__ import annotations
import os
import uuid
from collections import defaultdict
from datetime import datetime
from time import time
from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Optional
import numpy as np
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
if TYPE_CHECKING:
import arthurai
from arthurai.core.models import ArthurModel
PROMPT_TOKENS = "prompt_tokens"
COMPLETION_TOKENS = "completion_tokens"
TOKEN_USAGE = "token_usage"
FINISH_REASON = "finish_reason"
DURATION = "duration"
def _lazy_load_arthur() -> arthurai:
"""Lazy load Arthur."""
try:
import arthurai
except ImportError as e:
raise ImportError(
"To use the ArthurCallbackHandler you need the"
" `arthurai` package. Please install it with"
" `pip install arthurai`.",
e,
)
return arthurai
class ArthurCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs to Arthur platform.
Arthur helps enterprise teams optimize model operations
and performance at scale. The Arthur API tracks model
performance, explainability, and fairness across tabular,
NLP, and CV models. Our API is model- and platform-agnostic,
and continuously scales with complex and dynamic enterprise needs.
To learn more about Arthur, visit our website at
https://www.arthur.ai/ or read the Arthur docs at
https://docs.arthur.ai/
"""
def __init__(
self,
arthur_model: ArthurModel,
) -> None:
"""Initialize callback handler."""
super().__init__()
arthurai = _lazy_load_arthur()
Stage = arthurai.common.constants.Stage
ValueType = arthurai.common.constants.ValueType
self.arthur_model = arthur_model
# save the attributes of this model to be used when preparing
# inferences to log to Arthur in on_llm_end()
self.attr_names = set([a.name for a in self.arthur_model.get_attributes()])
self.input_attr = [
x
for x in self.arthur_model.get_attributes()
if x.stage == Stage.ModelPipelineInput
and x.value_type == ValueType.Unstructured_Text
][0].name
self.output_attr = [
x
for x in self.arthur_model.get_attributes()
if x.stage == Stage.PredictedValue
and x.value_type == ValueType.Unstructured_Text
][0].name
self.token_likelihood_attr = None
if (
len(
[
x
for x in self.arthur_model.get_attributes()
if x.value_type == ValueType.TokenLikelihoods
]
)
> 0
):
self.token_likelihood_attr = [
x
for x in self.arthur_model.get_attributes()
if x.value_type == ValueType.TokenLikelihoods
][0].name
self.run_map: DefaultDict[str, Any] = defaultdict(dict)
@classmethod
def from_credentials(
cls,
model_id: str,
arthur_url: Optional[str] = "https://app.arthur.ai",
arthur_login: Optional[str] = None,
arthur_password: Optional[str] = None,
) -> ArthurCallbackHandler:
"""Initialize callback handler from Arthur credentials.
Args:
model_id (str): The ID of the arthur model to log to.
arthur_url (str, optional): The URL of the Arthur instance to log to.
Defaults to "https://app.arthur.ai".
arthur_login (str, optional): The login to use to connect to Arthur.
Defaults to None.
arthur_password (str, optional): The password to use to connect to
Arthur. Defaults to None.
Returns:
ArthurCallbackHandler: The initialized callback handler.
"""
arthurai = _lazy_load_arthur()
ArthurAI = arthurai.ArthurAI
ResponseClientError = arthurai.common.exceptions.ResponseClientError
# connect to Arthur
if arthur_login is None:
try:
arthur_api_key = os.environ["ARTHUR_API_KEY"]
except KeyError:
raise ValueError(
"No Arthur authentication provided. Either give"
" a login to the ArthurCallbackHandler"
" or set an ARTHUR_API_KEY as an environment variable."
)
arthur = ArthurAI(url=arthur_url, access_key=arthur_api_key)
else:
if arthur_password is None:
arthur = ArthurAI(url=arthur_url, login=arthur_login)
else:
arthur = ArthurAI(
url=arthur_url, login=arthur_login, password=arthur_password
)
# get model from Arthur by the provided model ID
try:
arthur_model = arthur.get_model(model_id)
except ResponseClientError:
raise ValueError(
f"Was unable to retrieve model with id {model_id} from Arthur."
" Make sure the ID corresponds to a model that is currently"
" registered with your Arthur account."
)
return cls(arthur_model)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""On LLM start, save the input prompts"""
run_id = kwargs["run_id"]
self.run_map[run_id]["input_texts"] = prompts
self.run_map[run_id]["start_time"] = time()
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""On LLM end, send data to Arthur."""
try:
import pytz # type: ignore[import]
except ImportError as e:
raise ImportError(
"Could not import pytz. Please install it with 'pip install pytz'."
) from e
run_id = kwargs["run_id"]
# get the run params from this run ID,
# or raise an error if this run ID has no corresponding metadata in self.run_map
try:
run_map_data = self.run_map[run_id]
except KeyError as e:
raise KeyError(
"This function has been called with a run_id"
" that was never registered in on_llm_start()."
" Restart and try running the LLM again"
) from e
# mark the duration time between on_llm_start() and on_llm_end()
time_from_start_to_end = time() - run_map_data["start_time"]
# create inferences to log to Arthur
inferences = []
for i, generations in enumerate(response.generations):
for generation in generations:
inference = {
"partner_inference_id": str(uuid.uuid4()),
"inference_timestamp": datetime.now(tz=pytz.UTC),
self.input_attr: run_map_data["input_texts"][i],
self.output_attr: generation.text,
}
if generation.generation_info is not None:
# add finish reason to the inference
# if generation info contains a finish reason and
# if the ArthurModel was registered to monitor finish_reason
if (
FINISH_REASON in generation.generation_info
and FINISH_REASON in self.attr_names
):
inference[FINISH_REASON] = generation.generation_info[
FINISH_REASON
]
# add token likelihoods data to the inference if the ArthurModel
# was registered to monitor token likelihoods
logprobs_data = generation.generation_info.get("logprobs")
if (
logprobs_data is not None
and self.token_likelihood_attr is not None
):
logprobs = logprobs_data["top_logprobs"]
likelihoods = [
{k: np.exp(v) for k, v in logprobs[i].items()}
for i in range(len(logprobs))
]
inference[self.token_likelihood_attr] = likelihoods
# add token usage counts to the inference if the
# ArthurModel was registered to monitor token usage
if (
isinstance(response.llm_output, dict)
and TOKEN_USAGE in response.llm_output
):
token_usage = response.llm_output[TOKEN_USAGE]
if (
PROMPT_TOKENS in token_usage
and PROMPT_TOKENS in self.attr_names
):
inference[PROMPT_TOKENS] = token_usage[PROMPT_TOKENS]
if (
COMPLETION_TOKENS in token_usage
and COMPLETION_TOKENS in self.attr_names
):
inference[COMPLETION_TOKENS] = token_usage[COMPLETION_TOKENS]
# add inference duration to the inference if the ArthurModel
# was registered to monitor inference duration
if DURATION in self.attr_names:
inference[DURATION] = time_from_start_to_end
inferences.append(inference)
# send inferences to arthur
self.arthur_model.send_inferences(inferences)
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""On chain start, do nothing."""
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""On chain end, do nothing."""
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM outputs an error."""
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""On new token, pass."""
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM chain outputs an error."""
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing"""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing"""
| [
"prompt_tokens"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~notebook.py | """Loads .ipynb notebook files."""
import json
from pathlib import Path
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def concatenate_cells(
cell: dict, include_outputs: bool, max_output_length: int, traceback: bool
) -> str:
"""Combine cells information in a readable format ready to be used.
Args:
cell: A dictionary
include_outputs: Whether to include the outputs of the cell.
max_output_length: Maximum length of the output to be displayed.
traceback: Whether to return a traceback of the error.
Returns:
A string with the cell information.
"""
cell_type = cell["cell_type"]
source = cell["source"]
output = cell["outputs"]
if include_outputs and cell_type == "code" and output:
if "ename" in output[0].keys():
error_name = output[0]["ename"]
error_value = output[0]["evalue"]
if traceback:
traceback = output[0]["traceback"]
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f" with description '{error_value}'\n"
f"and traceback '{traceback}'\n\n"
)
else:
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f"with description '{error_value}'\n\n"
)
elif output[0]["output_type"] == "stream":
output = output[0]["text"]
min_output = min(max_output_length, len(output))
return (
f"'{cell_type}' cell: '{source}'\n with "
f"output: '{output[:min_output]}'\n\n"
)
else:
return f"'{cell_type}' cell: '{source}'\n\n"
return ""
def remove_newlines(x: Any) -> Any:
"""Recursively remove newlines, no matter the data structure they are stored in."""
import pandas as pd
if isinstance(x, str):
return x.replace("\n", "")
elif isinstance(x, list):
return [remove_newlines(elem) for elem in x]
elif isinstance(x, pd.DataFrame):
return x.applymap(remove_newlines)
else:
return x
class NotebookLoader(BaseLoader):
"""Load `Jupyter notebook` (.ipynb) files."""
def __init__(
self,
path: str,
include_outputs: bool = False,
max_output_length: int = 10,
remove_newline: bool = False,
traceback: bool = False,
):
"""Initialize with a path.
Args:
path: The path to load the notebook from.
include_outputs: Whether to include the outputs of the cell.
Defaults to False.
max_output_length: Maximum length of the output to be displayed.
Defaults to 10.
remove_newline: Whether to remove newlines from the notebook.
Defaults to False.
traceback: Whether to return a traceback of the error.
Defaults to False.
"""
self.file_path = path
self.include_outputs = include_outputs
self.max_output_length = max_output_length
self.remove_newline = remove_newline
self.traceback = traceback
def load(
self,
) -> List[Document]:
"""Load documents."""
try:
import pandas as pd
except ImportError:
raise ImportError(
"pandas is needed for Notebook Loader, "
"please install with `pip install pandas`"
)
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
data = pd.json_normalize(d["cells"])
filtered_data = data[["cell_type", "source", "outputs"]]
if self.remove_newline:
filtered_data = filtered_data.applymap(remove_newlines)
text = filtered_data.apply(
lambda x: concatenate_cells(
x, self.include_outputs, self.max_output_length, self.traceback
),
axis=1,
).str.cat(sep=" ")
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
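# --- Illustrative usage sketch (not part of the original module) ---
# Loads a notebook and keeps up to 20 characters of each code cell's output.
# The notebook path is a placeholder assumption.
if __name__ == "__main__":
    loader = NotebookLoader(
        "example_data/notebook.ipynb",  # placeholder path
        include_outputs=True,
        max_output_length=20,
        remove_newline=True,
    )
    docs = loader.load()
    # A single Document is returned, with every cell concatenated into one string.
    print(docs[0].metadata["source"], len(docs[0].page_content))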
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~pubmed.py | from typing import Iterator, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.pubmed import PubMedAPIWrapper
class PubMedLoader(BaseLoader):
"""Load from the `PubMed` biomedical library.
Attributes:
query: The query to be passed to the PubMed API.
load_max_docs: The maximum number of documents to load.
"""
def __init__(
self,
query: str,
load_max_docs: Optional[int] = 3,
):
"""Initialize the PubMedLoader.
Args:
query: The query to be passed to the PubMed API.
load_max_docs: The maximum number of documents to load.
Defaults to 3.
"""
self.query = query
self.load_max_docs = load_max_docs
self._client = PubMedAPIWrapper(
top_k_results=load_max_docs,
)
def load(self) -> List[Document]:
return list(self._client.lazy_load_docs(self.query))
def lazy_load(self) -> Iterator[Document]:
for doc in self._client.lazy_load_docs(self.query):
yield doc
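# --- Illustrative usage sketch (not part of the original module) ---
# The query below is a placeholder; iterating lazy_load() performs a live
# PubMed API request and yields at most load_max_docs Documents.
if __name__ == "__main__":
    loader = PubMedLoader(query="chatgpt", load_max_docs=2)
    for doc in loader.lazy_load():
        print(doc.metadata, doc.page_content[:80])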
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~openapi~utils~api_models.py | """Pydantic models for parsing an OpenAPI spec."""
from __future__ import annotations
import logging
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.openapi.utils.openapi_utils import HTTPVerb, OpenAPISpec
logger = logging.getLogger(__name__)
PRIMITIVE_TYPES = {
"integer": int,
"number": float,
"string": str,
"boolean": bool,
"array": List,
"object": Dict,
"null": None,
}
# See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#parameterIn
# for more info.
class APIPropertyLocation(Enum):
"""The location of the property."""
QUERY = "query"
PATH = "path"
HEADER = "header"
COOKIE = "cookie" # Not yet supported
@classmethod
def from_str(cls, location: str) -> "APIPropertyLocation":
"""Parse an APIPropertyLocation."""
try:
return cls(location)
except ValueError:
raise ValueError(
f"Invalid APIPropertyLocation. Valid values are {cls.__members__}"
)
_SUPPORTED_MEDIA_TYPES = ("application/json",)
SUPPORTED_LOCATIONS = {
APIPropertyLocation.QUERY,
APIPropertyLocation.PATH,
}
INVALID_LOCATION_TEMPL = (
'Unsupported APIPropertyLocation "{location}"'
" for parameter {name}. "
+ f"Valid values are {[loc.value for loc in SUPPORTED_LOCATIONS]}"
)
SCHEMA_TYPE = Union[str, Type, tuple, None, Enum]
class APIPropertyBase(BaseModel):
"""Base model for an API property."""
# The name of the parameter is required and is case-sensitive.
# If "in" is "path", the "name" field must correspond to a template expression
# within the path field in the Paths Object.
# If "in" is "header" and the "name" field is "Accept", "Content-Type",
# or "Authorization", the parameter definition is ignored.
# For all other cases, the "name" corresponds to the parameter
# name used by the "in" property.
name: str = Field(alias="name")
"""The name of the property."""
required: bool = Field(alias="required")
"""Whether the property is required."""
type: SCHEMA_TYPE = Field(alias="type")
"""The type of the property.
Either a primitive type, a component/parameter type,
or an array or 'object' (dict) of the above."""
default: Optional[Any] = Field(alias="default", default=None)
"""The default value of the property."""
description: Optional[str] = Field(alias="description", default=None)
"""The description of the property."""
if TYPE_CHECKING:
from openapi_pydantic import (
MediaType,
Parameter,
RequestBody,
Schema,
)
class APIProperty(APIPropertyBase):
"""A model for a property in the query, path, header, or cookie params."""
location: APIPropertyLocation = Field(alias="location")
"""The path/how it's being passed to the endpoint."""
@staticmethod
def _cast_schema_list_type(
schema: Schema,
) -> Optional[Union[str, Tuple[str, ...]]]:
type_ = schema.type
if not isinstance(type_, list):
return type_
else:
return tuple(type_)
@staticmethod
def _get_schema_type_for_enum(parameter: Parameter, schema: Schema) -> Enum:
"""Get the schema type when the parameter is an enum."""
param_name = f"{parameter.name}Enum"
return Enum(param_name, {str(v): v for v in schema.enum})
@staticmethod
def _get_schema_type_for_array(
schema: Schema,
) -> Optional[Union[str, Tuple[str, ...]]]:
from openapi_pydantic import (
Reference,
Schema,
)
items = schema.items
if isinstance(items, Schema):
schema_type = APIProperty._cast_schema_list_type(items)
elif isinstance(items, Reference):
ref_name = items.ref.split("/")[-1]
schema_type = ref_name  # TODO: Add ref definitions to make this valid
else:
raise ValueError(f"Unsupported array items: {items}")
if isinstance(schema_type, str):
# TODO: recurse
schema_type = (schema_type,)
return schema_type
@staticmethod
def _get_schema_type(parameter: Parameter, schema: Optional[Schema]) -> SCHEMA_TYPE:
if schema is None:
return None
schema_type: SCHEMA_TYPE = APIProperty._cast_schema_list_type(schema)
if schema_type == "array":
schema_type = APIProperty._get_schema_type_for_array(schema)
elif schema_type == "object":
# TODO: Resolve array and object types to components.
raise NotImplementedError("Objects not yet supported")
elif schema_type in PRIMITIVE_TYPES:
if schema.enum:
schema_type = APIProperty._get_schema_type_for_enum(parameter, schema)
else:
# Directly use the primitive type
pass
else:
raise NotImplementedError(f"Unsupported type: {schema_type}")
return schema_type
@staticmethod
def _validate_location(location: APIPropertyLocation, name: str) -> None:
if location not in SUPPORTED_LOCATIONS:
raise NotImplementedError(
INVALID_LOCATION_TEMPL.format(location=location, name=name)
)
@staticmethod
def _validate_content(content: Optional[Dict[str, MediaType]]) -> None:
if content:
raise ValueError(
"API Properties with media content not supported. "
"Media content only supported within APIRequestBodyProperty's"
)
@staticmethod
def _get_schema(parameter: Parameter, spec: OpenAPISpec) -> Optional[Schema]:
from openapi_pydantic import (
Reference,
Schema,
)
schema = parameter.param_schema
if isinstance(schema, Reference):
schema = spec.get_referenced_schema(schema)
elif schema is None:
return None
elif not isinstance(schema, Schema):
raise ValueError(f"Error dereferencing schema: {schema}")
return schema
@staticmethod
def is_supported_location(location: str) -> bool:
"""Return whether the provided location is supported."""
try:
return APIPropertyLocation.from_str(location) in SUPPORTED_LOCATIONS
except ValueError:
return False
@classmethod
def from_parameter(cls, parameter: Parameter, spec: OpenAPISpec) -> "APIProperty":
"""Instantiate from an OpenAPI Parameter."""
location = APIPropertyLocation.from_str(parameter.param_in)
cls._validate_location(
location,
parameter.name,
)
cls._validate_content(parameter.content)
schema = cls._get_schema(parameter, spec)
schema_type = cls._get_schema_type(parameter, schema)
default_val = schema.default if schema is not None else None
return cls(
name=parameter.name,
location=location,
default=default_val,
description=parameter.description,
required=parameter.required,
type=schema_type,
)
class APIRequestBodyProperty(APIPropertyBase):
"""A model for a request body property."""
properties: List["APIRequestBodyProperty"] = Field(alias="properties")
"""The sub-properties of the property."""
# This is useful for handling nested property cycles.
# We can define separate types in that case.
references_used: List[str] = Field(alias="references_used")
"""The references used by the property."""
@classmethod
def _process_object_schema(
cls, schema: Schema, spec: OpenAPISpec, references_used: List[str]
) -> Tuple[Union[str, List[str], None], List["APIRequestBodyProperty"]]:
from openapi_pydantic import (
Reference,
)
properties = []
required_props = schema.required or []
if schema.properties is None:
raise ValueError(
f"No properties found when processing object schema: {schema}"
)
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
ref_name = prop_schema.ref.split("/")[-1]
if ref_name not in references_used:
references_used.append(ref_name)
prop_schema = spec.get_referenced_schema(prop_schema)
else:
continue
properties.append(
cls.from_schema(
schema=prop_schema,
name=prop_name,
required=prop_name in required_props,
spec=spec,
references_used=references_used,
)
)
return schema.type, properties
@classmethod
def _process_array_schema(
cls,
schema: Schema,
name: str,
spec: OpenAPISpec,
references_used: List[str],
) -> str:
from openapi_pydantic import Reference, Schema
items = schema.items
if items is not None:
if isinstance(items, Reference):
ref_name = items.ref.split("/")[-1]
if ref_name not in references_used:
references_used.append(ref_name)
items = spec.get_referenced_schema(items)
else:
pass
return f"Array<{ref_name}>"
else:
pass
if isinstance(items, Schema):
array_type = cls.from_schema(
schema=items,
name=f"{name}Item",
required=True, # TODO: Add required
spec=spec,
references_used=references_used,
)
return f"Array<{array_type.type}>"
return "array"
@classmethod
def from_schema(
cls,
schema: Schema,
name: str,
required: bool,
spec: OpenAPISpec,
references_used: Optional[List[str]] = None,
) -> "APIRequestBodyProperty":
"""Recursively populate from an OpenAPI Schema."""
if references_used is None:
references_used = []
schema_type = schema.type
properties: List[APIRequestBodyProperty] = []
if schema_type == "object" and schema.properties:
schema_type, properties = cls._process_object_schema(
schema, spec, references_used
)
elif schema_type == "array":
schema_type = cls._process_array_schema(schema, name, spec, references_used)
elif schema_type in PRIMITIVE_TYPES:
# Use the primitive type directly
pass
elif schema_type is None:
# No typing specified/parsed. Will map to 'any'
pass
else:
raise ValueError(f"Unsupported type: {schema_type}")
return cls(
name=name,
required=required,
type=schema_type,
default=schema.default,
description=schema.description,
properties=properties,
references_used=references_used,
)
# class APIRequestBodyProperty(APIPropertyBase):
class APIRequestBody(BaseModel):
"""A model for a request body."""
description: Optional[str] = Field(alias="description")
"""The description of the request body."""
properties: List[APIRequestBodyProperty] = Field(alias="properties")
# E.g., application/json - we only support JSON at the moment.
media_type: str = Field(alias="media_type")
"""The media type of the request body."""
@classmethod
def _process_supported_media_type(
cls,
media_type_obj: MediaType,
spec: OpenAPISpec,
) -> List[APIRequestBodyProperty]:
"""Process the media type of the request body."""
from openapi_pydantic import Reference
references_used = []
schema = media_type_obj.media_type_schema
if isinstance(schema, Reference):
references_used.append(schema.ref.split("/")[-1])
schema = spec.get_referenced_schema(schema)
if schema is None:
raise ValueError(
f"Could not resolve schema for media type: {media_type_obj}"
)
api_request_body_properties = []
required_properties = schema.required or []
if schema.type == "object" and schema.properties:
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
prop_schema = spec.get_referenced_schema(prop_schema)
api_request_body_properties.append(
APIRequestBodyProperty.from_schema(
schema=prop_schema,
name=prop_name,
required=prop_name in required_properties,
spec=spec,
)
)
else:
api_request_body_properties.append(
APIRequestBodyProperty(
name="body",
required=True,
type=schema.type,
default=schema.default,
description=schema.description,
properties=[],
references_used=references_used,
)
)
return api_request_body_properties
@classmethod
def from_request_body(
cls, request_body: RequestBody, spec: OpenAPISpec
) -> "APIRequestBody":
"""Instantiate from an OpenAPI RequestBody."""
properties = []
for media_type, media_type_obj in request_body.content.items():
if media_type not in _SUPPORTED_MEDIA_TYPES:
continue
api_request_body_properties = cls._process_supported_media_type(
media_type_obj,
spec,
)
properties.extend(api_request_body_properties)
return cls(
description=request_body.description,
properties=properties,
media_type=media_type,
)
# class APIRequestBodyProperty(APIPropertyBase):
# class APIRequestBody(BaseModel):
class APIOperation(BaseModel):
"""A model for a single API operation."""
operation_id: str = Field(alias="operation_id")
"""The unique identifier of the operation."""
description: Optional[str] = Field(alias="description")
"""The description of the operation."""
base_url: str = Field(alias="base_url")
"""The base URL of the operation."""
path: str = Field(alias="path")
"""The path of the operation."""
method: HTTPVerb = Field(alias="method")
"""The HTTP method of the operation."""
properties: Sequence[APIProperty] = Field(alias="properties")
# TODO: Add parse in used components to be able to specify what type of
# referenced object it is.
# """The properties of the operation."""
# components: Dict[str, BaseModel] = Field(alias="components")
request_body: Optional[APIRequestBody] = Field(alias="request_body")
"""The request body of the operation."""
@staticmethod
def _get_properties_from_parameters(
parameters: List[Parameter], spec: OpenAPISpec
) -> List[APIProperty]:
"""Get the properties of the operation."""
properties = []
for param in parameters:
if APIProperty.is_supported_location(param.param_in):
properties.append(APIProperty.from_parameter(param, spec))
elif param.required:
raise ValueError(
INVALID_LOCATION_TEMPL.format(
location=param.param_in, name=param.name
)
)
else:
logger.warning(
INVALID_LOCATION_TEMPL.format(
location=param.param_in, name=param.name
)
+ " Ignoring optional parameter"
)
pass
return properties
@classmethod
def from_openapi_url(
cls,
spec_url: str,
path: str,
method: str,
) -> "APIOperation":
"""Create an APIOperation from an OpenAPI URL."""
spec = OpenAPISpec.from_url(spec_url)
return cls.from_openapi_spec(spec, path, method)
@classmethod
def from_openapi_spec(
cls,
spec: OpenAPISpec,
path: str,
method: str,
) -> "APIOperation":
"""Create an APIOperation from an OpenAPI spec."""
operation = spec.get_operation(path, method)
parameters = spec.get_parameters_for_operation(operation)
properties = cls._get_properties_from_parameters(parameters, spec)
operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method)
request_body = spec.get_request_body_for_operation(operation)
api_request_body = (
APIRequestBody.from_request_body(request_body, spec)
if request_body is not None
else None
)
description = operation.description or operation.summary
if not description and spec.paths is not None:
description = spec.paths[path].description or spec.paths[path].summary
return cls(
operation_id=operation_id,
description=description or "",
base_url=spec.base_url,
path=path,
method=method,
properties=properties,
request_body=api_request_body,
)
@staticmethod
def ts_type_from_python(type_: SCHEMA_TYPE) -> str:
if type_ is None:
# TODO: Handle Nones better. These often result when
# parsing specs that are < v3
return "any"
elif isinstance(type_, str):
return {
"str": "string",
"integer": "number",
"float": "number",
"date-time": "string",
}.get(type_, type_)
elif isinstance(type_, tuple):
return f"Array<{APIOperation.ts_type_from_python(type_[0])}>"
elif isinstance(type_, type) and issubclass(type_, Enum):
return " | ".join([f"'{e.value}'" for e in type_])
else:
return str(type_)
def _format_nested_properties(
self, properties: List[APIRequestBodyProperty], indent: int = 2
) -> str:
"""Format nested properties."""
formatted_props = []
for prop in properties:
prop_name = prop.name
prop_type = self.ts_type_from_python(prop.type)
prop_required = "" if prop.required else "?"
prop_desc = f"/* {prop.description} */" if prop.description else ""
if prop.properties:
nested_props = self._format_nested_properties(
prop.properties, indent + 2
)
prop_type = f"{{\n{nested_props}\n{' ' * indent}}}"
formatted_props.append(
f"{prop_desc}\n{' ' * indent}{prop_name}"
f"{prop_required}: {prop_type},"
)
return "\n".join(formatted_props)
def to_typescript(self) -> str:
"""Get typescript string representation of the operation."""
operation_name = self.operation_id
params = []
if self.request_body:
formatted_request_body_props = self._format_nested_properties(
self.request_body.properties
)
params.append(formatted_request_body_props)
for prop in self.properties:
prop_name = prop.name
prop_type = self.ts_type_from_python(prop.type)
prop_required = "" if prop.required else "?"
prop_desc = f"/* {prop.description} */" if prop.description else ""
params.append(f"{prop_desc}\n\t\t{prop_name}{prop_required}: {prop_type},")
formatted_params = "\n".join(params).strip()
description_str = f"/* {self.description} */" if self.description else ""
typescript_definition = f"""
{description_str}
type {operation_name} = (_: {{
{formatted_params}
}}) => any;
"""
return typescript_definition.strip()
@property
def query_params(self) -> List[str]:
return [
property.name
for property in self.properties
if property.location == APIPropertyLocation.QUERY
]
@property
def path_params(self) -> List[str]:
return [
property.name
for property in self.properties
if property.location == APIPropertyLocation.PATH
]
@property
def body_params(self) -> List[str]:
if self.request_body is None:
return []
return [prop.name for prop in self.request_body.properties]
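# --- Illustrative usage sketch (not part of the original module) ---
# Parses one operation out of a hosted OpenAPI spec and renders the TypeScript
# signature used when prompting a model. The spec URL, path, and method are
# placeholder assumptions; any reachable OpenAPI 3.x spec exposing that path
# would work the same way.
if __name__ == "__main__":
    operation = APIOperation.from_openapi_url(
        "https://example.com/openapi.json",  # placeholder spec URL
        path="/pets",
        method="get",
    )
    print(operation.operation_id)
    print(operation.to_typescript())
    print("query:", operation.query_params, "path:", operation.path_params)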
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~callbacks~fake_callback_handler.py | """A fake callback handler for testing purposes."""
from itertools import chain
from typing import Any, Dict, List, Optional, Union
from uuid import UUID
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain.pydantic_v1 import BaseModel
from langchain.schema.messages import BaseMessage
class BaseFakeCallbackHandler(BaseModel):
"""Base fake callback handler for testing."""
starts: int = 0
ends: int = 0
errors: int = 0
text: int = 0
ignore_llm_: bool = False
ignore_chain_: bool = False
ignore_agent_: bool = False
ignore_retriever_: bool = False
ignore_chat_model_: bool = False
# to allow for similar callback handlers that are not technically equal
fake_id: Union[str, None] = None
# add finer-grained counters for easier debugging of failing tests
chain_starts: int = 0
chain_ends: int = 0
llm_starts: int = 0
llm_ends: int = 0
llm_streams: int = 0
tool_starts: int = 0
tool_ends: int = 0
agent_actions: int = 0
agent_ends: int = 0
chat_model_starts: int = 0
retriever_starts: int = 0
retriever_ends: int = 0
retriever_errors: int = 0
retries: int = 0
class BaseFakeCallbackHandlerMixin(BaseFakeCallbackHandler):
"""Base fake callback handler mixin for testing."""
def on_llm_start_common(self) -> None:
self.llm_starts += 1
self.starts += 1
def on_llm_end_common(self) -> None:
self.llm_ends += 1
self.ends += 1
def on_llm_error_common(self) -> None:
self.errors += 1
def on_llm_new_token_common(self) -> None:
self.llm_streams += 1
def on_retry_common(self) -> None:
self.retries += 1
def on_chain_start_common(self) -> None:
self.chain_starts += 1
self.starts += 1
def on_chain_end_common(self) -> None:
self.chain_ends += 1
self.ends += 1
def on_chain_error_common(self) -> None:
self.errors += 1
def on_tool_start_common(self) -> None:
self.tool_starts += 1
self.starts += 1
def on_tool_end_common(self) -> None:
self.tool_ends += 1
self.ends += 1
def on_tool_error_common(self) -> None:
self.errors += 1
def on_agent_action_common(self) -> None:
self.agent_actions += 1
self.starts += 1
def on_agent_finish_common(self) -> None:
self.agent_ends += 1
self.ends += 1
def on_chat_model_start_common(self) -> None:
self.chat_model_starts += 1
self.starts += 1
def on_text_common(self) -> None:
self.text += 1
def on_retriever_start_common(self) -> None:
self.starts += 1
self.retriever_starts += 1
def on_retriever_end_common(self) -> None:
self.ends += 1
self.retriever_ends += 1
def on_retriever_error_common(self) -> None:
self.errors += 1
self.retriever_errors += 1
class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin):
"""Fake callback handler for testing."""
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
@property
def ignore_retriever(self) -> bool:
"""Whether to ignore retriever callbacks."""
return self.ignore_retriever_
def on_llm_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_start_common()
def on_llm_new_token(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_new_token_common()
def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_end_common()
def on_llm_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_llm_error_common()
def on_retry(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retry_common()
def on_chain_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_start_common()
def on_chain_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_end_common()
def on_chain_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_chain_error_common()
def on_tool_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_start_common()
def on_tool_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_end_common()
def on_tool_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_tool_error_common()
def on_agent_action(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_agent_action_common()
def on_agent_finish(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_agent_finish_common()
def on_text(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_text_common()
def on_retriever_start(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_start_common()
def on_retriever_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_end_common()
def on_retriever_error(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retriever_error_common()
def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler":
return self
class FakeCallbackHandlerWithChatStart(FakeCallbackHandler):
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
assert all(isinstance(m, BaseMessage) for m in chain(*messages))
self.on_chat_model_start_common()
class FakeAsyncCallbackHandler(AsyncCallbackHandler, BaseFakeCallbackHandlerMixin):
"""Fake async callback handler for testing."""
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
async def on_retry(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retry_common()
async def on_llm_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_start_common()
async def on_llm_new_token(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_new_token_common()
async def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_end_common()
async def on_llm_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_error_common()
async def on_chain_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_start_common()
async def on_chain_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_end_common()
async def on_chain_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_error_common()
async def on_tool_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_start_common()
async def on_tool_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_end_common()
async def on_tool_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_error_common()
async def on_agent_action(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_action_common()
async def on_agent_finish(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_finish_common()
async def on_text(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_text_common()
def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler":
return self
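# --- Illustrative usage sketch (not part of the original module) ---
# The fakes only count callback invocations, so a test can drive them directly
# and assert on the counters afterwards; no real LLM is required.
if __name__ == "__main__":
    handler = FakeCallbackHandler()
    handler.on_llm_start({}, ["hello"])
    handler.on_llm_new_token("hel")
    handler.on_llm_new_token("lo")
    handler.on_llm_end(None)
    assert handler.llm_starts == 1
    assert handler.llm_streams == 2
    assert handler.llm_ends == 1
    assert handler.starts == 1 and handler.ends == 1 and handler.errors == 0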
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~forefrontai.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
class ForefrontAI(LLM):
"""ForefrontAI large language models.
To use, you should have the environment variable ``FOREFRONTAI_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain.llms import ForefrontAI
forefrontai = ForefrontAI(endpoint_url="")
"""
endpoint_url: str = ""
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
length: int = 256
"""The maximum number of tokens to generate in the completion."""
top_p: float = 1.0
"""Total probability mass of tokens to consider at each step."""
top_k: int = 40
"""The number of highest probability vocabulary tokens to
keep for top-k-filtering."""
repetition_penalty: int = 1
"""Penalizes repeated tokens according to frequency."""
forefrontai_api_key: Optional[str] = None
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
forefrontai_api_key = get_from_dict_or_env(
values, "forefrontai_api_key", "FOREFRONTAI_API_KEY"
)
values["forefrontai_api_key"] = forefrontai_api_key
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling ForefrontAI API."""
return {
"temperature": self.temperature,
"length": self.length,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"endpoint_url": self.endpoint_url}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "forefrontai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to ForefrontAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ForefrontAI(endpoint_url="<your endpoint>")("Tell me a joke.")
"""
response = requests.post(
url=self.endpoint_url,
headers={
"Authorization": f"Bearer {self.forefrontai_api_key}",
"Content-Type": "application/json",
},
json={"text": prompt, **self._default_params, **kwargs},
)
response_json = response.json()
text = response_json["result"][0]["completion"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
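# --- Illustrative usage sketch (not part of the original module) ---
# The endpoint URL and API key are placeholders; ForefrontAI exposes each
# deployed model behind its own completion endpoint, and the key may also be
# supplied via the FOREFRONTAI_API_KEY environment variable.
if __name__ == "__main__":
    llm = ForefrontAI(
        endpoint_url="https://your-model-endpoint.forefront.link/completions",  # placeholder
        forefrontai_api_key="YOUR_API_KEY",  # placeholder
        temperature=0.7,
        length=128,
    )
    print(llm("Tell me a joke."))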
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~output_parsers~enum.py | from enum import Enum
from typing import Any, Dict, List, Type
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseOutputParser, OutputParserException
class EnumOutputParser(BaseOutputParser):
"""Parse an output that is one of a set of values."""
enum: Type[Enum]
"""The enum to parse. Its values must be strings."""
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
enum = values["enum"]
if not all(isinstance(e.value, str) for e in enum):
raise ValueError("Enum values must be strings")
return values
@property
def _valid_values(self) -> List[str]:
return [e.value for e in self.enum]
def parse(self, response: str) -> Any:
try:
return self.enum(response.strip())
except ValueError:
raise OutputParserException(
f"Response '{response}' is not one of the "
f"expected values: {self._valid_values}"
)
def get_format_instructions(self) -> str:
return f"Select one of the following options: {', '.join(self._valid_values)}"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~output_parsers~structured.py | from __future__ import annotations
from typing import Any, List
from langchain.output_parsers.format_instructions import (
STRUCTURED_FORMAT_INSTRUCTIONS,
STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS,
)
from langchain.output_parsers.json import parse_and_check_json_markdown
from langchain.pydantic_v1 import BaseModel
from langchain.schema import BaseOutputParser
line_template = '\t"{name}": {type} // {description}'
class ResponseSchema(BaseModel):
"""A schema for a response from a structured output parser."""
name: str
"""The name of the schema."""
description: str
"""The description of the schema."""
type: str = "string"
"""The type of the response."""
def _get_sub_string(schema: ResponseSchema) -> str:
return line_template.format(
name=schema.name, description=schema.description, type=schema.type
)
class StructuredOutputParser(BaseOutputParser):
"""Parse the output of an LLM call to a structured output."""
response_schemas: List[ResponseSchema]
"""The schemas for the response."""
@classmethod
def from_response_schemas(
cls, response_schemas: List[ResponseSchema]
) -> StructuredOutputParser:
return cls(response_schemas=response_schemas)
def get_format_instructions(self, only_json: bool = False) -> str:
"""Get format instructions for the output parser.
example:
```python
from langchain.output_parsers.structured import (
StructuredOutputParser, ResponseSchema
)
response_schemas = [
ResponseSchema(
name="foo",
description="a list of strings",
type="List[string]"
),
ResponseSchema(
name="bar",
description="a string",
type="string"
),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
print(parser.get_format_instructions())
output:
# The output should be a Markdown code snippet formatted in the following
# schema, including the leading and trailing "```json" and "```":
#
# ```json
# {
# "foo": List[string] // a list of strings
# "bar": string // a string
# }
# ```
```
Args:
only_json (bool): If True, only the json in the Markdown code snippet
will be returned, without the introducing text. Defaults to False.
"""
schema_str = "\n".join(
[_get_sub_string(schema) for schema in self.response_schemas]
)
if only_json:
return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str)
else:
return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)
def parse(self, text: str) -> Any:
expected_keys = [rs.name for rs in self.response_schemas]
return parse_and_check_json_markdown(text, expected_keys)
@property
def _type(self) -> str:
return "structured"
| [
"\t\"{name}\": {type} // {description}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~callbacks~test_openai_info.py | from unittest.mock import MagicMock
from uuid import uuid4
import pytest
from langchain.callbacks import OpenAICallbackHandler
from langchain.llms.openai import BaseOpenAI
from langchain.schema import LLMResult
@pytest.fixture
def handler() -> OpenAICallbackHandler:
return OpenAICallbackHandler()
def test_on_llm_end(handler: OpenAICallbackHandler) -> None:
response = LLMResult(
generations=[],
llm_output={
"token_usage": {
"prompt_tokens": 2,
"completion_tokens": 1,
"total_tokens": 3,
},
"model_name": BaseOpenAI.__fields__["model_name"].default,
},
)
handler.on_llm_end(response)
assert handler.successful_requests == 1
assert handler.total_tokens == 3
assert handler.prompt_tokens == 2
assert handler.completion_tokens == 1
assert handler.total_cost > 0
def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
response = LLMResult(
generations=[],
llm_output={
"token_usage": {
"prompt_tokens": 2,
"completion_tokens": 1,
"total_tokens": 3,
},
"model_name": "foo-bar",
},
)
handler.on_llm_end(response)
assert handler.total_cost == 0
@pytest.mark.parametrize(
"model_name",
[
"ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
"babbage:ft-your-org:custom-model-name-2022-02-15-04-21-04",
"curie:ft-your-org:custom-model-name-2022-02-15-04-21-04",
"davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04",
"ft:babbage-002:your-org:custom-model-name:1abcdefg",
"ft:davinci-002:your-org:custom-model-name:1abcdefg",
"ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg",
],
)
def test_on_llm_end_finetuned_model(
handler: OpenAICallbackHandler, model_name: str
) -> None:
response = LLMResult(
generations=[],
llm_output={
"token_usage": {
"prompt_tokens": 2,
"completion_tokens": 1,
"total_tokens": 3,
},
"model_name": model_name,
},
)
handler.on_llm_end(response)
assert handler.total_cost > 0
@pytest.mark.parametrize(
"model_name,expected_cost",
[
("gpt-35-turbo", 0.0035),
("gpt-35-turbo-0301", 0.0035),
(
"gpt-35-turbo-0613",
0.0035,
),
(
"gpt-35-turbo-16k-0613",
0.007,
),
(
"gpt-35-turbo-16k",
0.007,
),
("gpt-4", 0.09),
("gpt-4-0314", 0.09),
("gpt-4-0613", 0.09),
("gpt-4-32k", 0.18),
("gpt-4-32k-0314", 0.18),
("gpt-4-32k-0613", 0.18),
],
)
def test_on_llm_end_azure_openai(
handler: OpenAICallbackHandler, model_name: str, expected_cost: float
) -> None:
response = LLMResult(
generations=[],
llm_output={
"token_usage": {
"prompt_tokens": 1000,
"completion_tokens": 1000,
"total_tokens": 2000,
},
"model_name": model_name,
},
)
handler.on_llm_end(response)
assert handler.total_cost == expected_cost
@pytest.mark.parametrize(
"model_name", ["gpt-35-turbo-16k-0301", "gpt-4-0301", "gpt-4-32k-0301"]
)
def test_on_llm_end_no_cost_invalid_model(
handler: OpenAICallbackHandler, model_name: str
) -> None:
response = LLMResult(
generations=[],
llm_output={
"token_usage": {
"prompt_tokens": 1000,
"completion_tokens": 1000,
"total_tokens": 2000,
},
"model_name": model_name,
},
)
handler.on_llm_end(response)
assert handler.total_cost == 0
def test_on_retry_works(handler: OpenAICallbackHandler) -> None:
handler.on_retry(MagicMock(), run_id=uuid4())
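# --- Illustrative sketch (not part of the original test module) ---
# In application code the same handler is usually obtained through the
# get_openai_callback() context manager instead of being constructed directly;
# the LLM choice below is a placeholder assumption and is not executed here.
def _example_context_manager_usage() -> None:
    from langchain.callbacks import get_openai_callback
    from langchain.llms.openai import OpenAI

    llm = OpenAI(temperature=0)
    with get_openai_callback() as cb:
        llm("What is 2 + 2?")
    print(cb.total_tokens, cb.total_cost)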
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~evaluation~agents~trajectory_eval_prompt.py | """Prompt for trajectory evaluation chain."""
# flake8: noqa
from langchain.schema.messages import HumanMessage, AIMessage, SystemMessage
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
EVAL_TEMPLATE = """Языковая модель AI получила доступ к следующему набору инструментов, чтобы помочь ответить на вопрос пользователя.
Инструменты, предоставленные модели AI:
[TOOL_DESCRIPTIONS]
{tool_descriptions}
[END_TOOL_DESCRIPTIONS]
Вопрос, который человек задал модели AI:
[QUESTION]
{question}
[END_QUESTION]{reference}
Языковая модель AI решила использовать следующий набор инструментов для ответа на вопрос:
[AGENT_TRAJECTORY]
{agent_trajectory}
[END_AGENT_TRAJECTORY]
Окончательный ответ языковой модели AI на вопрос был:
[RESPONSE]
{answer}
[END_RESPONSE]
Давай проведем подробную оценку ответа языковой модели AI шаг за шагом.
Мы учитываем следующие критерии перед тем, как дать оценку от 1 до 5:
i. Полезен ли окончательный ответ?
ii. Использует ли AI логическую последовательность инструментов для ответа на вопрос?
iii. Использует ли модель языка AI инструменты полезным образом?
iv. Использует ли модель языка AI слишком много шагов для ответа на вопрос?
v. Используются ли подходящие инструменты для ответа на вопрос?"""
EXAMPLE_INPUT = """Языковая модель AI получила доступ к следующему набору инструментов, чтобы помочь ответить на вопрос пользователя.
Инструменты, предоставленные модели AI:
[TOOL_DESCRIPTIONS]
Инструмент 1:
Название: Поиск
Описание: полезно, когда нужно задать вопрос с поиском
Инструмент 2:
Название: Поиск
Описание: полезно, когда нужно задать вопрос с поиском
Инструмент 3:
Название: Калькулятор
Описание: полезно для выполнения расчетов
Инструмент 4:
Название: Поиск в Интернете (SerpAPI)
Описание: полезно, когда нужно ответить на вопросы о текущих событиях
[END_TOOL_DESCRIPTIONS]
Вопрос, который человек задал модели AI: Если положить Статую Свободы вдоль, сколько раз она протянется через Соединенные Штаты?
Языковая модель AI решила использовать следующий набор инструментов для ответа на вопрос:
[AGENT_TRAJECTORY]
Шаг 1:
Используемый инструмент: Поиск в Интернете (SerpAPI)
Ввод инструмента: Если положить Статую Свободы вдоль, сколько раз она протянется через Соединенные Штаты?
Выход инструмента: Статуя Свободы была подарена Соединенным Штатам Францией в знак дружбы между двумя странами. Она была воздвигнута на американском проекте ...
[END_AGENT_TRAJECTORY]
[RESPONSE]
Окончательный ответ языковой модели AI на вопрос был: Есть разные способы измерить длину Соединенных Штатов, но если мы используем расстояние между Статуей Свободы и самой западной точкой континентальных Соединенных Штатов (мыс Алава, Вашингтон), которое составляет примерно 2,857 миль (4,596 км), и предположим, что Статуя Свободы имеет высоту 305 футов (93 метра), то статуя протянется через Соединенные Штаты примерно 17,5 раз, если положить ее вдоль.
[END_RESPONSE]
Давай проведем подробную оценку ответа языковой модели AI шаг за шагом.
Мы учитываем следующие критерии перед тем, как дать оценку от 1 до 5:
i. Полезен ли окончательный ответ?
ii. Использует ли AI логическую последовательность инструментов для ответа на вопрос?
iii. Использует ли модель языка AI инструменты полезным образом?
iv. Использует ли модель языка AI слишком много шагов для ответа на вопрос?
v. Используются ли подходящие инструменты для ответа на вопрос?"""
EXAMPLE_OUTPUT = """Во-первых, давайте оценим окончательный ответ. Окончательный ответ использует хорошее обоснование, но неверен. 2,857 деленное на 305 не равно 17,5.\
Модель должна была использовать калькулятор, чтобы выяснить это. Во-вторых, использует ли модель логическую последовательность инструментов для ответа на вопрос?\
Способ использования поиска моделью не полезен. Модель должна была использовать инструмент поиска, чтобы узнать ширину США или высоту статуи.\
Модель не использовала инструмент калькулятора и дала неверный ответ. Поисковый API должен использоваться для текущих событий или конкретных вопросов.\
Инструменты не использовались полезным образом. Модель не использовала слишком много шагов для ответа на вопрос.\
Модель не использовала подходящие инструменты для ответа на вопрос.\
Оценка: Учитывая хорошее обоснование в окончательном ответе, но в остальном слабую работу, мы даем модели оценку 2.
Оценка: 2"""
EVAL_CHAT_PROMPT = ChatPromptTemplate.from_messages(
messages=[
SystemMessage(
content="Ты полезный помощник, который оценивает языковые модели."
),
HumanMessage(content=EXAMPLE_INPUT),
AIMessage(content=EXAMPLE_OUTPUT),
HumanMessagePromptTemplate.from_template(EVAL_TEMPLATE),
]
)
TOOL_FREE_EVAL_TEMPLATE = """Языковая модель AI получила доступ к набору инструментов, чтобы помочь ответить на вопрос пользователя.
Вопрос, который человек задал модели AI:
[QUESTION]
{question}
[END_QUESTION]{reference}
Языковая модель AI решила использовать следующий набор инструментов для ответа на вопрос:
[AGENT_TRAJECTORY]
{agent_trajectory}
[END_AGENT_TRAJECTORY]
Окончательный ответ языковой модели AI на вопрос был:
[RESPONSE]
{answer}
[END_RESPONSE]
Давай проведем подробную оценку ответа языковой модели AI шаг за шагом.
Мы учитываем следующие критерии перед тем, как дать оценку от 1 до 5:
i. Полезен ли окончательный ответ?
ii. Использует ли AI логическую последовательность инструментов для ответа на вопрос?
iii. Использует ли модель языка AI инструменты полезным образом?
iv. Использует ли модель языка AI слишком много шагов для ответа на вопрос?
v. Используются ли подходящие инструменты для ответа на вопрос?"""
TOOL_FREE_EVAL_CHAT_PROMPT = ChatPromptTemplate.from_messages(
messages=[
SystemMessage(
content="Ты полезный помощник, который оценивает языковые модели."
),
HumanMessage(content=EXAMPLE_INPUT),
AIMessage(content=EXAMPLE_OUTPUT),
HumanMessagePromptTemplate.from_template(TOOL_FREE_EVAL_TEMPLATE),
]
)
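# --- Illustrative usage sketch (not part of the original module) ---
# Renders the evaluation chat prompt into concrete messages; every field value
# below is a placeholder assumption.
if __name__ == "__main__":
    messages = EVAL_CHAT_PROMPT.format_messages(
        tool_descriptions="Tool 1:\nName: Search\nDescription: web search",
        question="How tall is the Eiffel Tower?",
        reference="",
        agent_trajectory="Step 1:\nTool used: Search\nTool input: Eiffel Tower height\n...",
        answer="About 330 meters.",
    )
    for message in messages:
        print(type(message).__name__ + ":", message.content[:80])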
| [
"Языковая модель AI получила доступ к следующему набору инструментов, чтобы помочь ответить на вопрос пользователя.\n\nИнструменты, предоставленные модели AI:\n[TOOL_DESCRIPTIONS]\nИнструмент 1:\nНазвание: Поиск\nОписание: полезно, когда нужно задать вопрос с поиском\n\nИнструмент 2:\nНазвание: Поиск\nОписание: полезно, когда нужно задать вопрос с поиском\n\nИнструмент 3:\nНазвание: Калькулятор\nОписание: полезно для выполнения расчетов\n\nИнструмент 4:\nНазвание: Поиск в Интернете (SerpAPI)\nОписание: полезно, когда нужно ответить на вопросы о текущих событиях\n[END_TOOL_DESCRIPTIONS]\n\nВопрос, который человек задал модели AI: Если положить Статую Свободы вдоль, сколько раз она протянется через Соединенные Штаты?\n\nЯзыковая модель AI решила использовать следующий набор инструментов для ответа на вопрос:\n[AGENT_TRAJECTORY]\nШаг 1:\nИспользуемый инструмент: Поиск в Интернете (SerpAPI)\nВвод инструмента: Если положить Статую Свободы вдоль, сколько раз она протянется через Соединенные Штаты?\nВыход инструмента: Статуя Свободы была подарена Соединенным Штатам Францией в знак дружбы между двумя странами. Она была воздвигнута на американском проекте ...\n[END_AGENT_TRAJECTORY]\n\n[RESPONSE]\nОкончательный ответ языковой модели AI на вопрос был: Есть разные способы измерить длину Соединенных Штатов, но если мы используем расстояние между Статуей Свободы и самой западной точкой континентальных Соединенных Штатов (мыс Алава, Вашингтон), которое составляет примерно 2,857 миль (4,596 км), и предположим, что Статуя Свободы имеет высоту 305 футов (93 метра), то статуя протянется через Соединенные Штаты примерно 17,5 раз, если положить ее вдоль.\n[END_RESPONSE]\n\nДавай проведем подробную оценку ответа языковой модели AI шаг за шагом.\n\nМы учитываем следующие критерии перед тем, как дать оценку от 1 до 5:\n\ni. Полезен ли окончательный ответ?\nii. Использует ли AI логическую последовательность инструментов для ответа на вопрос?\niii. Использует ли модель языка AI инструменты полезным образом?\niv. Использует ли модель языка AI слишком много шагов для ответа на вопрос?\nv. Используются ли подходящие инструменты для ответа на вопрос?",
"Языковая модель AI получила доступ к следующему набору инструментов, чтобы помочь ответить на вопрос пользователя.\n\nИнструменты, предоставленные модели AI:\n[TOOL_DESCRIPTIONS]\n{tool_descriptions}\n[END_TOOL_DESCRIPTIONS]\n\nВопрос, который человек задал модели AI:\n[QUESTION]\n{question}\n[END_QUESTION]{reference}\n\nЯзыковая модель AI решила использовать следующий набор инструментов для ответа на вопрос:\n[AGENT_TRAJECTORY]\n{agent_trajectory}\n[END_AGENT_TRAJECTORY]\n\nОкончательный ответ языковой модели AI на вопрос был:\n[RESPONSE]\n{answer}\n[END_RESPONSE]\n\nДавай проведем подробную оценку ответа языковой модели AI шаг за шагом.\n\nМы учитываем следующие критерии перед тем, как дать оценку от 1 до 5:\n\ni. Полезен ли окончательный ответ?\nii. Использует ли AI логическую последовательность инструментов для ответа на вопрос?\niii. Использует ли модель языка AI инструменты полезным образом?\niv. Использует ли модель языка AI слишком много шагов для ответа на вопрос?\nv. Используются ли подходящие инструменты для ответа на вопрос?",
"Ты полезный помощник, который оценивает языковые модели.",
"Языковая модель AI получила доступ к набору инструментов, чтобы помочь ответить на вопрос пользователя.\n\nВопрос, который человек задал модели AI:\n[QUESTION]\n{question}\n[END_QUESTION]{reference}\n\nЯзыковая модель AI решила использовать следующий набор инструментов для ответа на вопрос:\n[AGENT_TRAJECTORY]\n{agent_trajectory}\n[END_AGENT_TRAJECTORY]\n\nОкончательный ответ языковой модели AI на вопрос был:\n[RESPONSE]\n{answer}\n[END_RESPONSE]\n\nДавай проведем подробную оценку ответа языковой модели AI шаг за шагом.\n\nМы учитываем следующие критерии перед тем, как дать оценку от 1 до 5:\n\ni. Полезен ли окончательный ответ?\nii. Использует ли AI логическую последовательность инструментов для ответа на вопрос?\niii. Использует ли модель языка AI инструменты полезным образом?\niv. Использует ли модель языка AI слишком много шагов для ответа на вопрос?\nv. Используются ли подходящие инструменты для ответа на вопрос?",
"Во-первых, давайте оценим окончательный ответ. Окончательный ответ использует хорошее обоснование, но неверен. 2,857 деленное на 305 не равно 17,5.Модель должна была использовать калькулятор, чтобы выяснить это. Во-вторых, использует ли модель логическую последовательность инструментов для ответа на вопрос?Способ использования поиска моделью не полезен. Модель должна была использовать инструмент поиска, чтобы узнать ширину США или высоту статуи.Модель не использовала инструмент калькулятора и дала неверный ответ. Поисковый API должен использоваться для текущих событий или конкретных вопросов.Инструменты не использовались полезным образом. Модель не использовала слишком много шагов для ответа на вопрос.Модель не использовала подходящие инструменты для ответа на вопрос. \nОценка: Учитывая хорошее обоснование в окончательном ответе, но в остальном слабую работу, мы даем модели оценку 2.\n\nОценка: 2"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~srt.py | from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class SRTLoader(BaseLoader):
"""Load `.srt` (subtitle) files."""
def __init__(self, file_path: str):
"""Initialize with a file path."""
try:
import pysrt # noqa:F401
except ImportError:
raise ImportError(
"package `pysrt` not found, please install it with `pip install pysrt`"
)
self.file_path = file_path
def load(self) -> List[Document]:
"""Load using pysrt file."""
import pysrt
parsed_info = pysrt.open(self.file_path)
text = " ".join([t.text for t in parsed_info])
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~schema~messages.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Sequence, Union
from typing_extensions import Literal
from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import Extra, Field
if TYPE_CHECKING:
from langchain.prompts.chat import ChatPromptTemplate
def get_buffer_string(
messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
) -> str:
"""Convert sequence of Messages to strings and concatenate them into one string.
Args:
messages: Messages to be converted to strings.
human_prefix: The prefix to prepend to contents of HumanMessages.
ai_prefix: The prefix to prepend to contents of AIMessages.
Returns:
A single string concatenation of all input messages.
Example:
.. code-block:: python
from langchain.schema import AIMessage, HumanMessage
messages = [
HumanMessage(content="Hi, how are you?"),
AIMessage(content="Good, how are you?"),
]
get_buffer_string(messages)
# -> "Human: Hi, how are you?\nAI: Good, how are you?"
"""
string_messages = []
for m in messages:
if isinstance(m, HumanMessage):
role = human_prefix
elif isinstance(m, AIMessage):
role = ai_prefix
elif isinstance(m, SystemMessage):
role = "System"
elif isinstance(m, FunctionMessage):
role = "Function"
elif isinstance(m, ChatMessage):
role = m.role
else:
raise ValueError(f"Got unsupported message type: {m}")
message = f"{role}: {m.content}"
if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
message += f"{m.additional_kwargs['function_call']}"
string_messages.append(message)
return "\n".join(string_messages)
class BaseMessage(Serializable):
"""The base abstract Message class.
Messages are the inputs and outputs of ChatModels.
"""
content: str
"""The string contents of the message."""
additional_kwargs: dict = Field(default_factory=dict)
"""Any additional information."""
type: str
class Config:
extra = Extra.allow
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
def __add__(self, other: Any) -> ChatPromptTemplate:
from langchain.prompts.chat import ChatPromptTemplate
prompt = ChatPromptTemplate(messages=[self])
return prompt + other
class BaseMessageChunk(BaseMessage):
"""A Message chunk, which can be concatenated with other Message chunks."""
def _merge_kwargs_dict(
self, left: Dict[str, Any], right: Dict[str, Any]
) -> Dict[str, Any]:
"""Merge additional_kwargs from another BaseMessageChunk into this one."""
merged = left.copy()
for k, v in right.items():
if k not in merged:
merged[k] = v
elif type(merged[k]) != type(v):
raise ValueError(
f'additional_kwargs["{k}"] already exists in this message,'
" but with a different type."
)
elif isinstance(merged[k], str):
merged[k] += v
elif isinstance(merged[k], dict):
merged[k] = self._merge_kwargs_dict(merged[k], v)
else:
raise ValueError(
f"Additional kwargs key {k} already exists in this message."
)
return merged
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, BaseMessageChunk):
# If both are (subclasses of) BaseMessageChunk,
# concat into a single BaseMessageChunk
if isinstance(self, ChatMessageChunk):
return self.__class__(
role=self.role,
content=self.content + other.content,
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
return self.__class__(
content=self.content + other.content,
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
else:
raise TypeError(
'unsupported operand type(s) for +: "'
f"{self.__class__.__name__}"
f'" and "{other.__class__.__name__}"'
)
class HumanMessage(BaseMessage):
"""A Message from a human."""
example: bool = False
"""Whether this Message is being passed in to the model as part of an example
conversation.
"""
type: Literal["human"] = "human"
HumanMessage.update_forward_refs()
class HumanMessageChunk(HumanMessage, BaseMessageChunk):
"""A Human Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment] # noqa: E501
class AIMessage(BaseMessage):
"""A Message from an AI."""
example: bool = False
"""Whether this Message is being passed in to the model as part of an example
conversation.
"""
type: Literal["ai"] = "ai"
AIMessage.update_forward_refs()
class AIMessageChunk(AIMessage, BaseMessageChunk):
"""A Message chunk from an AI."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment] # noqa: E501
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, AIMessageChunk):
if self.example != other.example:
raise ValueError(
"Cannot concatenate AIMessageChunks with different example values."
)
return self.__class__(
example=self.example,
content=self.content + other.content,
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
return super().__add__(other)
class SystemMessage(BaseMessage):
"""A Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
"""
type: Literal["system"] = "system"
SystemMessage.update_forward_refs()
class SystemMessageChunk(SystemMessage, BaseMessageChunk):
"""A System Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment] # noqa: E501
class FunctionMessage(BaseMessage):
"""A Message for passing the result of executing a function back to a model."""
name: str
"""The name of the function that was executed."""
type: Literal["function"] = "function"
FunctionMessage.update_forward_refs()
class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
"""A Function Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal[
"FunctionMessageChunk"
] = "FunctionMessageChunk" # type: ignore[assignment]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, FunctionMessageChunk):
if self.name != other.name:
raise ValueError(
"Cannot concatenate FunctionMessageChunks with different names."
)
return self.__class__(
name=self.name,
content=self.content + other.content,
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
return super().__add__(other)
class ChatMessage(BaseMessage):
"""A Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
ChatMessage.update_forward_refs()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""A Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
raise ValueError(
"Cannot concatenate ChatMessageChunks with different roles."
)
return self.__class__(
role=self.role,
content=self.content + other.content,
additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs
),
)
return super().__add__(other)
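# Illustrative sketch (not part of the original module): how streamed chunks are
# combined with ``+``. The contents and the function_call payload below are
# invented for demonstration only.
def _example_chunk_concatenation() -> None:
    left = AIMessageChunk(
        content="The answer ",
        additional_kwargs={"function_call": {"arguments": '{"x": 1'}},
    )
    right = AIMessageChunk(
        content="is 42.",
        additional_kwargs={"function_call": {"arguments": ', "y": 2}'}},
    )
    merged = left + right
    assert merged.content == "The answer is 42."
    assert merged.additional_kwargs["function_call"]["arguments"] == '{"x": 1, "y": 2}'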
AnyMessage = Union[AIMessage, HumanMessage, ChatMessage, SystemMessage, FunctionMessage]
def _message_to_dict(message: BaseMessage) -> dict:
return {"type": message.type, "data": message.dict()}
def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
"""Convert a sequence of Messages to a list of dictionaries.
Args:
messages: Sequence of messages (as BaseMessages) to convert.
Returns:
List of messages as dicts.
"""
return [_message_to_dict(m) for m in messages]
def _message_from_dict(message: dict) -> BaseMessage:
_type = message["type"]
if _type == "human":
return HumanMessage(**message["data"])
elif _type == "ai":
return AIMessage(**message["data"])
elif _type == "system":
return SystemMessage(**message["data"])
elif _type == "chat":
return ChatMessage(**message["data"])
elif _type == "function":
return FunctionMessage(**message["data"])
else:
raise ValueError(f"Got unexpected message type: {_type}")
def messages_from_dict(messages: List[dict]) -> List[BaseMessage]:
"""Convert a sequence of messages from dicts to Message objects.
Args:
messages: Sequence of messages (as dicts) to convert.
Returns:
List of messages (BaseMessages).
"""
return [_message_from_dict(m) for m in messages]
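# Illustrative sketch (not part of the original module): round-tripping messages
# through the dict helpers above; the message contents are invented.
def _example_message_round_trip() -> None:
    history = [HumanMessage(content="Hi"), AIMessage(content="Hello!")]
    assert messages_from_dict(messages_to_dict(history)) == history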
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~docugami.py | import io
import logging
import os
import re
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.pydantic_v1 import BaseModel, root_validator
TD_NAME = "{http://www.w3.org/1999/xhtml}td"
TABLE_NAME = "{http://www.w3.org/1999/xhtml}table"
XPATH_KEY = "xpath"
DOCUMENT_ID_KEY = "id"
DOCUMENT_SOURCE_KEY = "source"
DOCUMENT_NAME_KEY = "name"
STRUCTURE_KEY = "structure"
TAG_KEY = "tag"
PROJECTS_KEY = "projects"
DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1"
logger = logging.getLogger(__name__)
class DocugamiLoader(BaseLoader, BaseModel):
"""Load from `Docugami`.
To use, you should have the ``lxml`` python package installed.
"""
api: str = DEFAULT_API_ENDPOINT
"""The Docugami API endpoint to use."""
access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY")
"""The Docugami API access token to use."""
docset_id: Optional[str]
"""The Docugami API docset ID to use."""
document_ids: Optional[Sequence[str]]
"""The Docugami API document IDs to use."""
file_paths: Optional[Sequence[Union[Path, str]]]
"""The local file paths to use."""
min_chunk_size: int = 32 # appended to the next chunk to avoid over-chunking
"""The minimum chunk size to use when parsing DGML. Defaults to 32."""
@root_validator
def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that either local file paths are given, or remote API docset ID.
Args:
values: The values to validate.
Returns:
The validated values.
"""
if values.get("file_paths") and values.get("docset_id"):
raise ValueError("Cannot specify both file_paths and remote API docset_id")
if not values.get("file_paths") and not values.get("docset_id"):
raise ValueError("Must specify either file_paths or remote API docset_id")
if values.get("docset_id") and not values.get("access_token"):
raise ValueError("Must specify access token if using remote API docset_id")
return values
def _parse_dgml(
self, document: Mapping, content: bytes, doc_metadata: Optional[Mapping] = None
) -> List[Document]:
"""Parse a single DGML document into a list of Documents."""
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
# helpers
def _xpath_qname_for_chunk(chunk: Any) -> str:
"""Get the xpath qname for a chunk."""
qname = f"{chunk.prefix}:{chunk.tag.split('}')[-1]}"
parent = chunk.getparent()
if parent is not None:
doppelgangers = [x for x in parent if x.tag == chunk.tag]
if len(doppelgangers) > 1:
idx_of_self = doppelgangers.index(chunk)
qname = f"{qname}[{idx_of_self + 1}]"
return qname
def _xpath_for_chunk(chunk: Any) -> str:
"""Get the xpath for a chunk."""
ancestor_chain = chunk.xpath("ancestor-or-self::*")
return "/" + "/".join(_xpath_qname_for_chunk(x) for x in ancestor_chain)
def _structure_value(node: Any) -> str:
"""Get the structure value for a node."""
structure = (
"table"
if node.tag == TABLE_NAME
else node.attrib["structure"]
if "structure" in node.attrib
else None
)
return structure
def _is_structural(node: Any) -> bool:
"""Check if a node is structural."""
return _structure_value(node) is not None
def _is_heading(node: Any) -> bool:
"""Check if a node is a heading."""
structure = _structure_value(node)
return structure is not None and structure.lower().startswith("h")
def _get_text(node: Any) -> str:
"""Get the text of a node."""
return " ".join(node.itertext()).strip()
def _has_structural_descendant(node: Any) -> bool:
"""Check if a node has a structural descendant."""
for child in node:
if _is_structural(child) or _has_structural_descendant(child):
return True
return False
def _leaf_structural_nodes(node: Any) -> List:
"""Get the leaf structural nodes of a node."""
if _is_structural(node) and not _has_structural_descendant(node):
return [node]
else:
leaf_nodes = []
for child in node:
leaf_nodes.extend(_leaf_structural_nodes(child))
return leaf_nodes
def _create_doc(node: Any, text: str) -> Document:
"""Create a Document from a node and text."""
metadata = {
XPATH_KEY: _xpath_for_chunk(node),
DOCUMENT_ID_KEY: document[DOCUMENT_ID_KEY],
DOCUMENT_NAME_KEY: document[DOCUMENT_NAME_KEY],
DOCUMENT_SOURCE_KEY: document[DOCUMENT_NAME_KEY],
STRUCTURE_KEY: node.attrib.get("structure", ""),
TAG_KEY: re.sub(r"\{.*\}", "", node.tag),
}
if doc_metadata:
metadata.update(doc_metadata)
return Document(
page_content=text,
metadata=metadata,
)
# parse the tree and return chunks
tree = etree.parse(io.BytesIO(content))
root = tree.getroot()
chunks: List[Document] = []
prev_small_chunk_text = None
for node in _leaf_structural_nodes(root):
text = _get_text(node)
if prev_small_chunk_text:
text = prev_small_chunk_text + " " + text
prev_small_chunk_text = None
if _is_heading(node) or len(text) < self.min_chunk_size:
# Save headings or other small chunks to be appended to the next chunk
prev_small_chunk_text = text
else:
chunks.append(_create_doc(node, text))
if prev_small_chunk_text and len(chunks) > 0:
# small chunk at the end left over, just append to last chunk
chunks[-1].page_content += " " + prev_small_chunk_text
return chunks
def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all document details for the given docset ID"""
url = f"{self.api}/docsets/{docset_id}/documents"
all_documents = []
while url:
response = requests.get(
url,
headers={"Authorization": f"Bearer {self.access_token}"},
)
if response.ok:
data = response.json()
all_documents.extend(data["documents"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_documents
def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all project details for the given docset ID"""
url = f"{self.api}/projects?docset.id={docset_id}"
all_projects = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_projects.extend(data["projects"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_projects
def _metadata_for_project(self, project: Dict) -> Dict:
"""Gets project metadata for all files"""
project_id = project.get("id")
url = f"{self.api}/projects/{project_id}/artifacts/latest"
all_artifacts = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_artifacts.extend(data["artifacts"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
per_file_metadata = {}
for artifact in all_artifacts:
artifact_name = artifact.get("name")
artifact_url = artifact.get("url")
artifact_doc = artifact.get("document")
if artifact_name == "report-values.xml" and artifact_url and artifact_doc:
doc_id = artifact_doc["id"]
metadata: Dict = {}
# the evaluated XML for each document is named after the project
response = requests.request(
"GET",
f"{artifact_url}/content",
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
artifact_tree = etree.parse(io.BytesIO(response.content))
artifact_root = artifact_tree.getroot()
ns = artifact_root.nsmap
entries = artifact_root.xpath("//pr:Entry", namespaces=ns)
for entry in entries:
heading = entry.xpath("./pr:Heading", namespaces=ns)[0].text
value = " ".join(
entry.xpath("./pr:Value", namespaces=ns)[0].itertext()
).strip()
metadata[heading] = value
per_file_metadata[doc_id] = metadata
else:
raise Exception(
f"Failed to download {artifact_url}/content "
                        f"(status: {response.status_code})"
)
return per_file_metadata
def _load_chunks_for_document(
self, docset_id: str, document: Dict, doc_metadata: Optional[Dict] = None
) -> List[Document]:
"""Load chunks for a document."""
document_id = document["id"]
url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml"
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
return self._parse_dgml(document, response.content, doc_metadata)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
def load(self) -> List[Document]:
"""Load documents."""
chunks: List[Document] = []
if self.access_token and self.docset_id:
# remote mode
_document_details = self._document_details_for_docset_id(self.docset_id)
if self.document_ids:
_document_details = [
d for d in _document_details if d["id"] in self.document_ids
]
_project_details = self._project_details_for_docset_id(self.docset_id)
combined_project_metadata = {}
if _project_details:
# if there are any projects for this docset, load project metadata
for project in _project_details:
metadata = self._metadata_for_project(project)
combined_project_metadata.update(metadata)
for doc in _document_details:
doc_metadata = combined_project_metadata.get(doc["id"])
chunks += self._load_chunks_for_document(
self.docset_id, doc, doc_metadata
)
elif self.file_paths:
# local mode (for integration testing, or pre-downloaded XML)
for path in self.file_paths:
path = Path(path)
with open(path, "rb") as file:
chunks += self._parse_dgml(
{
DOCUMENT_ID_KEY: path.name,
DOCUMENT_NAME_KEY: path.name,
},
file.read(),
)
return chunks
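# Illustrative sketch (not part of the original module): running the loader in
# local mode against a pre-downloaded DGML file. The path below is a placeholder.
if __name__ == "__main__":
    loader = DocugamiLoader(file_paths=["./example.dgml"])
    for doc in loader.load():
        print(doc.metadata[XPATH_KEY], doc.page_content[:80])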
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~weaviate.py | from __future__ import annotations
import datetime
import os
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
)
from uuid import uuid4
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import weaviate
def _default_schema(index_name: str) -> Dict:
return {
"class": index_name,
"properties": [
{
"name": "text",
"dataType": ["text"],
}
],
}
def _create_weaviate_client(
url: Optional[str] = None,
api_key: Optional[str] = None,
**kwargs: Any,
) -> weaviate.Client:
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`"
)
url = url or os.environ.get("WEAVIATE_URL")
api_key = api_key or os.environ.get("WEAVIATE_API_KEY")
auth = weaviate.auth.AuthApiKey(api_key=api_key) if api_key else None
return weaviate.Client(url=url, auth_client_secret=auth, **kwargs)
def _default_score_normalizer(val: float) -> float:
return 1 - 1 / (1 + np.exp(val))
def _json_serializable(value: Any) -> Any:
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
class Weaviate(VectorStore):
"""`Weaviate` vector store.
To use, you should have the ``weaviate-client`` python package installed.
Example:
.. code-block:: python
import weaviate
from langchain.vectorstores import Weaviate
client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
weaviate = Weaviate(client, index_name, text_key)
"""
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
embedding: Optional[Embeddings] = None,
attributes: Optional[List[str]] = None,
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_score_normalizer,
by_text: bool = True,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._query_attrs = [self._text_key]
self.relevance_score_fn = relevance_score_fn
self._by_text = by_text
if attributes is not None:
self._query_attrs.extend(attributes)
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding
def _select_relevance_score_fn(self) -> Callable[[float], float]:
return (
self.relevance_score_fn
if self.relevance_score_fn
else _default_score_normalizer
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
ids = []
embeddings: Optional[List[List[float]]] = None
if self._embedding:
if not isinstance(texts, list):
texts = list(texts)
embeddings = self._embedding.embed_documents(texts)
with self._client.batch as batch:
for i, text in enumerate(texts):
data_properties = {self._text_key: text}
if metadatas is not None:
for key, val in metadatas[i].items():
data_properties[key] = _json_serializable(val)
# Allow for ids (consistent w/ other methods)
                # or uuids (backwards compatible w/ existing arg)
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
_id = get_valid_uuid(uuid4())
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
elif "ids" in kwargs:
_id = kwargs["ids"][i]
batch.add_data_object(
data_object=data_properties,
class_name=self._index_name,
uuid=_id,
vector=embeddings[i] if embeddings else None,
)
ids.append(_id)
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
if self._by_text:
return self.similarity_search_by_text(query, k, **kwargs)
else:
if self._embedding is None:
raise ValueError(
"_embedding cannot be None for similarity_search when "
"_by_text=False"
)
embedding = self._embedding.embed_query(query)
return self.similarity_search_by_vector(embedding, k, **kwargs)
def similarity_search_by_text(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
if kwargs.get("additional"):
query_obj = query_obj.with_additional(kwargs.get("additional"))
result = query_obj.with_near_text(content).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Look up similar documents by embedding vector in Weaviate."""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
if kwargs.get("additional"):
query_obj = query_obj.with_additional(kwargs.get("additional"))
result = query_obj.with_near_vector(vector).with_limit(k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding is not None:
embedding = self._embedding.embed_query(query)
else:
raise ValueError(
"max_marginal_relevance_search requires a suitable Embeddings object"
)
return self.max_marginal_relevance_search_by_vector(
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
results = (
query_obj.with_additional("vector")
.with_near_vector(vector)
.with_limit(fetch_k)
.do()
)
payload = results["data"]["Get"][self._index_name]
embeddings = [result["_additional"]["vector"] for result in payload]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
docs = []
for idx in mmr_selected:
text = payload[idx].pop(self._text_key)
payload[idx].pop("_additional")
meta = payload[idx]
docs.append(Document(page_content=text, metadata=meta))
return docs
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""
        Return list of documents most similar to the query text, each paired
        with a similarity score computed as the dot product of the stored
        vector and the query embedding; higher scores indicate greater similarity.
"""
if self._embedding is None:
raise ValueError(
"_embedding cannot be None for similarity_search_with_score"
)
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
if kwargs.get("tenant"):
query_obj = query_obj.with_tenant(kwargs.get("tenant"))
embedded_query = self._embedding.embed_query(query)
if not self._by_text:
vector = {"vector": embedded_query}
result = (
query_obj.with_near_vector(vector)
.with_limit(k)
.with_additional("vector")
.do()
)
else:
result = (
query_obj.with_near_text(content)
.with_limit(k)
.with_additional("vector")
.do()
)
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs_and_scores = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
score = np.dot(res["_additional"]["vector"], embedded_query)
docs_and_scores.append((Document(page_content=text, metadata=res), score))
return docs_and_scores
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
*,
client: Optional[weaviate.Client] = None,
weaviate_url: Optional[str] = None,
weaviate_api_key: Optional[str] = None,
batch_size: Optional[int] = None,
index_name: Optional[str] = None,
text_key: str = "text",
by_text: bool = False,
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_score_normalizer,
**kwargs: Any,
) -> Weaviate:
"""Construct Weaviate wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Weaviate instance.
3. Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Args:
texts: Texts to add to vector store.
embedding: Text embedding model to use.
metadatas: Metadata associated with each text.
client: weaviate.Client to use.
weaviate_url: The Weaviate URL. If using Weaviate Cloud Services get it
from the ``Details`` tab. Can be passed in as a named param or by
setting the environment variable ``WEAVIATE_URL``. Should not be
specified if client is provided.
weaviate_api_key: The Weaviate API key. If enabled and using Weaviate Cloud
Services, get it from ``Details`` tab. Can be passed in as a named param
or by setting the environment variable ``WEAVIATE_API_KEY``. Should
not be specified if client is provided.
batch_size: Size of batch operations.
index_name: Index name.
text_key: Key to use for uploading/retrieving text to/from vectorstore.
by_text: Whether to search by text or by embedding.
relevance_score_fn: Function for converting whatever distance function the
vector store uses to a relevance score, which is a normalized similarity
score (0 means dissimilar, 1 means similar).
**kwargs: Additional named parameters to pass to ``Weaviate.__init__()``.
Example:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
"""
try:
from weaviate.util import get_valid_uuid
except ImportError as e:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`"
) from e
client = client or _create_weaviate_client(
url=weaviate_url,
api_key=weaviate_api_key,
)
if batch_size:
client.batch.configure(batch_size=batch_size)
index_name = index_name or f"LangChain_{uuid4().hex}"
schema = _default_schema(index_name)
# check whether the index already exists
if not client.schema.contains(schema):
client.schema.create_class(schema)
embeddings = embedding.embed_documents(texts) if embedding else None
attributes = list(metadatas[0].keys()) if metadatas else None
with client.batch as batch:
for i, text in enumerate(texts):
data_properties = {
text_key: text,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
# If the UUID of one of the objects already exists
                # then the existing object will be replaced by the new object.
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
else:
_id = get_valid_uuid(uuid4())
# if an embedding strategy is not provided, we let
# weaviate create the embedding. Note that this will only
# work if weaviate has been installed with a vectorizer module
# like text2vec-contextionary for example
params = {
"uuid": _id,
"data_object": data_properties,
"class_name": index_name,
}
if embeddings is not None:
params["vector"] = embeddings[i]
batch.add_data_object(**params)
batch.flush()
return cls(
client,
index_name,
text_key,
embedding=embedding,
attributes=attributes,
relevance_score_fn=relevance_score_fn,
by_text=by_text,
**kwargs,
)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
# TODO: Check if this can be done in bulk
for id in ids:
self._client.data_object.delete(uuid=id)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~llms~fake_chat_model.py | """Fake Chat Model wrapper for testing purposes."""
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import SimpleChatModel
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import AIMessage, BaseMessage
class FakeChatModel(SimpleChatModel):
"""Fake Chat Model wrapper for testing purposes."""
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return "fake response"
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = "fake response"
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@property
def _llm_type(self) -> str:
return "fake-chat-model"
@property
def _identifying_params(self) -> Dict[str, Any]:
return {"key": "fake"}
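# Illustrative sketch (not part of the original test helper): how the fake model
# might be invoked directly; the message content is arbitrary.
if __name__ == "__main__":
    from langchain.schema.messages import HumanMessage
    model = FakeChatModel()
    reply = model([HumanMessage(content="hello")])
    print(reply.content)  # -> "fake response"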
| [
"fake response"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~document_loaders~test_url_playwright.py | """Tests for the Playwright URL loader"""
from typing import TYPE_CHECKING
import pytest
from langchain.document_loaders import PlaywrightURLLoader
from langchain.document_loaders.url_playwright import PlaywrightEvaluator
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.async_api import Page as AsyncPage
from playwright.async_api import Response as AsyncResponse
from playwright.sync_api import Browser, Page, Response
class TestEvaluator(PlaywrightEvaluator):
"""A simple evaluator for testing purposes."""
def evaluate(self, page: "Page", browser: "Browser", response: "Response") -> str:
return "test"
async def evaluate_async(
self, page: "AsyncPage", browser: "AsyncBrowser", response: "AsyncResponse"
) -> str:
return "test"
def test_playwright_url_loader() -> None:
"""Test Playwright URL loader."""
urls = [
"https://www.youtube.com/watch?v=dQw4w9WgXcQ",
"https://goo.gl/maps/NDSHwePEyaHMFGwh8",
"https://techmeme.com",
"https://techcrunch.com",
]
loader = PlaywrightURLLoader(
urls=urls,
remove_selectors=["header", "footer"],
continue_on_failure=False,
headless=True,
)
docs = loader.load()
assert len(docs) > 0
@pytest.mark.asyncio
async def test_playwright_async_url_loader() -> None:
"""Test Playwright async URL loader."""
urls = [
"https://www.youtube.com/watch?v=dQw4w9WgXcQ",
"https://goo.gl/maps/NDSHwePEyaHMFGwh8",
"https://techmeme.com",
"https://techcrunch.com",
]
loader = PlaywrightURLLoader(
urls=urls,
remove_selectors=["header", "footer"],
continue_on_failure=False,
headless=True,
)
docs = await loader.aload()
assert len(docs) > 0
def test_playwright_url_loader_with_custom_evaluator() -> None:
"""Test Playwright URL loader with a custom evaluator."""
urls = ["https://www.youtube.com/watch?v=dQw4w9WgXcQ"]
loader = PlaywrightURLLoader(
urls=urls,
evaluator=TestEvaluator(),
continue_on_failure=False,
headless=True,
)
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == "test"
@pytest.mark.asyncio
async def test_playwright_async_url_loader_with_custom_evaluator() -> None:
"""Test Playwright async URL loader with a custom evaluator."""
urls = ["https://www.youtube.com/watch?v=dQw4w9WgXcQ"]
loader = PlaywrightURLLoader(
urls=urls,
evaluator=TestEvaluator(),
continue_on_failure=False,
headless=True,
)
docs = await loader.aload()
assert len(docs) == 1
assert docs[0].page_content == "test"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~starrocks.py | from __future__ import annotations
import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.pydantic_v1 import BaseSettings
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
logger = logging.getLogger()
DEBUG = False
def has_mul_sub_str(s: str, *args: Any) -> bool:
"""
Check if a string has multiple substrings.
Args:
s: The string to check
*args: The substrings to check for in the string
Returns:
bool: True if all substrings are present in the string, False otherwise
"""
for a in args:
if a not in s:
return False
return True
def debug_output(s: Any) -> None:
"""
Print a debug message if DEBUG is True.
Args:
s: The message to print
"""
if DEBUG:
print(s)
def get_named_result(connection: Any, query: str) -> List[dict[str, Any]]:
"""
Get a named result from a query.
Args:
connection: The connection to the database
query: The query to execute
Returns:
List[dict[str, Any]]: The result of the query
"""
cursor = connection.cursor()
cursor.execute(query)
columns = cursor.description
result = []
for value in cursor.fetchall():
r = {}
for idx, datum in enumerate(value):
k = columns[idx][0]
r[k] = datum
result.append(r)
debug_output(result)
cursor.close()
return result
class StarRocksSettings(BaseSettings):
"""StarRocks client configuration.
Attribute:
        host (str) : Hostname of the StarRocks backend.
                     Defaults to 'localhost'.
        port (int) : Port to connect to StarRocks over the MySQL protocol.
                     Defaults to 9030.
        username (str) : Username to login. Defaults to 'root'.
        password (str) : Password to login. Defaults to '' (empty).
database (str) : Database name to find the table. Defaults to 'default'.
table (str) : Table name to operate on.
                      Defaults to 'langchain'.
column_map (Dict) : Column type map to project column name onto langchain
                            semantics. Must have keys: `id`, `document`, `embedding`,
                            `metadata`, matching the columns of the table. For example:
.. code-block:: python
{
'id': 'text_id',
'embedding': 'text_embedding',
'document': 'text_plain',
'metadata': 'metadata_dictionary_in_json',
}
Defaults to identity map.
"""
host: str = "localhost"
port: int = 9030
username: str = "root"
password: str = ""
column_map: Dict[str, str] = {
"id": "id",
"document": "document",
"embedding": "embedding",
"metadata": "metadata",
}
database: str = "default"
table: str = "langchain"
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
class Config:
env_file = ".env"
env_prefix = "starrocks_"
env_file_encoding = "utf-8"
class StarRocks(VectorStore):
"""`StarRocks` vector store.
You need a `pymysql` python package, and a valid account
to connect to StarRocks.
Right now StarRocks has only implemented `cosine_similarity` function to
compute distance between two vectors. And there is no vector inside right now,
so we have to iterate all vectors and compute spatial distance.
For more information, please visit
[StarRocks official site](https://www.starrocks.io/)
[StarRocks github](https://github.com/StarRocks/starrocks)
"""
def __init__(
self,
embedding: Embeddings,
config: Optional[StarRocksSettings] = None,
**kwargs: Any,
) -> None:
"""StarRocks Wrapper to LangChain
embedding_function (Embeddings):
config (StarRocksSettings): Configuration to StarRocks Client
"""
try:
import pymysql # type: ignore[import]
except ImportError:
raise ImportError(
"Could not import pymysql python package. "
"Please install it with `pip install pymysql`."
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
# Just in case if tqdm is not installed
self.pgbar = lambda x, **kwargs: x
super().__init__()
if config is not None:
self.config = config
else:
self.config = StarRocksSettings()
assert self.config
assert self.config.host and self.config.port
assert self.config.column_map and self.config.database and self.config.table
for k in ["id", "embedding", "document", "metadata"]:
assert k in self.config.column_map
# initialize the schema
dim = len(embedding.embed_query("test"))
self.schema = f"""\
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} string,
{self.config.column_map['document']} string,
{self.config.column_map['embedding']} array<float>,
{self.config.column_map['metadata']} string
) ENGINE = OLAP PRIMARY KEY(id) DISTRIBUTED BY HASH(id) \
PROPERTIES ("replication_num" = "1")\
"""
self.dim = dim
self.BS = "\\"
self.must_escape = ("\\", "'")
self.embedding_function = embedding
self.dist_order = "DESC"
debug_output(self.config)
# Create a connection to StarRocks
self.connection = pymysql.connect(
host=self.config.host,
port=self.config.port,
user=self.config.username,
password=self.config.password,
database=self.config.database,
**kwargs,
)
debug_output(self.schema)
get_named_result(self.connection, self.schema)
def escape_str(self, value: str) -> str:
return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str:
ks = ",".join(column_names)
embed_tuple_index = tuple(column_names).index(
self.config.column_map["embedding"]
)
_data = []
for n in transac:
n = ",".join(
[
f"'{self.escape_str(str(_n))}'"
if idx != embed_tuple_index
else f"array<float>{str(_n)}"
for (idx, _n) in enumerate(n)
]
)
_data.append(f"({n})")
i_str = f"""
INSERT INTO
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None:
_insert_query = self._build_insert_sql(transac, column_names)
debug_output(_insert_query)
get_named_result(self.connection, _insert_query)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
batch_size: int = 32,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Insert more texts through the embeddings and add to the VectorStore.
Args:
texts: Iterable of strings to add to the VectorStore.
ids: Optional list of ids to associate with the texts.
            batch_size: Batch size of insertion.
            metadatas: Optional metadata dicts to associate with each text.
Returns:
List of ids from adding the texts into the VectorStore.
"""
# Embed and create the documents
ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts]
colmap_ = self.config.column_map
transac = []
column_names = {
colmap_["id"]: ids,
colmap_["document"]: texts,
colmap_["embedding"]: self.embedding_function.embed_documents(list(texts)),
}
metadatas = metadatas or [{} for _ in texts]
column_names[colmap_["metadata"]] = map(json.dumps, metadatas)
assert len(set(colmap_) - set(column_names)) >= 0
keys, values = zip(*column_names.items())
try:
t = None
for v in self.pgbar(
zip(*values), desc="Inserting data...", total=len(metadatas)
):
assert (
len(v[keys.index(self.config.column_map["embedding"])]) == self.dim
)
transac.append(v)
if len(transac) == batch_size:
if t:
t.join()
t = Thread(target=self._insert, args=[transac, keys])
t.start()
transac = []
if len(transac) > 0:
if t:
t.join()
self._insert(transac, keys)
return [i for i in ids]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
config: Optional[StarRocksSettings] = None,
text_ids: Optional[Iterable[str]] = None,
batch_size: int = 32,
**kwargs: Any,
) -> StarRocks:
"""Create StarRocks wrapper with existing texts
Args:
            embedding (Embeddings): Function to extract text embedding
texts (Iterable[str]): List or tuple of strings to be added
config (StarRocksSettings, Optional): StarRocks configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
            batch_size (int, optional): Batch size when transmitting data to StarRocks.
                Defaults to 32.
            metadatas (List[dict], optional): Metadata for each text. Defaults to None.
Returns:
StarRocks Index
"""
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
return ctx
def __repr__(self) -> str:
"""Text representation for StarRocks Vector Store, prints backends, username
and schemas. Easy to use with `str(StarRocks())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ "
_repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
_repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n"
width = 25
fields = 3
_repr += "-" * (width * fields + 1) + "\n"
columns = ["name", "type", "key"]
_repr += f"|\033[94m{columns[0]:24s}\033[0m|\033[96m{columns[1]:24s}"
_repr += f"\033[0m|\033[96m{columns[2]:24s}\033[0m|\n"
_repr += "-" * (width * fields + 1) + "\n"
q_str = f"DESC {self.config.database}.{self.config.table}"
debug_output(q_str)
rs = get_named_result(self.connection, q_str)
for r in rs:
_repr += f"|\033[94m{r['Field']:24s}\033[0m|\033[96m{r['Type']:24s}"
_repr += f"\033[0m|\033[96m{r['Key']:24s}\033[0m|\n"
_repr += "-" * (width * fields + 1) + "\n"
return _repr
def _build_query_sql(
self, q_emb: List[float], topk: int, where_str: Optional[str] = None
) -> str:
q_emb_str = ",".join(map(str, q_emb))
if where_str:
where_str = f"WHERE {where_str}"
else:
where_str = ""
q_str = f"""
SELECT {self.config.column_map['document']},
{self.config.column_map['metadata']},
cosine_similarity_norm(array<float>[{q_emb_str}],
{self.config.column_map['embedding']}) as dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY dist {self.dist_order}
LIMIT {topk}
"""
debug_output(q_str)
return q_str
def similarity_search(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Document]:
"""Perform a similarity search with StarRocks
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(
self.embedding_function.embed_query(query), k, where_str, **kwargs
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search with StarRocks by vectors
Args:
            embedding (List[float]): query embedding vector
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
            List[Document]: List of Documents
"""
q_str = self._build_query_sql(embedding, k, where_str)
try:
return [
Document(
page_content=r[self.config.column_map["document"]],
metadata=json.loads(r[self.config.column_map["metadata"]]),
)
for r in get_named_result(self.connection, q_str)
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with StarRocks
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
            List[Tuple[Document, float]]: List of (document, similarity score) pairs
"""
q_str = self._build_query_sql(
self.embedding_function.embed_query(query), k, where_str
)
try:
return [
(
Document(
page_content=r[self.config.column_map["document"]],
metadata=json.loads(r[self.config.column_map["metadata"]]),
),
r["dist"],
)
for r in get_named_result(self.connection, q_str)
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def drop(self) -> None:
"""
Helper function: Drop data
"""
get_named_result(
self.connection,
f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}",
)
@property
def metadata_column(self) -> str:
return self.config.column_map["metadata"]
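# Illustrative sketch (not part of the original module): building a store from a
# few texts and querying it. The host, table name, texts and embedding model are
# placeholders; any Embeddings implementation and a reachable StarRocks instance
# are assumed.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    settings = StarRocksSettings(host="127.0.0.1", port=9030, table="demo_docs")
    store = StarRocks.from_texts(
        ["StarRocks is an OLAP database.", "LangChain orchestrates LLM apps."],
        OpenAIEmbeddings(),
        config=settings,
    )
    for doc in store.similarity_search("What is StarRocks?", k=1):
        print(doc.page_content)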
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~tracers~run_collector.py | """A tracer that collects all nested runs in a list."""
from typing import Any, List, Optional, Union
from uuid import UUID
from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.schemas import Run
class RunCollectorCallbackHandler(BaseTracer):
"""
A tracer that collects all nested runs in a list.
This tracer is useful for inspection and evaluation purposes.
Parameters
----------
example_id : Optional[Union[UUID, str]], default=None
The ID of the example being traced. It can be either a UUID or a string.
"""
name: str = "run-collector_callback_handler"
def __init__(
self, example_id: Optional[Union[UUID, str]] = None, **kwargs: Any
) -> None:
"""
Initialize the RunCollectorCallbackHandler.
Parameters
----------
example_id : Optional[Union[UUID, str]], default=None
The ID of the example being traced. It can be either a UUID or a string.
"""
super().__init__(**kwargs)
self.example_id = (
UUID(example_id) if isinstance(example_id, str) else example_id
)
self.traced_runs: List[Run] = []
def _persist_run(self, run: Run) -> None:
"""
Persist a run by adding it to the traced_runs list.
Parameters
----------
run : Run
The run to be persisted.
"""
run_ = run.copy()
run_.reference_example_id = self.example_id
self.traced_runs.append(run_)
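# Illustrative sketch (not part of the original module): pass the handler as a
# callback to any chain or LLM call, then inspect the captured runs. ``chain`` is
# assumed to expose the standard Runnable ``invoke`` interface.
def _example_collect_runs(chain: Any) -> List[Run]:
    collector = RunCollectorCallbackHandler()
    chain.invoke("hello", config={"callbacks": [collector]})
    return collector.traced_runs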
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~predibase.py | from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Field
class Predibase(LLM):
"""Use your Predibase models with Langchain.
To use, you should have the ``predibase`` python package installed,
and have your Predibase API key.
"""
model: str
predibase_api_key: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@property
def _llm_type(self) -> str:
return "predibase"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any
) -> str:
try:
from predibase import PredibaseClient
pc = PredibaseClient(token=self.predibase_api_key)
except ImportError as e:
raise ImportError(
"Could not import Predibase Python package. "
"Please install it with `pip install predibase`."
) from e
except ValueError as e:
raise ValueError("Your API key is not correct. Please try again") from e
# load model and version
results = pc.prompt(prompt, model_name=self.model)
return results[0].response
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
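# Illustrative sketch (not part of the original module): the model name and the
# environment variable holding the API token below are placeholders.
if __name__ == "__main__":
    import os
    llm = Predibase(
        model="llama-2-13b", predibase_api_key=os.environ["PREDIBASE_API_TOKEN"]
    )
    print(llm("Why is the sky blue?"))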
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~document_loaders~test_json_loader.py | import io
from typing import Any, Dict
import pytest
from pytest import raises
from pytest_mock import MockerFixture
from langchain.docstore.document import Document
from langchain.document_loaders.json_loader import JSONLoader
pytestmark = pytest.mark.requires("jq")
def test_load_valid_string_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value='[{"text": "value1"}, {"text": "value2"}]',
)
loader = JSONLoader(file_path=file_path, jq_schema=".[].text", text_content=True)
result = loader.load()
assert result == expected_docs
def test_load_valid_dict_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content='{"text": "value1"}',
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content='{"text": "value2"}',
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value="""
[{"text": "value1"}, {"text": "value2"}]
""",
)
loader = JSONLoader(file_path=file_path, jq_schema=".[]", text_content=False)
result = loader.load()
assert result == expected_docs
def test_load_valid_bool_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="False",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="True",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value="""
[
{"flag": false}, {"flag": true}
]
""",
)
loader = JSONLoader(file_path=file_path, jq_schema=".[].flag", text_content=False)
result = loader.load()
assert result == expected_docs
def test_load_valid_numeric_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="99",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="99.5",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value="""
[
{"num": 99}, {"num": 99.5}
]
""",
)
loader = JSONLoader(file_path=file_path, jq_schema=".[].num", text_content=False)
result = loader.load()
assert result == expected_docs
def test_load_invalid_test_content(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(
"pathlib.Path.read_text",
return_value="""
[{"text": "value1"}, {"text": "value2"}]
""",
)
loader = JSONLoader(file_path=file_path, jq_schema=".[]", text_content=True)
with raises(ValueError):
loader.load()
def test_load_jsonlines(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch(
"pathlib.Path.open",
return_value=io.StringIO(
"""
{"text": "value1"}
{"text": "value2"}
"""
),
)
loader = JSONLoader(
file_path=file_path, jq_schema=".", content_key="text", json_lines=True
)
result = loader.load()
assert result == expected_docs
@pytest.mark.parametrize(
"params",
(
{"jq_schema": ".[].text"},
{"jq_schema": ".[]", "content_key": "text"},
),
)
def test_load_jsonlines_list(params: Dict, mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2},
),
Document(
page_content="value3",
metadata={"source": file_path, "seq_num": 3},
),
Document(
page_content="value4",
metadata={"source": file_path, "seq_num": 4},
),
]
mocker.patch(
"pathlib.Path.open",
return_value=io.StringIO(
"""
[{"text": "value1"}, {"text": "value2"}]
[{"text": "value3"}, {"text": "value4"}]
"""
),
)
loader = JSONLoader(file_path=file_path, json_lines=True, **params)
result = loader.load()
assert result == expected_docs
def test_load_empty_jsonlines(mocker: MockerFixture) -> None:
mocker.patch("pathlib.Path.open", return_value=io.StringIO(""))
loader = JSONLoader(file_path="file_path", jq_schema=".[].text", json_lines=True)
result = loader.load()
assert result == []
@pytest.mark.parametrize(
"patch_func,patch_func_value,kwargs",
(
# JSON content.
(
"pathlib.Path.read_text",
'[{"text": "value1"}, {"text": "value2"}]',
{"jq_schema": ".[]", "content_key": "text"},
),
# JSON Lines content.
(
"pathlib.Path.open",
io.StringIO(
"""
{"text": "value1"}
{"text": "value2"}
"""
),
{"jq_schema": ".", "content_key": "text", "json_lines": True},
),
),
)
def test_json_meta(
patch_func: str, patch_func_value: Any, kwargs: Dict, mocker: MockerFixture
) -> None:
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(patch_func, return_value=patch_func_value)
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1, "x": "value1-meta"},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2, "x": "value2-meta"},
),
]
def metadata_func(record: Dict, metadata: Dict) -> Dict:
metadata["x"] = f"{record['text']}-meta"
return metadata
loader = JSONLoader(file_path=file_path, metadata_func=metadata_func, **kwargs)
result = loader.load()
assert result == expected_docs
| [
"text"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~tavily_search_api.py | import os
from enum import Enum
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import Document
from langchain.schema.retriever import BaseRetriever
class SearchDepth(Enum):
"""Search depth as enumerator."""
BASIC = "basic"
ADVANCED = "advanced"
class TavilySearchAPIRetriever(BaseRetriever):
"""Tavily Search API retriever."""
k: int = 10
include_generated_answer: bool = False
include_raw_content: bool = False
include_images: bool = False
search_depth: SearchDepth = SearchDepth.BASIC
include_domains: Optional[List[str]] = None
exclude_domains: Optional[List[str]] = None
kwargs: Optional[Dict[str, Any]] = {}
api_key: Optional[str] = None
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
try:
from tavily import Client
except ImportError:
raise ImportError(
"Tavily python package not found. "
"Please install it with `pip install tavily-python`."
)
tavily = Client(api_key=self.api_key or os.environ["TAVILY_API_KEY"])
max_results = self.k if not self.include_generated_answer else self.k - 1
response = tavily.search(
query=query,
max_results=max_results,
search_depth=self.search_depth.value,
include_answer=self.include_generated_answer,
include_domains=self.include_domains,
exclude_domains=self.exclude_domains,
include_raw_content=self.include_raw_content,
include_images=self.include_images,
**self.kwargs
)
docs = [
Document(
page_content=result.get("content", "")
if not self.include_raw_content
else result.get("raw_content", ""),
metadata={
"title": result.get("title", ""),
"source": result.get("url", ""),
**{
k: v
for k, v in result.items()
if k not in ("content", "title", "url", "raw_content")
},
"images": response.get("images"),
},
)
for result in response.get("results")
]
if self.include_generated_answer:
docs = [
Document(
page_content=response.get("answer", ""),
metadata={
"title": "Suggested Answer",
"source": "https://tavily.com/",
},
),
*docs,
]
return docs
| [] |
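A hedged usage sketch of the retriever above; the query is illustrative, and the TAVILY_API_KEY environment variable (or the api_key field) is assumed to be set.

from langchain.retrievers.tavily_search_api import SearchDepth, TavilySearchAPIRetriever

# Assumes TAVILY_API_KEY is exported; otherwise pass api_key=... explicitly.
retriever = TavilySearchAPIRetriever(
    k=5,
    include_generated_answer=True,
    search_depth=SearchDepth.ADVANCED,
)
docs = retriever.get_relevant_documents("how does retrieval-augmented generation work?")
for doc in docs:
    print(doc.metadata.get("source"), "-", doc.page_content[:80])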
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~openapi.py | """Utility functions for parsing an OpenAPI spec."""
from __future__ import annotations
import copy
import json
import logging
import re
from enum import Enum
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union
import requests
import yaml
from langchain.pydantic_v1 import ValidationError
logger = logging.getLogger(__name__)
class HTTPVerb(str, Enum):
"""Enumerator of the HTTP verbs."""
GET = "get"
PUT = "put"
POST = "post"
DELETE = "delete"
OPTIONS = "options"
HEAD = "head"
PATCH = "patch"
TRACE = "trace"
@classmethod
def from_str(cls, verb: str) -> HTTPVerb:
"""Parse an HTTP verb."""
try:
return cls(verb)
except ValueError:
raise ValueError(f"Invalid HTTP verb. Valid values are {cls.__members__}")
if TYPE_CHECKING:
from openapi_pydantic import (
Components,
Operation,
Parameter,
PathItem,
Paths,
Reference,
RequestBody,
Schema,
)
try:
from openapi_pydantic import OpenAPI
except ImportError:
OpenAPI = object # type: ignore
class OpenAPISpec(OpenAPI):
"""OpenAPI Model that removes mis-formatted parts of the spec."""
openapi: str = "3.1.0" # overriding overly restrictive type from parent class
@property
def _paths_strict(self) -> Paths:
if not self.paths:
raise ValueError("No paths found in spec")
return self.paths
def _get_path_strict(self, path: str) -> PathItem:
path_item = self._paths_strict.get(path)
if not path_item:
raise ValueError(f"No path found for {path}")
return path_item
@property
def _components_strict(self) -> Components:
"""Get components or err."""
if self.components is None:
raise ValueError("No components found in spec. ")
return self.components
@property
def _parameters_strict(self) -> Dict[str, Union[Parameter, Reference]]:
"""Get parameters or err."""
parameters = self._components_strict.parameters
if parameters is None:
raise ValueError("No parameters found in spec. ")
return parameters
@property
def _schemas_strict(self) -> Dict[str, Schema]:
"""Get the dictionary of schemas or err."""
schemas = self._components_strict.schemas
if schemas is None:
raise ValueError("No schemas found in spec. ")
return schemas
@property
def _request_bodies_strict(self) -> Dict[str, Union[RequestBody, Reference]]:
"""Get the request body or err."""
request_bodies = self._components_strict.requestBodies
if request_bodies is None:
raise ValueError("No request body found in spec. ")
return request_bodies
def _get_referenced_parameter(self, ref: Reference) -> Union[Parameter, Reference]:
"""Get a parameter (or nested reference) or err."""
ref_name = ref.ref.split("/")[-1]
parameters = self._parameters_strict
if ref_name not in parameters:
raise ValueError(f"No parameter found for {ref_name}")
return parameters[ref_name]
def _get_root_referenced_parameter(self, ref: Reference) -> Parameter:
"""Get the root reference or err."""
from openapi_pydantic import Reference
parameter = self._get_referenced_parameter(ref)
while isinstance(parameter, Reference):
parameter = self._get_referenced_parameter(parameter)
return parameter
def get_referenced_schema(self, ref: Reference) -> Schema:
"""Get a schema (or nested reference) or err."""
ref_name = ref.ref.split("/")[-1]
schemas = self._schemas_strict
if ref_name not in schemas:
raise ValueError(f"No schema found for {ref_name}")
return schemas[ref_name]
def get_schema(self, schema: Union[Reference, Schema]) -> Schema:
from openapi_pydantic import Reference
if isinstance(schema, Reference):
return self.get_referenced_schema(schema)
return schema
def _get_root_referenced_schema(self, ref: Reference) -> Schema:
"""Get the root reference or err."""
from openapi_pydantic import Reference
schema = self.get_referenced_schema(ref)
while isinstance(schema, Reference):
schema = self.get_referenced_schema(schema)
return schema
def _get_referenced_request_body(
self, ref: Reference
) -> Optional[Union[Reference, RequestBody]]:
"""Get a request body (or nested reference) or err."""
ref_name = ref.ref.split("/")[-1]
request_bodies = self._request_bodies_strict
if ref_name not in request_bodies:
raise ValueError(f"No request body found for {ref_name}")
return request_bodies[ref_name]
def _get_root_referenced_request_body(
self, ref: Reference
) -> Optional[RequestBody]:
"""Get the root request Body or err."""
from openapi_pydantic import Reference
request_body = self._get_referenced_request_body(ref)
while isinstance(request_body, Reference):
request_body = self._get_referenced_request_body(request_body)
return request_body
@staticmethod
def _alert_unsupported_spec(obj: dict) -> None:
"""Alert if the spec is not supported."""
warning_message = (
" This may result in degraded performance."
+ " Convert your OpenAPI spec to 3.1.* spec"
+ " for better support."
)
swagger_version = obj.get("swagger")
openapi_version = obj.get("openapi")
if isinstance(openapi_version, str):
if openapi_version != "3.1.0":
logger.warning(
f"Attempting to load an OpenAPI {openapi_version}"
f" spec. {warning_message}"
)
else:
pass
elif isinstance(swagger_version, str):
logger.warning(
f"Attempting to load a Swagger {swagger_version}"
f" spec. {warning_message}"
)
else:
raise ValueError(
"Attempting to load an unsupported spec:"
f"\n\n{obj}\n{warning_message}"
)
@classmethod
def parse_obj(cls, obj: dict) -> OpenAPISpec:
try:
cls._alert_unsupported_spec(obj)
return super().parse_obj(obj)
except ValidationError as e:
# We are handling possibly misconfigured specs and
# want to do a best-effort job to get a reasonable interface out of it.
new_obj = copy.deepcopy(obj)
for error in e.errors():
keys = error["loc"]
item = new_obj
for key in keys[:-1]:
item = item[key]
item.pop(keys[-1], None)
return cls.parse_obj(new_obj)
@classmethod
def from_spec_dict(cls, spec_dict: dict) -> OpenAPISpec:
"""Get an OpenAPI spec from a dict."""
return cls.parse_obj(spec_dict)
@classmethod
def from_text(cls, text: str) -> OpenAPISpec:
"""Get an OpenAPI spec from a text."""
try:
spec_dict = json.loads(text)
except json.JSONDecodeError:
spec_dict = yaml.safe_load(text)
return cls.from_spec_dict(spec_dict)
@classmethod
def from_file(cls, path: Union[str, Path]) -> OpenAPISpec:
"""Get an OpenAPI spec from a file path."""
path_ = path if isinstance(path, Path) else Path(path)
if not path_.exists():
raise FileNotFoundError(f"{path} does not exist")
with path_.open("r") as f:
return cls.from_text(f.read())
@classmethod
def from_url(cls, url: str) -> OpenAPISpec:
"""Get an OpenAPI spec from a URL."""
response = requests.get(url)
return cls.from_text(response.text)
@property
def base_url(self) -> str:
"""Get the base url."""
return self.servers[0].url
def get_methods_for_path(self, path: str) -> List[str]:
"""Return a list of valid methods for the specified path."""
from openapi_pydantic import Operation
path_item = self._get_path_strict(path)
results = []
for method in HTTPVerb:
operation = getattr(path_item, method.value, None)
if isinstance(operation, Operation):
results.append(method.value)
return results
def get_parameters_for_path(self, path: str) -> List[Parameter]:
from openapi_pydantic import Reference
path_item = self._get_path_strict(path)
parameters = []
if not path_item.parameters:
return []
for parameter in path_item.parameters:
if isinstance(parameter, Reference):
parameter = self._get_root_referenced_parameter(parameter)
parameters.append(parameter)
return parameters
def get_operation(self, path: str, method: str) -> Operation:
"""Get the operation object for a given path and HTTP method."""
from openapi_pydantic import Operation
path_item = self._get_path_strict(path)
operation_obj = getattr(path_item, method, None)
if not isinstance(operation_obj, Operation):
raise ValueError(f"No {method} method found for {path}")
return operation_obj
def get_parameters_for_operation(self, operation: Operation) -> List[Parameter]:
"""Get the components for a given operation."""
from openapi_pydantic import Reference
parameters = []
if operation.parameters:
for parameter in operation.parameters:
if isinstance(parameter, Reference):
parameter = self._get_root_referenced_parameter(parameter)
parameters.append(parameter)
return parameters
def get_request_body_for_operation(
self, operation: Operation
) -> Optional[RequestBody]:
"""Get the request body for a given operation."""
from openapi_pydantic import Reference
request_body = operation.requestBody
if isinstance(request_body, Reference):
request_body = self._get_root_referenced_request_body(request_body)
return request_body
@staticmethod
def get_cleaned_operation_id(operation: Operation, path: str, method: str) -> str:
"""Get a cleaned operation id from an operation id."""
operation_id = operation.operationId
if operation_id is None:
# Replace all punctuation of any kind with underscore
path = re.sub(r"[^a-zA-Z0-9]", "_", path.lstrip("/"))
operation_id = f"{path}_{method}"
return operation_id.replace("-", "_").replace(".", "_").replace("/", "_")
| [] |
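A hedged sketch of driving the spec helper end to end; the Pet Store URL is only illustrative, and full parsing assumes the openapi-pydantic package is installed.

from langchain.utilities.openapi import OpenAPISpec

# Illustrative spec URL; any OpenAPI 3.x JSON or YAML document should work.
spec = OpenAPISpec.from_url("https://petstore3.swagger.io/api/v3/openapi.json")

print(spec.base_url)  # first server URL declared in the spec
for path in (spec.paths or {}):
    for method in spec.get_methods_for_path(path):
        operation = spec.get_operation(path, method)
        print(method.upper(), path, OpenAPISpec.get_cleaned_operation_id(operation, path, method))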
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~vald.py | """Wrapper around Vald vector database."""
from __future__ import annotations
from typing import Any, Iterable, List, Optional, Tuple, Type
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
class Vald(VectorStore):
"""Wrapper around Vald vector database.
To use, you should have the ``vald-client-python`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Vald
texts = ['foo', 'bar', 'baz']
vald = Vald.from_texts(
texts=texts,
embedding=HuggingFaceEmbeddings(),
host="localhost",
port=8080,
skip_strict_exist_check=False,
)
"""
def __init__(
self,
embedding: Embeddings,
host: str = "localhost",
port: int = 8080,
grpc_options: Tuple = (
("grpc.keepalive_time_ms", 1000 * 10),
("grpc.keepalive_timeout_ms", 1000 * 10),
),
):
self._embedding = embedding
self.target = host + ":" + str(port)
self.grpc_options = grpc_options
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
skip_strict_exist_check: bool = False,
**kwargs: Any,
) -> List[str]:
"""
Args:
skip_strict_exist_check: Deprecated. This is not used basically.
"""
try:
import grpc
from vald.v1.payload import payload_pb2
from vald.v1.vald import upsert_pb2_grpc
except ImportError:
raise ValueError(
"Could not import vald-client-python python package. "
"Please install it with `pip install vald-client-python`."
)
channel = grpc.insecure_channel(self.target, options=self.grpc_options)
# Depending on the network quality,
# it is necessary to wait for ChannelConnectivity.READY.
# _ = grpc.channel_ready_future(channel).result(timeout=10)
stub = upsert_pb2_grpc.UpsertStub(channel)
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=skip_strict_exist_check)
ids = []
embs = self._embedding.embed_documents(list(texts))
for text, emb in zip(texts, embs):
vec = payload_pb2.Object.Vector(id=text, vector=emb)
res = stub.Upsert(payload_pb2.Upsert.Request(vector=vec, config=cfg))
ids.append(res.uuid)
channel.close()
return ids
def delete(
self,
ids: Optional[List[str]] = None,
skip_strict_exist_check: bool = False,
**kwargs: Any,
) -> Optional[bool]:
"""
Args:
skip_strict_exist_check: Deprecated. This is not used basically.
"""
try:
import grpc
from vald.v1.payload import payload_pb2
from vald.v1.vald import remove_pb2_grpc
except ImportError:
raise ValueError(
"Could not import vald-client-python python package. "
"Please install it with `pip install vald-client-python`."
)
if ids is None:
raise ValueError("No ids provided to delete")
channel = grpc.insecure_channel(self.target, options=self.grpc_options)
# Depending on the network quality,
# it is necessary to wait for ChannelConnectivity.READY.
# _ = grpc.channel_ready_future(channel).result(timeout=10)
stub = remove_pb2_grpc.RemoveStub(channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=skip_strict_exist_check)
for _id in ids:
oid = payload_pb2.Object.ID(id=_id)
_ = stub.Remove(payload_pb2.Remove.Request(id=oid, config=cfg))
channel.close()
return True
def similarity_search(
self,
query: str,
k: int = 4,
radius: float = -1.0,
epsilon: float = 0.01,
timeout: int = 3000000000,
**kwargs: Any,
) -> List[Document]:
docs_and_scores = self.similarity_search_with_score(
query, k, radius, epsilon, timeout
)
docs = []
for doc, _ in docs_and_scores:
docs.append(doc)
return docs
def similarity_search_with_score(
self,
query: str,
k: int = 4,
radius: float = -1.0,
epsilon: float = 0.01,
timeout: int = 3000000000,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
emb = self._embedding.embed_query(query)
docs_and_scores = self.similarity_search_with_score_by_vector(
emb, k, radius, epsilon, timeout
)
return docs_and_scores
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
radius: float = -1.0,
epsilon: float = 0.01,
timeout: int = 3000000000,
**kwargs: Any,
) -> List[Document]:
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding, k, radius, epsilon, timeout
)
docs = []
for doc, _ in docs_and_scores:
docs.append(doc)
return docs
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
radius: float = -1.0,
epsilon: float = 0.01,
timeout: int = 3000000000,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
try:
import grpc
from vald.v1.payload import payload_pb2
from vald.v1.vald import search_pb2_grpc
except ImportError:
raise ValueError(
"Could not import vald-client-python python package. "
"Please install it with `pip install vald-client-python`."
)
channel = grpc.insecure_channel(self.target, options=self.grpc_options)
# Depending on the network quality,
# it is necessary to wait for ChannelConnectivity.READY.
# _ = grpc.channel_ready_future(channel).result(timeout=10)
stub = search_pb2_grpc.SearchStub(channel)
cfg = payload_pb2.Search.Config(
num=k, radius=radius, epsilon=epsilon, timeout=timeout
)
res = stub.Search(payload_pb2.Search.Request(vector=embedding, config=cfg))
docs_and_scores = []
for result in res.results:
docs_and_scores.append((Document(page_content=result.id), result.distance))
channel.close()
return docs_and_scores
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
radius: float = -1.0,
epsilon: float = 0.01,
timeout: int = 3000000000,
**kwargs: Any,
) -> List[Document]:
emb = self._embedding.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
emb,
k=k,
fetch_k=fetch_k,
radius=radius,
epsilon=epsilon,
timeout=timeout,
lambda_mult=lambda_mult,
)
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
radius: float = -1.0,
epsilon: float = 0.01,
timeout: int = 3000000000,
**kwargs: Any,
) -> List[Document]:
try:
import grpc
from vald.v1.payload import payload_pb2
from vald.v1.vald import object_pb2_grpc
except ImportError:
raise ValueError(
"Could not import vald-client-python python package. "
"Please install it with `pip install vald-client-python`."
)
channel = grpc.insecure_channel(self.target, options=self.grpc_options)
# Depending on the network quality,
# it is necessary to wait for ChannelConnectivity.READY.
# _ = grpc.channel_ready_future(channel).result(timeout=10)
stub = object_pb2_grpc.ObjectStub(channel)
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding, k=fetch_k, radius=radius, epsilon=epsilon, timeout=timeout
)
docs = []
embs = []
for doc, _ in docs_and_scores:
vec = stub.GetObject(
payload_pb2.Object.VectorRequest(
id=payload_pb2.Object.ID(id=doc.page_content)
)
)
embs.append(vec.vector)
docs.append(doc)
mmr = maximal_marginal_relevance(
np.array(embedding),
embs,
lambda_mult=lambda_mult,
k=k,
)
channel.close()
return [docs[i] for i in mmr]
@classmethod
def from_texts(
cls: Type[Vald],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
host: str = "localhost",
port: int = 8080,
grpc_options: Tuple = (
("grpc.keepalive_time_ms", 1000 * 10),
("grpc.keepalive_timeout_ms", 1000 * 10),
),
skip_strict_exist_check: bool = False,
**kwargs: Any,
) -> Vald:
"""
Args:
skip_strict_exist_check: Deprecated. This is not used basically.
"""
vald = cls(
embedding=embedding,
host=host,
port=port,
grpc_options=grpc_options,
**kwargs,
)
vald.add_texts(
texts=texts,
metadatas=metadatas,
skip_strict_exist_check=skip_strict_exist_check,
)
return vald
"""We will support if there are any requests."""
# async def aadd_texts(
# self,
# texts: Iterable[str],
# metadatas: Optional[List[dict]] = None,
# **kwargs: Any,
# ) -> List[str]:
# pass
#
# def _select_relevance_score_fn(self) -> Callable[[float], float]:
# pass
#
# def _similarity_search_with_relevance_scores(
# self,
# query: str,
# k: int = 4,
# **kwargs: Any,
# ) -> List[Tuple[Document, float]]:
# pass
#
# def similarity_search_with_relevance_scores(
# self,
# query: str,
# k: int = 4,
# **kwargs: Any,
# ) -> List[Tuple[Document, float]]:
# pass
#
# async def amax_marginal_relevance_search_by_vector(
# self,
# embedding: List[float],
# k: int = 4,
# fetch_k: int = 20,
# lambda_mult: float = 0.5,
# **kwargs: Any,
# ) -> List[Document]:
# pass
#
# @classmethod
# async def afrom_texts(
# cls: Type[VST],
# texts: List[str],
# embedding: Embeddings,
# metadatas: Optional[List[dict]] = None,
# **kwargs: Any,
# ) -> VST:
# pass
| [] |
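A hedged sketch of querying a store like the one built in the class docstring; the host and port are placeholders for a running Vald cluster, and HuggingFaceEmbeddings is only an example embedding model.

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Vald

# Assumes a Vald agent is reachable at this address and already holds data.
vald = Vald(embedding=HuggingFaceEmbeddings(), host="localhost", port=8080)

docs = vald.similarity_search("foo", k=3)
docs_with_scores = vald.similarity_search_with_score("foo", k=3)
diverse_docs = vald.max_marginal_relevance_search("foo", k=3, fetch_k=10, lambda_mult=0.5)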
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~xinference.py | """Wrapper around Xinference embedding models."""
from typing import Any, List, Optional
from langchain.schema.embeddings import Embeddings
class XinferenceEmbeddings(Embeddings):
"""Xinference embedding models.
To use, you should have the xinference library installed:
.. code-block:: bash
pip install xinference
Check out: https://github.com/xorbitsai/inference
To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers.
Example:
To start a local instance of Xinference, run
.. code-block:: bash
$ xinference
You can also deploy Xinference in a distributed cluster. Here are the steps:
Starting the supervisor:
.. code-block:: bash
$ xinference-supervisor
Starting the worker:
.. code-block:: bash
$ xinference-worker
Then, launch a model using the command line interface (CLI).
Example:
.. code-block:: bash
$ xinference launch -n orca -s 3 -q q4_0
It will return a model UID. Then you can use Xinference Embeddings with LangChain.
Example:
.. code-block:: python
from langchain.embeddings import XinferenceEmbeddings
xinference = XinferenceEmbeddings(
server_url="http://0.0.0.0:9997",
model_uid = {model_uid} # replace model_uid with the model UID return from launching the model
)
""" # noqa: E501
client: Any
server_url: Optional[str]
"""URL of the xinference server"""
model_uid: Optional[str]
"""UID of the launched model"""
def __init__(
self, server_url: Optional[str] = None, model_uid: Optional[str] = None
):
try:
from xinference.client import RESTfulClient
except ImportError as e:
raise ImportError(
"Could not import RESTfulClient from xinference. Please install it"
" with `pip install xinference`."
) from e
super().__init__()
if server_url is None:
raise ValueError("Please provide server URL")
if model_uid is None:
raise ValueError("Please provide the model UID")
self.server_url = server_url
self.model_uid = model_uid
self.client = RESTfulClient(server_url)
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using Xinference.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
model = self.client.get_model(self.model_uid)
embeddings = [
model.create_embedding(text)["data"][0]["embedding"] for text in texts
]
return [list(map(float, e)) for e in embeddings]
def embed_query(self, text: str) -> List[float]:
"""Embed a query of documents using Xinference.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
model = self.client.get_model(self.model_uid)
embedding_res = model.create_embedding(text)
embedding = embedding_res["data"][0]["embedding"]
return list(map(float, embedding))
| [] |
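A hedged sketch of calling the two embedding methods once a model has been launched; the server URL and model UID are placeholders for the values returned by your own deployment.

from langchain.embeddings import XinferenceEmbeddings

# Placeholder endpoint and UID; substitute the values from `xinference launch`.
xinference = XinferenceEmbeddings(
    server_url="http://0.0.0.0:9997",
    model_uid="my-model-uid",
)
doc_vectors = xinference.embed_documents(["first document", "second document"])
query_vector = xinference.embed_query("a search query")
print(len(doc_vectors), len(query_vector))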
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~utilities~test_wolfram_alpha_api.py | """Integration test for Wolfram Alpha API Wrapper."""
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
def test_call() -> None:
"""Test that call gives the correct answer."""
search = WolframAlphaAPIWrapper()
output = search.run("what is 2x+18=x+5?")
assert "x = -13" in output
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~agents~test_public_api.py | from langchain.agents import __all__ as agents_all
_EXPECTED = [
"Agent",
"AgentExecutor",
"AgentExecutorIterator",
"AgentOutputParser",
"AgentType",
"BaseMultiActionAgent",
"BaseSingleActionAgent",
"ConversationalAgent",
"ConversationalChatAgent",
"ConversationalChatContextAgent",
"LLMSingleActionAgent",
"MRKLChain",
"OpenAIFunctionsAgent",
"OpenAIMultiFunctionsAgent",
"ReActChain",
"ReActTextWorldAgent",
"SelfAskWithSearchChain",
"StructuredChatAgent",
"Tool",
"XMLAgent",
"ZeroShotAgent",
"create_csv_agent",
"create_json_agent",
"create_openapi_agent",
"create_pandas_dataframe_agent",
"create_pbi_agent",
"create_pbi_chat_agent",
"create_spark_dataframe_agent",
"create_spark_sql_agent",
"create_sql_agent",
"create_vectorstore_agent",
"create_vectorstore_router_agent",
"get_all_tool_names",
"initialize_agent",
"load_agent",
"load_huggingface_tool",
"load_tools",
"tool",
"create_xorbits_agent",
]
def test_public_api() -> None:
"""Test for regressions or changes in the agents public API."""
assert sorted(agents_all) == sorted(_EXPECTED)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~base_language.py | """Deprecated module for BaseLanguageModel class, kept for backwards compatibility."""
from __future__ import annotations
from langchain.schema.language_model import BaseLanguageModel
__all__ = ["BaseLanguageModel"]
| [] |