Source code for langchain.embeddings.aleph_alpha (excerpt)
text = "This is a test text"
doc_result = embeddings.embed_documents([text])
query_result = embeddings.embed_query(text)
"""
def _embed(self, text: str) -> List[float]:
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
query_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Symmetric,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
query_request = SemanticEmbeddingRequest(**query_params)
query_response = self.client.semantic_embed(
request=query_request, model=self.model
)
return query_response.embedding
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Aleph Alpha's Document endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
""" | /content/https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
document_embeddings = []
for text in texts:
document_embeddings.append(self._embed(text))
return document_embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Aleph Alpha's asymmetric, query embedding endpoint
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embed(text)
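A minimal end-to-end usage sketch for the wrapper above. The class name ``AlephAlphaSymmetricSemanticEmbedding`` and the ``ALEPH_ALPHA_API_KEY`` environment variable are assumptions based on this excerpt, not shown in it.
.. code-block:: python

    # Hypothetical usage sketch; class and environment-variable names are assumptions.
    from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding

    embeddings = AlephAlphaSymmetricSemanticEmbedding()  # reads ALEPH_ALPHA_API_KEY
    doc_vectors = embeddings.embed_documents(["first document", "second document"])
    query_vector = embeddings.embed_query("first document")
    print(len(doc_vectors), len(query_vector))  # 2 document vectors, one embedding width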
Source code for langchain.embeddings.tensorflow_hub
"""Wrapper around TensorflowHub embedding models."""
from typing import Any, List
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
[docs]class TensorflowHubEmbeddings(BaseModel, Embeddings):
"""Wrapper around tensorflow_hub embedding models.
To use, you should have the ``tensorflow_text`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import TensorflowHubEmbeddings
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
tf = TensorflowHubEmbeddings(model_url=url)
"""
embed: Any #: :meta private:
model_url: str = DEFAULT_MODEL_URL
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the tensorflow_hub and tensorflow_text."""
super().__init__(**kwargs)
try:
import tensorflow_hub
import tensorflow_text # noqa
self.embed = tensorflow_hub.load(self.model_url)
except ImportError as e:
raise ValueError(
"Could not import some python packages." "Please install them."
) from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a TensorflowHub embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.embed(texts).numpy()
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a TensorflowHub embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.embed([text]).numpy()[0]
return embedding.tolist()
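A short usage sketch for ``TensorflowHubEmbeddings``; it assumes ``tensorflow``, ``tensorflow_hub``, and ``tensorflow_text`` are installed and simply exercises the two methods defined above.
.. code-block:: python

    # Illustrative sketch; downloads the default multilingual USE model on first use.
    from langchain.embeddings import TensorflowHubEmbeddings

    tf_embeddings = TensorflowHubEmbeddings()  # uses DEFAULT_MODEL_URL from this module
    doc_vectors = tf_embeddings.embed_documents(["hello world", "bonjour le monde"])
    query_vector = tf_embeddings.embed_query("hello world")
    print(len(doc_vectors), len(query_vector))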
Source code for langchain.embeddings.sagemaker_endpoint
"""Wrapper around Sagemaker InvokeEndpoint API."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase
class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]):
"""Content handler for LLM class."""
[docs]class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
"""Wrapper around custom Sagemaker Inference Endpoints.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Example:
.. code-block:: python
from langchain.embeddings import SagemakerEndpointEmbeddings
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpointEmbeddings(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
"""
client: Any #: :meta private:
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: EmbeddingsContentHandler
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
"""
Example:
.. code-block:: python
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler
class ContentHandler(EmbeddingsContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
input_str = json.dumps({"inputs": prompts, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> List[List[float]]:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["vectors"]
""" # noqa: E501
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_. docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None: | /content/https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html |
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ValueError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
return values
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""Call out to SageMaker Inference embedding endpoint."""
# replace newlines, which can negatively affect performance.
texts = list(map(lambda x: x.replace("\n", " "), texts))
_model_kwargs = self.model_kwargs or {}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(texts, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
# send request
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
return self.content_handler.transform_output(response["Body"])
[docs] def embed_documents(
self, texts: List[str], chunk_size: int = 64
) -> List[List[float]]:
"""Compute doc embeddings using a SageMaker Inference Endpoint.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size defines how many input texts are
grouped together in a single request. If None, the chunk
size specified by the class will be used.
Returns:
List of embeddings, one for each text.
"""
results = []
_chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
for i in range(0, len(texts), _chunk_size):
response = self._embedding_func(texts[i : i + _chunk_size])
results.extend(response)
return results
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a SageMaker inference endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func([text])[0]
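A sketch of wiring a concrete content handler into the wrapper above. The endpoint name, region, and the ``{"vectors": [...]}`` response shape are assumptions about a particular deployed model, not guarantees from this module.
.. code-block:: python

    # Sketch only: endpoint name/region are placeholders, and the endpoint is
    # assumed to accept {"inputs": [...]} and return {"vectors": [[...], ...]}.
    import json
    from typing import Dict, List

    class JsonEmbeddingsHandler(EmbeddingsContentHandler):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
            return json.dumps({"inputs": prompts, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> List[List[float]]:
            return json.loads(output.read().decode("utf-8"))["vectors"]

    embedder = SagemakerEndpointEmbeddings(
        endpoint_name="my-embeddings-endpoint",  # placeholder
        region_name="us-west-2",
        content_handler=JsonEmbeddingsHandler(),
    )
    vectors = embedder.embed_documents(["doc one", "doc two"], chunk_size=2)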
Source code for langchain.embeddings.huggingface_hub
"""Wrapper around HuggingFace Hub embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2"
VALID_TASKS = ("feature-extraction",)
[docs]class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
"""Wrapper around HuggingFaceHub embedding models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceHubEmbeddings
repo_id = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceHubEmbeddings(
repo_id=repo_id,
task="feature-extraction",
huggingfacehub_api_token="my-api-key",
)
"""
client: Any #: :meta private:
repo_id: str = DEFAULT_REPO_ID
"""Model name to use."""
task: Optional[str] = "feature-extraction"
"""Task to call the model with.""" | /content/https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html |
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
if not repo_id.startswith("sentence-transformers"):
raise ValueError(
"Currently only 'sentence-transformers' embedding models "
f"are supported. Got invalid 'repo_id' {repo_id}."
)
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to HuggingFaceHub's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
# replace newlines, which can negatively affect performance.
texts = [text.replace("\n", " ") for text in texts]
_model_kwargs = self.model_kwargs or {}
responses = self.client(inputs=texts, params=_model_kwargs)
return responses
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to HuggingFaceHub's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
response = self.embed_documents([text])[0]
return response
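A minimal sketch of calling the Hub wrapper defined above; the token value is a placeholder.
.. code-block:: python

    # Sketch; requires `pip install huggingface_hub` and a valid API token.
    from langchain.embeddings import HuggingFaceHubEmbeddings

    hf = HuggingFaceHubEmbeddings(
        repo_id="sentence-transformers/all-mpnet-base-v2",
        huggingfacehub_api_token="hf_...",  # or set HUGGINGFACEHUB_API_TOKEN instead
    )
    doc_vectors = hf.embed_documents(["LangChain wraps many embedding providers."])
    query_vector = hf.embed_query("Which providers does LangChain wrap?")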
Source code for langchain.embeddings.self_hosted
"""Running custom embedding models on self-hosted remote hardware."""
from typing import Any, Callable, List
from pydantic import Extra
from langchain.embeddings.base import Embeddings
from langchain.llms import SelfHostedPipeline
def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return pipeline(*args, **kwargs)
[docs]class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
"""Runs custom embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example using a model load function:
.. code-block:: python
from langchain.embeddings import SelfHostedEmbeddings
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
def get_pipeline():
model_id = "facebook/bart-large" | /content/https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html |
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
embeddings = SelfHostedEmbeddings(
model_load_fn=get_pipeline,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing in a pipeline path:
.. code-block:: python
from langchain.embeddings import SelfHostedHFEmbeddings
import runhouse as rh
from transformers import pipeline
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
rh.blob(pickle.dumps(pipeline),
path="models/pipeline.pkl").save().to(gpu, path="models")
embeddings = SelfHostedHFEmbeddings.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
"""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings on the remote hardware."""
inference_kwargs: Any = None
"""Any kwargs to pass to the model's inference function."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.client(self.pipeline_ref, texts)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embeddings = self.client(self.pipeline_ref, text)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
Source code for langchain.embeddings.cohere
"""Wrapper around Cohere embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class CohereEmbeddings(BaseModel, Embeddings):
"""Wrapper around Cohere embedding models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(model="medium", cohere_api_key="my-api-key")
"""
client: Any #: :meta private:
model: str = "large"
"""Model name to use."""
truncate: Optional[str] = None
"""Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
cohere_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment.""" | /content/https://python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html |
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ValueError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = self.client.embed(
model=self.model, texts=texts, truncate=self.truncate
).embeddings
return [list(map(float, e)) for e in embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(
model=self.model, texts=[text], truncate=self.truncate
).embeddings[0]
return list(map(float, embedding))
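A minimal sketch for the Cohere wrapper above; the API key is a placeholder.
.. code-block:: python

    # Sketch; requires `pip install cohere` and a real API key.
    from langchain.embeddings import CohereEmbeddings

    cohere_embeddings = CohereEmbeddings(model="large", cohere_api_key="my-api-key")
    doc_vectors = cohere_embeddings.embed_documents(["first text", "second text"])
    query_vector = cohere_embeddings.embed_query("first text")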
Source code for langchain.embeddings.openai
"""Wrapper around OpenAI embedding models."""
from __future__ import annotations
import logging
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Set,
Tuple,
Union,
)
import numpy as np
from pydantic import BaseModel, Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
import openai
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
return embeddings.client.create(**kwargs)
return _embed_with_retry(**kwargs)
[docs]class OpenAIEmbeddings(BaseModel, Embeddings):
"""Wrapper around OpenAI embedding models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
openai = OpenAIEmbeddings(openai_api_key="my-api-key")
In order to use the library with Microsoft Azure endpoints, you need to set
the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and optionally
OPENAI_API_VERSION.
The OPENAI_API_TYPE must be set to 'azure' and the others correspond to
the properties of your endpoint.
In addition, the deployment name must be passed as the model parameter.
Example:
.. code-block:: python
import os
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings(
deployment="your-embeddings-deployment-name",
model="your-embeddings-model-name"
)
text = "This is a test query."
query_result = embeddings.embed_query(text)
"""
client: Any #: :meta private:
model: str = "text-embedding-ada-002"
deployment: str = model # to support Azure OpenAI Service custom deployment names
embedding_ctx_length: int = 8191
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
allowed_special: Union[Literal["all"], Set[str]] = set()
disallowed_special: Union[Literal["all"], Set[str], Tuple[()]] = "all"
chunk_size: int = 1000
"""Maximum number of texts to embed in each batch"""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["client"] = openai.Embedding
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
# please refer to
# https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
def _get_len_safe_embeddings(
self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
) -> List[List[float]]:
embeddings: List[List[float]] = [[] for i in range(len(texts))]
try:
import tiktoken
tokens = []
indices = []
encoding = tiktoken.model.encoding_for_model(self.model)
for i, text in enumerate(texts):
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
token = encoding.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
tokens += [token[j : j + self.embedding_ctx_length]]
indices += [i]
batched_embeddings = []
_chunk_size = chunk_size or self.chunk_size
for i in range(0, len(tokens), _chunk_size):
response = embed_with_retry(
self,
input=tokens[i : i + _chunk_size],
engine=self.deployment,
)
batched_embeddings += [r["embedding"] for r in response["data"]]
results: List[List[List[float]]] = [[] for _ in range(len(texts))]
lens: List[List[int]] = [[] for _ in range(len(texts))]
for i in range(len(indices)):
results[indices[i]].append(batched_embeddings[i])
lens[indices[i]].append(len(batched_embeddings[i]))
for i in range(len(texts)):
_result = results[i]
if len(_result) == 0:
average = embed_with_retry(self, input="", engine=self.deployment)[
"data"
][0]["embedding"]
else:
average = np.average(_result, axis=0, weights=lens[i])
embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to for OpenAIEmbeddings. "
"Please install it with `pip install tiktoken`."
)
def _embedding_func(self, text: str, *, engine: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint."""
# handle large input text
if self.embedding_ctx_length > 0:
return self._get_len_safe_embeddings([text], engine=engine)[0]
else:
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return embed_with_retry(self, input=[text], engine=engine)["data"][0][
"embedding"
]
[docs] def embed_documents(
self, texts: List[str], chunk_size: Optional[int] = 0
) -> List[List[float]]:
"""Call out to OpenAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
# handle batches of large input text
if self.embedding_ctx_length > 0:
return self._get_len_safe_embeddings(texts, engine=self.deployment)
else:
results = []
_chunk_size = chunk_size or self.chunk_size
for i in range(0, len(texts), _chunk_size):
response = embed_with_retry(
self,
input=texts[i : i + _chunk_size],
engine=self.deployment,
)
results += [r["embedding"] for r in response["data"]]
return results
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = self._embedding_func(text, engine=self.deployment)
return embedding
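The length-safe path above splits an over-long text into token windows, embeds each window, and recombines the window vectors with a length-weighted average that is re-scaled to unit norm. A standalone sketch of just that combination step, with made-up numbers:
.. code-block:: python

    # Illustration of the weighted-average-and-normalize step; the window vectors
    # and token counts below are invented for demonstration.
    import numpy as np

    window_embeddings = [[0.1, 0.9], [0.3, 0.7]]  # one vector per token window
    window_lengths = [8191, 1500]                  # tokens contributed by each window

    average = np.average(window_embeddings, axis=0, weights=window_lengths)
    unit_vector = (average / np.linalg.norm(average)).tolist()
    print(unit_vector)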
Source code for langchain.embeddings.self_hosted_hugging_face
"""Wrapper around HuggingFace embedding models for self-hosted remote hardware."""
import importlib
import logging
from typing import Any, Callable, List, Optional
from langchain.embeddings.self_hosted import SelfHostedEmbeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
logger = logging.getLogger(__name__)
def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return client.encode(*args, **kwargs)
def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any:
"""Load the embedding model."""
if not instruct:
import sentence_transformers
client = sentence_transformers.SentenceTransformer(model_id)
else:
from InstructorEmbedding import INSTRUCTOR
client = INSTRUCTOR(model_id)
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
client = client.to(device)
return client
[docs]class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
"""Runs sentence_transformers embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
import runhouse as rh
model_name = "sentence-transformers/all-mpnet-base-v2"
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
"""
client: Any #: :meta private:
model_id: str = DEFAULT_MODEL_NAME
"""Model name to use."""
model_reqs: List[str] = ["./", "sentence_transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_load_fn: Callable = load_embedding_model
"""Function to load the model remotely on the server."""
load_fn_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model load function."""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings."""
def __init__(self, **kwargs: Any):
"""Initialize the remote inference function."""
load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME) | /content/https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html |
load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False)
load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
[docs]class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
"""Runs InstructorEmbedding embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
import runhouse as rh
model_name = "hkunlp/instructor-large"
gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
hf = SelfHostedHuggingFaceInstructEmbeddings(
model_name=model_name, hardware=gpu)
"""
model_id: str = DEFAULT_INSTRUCT_MODEL
"""Model name to use."""
embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
"""Instruction to use for embedding documents."""
query_instruction: str = DEFAULT_QUERY_INSTRUCTION
"""Instruction to use for embedding query."""
model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"]
"""Requirements to install on hardware to inference the model."""
def __init__(self, **kwargs: Any):
"""Initialize the remote inference function."""
load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
load_fn_kwargs["model_id"] = load_fn_kwargs.get(
"model_id", DEFAULT_INSTRUCT_MODEL
)
load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True)
load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = []
for text in texts:
instruction_pairs.append([self.embed_instruction, text])
embeddings = self.client(self.pipeline_ref, instruction_pairs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
return embedding.tolist()
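A small sketch of how the instruct wrapper assembles instruction/text pairs before they are sent to the remote model, using the module-level defaults above; the texts are placeholders.
.. code-block:: python

    # Sketch of the pair construction only; no remote call is made here.
    texts = ["LangChain integrates many embedding backends."]
    instruction_pairs = [[DEFAULT_EMBED_INSTRUCTION, text] for text in texts]
    query_pair = [DEFAULT_QUERY_INSTRUCTION, "Which backends are integrated?"]
    # On the cluster, client(pipeline_ref, instruction_pairs) returns one vector per pair.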
Source code for langchain.embeddings.fake
from typing import List
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
[docs]class FakeEmbeddings(Embeddings, BaseModel):
size: int
def _get_embedding(self) -> List[float]:
return list(np.random.normal(size=self.size))
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding() for _ in texts]
[docs] def embed_query(self, text: str) -> List[float]:
return self._get_embedding()
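``FakeEmbeddings`` is convenient in tests where no network call is wanted; a quick sketch:
.. code-block:: python

    # Deterministic in shape (not in values): each vector has `size` entries.
    from langchain.embeddings import FakeEmbeddings

    fake = FakeEmbeddings(size=8)
    assert len(fake.embed_query("anything")) == 8
    assert len(fake.embed_documents(["a", "b", "c"])) == 3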
Source code for langchain.chat_models.azure_openai
"""Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
from typing import Any, Dict
from pydantic import root_validator
from langchain.chat_models.openai import ChatOpenAI
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class AzureChatOpenAI(ChatOpenAI):
"""Wrapper around Azure OpenAI Chat Completion API. To use this class you
must have a deployed model on Azure OpenAI. Use `deployment_name` in the
constructor to refer to the "Model deployment name" in the Azure portal.
In addition, you should have the ``openai`` python package installed, and the
following environment variables set or passed in constructor in lower case:
- ``OPENAI_API_TYPE`` (default: ``azure``)
- ``OPENAI_API_KEY``
- ``OPENAI_API_BASE``
- ``OPENAI_API_VERSION``
For example, if you have `gpt-35-turbo` deployed, with the deployment name
`35-turbo-dev`, the constructor should look like:
.. code-block:: python
AzureChatOpenAI(
deployment_name="35-turbo-dev",
openai_api_version="2023-03-15-preview",
)
Be aware the API version may change.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
""" | /content/https://python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
deployment_name: str = ""
openai_api_type: str = "azure"
openai_api_base: str = ""
openai_api_version: str = ""
openai_api_key: str = ""
openai_organization: str = ""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values,
"openai_api_key",
"OPENAI_API_KEY",
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
)
openai_api_version = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
)
openai_api_type = get_from_dict_or_env(
values,
"openai_api_type",
"OPENAI_API_TYPE",
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_type = openai_api_type
openai.api_base = openai_api_base
openai.api_version = openai_api_version
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
**super()._default_params,
"engine": self.deployment_name,
}
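A fuller usage sketch for ``AzureChatOpenAI``; the endpoint, key, API version, and deployment name are placeholders you must replace with your own values.
.. code-block:: python

    # Sketch under assumptions: all Azure values below are placeholders.
    import os
    from langchain.chat_models import AzureChatOpenAI
    from langchain.schema import HumanMessage

    os.environ["OPENAI_API_TYPE"] = "azure"
    os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
    os.environ["OPENAI_API_KEY"] = "<your-azure-openai-key>"
    os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"

    chat = AzureChatOpenAI(deployment_name="35-turbo-dev")
    reply = chat([HumanMessage(content="Say hello in one word.")])
    print(reply.content)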
Source code for langchain.chat_models.openai
"""OpenAI chat wrapper."""
from __future__ import annotations
import logging
import sys
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry, starting with
# 1 second, then up to a maximum of 60 seconds between retries
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_dict_to_message(_dict: dict) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict["content"])
elif role == "system":
return SystemMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
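A round-trip sketch for the two module-private converters above (illustrative only):
.. code-block:: python

    from langchain.schema import HumanMessage

    message = HumanMessage(content="What is the capital of France?")
    as_dict = _convert_message_to_dict(message)
    # {"role": "user", "content": "What is the capital of France?"}
    restored = _convert_dict_to_message(as_dict)
    assert restored.content == message.content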
[docs]class ChatOpenAI(BaseChatModel):
"""Wrapper around OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
openai = ChatOpenAI(model_name="gpt-3.5-turbo")
"""
client: Any #: :meta private:
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
request_timeout: int = 60
"""Timeout in seconds for the OpenAPI request."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in.""" | /content/https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely " | /content/https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
def _create_retry_decorator(self) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry, starting with
# 1 second, then up to a maximum of 60 seconds between retries
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs] def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model_name}
def _generate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
for stream_resp in self.completion_with_retry(
messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
self.callback_manager.on_llm_new_token(
token,
verbose=self.verbose,
)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
async def _agenerate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
async for stream_resp in await acompletion_with_retry(
self, messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if self.callback_manager.is_async:
await self.callback_manager.on_llm_new_token(
token,
verbose=self.verbose,
)
else:
self.callback_manager.on_llm_new_token(
token,
verbose=self.verbose,
)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
response = await acompletion_with_retry(
self, messages=message_dicts, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
[docs] def get_num_tokens(self, text: str) -> int:
"""Calculate num tokens with tiktoken package."""
# tiktoken NOT supported for Python 3.7 or below
if sys.version_info[1] <= 7:
return super().get_num_tokens(text)
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
# create a GPT-3.5-Turbo encoder instance
enc = tiktoken.encoding_for_model(self.model_name)
# encode the text using the GPT-3.5-Turbo encoder
tokenized_text = enc.encode(text)
# calculate the number of tokens in the encoded text
return len(tokenized_text)
[docs] def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
model = self.model_name
if model == "gpt-3.5-turbo":
# gpt-3.5-turbo may change over time.
# Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt-4-0314"
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model == "gpt-3.5-turbo-0301":
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model == "gpt-4-0314":
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_num_tokens_from_messages() is not presently implemented "
f"for model {model}."
"See https://github.com/openai/openai-python/blob/main/chatml.md for "
"information on how messages are converted to tokens."
)
num_tokens = 0
messages_dict = [_convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
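A sketch of counting tokens for a conversation before sending it; this runs locally with ``tiktoken``, and the API key below is only a placeholder needed to construct the model object.
.. code-block:: python

    # Sketch; no request is sent, only local token counting.
    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage, SystemMessage

    chat = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key="sk-placeholder")
    messages = [
        SystemMessage(content="You are a terse assistant."),
        HumanMessage(content="Summarize LangChain in one sentence."),
    ]
    print(chat.get_num_tokens_from_messages(messages))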
Source code for langchain.chat_models.promptlayer_openai
"""PromptLayer wrapper."""
import datetime
from typing import List, Optional
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, ChatResult
[docs]class PromptLayerChatOpenAI(ChatOpenAI):
"""Wrapper around OpenAI Chat large language models and PromptLayer.
To use, you should have the ``openai`` and ``promptlayer`` python
package installed, and the environment variable ``OPENAI_API_KEY``
and ``PROMPTLAYER_API_KEY`` set with your openAI API key and
promptlayer key respectively.
All parameters that can be passed to the OpenAI LLM can also
be passed here. The PromptLayerChatOpenAI adds two optional
parameters:
``pl_tags``: List of strings to tag the request with.
``return_pl_id``: If True, the PromptLayer request ID will be
returned in the ``generation_info`` field of the
``Generation`` object.
Example:
.. code-block:: python
from langchain.chat_models import PromptLayerChatOpenAI
openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo")
"""
pl_tags: Optional[List[str]]
return_pl_id: Optional[bool] = False
def _generate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
"""Call ChatOpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(messages, stop)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
pl_request_id = promptlayer_api_request(
"langchain.PromptLayerChatOpenAI",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
async def _agenerate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult: | /content/https://python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
"""Call ChatOpenAI agenerate and then call PromptLayer to log."""
from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(messages, stop)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerChatOpenAI.async",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
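A minimal usage sketch of the two extra parameters (assuming the openai and promptlayer packages are installed and OPENAI_API_KEY / PROMPTLAYER_API_KEY are set):
from langchain.chat_models import PromptLayerChatOpenAI
from langchain.schema import HumanMessage

chat = PromptLayerChatOpenAI(pl_tags=["langchain"], return_pl_id=True)
result = chat.generate([[HumanMessage(content="Tell me a joke")]])
for generation in result.generations[0]:
    # With return_pl_id=True, generation_info carries the PromptLayer request id
    print(generation.generation_info["pl_request_id"])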
Source code for langchain.chat_models.anthropic
from typing import Any, Dict, List, Optional
from pydantic import Extra
from langchain.chat_models.base import BaseChatModel
from langchain.llms.anthropic import _AnthropicCommon
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
[docs]class ChatAnthropic(BaseChatModel, _AnthropicCommon):
r"""Wrapper around Anthropic's large language model.
To use, you should have the ``anthropic`` python package installed, and the
environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
import anthropic
from langchain.chat_models import ChatAnthropic
model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anthropic-chat"
def _convert_one_message_to_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}" | /content/https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
elif isinstance(message, HumanMessage):
message_text = f"{self.HUMAN_PROMPT} {message.content}"
elif isinstance(message, AIMessage):
message_text = f"{self.AI_PROMPT} {message.content}"
elif isinstance(message, SystemMessage):
message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str:
"""Format a list of strings into a single string with necessary newlines.
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary newlines.
"""
return "".join(
self._convert_one_message_to_text(message) for message in messages
)
def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
"""Format a list of messages into a full prompt for the Anthropic model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
"""
if not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if not isinstance(messages[-1], AIMessage): | /content/https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
messages.append(AIMessage(content=""))
text = self._convert_messages_to_text(messages)
return (
text.rstrip()
) # trim off the trailing ' ' that might come from the "Assistant: "
def _generate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_resp = self.client.completion_stream(**params)
for data in stream_resp:
delta = data["completion"][len(completion) :]
completion = data["completion"]
self.callback_manager.on_llm_new_token(
delta,
verbose=self.verbose,
)
else:
response = self.client.completion(**params)
completion = response["completion"]
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages) | /content/https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_resp = await self.client.acompletion_stream(**params)
async for data in stream_resp:
delta = data["completion"][len(completion) :]
completion = data["completion"]
if self.callback_manager.is_async:
await self.callback_manager.on_llm_new_token(
delta,
verbose=self.verbose,
)
else:
self.callback_manager.on_llm_new_token(
delta,
verbose=self.verbose,
)
else:
response = await self.client.acompletion(**params)
completion = response["completion"]
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
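A minimal usage sketch (assuming the anthropic package is installed and ANTHROPIC_API_KEY is set in the environment):
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage

chat = ChatAnthropic()
# The helpers above turn the message list into a single Human/Assistant prompt
response = chat([HumanMessage(content="Say hello in French")])
print(response.content)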
Cloud Hosted Setup
Contents
Installation
Environment Setup
Cloud Hosted Setup#
We offer a hosted version of tracing at langchainplus.vercel.app. You can use this to view traces from your run without having to run the server locally.
Note: we are currently only offering this to a limited number of users. The hosted platform is VERY alpha, in active development, and data might be dropped at any time. Don’t depend on data being persisted in the system long term and don’t log traces that may contain sensitive information. If you’re interested in using the hosted platform, please fill out the form here.
Installation#
Login to the system and click “API Key” in the top right corner. Generate a new key and keep it safe. You will need it to authenticate with the system.
Environment Setup#
After installation, you must now set up your environment to use tracing.
This can be done by setting an environment variable in your terminal by running export LANGCHAIN_HANDLER=langchain.
You can also do this by adding the below snippet to the top of every script. IMPORTANT: this must go at the VERY TOP of your script, before you import anything from langchain.
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
You will also need to set an environment variable to specify the endpoint and your API key. This can be done with the following environment variables:
LANGCHAIN_ENDPOINT = “https://langchain-api-gateway-57eoxz8z.uc.gateway.dev”
LANGCHAIN_API_KEY - set this to the API key you generated during installation.
An example of adding all relevant environment variables is below:
import os | /content/https://python.langchain.com/en/latest/tracing/hosted_installation.html |
os.environ["LANGCHAIN_HANDLER"] = "langchain"
os.environ["LANGCHAIN_ENDPOINT"] = "https://langchain-api-gateway-57eoxz8z.uc.gateway.dev"
os.environ["LANGCHAIN_API_KEY"] = "my_api_key" # Don't commit this to your repo! Better to set it in your terminal.
Tracing Walkthrough
Tracing Walkthrough#
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
## Uncomment this if using hosted setup.
# os.environ["LANGCHAIN_ENDPOINT"] = "https://langchain-api-gateway-57eoxz8z.uc.gateway.dev"
## Uncomment this if you want traces to be recorded to "my_session" instead of default.
# os.environ["LANGCHAIN_SESSION"] = "my_session"
## Better to set this environment variable in the terminal
## Uncomment this if using hosted version. Replace "my_api_key" with your actual API Key.
# os.environ["LANGCHAIN_API_KEY"] = "my_api_key"
import langchain
from langchain.agents import Tool, initialize_agent, load_tools
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
# Agent run with tracing. Ensure that OPENAI_API_KEY is set appropriately to run this example.
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?")
> Entering new AgentExecutor chain...
I need to use a calculator to solve this.
Action: Calculator
Action Input: 2^.123243 | /content/https://python.langchain.com/en/latest/tracing/agent_with_tracing.html |
Observation: Answer: 1.0891804557407723
Thought: I now know the final answer.
Final Answer: 1.0891804557407723
> Finished chain.
'1.0891804557407723'
# Agent run with tracing using a chat model
agent = initialize_agent(
tools, ChatOpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?")
> Entering new AgentExecutor chain...
Question: What is 2 raised to .123243 power?
Thought: I need a calculator to solve this problem.
Action:
```
{
"action": "calculator",
"action_input": "2^0.123243"
}
```
Observation: calculator is not a valid tool, try another one.
I made a mistake, I need to use the correct tool for this question.
Action:
```
{
"action": "calculator",
"action_input": "2^0.123243"
}
```
Observation: calculator is not a valid tool, try another one.
I made a mistake, the tool name is actually "calc" instead of "calculator".
Action:
```
{
"action": "calc",
"action_input": "2^0.123243"
}
```
Observation: calc is not a valid tool, try another one.
I made another mistake, the tool name is actually "Calculator" instead of "calc".
Action:
```
{
"action": "Calculator",
"action_input": "2^0.123243"
}
``` | /content/https://python.langchain.com/en/latest/tracing/agent_with_tracing.html |
Observation: Answer: 1.0891804557407723
Thought:The final answer is 1.0891804557407723.
Final Answer: 1.0891804557407723
> Finished chain.
'1.0891804557407723'
Locally Hosted Setup
Contents
Installation
Environment Setup
Locally Hosted Setup#
This page contains instructions for installing and then setting up the environment to use the locally hosted version of tracing.
Installation#
Ensure you have Docker installed (see Get Docker) and that it’s running.
Install the latest version of langchain: pip install langchain or pip install langchain -U to upgrade your
existing version.
Run langchain-server. This command was installed automatically when you ran the above command (pip install langchain).
This will spin up the server in the terminal, hosted on port 4173 by default.
Once you see the terminal
output langchain-langchain-frontend-1 | ➜ Local: [http://localhost:4173/](http://localhost:4173/), navigate
to http://localhost:4173/
You should see a page with your tracing sessions. See the overview page for a walkthrough of the UI.
Currently, trace data is not guaranteed to be persisted between runs of langchain-server. If you want to
persist your data, you can mount a volume to the Docker container. See the Docker docs for more info.
To stop the server, press Ctrl+C in the terminal where you ran langchain-server.
Environment Setup#
After installation, you must now set up your environment to use tracing.
This can be done by setting an environment variable in your terminal by running export LANGCHAIN_HANDLER=langchain.
You can also do this by adding the below snippet to the top of every script. IMPORTANT: this must go at the VERY TOP of your script, before you import anything from langchain.
import os | /content/https://python.langchain.com/en/latest/tracing/local_installation.html |
os.environ["LANGCHAIN_HANDLER"] = "langchain"
Question Answering over Docs
Contents
Document Question Answering
Adding in sources
Additional Related Resources
End-to-end examples
Question Answering over Docs#
Conceptual Guide
Question answering in this context refers to question answering over your document data.
For question answering over other types of data, please see the other documentation, such as SQL database Question Answering or Interacting with APIs.
For question answering over many documents, you almost always want to create an index over the data.
This can be used to smartly access the most relevant documents for a given question, allowing you to avoid having to pass all the documents to the LLM (saving you time and money).
See this notebook for a more detailed introduction to this, but for a super quick start the steps involved are:
Load Your Documents
from langchain.document_loaders import TextLoader
loader = TextLoader('../state_of_the_union.txt')
See here for more information on how to get started with document loading.
Create Your Index
from langchain.indexes import VectorstoreIndexCreator
index = VectorstoreIndexCreator().from_loaders([loader])
The best and most popular index by far at the moment is the VectorStore index.
Query Your Index
query = "What did the president say about Ketanji Brown Jackson"
index.query(query)
Alternatively, use query_with_sources to also get back the sources involved
query = "What did the president say about Ketanji Brown Jackson"
index.query_with_sources(query)
Again, these high level interfaces obfuscate a lot of what is going on under the hood, so please see this notebook for a lower level walkthrough.
Document Question Answering# | /content/https://python.langchain.com/en/latest/use_cases/question_answering.html |
Question answering involves fetching multiple documents, and then asking a question of them.
The LLM response will contain the answer to your question, based on the content of the documents.
The recommended way to get started using a question answering chain is:
from langchain.chains.question_answering import load_qa_chain
chain = load_qa_chain(llm, chain_type="stuff")
chain.run(input_documents=docs, question=query)
The following resources exist:
Question Answering Notebook: A notebook walking through how to accomplish this task.
VectorDB Question Answering Notebook: A notebook walking through how to do question answering over a vector database. This can often be useful for when you have a LOT of documents, and you don’t want to pass them all to the LLM, but rather first want to do some semantic search over embeddings.
Adding in sources#
There is also a variant of this, where in addition to responding with the answer the language model will also cite its sources (eg which of the documents passed in it used).
The recommended way to get started using a question answering with sources chain is:
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
chain = load_qa_with_sources_chain(llm, chain_type="stuff")
chain({"input_documents": docs, "question": query}, return_only_outputs=True)
The following resources exist:
QA With Sources Notebook: A notebook walking through how to accomplish this task. | /content/https://python.langchain.com/en/latest/use_cases/question_answering.html |
VectorDB QA With Sources Notebook: A notebook walking through how to do question answering with sources over a vector database. This can often be useful for when you have a LOT of documents, and you don’t want to pass them all to the LLM, but rather first want to do some semantic search over embeddings.
Additional Related Resources#
Additional related resources include:
Utilities for working with Documents: Guides on how to use several of the utilities which will prove helpful for this task, including Text Splitters (for splitting up long documents) and Embeddings & Vectorstores (useful for the above Vector DB example).
CombineDocuments Chains: A conceptual overview of specific types of chains by which you can accomplish this task.
End-to-end examples#
For examples of this done in an end-to-end manner, please see the following resources:
Semantic search over a group chat with Sources Notebook: A notebook that semantically searches over a group chat conversation.
Agent Simulations
Contents
Simulations with Two Agents
Simulations with Multiple Agents
Agent Simulations#
Agent simulations involve one or more agents interacting with each other.
Agent simulations generally involve two main components:
Long Term Memory
Simulation Environment
Specific implementations of agent simulations (or parts of agent simulations) include:
Simulations with Two Agents#
CAMEL: an implementation of the CAMEL (Communicative Agents for “Mind” Exploration of Large Scale Language Model Society) paper, where two agents communicate with each other.
Two Player D&D: an example of how to use a generic simulator for two agents to implement a variant of the popular Dungeons & Dragons role playing game.
Simulations with Multiple Agents#
Multi-Player D&D: an example of how to use a generic dialogue simulator for multiple dialogue agents with a custom speaker-ordering, illustrated with a variant of the popular Dungeons & Dragons role playing game.
Generative Agents: This notebook implements a generative agent based on the paper Generative Agents: Interactive Simulacra of Human Behavior by Park, et. al.
Summarization
Summarization#
Conceptual Guide
Summarization involves creating a smaller summary of multiple longer documents.
This can be useful for distilling long documents into the core pieces of information.
The recommended way to get started using a summarization chain is:
from langchain.chains.summarize import load_summarize_chain
chain = load_summarize_chain(llm, chain_type="map_reduce")
chain.run(docs)
The following resources exist:
Summarization Notebook: A notebook walking through how to accomplish this task.
Additional related resources include:
Utilities for working with Documents: Guides on how to use several of the utilities which will prove helpful for this task, including Text Splitters (for splitting up long documents).
Chatbots
Chatbots#
Conceptual Guide
Since language models are good at producing text, that makes them ideal for creating chatbots.
Aside from the base prompts/LLMs, an important concept to know for Chatbots is memory.
Most chat-based applications rely on remembering what happened in previous interactions, which memory is designed to help with.
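For example, here is a minimal sketch of a chatbot that keeps prior turns in memory (assuming an OPENAI_API_KEY is set; ConversationBufferMemory is just one of several memory classes):
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=ConversationBufferMemory(),
)
conversation.predict(input="Hi, my name is Bob.")
# The second turn can reference the first because the memory replays the history
conversation.predict(input="What is my name?")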
The following resources exist:
ChatGPT Clone: A notebook walking through how to recreate a ChatGPT-like experience with LangChain.
Conversation Memory: A notebook walking through how to use different types of conversational memory.
Conversation Agent: A notebook walking through how to create an agent optimized for conversation.
Additional related resources include:
Memory Key Concepts: Explanation of key concepts related to memory.
Memory Examples: A collection of how-to examples for working with memory.
More end-to-end examples include:
Voice Assistant: A notebook walking through how to create a voice assistant using LangChain.
Extraction
Extraction#
Conceptual Guide
Most APIs and databases still deal with structured information.
Therefore, in order to better work with those, it can be useful to extract structured information from text.
Examples of this include:
Extracting a structured row to insert into a database from a sentence
Extracting multiple rows to insert into a database from a long document
Extracting the correct API parameters from a user query
This work is extremely related to output parsing.
Output parsers are responsible for instructing the LLM to respond in a specific format.
In this case, the output parsers specify the format of the data you would like to extract from the document.
Then, in addition to the output format instructions, the prompt should also contain the data you would like to extract information from.
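For example, here is a minimal sketch of that pattern with a structured output parser (assuming an OPENAI_API_KEY is set; the field names are purely illustrative):
from langchain.llms import OpenAI
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.prompts import PromptTemplate

schemas = [
    ResponseSchema(name="name", description="The person's name"),
    ResponseSchema(name="age", description="The person's age"),
]
parser = StructuredOutputParser.from_response_schemas(schemas)

prompt = PromptTemplate(
    template="Extract the requested fields from the text.\n{format_instructions}\nText: {text}",
    input_variables=["text"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
llm = OpenAI(temperature=0)
output = llm(prompt.format(text="Alice is 31 years old and lives in Paris."))
print(parser.parse(output))  # e.g. {'name': 'Alice', 'age': '31'}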
While normal output parsers are good enough for basic structuring of response data,
when doing extraction you often want to extract more complicated or nested structures.
For a deep dive on extraction, we recommend checking out kor,
a library that uses the existing LangChain chain and OutputParser abstractions
but deep dives on allowing extraction of more complicated schemas.
Code Understanding
Contents
Conversational Retriever Chain
Code Understanding#
Overview
LangChain is a useful tool designed to parse GitHub code repositories. By leveraging VectorStores, Conversational RetrieverChain, and GPT-4, it can answer questions in the context of an entire GitHub repository or generate new code. This documentation page outlines the essential components of the system and guides you through using LangChain for better code comprehension, contextual question answering, and code generation in GitHub repositories.
Conversational Retriever Chain#
Conversational RetrieverChain is a retrieval-focused system that interacts with the data stored in a VectorStore. Utilizing advanced techniques, like context-aware filtering and ranking, it retrieves the most relevant code snippets and information for a given user query. Conversational RetrieverChain is engineered to deliver high-quality, pertinent results while considering conversation history and context.
LangChain Workflow for Code Understanding and Generation
Index the code base: Clone the target repository, load all files within, chunk the files, and execute the indexing process. Optionally, you can skip this step and use an already indexed dataset.
Embedding and Code Store: Code snippets are embedded using a code-aware embedding model and stored in a VectorStore.
Query Understanding: GPT-4 processes user queries, grasping the context and extracting relevant details.
Construct the Retriever: Conversational RetrieverChain searches the VectorStore to identify the most relevant code snippets for a given query. | /content/https://python.langchain.com/en/latest/use_cases/code.html |
Build the Conversational Chain: Customize the retriever settings and define any user-defined filters as needed.
Ask questions: Define a list of questions to ask about the codebase, and then use the ConversationalRetrievalChain to generate context-aware answers. The LLM (GPT-4) generates comprehensive, context-aware answers based on retrieved code snippets and conversation history.
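A compressed sketch of the retrieval and question-answering steps above (assuming the repository files were already embedded into a vectorstore such as FAISS or Deep Lake during indexing, and that an OPENAI_API_KEY is set):
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain

retriever = vectorstore.as_retriever()  # vectorstore built in the indexing step above
qa = ConversationalRetrievalChain.from_llm(ChatOpenAI(model_name="gpt-4"), retriever=retriever)

chat_history = []
question = "What class handles loading CSV files?"  # illustrative question
result = qa({"question": question, "chat_history": chat_history})
chat_history.append((question, result["answer"]))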
The full tutorial is available below.
Twitter the-algorithm codebase analysis with Deep Lake: A notebook walking through how to parse github source code and run conversational queries over it.
LangChain codebase analysis with Deep Lake: A notebook walking through how to analyze and do question answering over THIS code base.
Querying Tabular Data
Contents
Document Loading
Querying
Chains
Agents
Querying Tabular Data#
Conceptual Guide
Lots of data and information is stored in tabular data, whether it be csvs, excel sheets, or SQL tables.
This page covers all resources available in LangChain for working with data in this format.
Document Loading#
If you have text data stored in a tabular format, you may want to load the data into a Document and then index it as you would
other text/unstructured data. For this, you should use a document loader like the CSVLoader
and then you should create an index over that data, and query it that way.
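For instance, a minimal sketch (assuming a local file named data.csv and an OPENAI_API_KEY for the default embeddings):
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.indexes import VectorstoreIndexCreator

loader = CSVLoader(file_path="data.csv")  # hypothetical file name
index = VectorstoreIndexCreator().from_loaders([loader])
index.query("Which rows mention late deliveries?")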
Querying#
If you have more numeric tabular data, or have a large amount of data and don’t want to index it, you should get started
by looking at various chains and agents we have for dealing with this data.
Chains#
If you are just getting started, and you have relatively small/simple tabular data, you should get started with chains.
Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you
understand what is happening better.
SQL Database Chain
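For example, a minimal sketch of the SQL Database Chain (assuming a local SQLite file named chinook.db and an OPENAI_API_KEY):
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///chinook.db")  # hypothetical database path
db_chain = SQLDatabaseChain(llm=OpenAI(temperature=0), database=db, verbose=True)
db_chain.run("How many employees are there?")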
Agents#
Agents are more complex, and involve multiple queries to the LLM to understand what to do.
The downside of agents is that you have less control. The upside is that they are more powerful,
which allows you to use them on larger databases and more complex schemas.
SQL Agent
Pandas Agent
CSV Agent
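For example, a minimal sketch of the Pandas Agent (assuming pandas is installed, a local titanic.csv file, and an OPENAI_API_KEY):
import pandas as pd
from langchain.llms import OpenAI
from langchain.agents import create_pandas_dataframe_agent

df = pd.read_csv("titanic.csv")  # hypothetical file name
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
agent.run("How many rows are there?")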
Autonomous Agents
Contents
Baby AGI (Original Repo)
AutoGPT (Original Repo)
MetaPrompt (Original Repo)
Autonomous Agents#
Autonomous Agents are agents designed to be more long-running.
You give them one or multiple long-term goals, and they independently execute towards those goals.
The applications combine tool usage and long-term memory.
At the moment, Autonomous Agents are fairly experimental and based off of other open-source projects.
By implementing these open source projects in LangChain primitives we can get the benefits of LangChain -
easy switching and experimenting with multiple LLMs, usage of different vectorstores as memory,
usage of LangChain’s collection of tools.
Baby AGI (Original Repo)#
Baby AGI: a notebook implementing BabyAGI as LLM Chains
Baby AGI with Tools: building off the above notebook, this example substitutes in an agent with tools as the execution tools, allowing it to actually take actions.
AutoGPT (Original Repo)#
AutoGPT: a notebook implementing AutoGPT in LangChain primitives
WebSearch Research Assistant: a notebook showing how to use AutoGPT plus specific tools to act as a research assistant that can use the web.
MetaPrompt (Original Repo)#
Meta-Prompt: a notebook implementing Meta-Prompt in LangChain primitives
Personal Assistants (Agents)
Personal Assistants (Agents)#
Conceptual Guide
We use “personal assistant” here in a very broad sense.
Personal assistants have a few characteristics:
They can interact with the outside world
They have knowledge of your data
They remember your interactions
Really all of the functionality in LangChain is relevant for building a personal assistant.
Highlighting specific parts:
Agent Documentation (for interacting with the outside world)
Index Documentation (for giving them knowledge of your data)
Memory (for helping them remember interactions)
Specific examples of this include:
AI Plugins: an implementation of an agent that is designed to be able to use all AI Plugins.
Plug-and-PlAI (Plugins Database): an implementation of an agent that is designed to be able to use all AI Plugins retrieved from PlugNPlAI.
Wikibase Agent: an implementation of an agent that is designed to interact with Wikibase.
Sales GPT: This notebook demonstrates an implementation of a Context-Aware AI Sales agent.
Evaluation
Contents
The Problem
The Solution
The Examples
Other Examples
Evaluation#
Note
Conceptual Guide
This section of documentation covers how we approach and think about evaluation in LangChain.
It covers both evaluation of internal chains/agents and how we would recommend that people building on top of LangChain approach evaluation.
The Problem#
It can be really hard to evaluate LangChain chains and agents.
There are two main reasons for this:
# 1: Lack of data
You generally don’t have a ton of data to evaluate your chains/agents over before starting a project.
This is usually because Large Language Models (the core of most chains/agents) are terrific few-shot and zero-shot learners,
meaning you are almost always able to get started on a particular task (text-to-SQL, question answering, etc) without
a large dataset of examples.
This is in stark contrast to traditional machine learning where you had to first collect a bunch of datapoints
before even getting started using a model.
# 2: Lack of metrics
Most chains/agents are performing tasks for which there are not very good metrics to evaluate performance.
For example, one of the most common use cases is generating text of some form.
Evaluating generated text is much more complicated than evaluating a classification prediction, or a numeric prediction.
The Solution#
LangChain attempts to tackle both of those issues.
What we have so far are initial passes at solutions - we do not think we have a perfect solution.
So we very much welcome feedback, contributions, integrations, and thoughts on this.
Here is what we have for each problem so far:
# 1: Lack of data
We have started LangChainDatasets, a Community space on Hugging Face.
We intend this to be a collection of open source datasets for evaluating common chains and agents.
We have contributed five datasets of our own to start, but we very much intend this to be a community effort.
In order to contribute a dataset, you simply need to join the community and then you will be able to upload datasets.
We’re also aiming to make it as easy as possible for people to create their own datasets.
As a first pass at this, we’ve added a QAGenerationChain, which given a document comes up
with question-answer pairs that can be used to evaluate question-answering tasks over that document down the line.
See this notebook for an example of how to use this chain.
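A minimal sketch of generating such pairs (assuming an OPENAI_API_KEY is set and doc is a previously loaded Document; the exact output format may vary slightly by version):
from langchain.chat_models import ChatOpenAI
from langchain.chains import QAGenerationChain

chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
qa_pairs = chain.run(doc.page_content)  # doc is assumed to be loaded earlier
print(qa_pairs[0])  # e.g. {'question': '...', 'answer': '...'}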
# 2: Lack of metrics
We have two solutions to the lack of metrics.
The first solution is to use no metrics, and rather just rely on looking at results by eye to get a sense for how the chain/agent is performing.
To assist in this, we have developed (and will continue to develop) tracing, a UI-based visualizer of your chain and agent runs.
The second solution we recommend is to use Language Models themselves to evaluate outputs.
For this we have a few different chains and prompts aimed at tackling this issue.
The Examples#
We have created a bunch of examples combining the above two solutions to show how we internally evaluate chains and agents when we are developing.
In addition to the examples we’ve curated, we also highly welcome contributions here.
To facilitate that, we’ve included a template notebook for community members to use to build their own examples.
The existing examples we have are: | /content/https://python.langchain.com/en/latest/use_cases/evaluation.html |
Question Answering (State of Union): A notebook showing evaluation of a question-answering task over a State-of-the-Union address.
Question Answering (Paul Graham Essay): A notebook showing evaluation of a question-answering task over a Paul Graham essay.
SQL Question Answering (Chinook): A notebook showing evaluation of a question-answering task over a SQL database (the Chinook database).
Agent Vectorstore: A notebook showing evaluation of an agent doing question answering while routing between two different vector databases.
Agent Search + Calculator: A notebook showing evaluation of an agent doing question answering using a Search engine and a Calculator as tools.
Evaluating an OpenAPI Chain: A notebook showing evaluation of an OpenAPI chain, including how to generate test data if you don’t have any.
Other Examples#
In addition, we also have some more generic resources for evaluation.
Question Answering: An overview of LLMs aimed at evaluating question answering systems in general.
Data Augmented Question Answering: An end-to-end example of evaluating a question answering system focused on a specific document (a RetrievalQAChain to be precise). This example highlights how to use LLMs to come up with question/answer examples to evaluate over, and then highlights how to use LLMs to evaluate performance on those generated examples.
Hugging Face Datasets: Covers an example of loading and using a dataset from Hugging Face for evaluation.
Interacting with APIs
Contents
Chains
Agents
Interacting with APIs#
Conceptual Guide
Lots of data and information is stored behind APIs.
This page covers all resources available in LangChain for working with APIs.
Chains#
If you are just getting started, and you have relatively simple apis, you should get started with chains.
Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you
understand what is happening better.
API Chain
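For example, a minimal sketch of the API Chain against the free Open-Meteo weather API (assuming an OPENAI_API_KEY is set):
from langchain.llms import OpenAI
from langchain.chains import APIChain
from langchain.chains.api import open_meteo_docs

llm = OpenAI(temperature=0)
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True)
chain.run("What is the current temperature in Munich in degrees Celsius?")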
Agents#
Agents are more complex, and involve multiple queries to the LLM to understand what to do.
The downside of agents is that you have less control. The upside is that they are more powerful,
which allows you to use them on larger and more complex schemas.
OpenAPI Agent
Generative Agents in LangChain
Contents
Generative Agent Memory Components
Memory Lifecycle
Create a Generative Character
Pre-Interview with Character
Step through the day’s observations.
Interview after the day
Adding Multiple Characters
Pre-conversation interviews
Dialogue between Generative Agents
Let’s interview our agents after their conversation
Generative Agents in LangChain#
This notebook implements a generative agent based on the paper Generative Agents: Interactive Simulacra of Human Behavior by Park, et. al.
In it, we leverage a time-weighted Memory object backed by a LangChain Retriever.
# Use termcolor to make it easy to colorize the outputs.
!pip install termcolor > /dev/null
import logging
logging.basicConfig(level=logging.ERROR)
from datetime import datetime, timedelta
from typing import List
from termcolor import colored
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS
USER_NAME = "Person A" # The name you want to use when interviewing the agent.
LLM = ChatOpenAI(max_tokens=1500) # Can be any LLM you want.
Generative Agent Memory Components#
This tutorial highlights the memory of generative agents and its impact on their behavior. The memory varies from standard LangChain Chat memory in two aspects:
Memory Formation
Generative Agents have extended memories, stored in a single stream: | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
Observations - from dialogues or interactions with the virtual world, about self or others
Reflections - resurfaced and summarized core memories
Memory Recall
Memories are retrieved using a weighted sum of salience, recency, and importance.
You can review the definitions of the GenerativeAgent and GenerativeAgentMemory in the reference documentation for the following imports, focusing on add_memory and summarize_related_memories methods.
from langchain.experimental.generative_agents import GenerativeAgent, GenerativeAgentMemory
Memory Lifecycle#
Summarizing the key methods in the above: add_memory and summarize_related_memories.
When an agent makes an observation, it stores the memory:
Language model scores the memory’s importance (1 for mundane, 10 for poignant)
Observation and importance are stored within a document by TimeWeightedVectorStoreRetriever, with a last_accessed_time.
When an agent responds to an observation:
Generates query(s) for retriever, which fetches documents based on salience, recency, and importance.
Summarizes the retrieved information
Updates the last_accessed_time for the used documents.
Create a Generative Character#
Now that we’ve walked through the definition, we will create two characters named “Tommie” and “Eve”.
import math
import faiss
def relevance_score_fn(score: float) -> float:
"""Return a similarity score on a scale [0, 1]."""
# This will differ depending on a few things: | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
# - the distance / similarity metric used by the VectorStore
# - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)
# This function converts the euclidean norm of normalized embeddings
# (0 is most similar, sqrt(2) most dissimilar)
# to a similarity function (0 to 1)
return 1.0 - score / math.sqrt(2)
def create_new_memory_retriever():
"""Create a new vector store retriever unique to the agent."""
# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}, relevance_score_fn=relevance_score_fn)
return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, other_score_keys=["importance"], k=15)
tommies_memory = GenerativeAgentMemory(
llm=LLM,
memory_retriever=create_new_memory_retriever(),
verbose=False,
reflection_threshold=8 # we will give this a relatively low number to show how reflection works
)
tommie = GenerativeAgent(name="Tommie",
age=25,
traits="anxious, likes design, talkative", # You can add more persistent traits here | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
status="looking for a job", # When connected to a virtual world, we can have the characters update their status
memory_retriever=create_new_memory_retriever(),
llm=LLM,
memory=tommies_memory
)
# The current "Summary" of a character can't be made because the agent hasn't made
# any observations yet.
print(tommie.get_summary())
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
No statements were provided about Tommie's core characteristics.
# We can add memories directly to the memory object
tommie_observations = [
"Tommie remembers his dog, Bruno, from when he was a kid",
"Tommie feels tired from driving so far",
"Tommie sees the new home",
"The new neighbors have a cat",
"The road is noisy at night",
"Tommie is hungry",
"Tommie tries to get some rest.",
]
for observation in tommie_observations:
tommie.memory.add_memory(observation)
# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.
# We will see how this summary updates after more observations to create a more rich description.
print(tommie.get_summary(force_refresh=True))
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is a tired and hungry person who is moving into a new home. He remembers his childhood dog and is aware of the new neighbors' cat. He is trying to get some rest despite the noisy road. | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
Pre-Interview with Character#
Before sending our character on their way, let’s ask them a few questions.
def interview_agent(agent: GenerativeAgent, message: str) -> str:
"""Help the notebook user interact with the agent."""
new_message = f"{USER_NAME} says {message}"
return agent.generate_dialogue_response(new_message)[1]
interview_agent(tommie, "What do you like to do?")
'Tommie said "I really enjoy design and have been working on some projects in my free time. I\'m also quite talkative and enjoy meeting new people. What about you?"'
interview_agent(tommie, "What are you looking forward to doing today?")
'Tommie said "Well, today I\'m mostly focused on getting settled into my new home. But once that\'s taken care of, I\'m looking forward to exploring the neighborhood and finding some new design inspiration. What about you?"'
interview_agent(tommie, "What are you most worried about today?")
'Tommie said "Honestly, I\'m a bit anxious about finding a job in this new area. But I\'m trying to focus on settling in first and then I\'ll start my job search. How about you?"'
Step through the day’s observations.#
# Let's have Tommie start going through a day in the life.
observations = [
"Tommie wakes up to the sound of a noisy construction site outside his window.",
"Tommie gets out of bed and heads to the kitchen to make himself some coffee.", | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
"Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.",
"Tommie finally finds the filters and makes himself a cup of coffee.",
"The coffee tastes bitter, and Tommie regrets not buying a better brand.",
"Tommie checks his email and sees that he has no job offers yet.",
"Tommie spends some time updating his resume and cover letter.",
"Tommie heads out to explore the city and look for job openings.",
"Tommie sees a sign for a job fair and decides to attend.",
"The line to get in is long, and Tommie has to wait for an hour.",
"Tommie meets several potential employers at the job fair but doesn't receive any offers.",
"Tommie leaves the job fair feeling disappointed.",
"Tommie stops by a local diner to grab some lunch.",
"The service is slow, and Tommie has to wait for 30 minutes to get his food.",
"Tommie overhears a conversation at the next table about a job opening.",
"Tommie asks the diners about the job opening and gets some information about the company.",
"Tommie decides to apply for the job and sends his resume and cover letter.",
"Tommie continues his search for job openings and drops off his resume at several local businesses.",
"Tommie takes a break from his job search to go for a walk in a nearby park.",
"A dog approaches and licks Tommie's feet, and he pets it for a few minutes.",
"Tommie sees a group of people playing frisbee and decides to join in.", | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
"Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.",
"Tommie goes back to his apartment to rest for a bit.",
"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.",
"Tommie starts to feel frustrated with his job search.",
"Tommie calls his best friend to vent about his struggles.",
"Tommie's friend offers some words of encouragement and tells him to keep trying.",
"Tommie feels slightly better after talking to his friend.",
]
# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve
for i, observation in enumerate(observations):
_, reaction = tommie.generate_reaction(observation)
print(colored(observation, "green"), reaction)
if ((i+1) % 20) == 0:
print('*'*40)
print(colored(f"After {i+1} observations, Tommie's summary is:\n{tommie.get_summary(force_refresh=True)}", "blue"))
print('*'*40)
Tommie wakes up to the sound of a noisy construction site outside his window. Tommie groans and covers his head with a pillow to try and block out the noise.
Tommie gets out of bed and heads to the kitchen to make himself some coffee. Tommie stretches his arms and yawns before making his way to the kitchen.
Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some. Tommie sighs in frustration but continues to search through the boxes. | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
Tommie finally finds the filters and makes himself a cup of coffee. Tommie takes a sip of the coffee and smiles, feeling a bit more awake and energized.
The coffee tastes bitter, and Tommie regrets not buying a better brand. Tommie grimaces and sets down the coffee, disappointed in the taste.
Tommie checks his email and sees that he has no job offers yet. Tommie Tommie's shoulders slump and he sighs, feeling discouraged.
Tommie spends some time updating his resume and cover letter. Tommie nods to himself, feeling productive and hopeful.
Tommie heads out to explore the city and look for job openings. Tommie said "Do you have any recommendations for good places to look for job openings in the area?"
Tommie sees a sign for a job fair and decides to attend. Tommie said "That job fair could be a great opportunity for me to network and find some job leads. Thanks for letting me know."
The line to get in is long, and Tommie has to wait for an hour. Tommie sighs and looks around, feeling impatient and frustrated.
Tommie meets several potential employers at the job fair but doesn't receive any offers. Tommie Tommie's shoulders slump and he sighs, feeling discouraged.
Tommie leaves the job fair feeling disappointed. Tommie Tommie's shoulders slump and he sighs, feeling discouraged.
Tommie stops by a local diner to grab some lunch. Tommie said "Can I get a burger and fries to go, please?"
The service is slow, and Tommie has to wait for 30 minutes to get his food. Tommie sighs and looks at his phone, feeling impatient. | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
Tommie overhears a conversation at the next table about a job opening. Tommie said "Excuse me, I couldn't help but overhear your conversation about the job opening. Do you have any more information about it?"
Tommie asks the diners about the job opening and gets some information about the company. Tommie said "Thank you for the information, I will definitely look into that company."
Tommie decides to apply for the job and sends his resume and cover letter. Tommie nods to himself, feeling hopeful and motivated.
Tommie continues his search for job openings and drops off his resume at several local businesses. Tommie nods to himself, feeling proactive and hopeful.
Tommie takes a break from his job search to go for a walk in a nearby park. Tommie takes a deep breath of fresh air and feels a sense of calm.
A dog approaches and licks Tommie's feet, and he pets it for a few minutes. Tommie smiles and enjoys the moment of affection from the dog.
****************************************
After 20 observations, Tommie's summary is:
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is hopeful and proactive in his job search, but easily becomes discouraged when faced with setbacks. He enjoys spending time outdoors and interacting with animals. Tommie is also productive and enjoys updating his resume and cover letter. He is talkative, enjoys meeting new people, and has an interest in design. Tommie is also a coffee drinker and seeks advice from others on finding job openings.
****************************************
Tommie sees a group of people playing frisbee and decides to join in. Do nothing. | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose. Tommie winces and touches his nose, feeling a bit of pain.
Tommie goes back to his apartment to rest for a bit. Tommie takes a deep breath and sinks into his couch, feeling grateful for a moment of relaxation.
A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor. Tommie sighs and grabs a broom and dustpan to clean up the mess.
Tommie starts to feel frustrated with his job search. Tommie sighs and feels discouraged.
Tommie calls his best friend to vent about his struggles. Tommie said "Hey, can I vent to you for a bit about my job search? I'm feeling pretty discouraged."
Tommie's friend offers some words of encouragement and tells him to keep trying. Tommie said "Thank you for the encouragement, it means a lot to me."
Tommie feels slightly better after talking to his friend. Tommie nods to himself, feeling grateful for the support from his friend.
Interview after the day#
interview_agent(tommie, "Tell me about how your day has been going")
'Tommie said "Well, it\'s been a bit of a mixed day. I\'ve had some setbacks in my job search, but I also had some fun playing frisbee and spending time outdoors. How about you?"'
interview_agent(tommie, "How do you feel about coffee?")
'Tommie said "I really enjoy coffee, it helps me feel more awake and energized. But sometimes I regret not buying a better brand and finding the taste bitter. How about you?"' | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
interview_agent(tommie, "Tell me about your childhood dog!")
'Tommie said "I actually didn\'t have a childhood dog, but I\'ve always loved animals. Do you have any pets?"'
Adding Multiple Characters#
Let’s add a second character to have a conversation with Tommie. Feel free to configure different traits.
eves_memory = GenerativeAgentMemory(
llm=LLM,
memory_retriever=create_new_memory_retriever(),
verbose=False,
reflection_threshold=5
)
eve = GenerativeAgent(name="Eve",
age=34,
traits="curious, helpful", # You can add more persistent traits here
status="N/A", # When connected to a virtual world, we can have the characters update their status
llm=LLM,
daily_summaries = [
("Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie.")
],
memory=eves_memory
)
yesterday = (datetime.now() - timedelta(days=1)).strftime("%A %B %d")
eve_observations = [
"Eve overhears her colleague say something about a new client being hard to work with",
"Eve wakes up and hear's the alarm",
"Eve eats a boal of porridge",
"Eve helps a coworker on a task",
"Eve plays tennis with her friend Xu before going to work",
"Eve overhears her colleague say something about Tommie being hard to work with",
]
for observation in eve_observations: | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
eve.memory.add_memory(observation)
print(eve.get_summary())
Name: Eve (age: 34)
Innate traits: curious, helpful
Eve is a helpful and active person who enjoys playing tennis, maintaining a healthy diet, and staying aware of her surroundings. She is a responsible employee who is attentive to her coworkers' comments and willing to assist them with tasks.
Pre-conversation interviews#
Let’s “Interview” Eve before she speaks with Tommie.
interview_agent(eve, "How are you feeling about today?")
'Eve said "I\'m feeling pretty good, thanks for asking! How about you?"'
interview_agent(eve, "What do you know about Tommie?")
'Eve said "I don\'t know much about Tommie, why do you ask?"'
interview_agent(eve, "Tommie is looking to find a job. What are are some things you'd like to ask him?")
'Eve said "That\'s interesting. I don\'t know much about Tommie, but if I had the chance, I would ask him about his previous work experience and what kind of job he\'s looking for. What about you, what would you ask him?"'
interview_agent(eve, "You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.")
'Eve said "Sure, I can definitely ask him a lot of questions to keep the conversation going. Thanks for the heads up about his anxiety."'
Dialogue between Generative Agents# | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
Generative agents are much more complex when they interact with a virtual environment or with each other. Below, we run a simple conversation between Tommie and Eve.
def run_conversation(agents: List[GenerativeAgent], initial_observation: str) -> None:
"""Runs a conversation between agents."""
_, observation = agents[1].generate_reaction(initial_observation)
print(observation)
turns = 0
while True:
break_dialogue = False
for agent in agents:
stay_in_dialogue, observation = agent.generate_dialogue_response(observation)
print(observation)
# observation = f"{agent.name} said {reaction}"
if not stay_in_dialogue:
break_dialogue = True
if break_dialogue:
break
turns += 1
agents = [tommie, eve]
run_conversation(agents, "Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?")
Eve said "Of course, Tommie. I'd be happy to share about my experience. What specific questions do you have?"
Tommie said "Thank you, Eve. I'm curious about what strategies you used in your own job search. Did you have any specific tactics that helped you stand out to employers?"
Eve said "Sure, Tommie. I found that networking and reaching out to professionals in my field was really helpful. I also made sure to tailor my resume and cover letter to each job I applied to. Do you have any specific questions about those strategies?" | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
458f626cf1db-13 | Tommie said "Thank you, Eve. That's really helpful advice. Did you have any specific ways of networking that worked well for you?"
Eve said "Sure, Tommie. I found that attending industry events and connecting with professionals on LinkedIn were both great ways to network. Do you have any specific questions about those tactics?"
Tommie said "That's really helpful, thank you for sharing. Did you find that you were able to make meaningful connections through LinkedIn?"
Eve said "Yes, definitely. I was able to connect with several professionals in my field and even landed a job through a LinkedIn connection. Have you had any luck with networking on LinkedIn?"
Tommie said "That's really impressive! I haven't had much luck yet, but I'll definitely keep trying. Thank you for the advice, Eve."
Eve said "Glad I could help, Tommie. Is there anything else you want to know?"
Tommie said "Thanks again, Eve. I really appreciate your advice and I'll definitely put it into practice. Have a great day!"
Eve said "You're welcome, Tommie! Don't hesitate to reach out if you have any more questions. Have a great day too!"
Let’s interview our agents after their conversation#
Since the generative agents retain their memories from the day, we can ask them about their plans, conversations, and other memories.
# We can see a current "Summary" of a character based on their own perception of self
# has changed
print(tommie.get_summary(force_refresh=True))
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
458f626cf1db-14 | Innate traits: anxious, likes design, talkative
Tommie is a hopeful and proactive individual who is searching for a job. He becomes discouraged when he doesn't receive any offers or positive responses, but he tries to stay productive and calm by updating his resume, going for walks, and talking to friends for support. He is also grateful for any encouragement he receives and is motivated to continue his job search. Additionally, he has a fond memory of his childhood pet and enjoys taking breaks to relax.
print(eve.get_summary(force_refresh=True))
Name: Eve (age: 34)
Innate traits: curious, helpful
Eve is a helpful and friendly coworker who enjoys playing tennis and eating breakfast. She is attentive and observant, often overhearing conversations around her. She is also proactive and willing to offer advice and assistance to colleagues, particularly in job searching and networking. She is considerate of others' feelings and strives to keep conversations going to make others feel comfortable.
interview_agent(tommie, "How was your conversation with Eve?")
'Tommie said "It was really helpful actually! Eve gave me some great advice on job search strategies and networking. Have you ever tried networking on LinkedIn?"'
interview_agent(eve, "How was your conversation with Tommie?")
'Eve said "It was great, thanks for asking! Tommie had some really insightful questions about job searching and networking, and I was happy to offer my advice. How about you, have you had a chance to speak with Tommie recently?"'
interview_agent(eve, "What do you wish you would have said to Tommie?") | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html |
458f626cf1db-15 | interview_agent(eve, "What do you wish you would have said to Tommie?")
'Eve said "Well, I think I covered most of the topics Tommie was interested in, but if I had to add one thing, it would be to make sure to follow up with any connections you make during your job search. It\'s important to maintain those relationships and keep them updated on your progress. Did you have any other questions, Person A?"'
Contents
Generative Agent Memory Components
Memory Lifecycle
Create a Generative Character
Pre-Interview with Character
Step through the day’s observations.
Interview after the day
Adding Multiple Characters
Pre-conversation interviews
Dialogue between Generative Agents
Let’s interview our agents after their conversation | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html
f3aaf80350d4-0 | CAMEL Role-Playing Autonomous Cooperative Agents
Contents
Import LangChain related modules
Define a CAMEL agent helper class
Setup OpenAI API key and roles and task for role-playing
Create a task specify agent for brainstorming and get the specified task
Create inception prompts for AI assistant and AI user for role-playing
Create a helper to get system messages for AI assistant and AI user from role names and the task
Create AI assistant agent and AI user agent from obtained system messages
Start role-playing session to solve the task!
CAMEL Role-Playing Autonomous Cooperative Agents#
This is a LangChain implementation of the paper: “CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society”.
Overview: | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/camel_role_playing.html |
f3aaf80350d4-1 | Overview:
The rapid advancement of conversational and chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents and provide insight into their “cognitive” processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of chat agents, providing a valuable resource for investigating conversational language models. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond.
The original implementation: https://github.com/lightaime/camel
Project website: https://www.camel-ai.org/
Arxiv paper: https://arxiv.org/abs/2303.17760
Import LangChain related modules#
from typing import List
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
BaseMessage,
)
Define a CAMEL agent helper class#
class CAMELAgent: | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/camel_role_playing.html |
f3aaf80350d4-2 | )
Define a CAMEL agent helper class#
class CAMELAgent:
    """Thin wrapper around a chat model that keeps the running message history."""

    def __init__(
        self,
        system_message: SystemMessage,
        model: ChatOpenAI,
    ) -> None:
        self.system_message = system_message
        self.model = model
        self.init_messages()

    def reset(self) -> None:
        # Drop everything except the system message.
        self.init_messages()
        return self.stored_messages

    def init_messages(self) -> None:
        self.stored_messages = [self.system_message]

    def update_messages(self, message: BaseMessage) -> List[BaseMessage]:
        self.stored_messages.append(message)
        return self.stored_messages

    def step(
        self,
        input_message: HumanMessage,
    ) -> AIMessage:
        # Append the incoming message, run the chat model over the full history,
        # and store the model's reply before returning it.
        messages = self.update_messages(input_message)
        output_message = self.model(messages)
        self.update_messages(output_message)
        return output_message
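To make the statefulness concrete, here is a minimal usage sketch (not from the original notebook; the system prompt and question are placeholders, and it assumes an OpenAI API key is set as in the next cell). Each call to step appends both the incoming HumanMessage and the model's AIMessage to stored_messages, so the agent always sees the full conversation:
# Hypothetical demo of the CAMELAgent wrapper defined above.
demo_agent = CAMELAgent(
    SystemMessage(content="You are a helpful assistant."),
    ChatOpenAI(temperature=0.2),
)
reply = demo_agent.step(HumanMessage(content="In one sentence, what does a trading bot do?"))
print(reply.content)
print(len(demo_agent.stored_messages))  # 3: system + human + AI
demo_agent.reset()  # back to just the system message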
Setup OpenAI API key and roles and task for role-playing#
import os
os.environ["OPENAI_API_KEY"] = ""
assistant_role_name = "Python Programmer"
user_role_name = "Stock Trader"
task = "Develop a trading bot for the stock market"
word_limit = 50 # word limit for task brainstorming
Create a task specify agent for brainstorming and get the specified task#
task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.")
task_specifier_prompt = ( | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/camel_role_playing.html |
f3aaf80350d4-3 | task_specifier_prompt = (
"""Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.
Please make it more specific. Be creative and imaginative.
Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
)
task_specifier_template = HumanMessagePromptTemplate.from_template(template=task_specifier_prompt)
task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))
task_specifier_msg = task_specifier_template.format_messages(assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task, word_limit=word_limit)[0]
specified_task_msg = task_specify_agent.step(task_specifier_msg)
print(f"Specified task: {specified_task_msg.content}")
specified_task = specified_task_msg.content
Specified task: Develop a Python-based swing trading bot that scans market trends, monitors stocks, and generates trading signals to help a stock trader to place optimal buy and sell orders with defined stop losses and profit targets.
Create inception prompts for AI assistant and AI user for role-playing#
assistant_inception_prompt = ( | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/camel_role_playing.html |
f3aaf80350d4-4 | assistant_inception_prompt = (
"""Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
I must instruct you based on your expertise and my needs to complete the task.
I must give you one instruction at a time.
You must write a specific solution that appropriately completes the requested instruction.
You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.
Do not add anything else other than your solution to my instruction.
You are never supposed to ask me any questions you only answer questions.
You are never supposed to reply with a flake solution. Explain your solutions.
Your solution must be declarative sentences and simple present tense.
Unless I say the task is completed, you should always start with:
Solution: <YOUR_SOLUTION>
<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.
Always end <YOUR_SOLUTION> with: Next request."""
)
user_inception_prompt = (
"""Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.
We share a common interest in collaborating to successfully complete a task.
I must help you to complete the task.
Here is the task: {task}. Never forget our task! | /content/https://python.langchain.com/en/latest/use_cases/agent_simulations/camel_role_playing.html |