date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~__init__.py | """
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
CallbackManager, AsyncCallbackManager,
AIMessage, BaseMessage
""" # noqa: E501
from typing import Any, Callable, Dict, Type
from langchain.llms.base import BaseLLM
def _import_ai21() -> Any:
from langchain.llms.ai21 import AI21
return AI21
def _import_aleph_alpha() -> Any:
from langchain.llms.aleph_alpha import AlephAlpha
return AlephAlpha
def _import_amazon_api_gateway() -> Any:
from langchain.llms.amazon_api_gateway import AmazonAPIGateway
return AmazonAPIGateway
def _import_anthropic() -> Any:
from langchain.llms.anthropic import Anthropic
return Anthropic
def _import_anyscale() -> Any:
from langchain.llms.anyscale import Anyscale
return Anyscale
def _import_arcee() -> Any:
from langchain.llms.arcee import Arcee
return Arcee
def _import_aviary() -> Any:
from langchain.llms.aviary import Aviary
return Aviary
def _import_azureml_endpoint() -> Any:
from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint
return AzureMLOnlineEndpoint
def _import_baidu_qianfan_endpoint() -> Any:
from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
def _import_bananadev() -> Any:
from langchain.llms.bananadev import Banana
return Banana
def _import_baseten() -> Any:
from langchain.llms.baseten import Baseten
return Baseten
def _import_beam() -> Any:
from langchain.llms.beam import Beam
return Beam
def _import_bedrock() -> Any:
from langchain.llms.bedrock import Bedrock
return Bedrock
def _import_bittensor() -> Any:
from langchain.llms.bittensor import NIBittensorLLM
return NIBittensorLLM
def _import_cerebriumai() -> Any:
from langchain.llms.cerebriumai import CerebriumAI
return CerebriumAI
def _import_chatglm() -> Any:
from langchain.llms.chatglm import ChatGLM
return ChatGLM
def _import_clarifai() -> Any:
from langchain.llms.clarifai import Clarifai
return Clarifai
def _import_cohere() -> Any:
from langchain.llms.cohere import Cohere
return Cohere
def _import_ctransformers() -> Any:
from langchain.llms.ctransformers import CTransformers
return CTransformers
def _import_ctranslate2() -> Any:
from langchain.llms.ctranslate2 import CTranslate2
return CTranslate2
def _import_databricks() -> Any:
from langchain.llms.databricks import Databricks
return Databricks
def _import_deepinfra() -> Any:
from langchain.llms.deepinfra import DeepInfra
return DeepInfra
def _import_deepsparse() -> Any:
from langchain.llms.deepsparse import DeepSparse
return DeepSparse
def _import_edenai() -> Any:
from langchain.llms.edenai import EdenAI
return EdenAI
def _import_fake() -> Any:
from langchain.llms.fake import FakeListLLM
return FakeListLLM
def _import_fireworks() -> Any:
from langchain.llms.fireworks import Fireworks
return Fireworks
def _import_forefrontai() -> Any:
from langchain.llms.forefrontai import ForefrontAI
return ForefrontAI
def _import_gigachat() -> Any:
from langchain.llms.gigachat import GigaChat
return GigaChat
def _import_google_palm() -> Any:
from langchain.llms.google_palm import GooglePalm
return GooglePalm
def _import_gooseai() -> Any:
from langchain.llms.gooseai import GooseAI
return GooseAI
def _import_gpt4all() -> Any:
from langchain.llms.gpt4all import GPT4All
return GPT4All
def _import_gradient_ai() -> Any:
from langchain.llms.gradient_ai import GradientLLM
return GradientLLM
def _import_huggingface_endpoint() -> Any:
from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
def _import_huggingface_hub() -> Any:
from langchain.llms.huggingface_hub import HuggingFaceHub
return HuggingFaceHub
def _import_huggingface_pipeline() -> Any:
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
return HuggingFacePipeline
def _import_huggingface_text_gen_inference() -> Any:
from langchain.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference,
)
return HuggingFaceTextGenInference
def _import_human() -> Any:
from langchain.llms.human import HumanInputLLM
return HumanInputLLM
def _import_javelin_ai_gateway() -> Any:
from langchain.llms.javelin_ai_gateway import JavelinAIGateway
return JavelinAIGateway
def _import_koboldai() -> Any:
from langchain.llms.koboldai import KoboldApiLLM
return KoboldApiLLM
def _import_llamacpp() -> Any:
from langchain.llms.llamacpp import LlamaCpp
return LlamaCpp
def _import_manifest() -> Any:
from langchain.llms.manifest import ManifestWrapper
return ManifestWrapper
def _import_minimax() -> Any:
from langchain.llms.minimax import Minimax
return Minimax
def _import_mlflow_ai_gateway() -> Any:
from langchain.llms.mlflow_ai_gateway import MlflowAIGateway
return MlflowAIGateway
def _import_modal() -> Any:
from langchain.llms.modal import Modal
return Modal
def _import_mosaicml() -> Any:
from langchain.llms.mosaicml import MosaicML
return MosaicML
def _import_nlpcloud() -> Any:
from langchain.llms.nlpcloud import NLPCloud
return NLPCloud
def _import_octoai_endpoint() -> Any:
from langchain.llms.octoai_endpoint import OctoAIEndpoint
return OctoAIEndpoint
def _import_ollama() -> Any:
from langchain.llms.ollama import Ollama
return Ollama
def _import_opaqueprompts() -> Any:
from langchain.llms.opaqueprompts import OpaquePrompts
return OpaquePrompts
def _import_azure_openai() -> Any:
from langchain.llms.openai import AzureOpenAI
return AzureOpenAI
def _import_openai() -> Any:
from langchain.llms.openai import OpenAI
return OpenAI
def _import_openai_chat() -> Any:
from langchain.llms.openai import OpenAIChat
return OpenAIChat
def _import_openllm() -> Any:
from langchain.llms.openllm import OpenLLM
return OpenLLM
def _import_openlm() -> Any:
from langchain.llms.openlm import OpenLM
return OpenLM
def _import_pai_eas_endpoint() -> Any:
from langchain.llms.pai_eas_endpoint import PaiEasEndpoint
return PaiEasEndpoint
def _import_petals() -> Any:
from langchain.llms.petals import Petals
return Petals
def _import_pipelineai() -> Any:
from langchain.llms.pipelineai import PipelineAI
return PipelineAI
def _import_predibase() -> Any:
from langchain.llms.predibase import Predibase
return Predibase
def _import_predictionguard() -> Any:
from langchain.llms.predictionguard import PredictionGuard
return PredictionGuard
def _import_promptlayer() -> Any:
from langchain.llms.promptlayer_openai import PromptLayerOpenAI
return PromptLayerOpenAI
def _import_promptlayer_chat() -> Any:
from langchain.llms.promptlayer_openai import PromptLayerOpenAIChat
return PromptLayerOpenAIChat
def _import_replicate() -> Any:
from langchain.llms.replicate import Replicate
return Replicate
def _import_rwkv() -> Any:
from langchain.llms.rwkv import RWKV
return RWKV
def _import_sagemaker_endpoint() -> Any:
from langchain.llms.sagemaker_endpoint import SagemakerEndpoint
return SagemakerEndpoint
def _import_self_hosted() -> Any:
from langchain.llms.self_hosted import SelfHostedPipeline
return SelfHostedPipeline
def _import_self_hosted_hugging_face() -> Any:
from langchain.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
return SelfHostedHuggingFaceLLM
def _import_stochasticai() -> Any:
from langchain.llms.stochasticai import StochasticAI
return StochasticAI
def _import_symblai_nebula() -> Any:
from langchain.llms.symblai_nebula import Nebula
return Nebula
def _import_textgen() -> Any:
from langchain.llms.textgen import TextGen
return TextGen
def _import_titan_takeoff() -> Any:
from langchain.llms.titan_takeoff import TitanTakeoff
return TitanTakeoff
def _import_together() -> Any:
from langchain.llms.together import Together
return Together
def _import_tongyi() -> Any:
from langchain.llms.tongyi import Tongyi
return Tongyi
def _import_vertex() -> Any:
from langchain.llms.vertexai import VertexAI
return VertexAI
def _import_vertex_model_garden() -> Any:
from langchain.llms.vertexai import VertexAIModelGarden
return VertexAIModelGarden
def _import_vllm() -> Any:
from langchain.llms.vllm import VLLM
return VLLM
def _import_vllm_openai() -> Any:
from langchain.llms.vllm import VLLMOpenAI
return VLLMOpenAI
def _import_writer() -> Any:
from langchain.llms.writer import Writer
return Writer
def _import_xinference() -> Any:
from langchain.llms.xinference import Xinference
return Xinference
def _import_yandex_gpt() -> Any:
from langchain.llms.yandex import YandexGPT
return YandexGPT
def __getattr__(name: str) -> Any:
if name == "AI21":
return _import_ai21()
elif name == "AlephAlpha":
return _import_aleph_alpha()
elif name == "AmazonAPIGateway":
return _import_amazon_api_gateway()
elif name == "Anthropic":
return _import_anthropic()
elif name == "Anyscale":
return _import_anyscale()
elif name == "Arcee":
return _import_arcee()
elif name == "Aviary":
return _import_aviary()
elif name == "AzureMLOnlineEndpoint":
return _import_azureml_endpoint()
elif name == "QianfanLLMEndpoint":
return _import_baidu_qianfan_endpoint()
elif name == "Banana":
return _import_bananadev()
elif name == "Baseten":
return _import_baseten()
elif name == "Beam":
return _import_beam()
elif name == "Bedrock":
return _import_bedrock()
elif name == "NIBittensorLLM":
return _import_bittensor()
elif name == "CerebriumAI":
return _import_cerebriumai()
elif name == "ChatGLM":
return _import_chatglm()
elif name == "Clarifai":
return _import_clarifai()
elif name == "Cohere":
return _import_cohere()
elif name == "CTransformers":
return _import_ctransformers()
elif name == "CTranslate2":
return _import_ctranslate2()
elif name == "Databricks":
return _import_databricks()
elif name == "DeepInfra":
return _import_deepinfra()
elif name == "DeepSparse":
return _import_deepsparse()
elif name == "EdenAI":
return _import_edenai()
elif name == "FakeListLLM":
return _import_fake()
elif name == "Fireworks":
return _import_fireworks()
elif name == "ForefrontAI":
return _import_forefrontai()
elif name == "GigaChat":
return _import_gigachat()
elif name == "GooglePalm":
return _import_google_palm()
elif name == "GooseAI":
return _import_gooseai()
elif name == "GPT4All":
return _import_gpt4all()
elif name == "GradientLLM":
return _import_gradient_ai()
elif name == "HuggingFaceEndpoint":
return _import_huggingface_endpoint()
elif name == "HuggingFaceHub":
return _import_huggingface_hub()
elif name == "HuggingFacePipeline":
return _import_huggingface_pipeline()
elif name == "HuggingFaceTextGenInference":
return _import_huggingface_text_gen_inference()
elif name == "HumanInputLLM":
return _import_human()
elif name == "JavelinAIGateway":
return _import_javelin_ai_gateway()
elif name == "KoboldApiLLM":
return _import_koboldai()
elif name == "LlamaCpp":
return _import_llamacpp()
elif name == "ManifestWrapper":
return _import_manifest()
elif name == "Minimax":
return _import_minimax()
elif name == "MlflowAIGateway":
return _import_mlflow_ai_gateway()
elif name == "Modal":
return _import_modal()
elif name == "MosaicML":
return _import_mosaicml()
elif name == "NLPCloud":
return _import_nlpcloud()
elif name == "OctoAIEndpoint":
return _import_octoai_endpoint()
elif name == "Ollama":
return _import_ollama()
elif name == "OpaquePrompts":
return _import_opaqueprompts()
elif name == "AzureOpenAI":
return _import_azure_openai()
elif name == "OpenAI":
return _import_openai()
elif name == "OpenAIChat":
return _import_openai_chat()
elif name == "OpenLLM":
return _import_openllm()
elif name == "OpenLM":
return _import_openlm()
elif name == "PaiEasEndpoint":
return _import_pai_eas_endpoint()
elif name == "Petals":
return _import_petals()
elif name == "PipelineAI":
return _import_pipelineai()
elif name == "Predibase":
return _import_predibase()
elif name == "PredictionGuard":
return _import_predictionguard()
elif name == "PromptLayerOpenAI":
return _import_promptlayer()
elif name == "PromptLayerOpenAIChat":
return _import_promptlayer_chat()
elif name == "Replicate":
return _import_replicate()
elif name == "RWKV":
return _import_rwkv()
elif name == "SagemakerEndpoint":
return _import_sagemaker_endpoint()
elif name == "SelfHostedPipeline":
return _import_self_hosted()
elif name == "SelfHostedHuggingFaceLLM":
return _import_self_hosted_hugging_face()
elif name == "StochasticAI":
return _import_stochasticai()
elif name == "Nebula":
return _import_symblai_nebula()
elif name == "TextGen":
return _import_textgen()
elif name == "TitanTakeoff":
return _import_titan_takeoff()
elif name == "Together":
return _import_together()
elif name == "Tongyi":
return _import_tongyi()
elif name == "VertexAI":
return _import_vertex()
elif name == "VertexAIModelGarden":
return _import_vertex_model_garden()
elif name == "VLLM":
return _import_vllm()
elif name == "VLLMOpenAI":
return _import_vllm_openai()
elif name == "Writer":
return _import_writer()
elif name == "Xinference":
return _import_xinference()
elif name == "YandexGPT":
return _import_yandex_gpt()
elif name == "type_to_cls_dict":
# for backwards compatibility
type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
k: v() for k, v in get_type_to_cls_dict().items()
}
return type_to_cls_dict
else:
raise AttributeError(f"Could not find: {name}")
__all__ = [
"AI21",
"AlephAlpha",
"AmazonAPIGateway",
"Anthropic",
"Anyscale",
"Arcee",
"Aviary",
"AzureMLOnlineEndpoint",
"AzureOpenAI",
"Banana",
"Baseten",
"Beam",
"Bedrock",
"CTransformers",
"CTranslate2",
"CerebriumAI",
"ChatGLM",
"Clarifai",
"Cohere",
"Databricks",
"DeepInfra",
"DeepSparse",
"EdenAI",
"FakeListLLM",
"Fireworks",
"ForefrontAI",
"GigaChat",
"GPT4All",
"GooglePalm",
"GooseAI",
"GradientLLM",
"HuggingFaceEndpoint",
"HuggingFaceHub",
"HuggingFacePipeline",
"HuggingFaceTextGenInference",
"HumanInputLLM",
"KoboldApiLLM",
"LlamaCpp",
"TextGen",
"ManifestWrapper",
"Minimax",
"MlflowAIGateway",
"Modal",
"MosaicML",
"Nebula",
"NIBittensorLLM",
"NLPCloud",
"Ollama",
"OpenAI",
"OpenAIChat",
"OpenLLM",
"OpenLM",
"PaiEasEndpoint",
"Petals",
"PipelineAI",
"Predibase",
"PredictionGuard",
"PromptLayerOpenAI",
"PromptLayerOpenAIChat",
"OpaquePrompts",
"RWKV",
"Replicate",
"SagemakerEndpoint",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",
"StochasticAI",
"TitanTakeoff",
"Tongyi",
"VertexAI",
"VertexAIModelGarden",
"VLLM",
"VLLMOpenAI",
"Writer",
"OctoAIEndpoint",
"Xinference",
"JavelinAIGateway",
"QianfanLLMEndpoint",
"GigaChat",
"YandexGPT",
]
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
return {
"ai21": _import_ai21,
"aleph_alpha": _import_aleph_alpha,
"amazon_api_gateway": _import_amazon_api_gateway,
"amazon_bedrock": _import_bedrock,
"anthropic": _import_anthropic,
"anyscale": _import_anyscale,
"arcee": _import_arcee,
"aviary": _import_aviary,
"azure": _import_azure_openai,
"azureml_endpoint": _import_azureml_endpoint,
"bananadev": _import_bananadev,
"baseten": _import_baseten,
"beam": _import_beam,
"cerebriumai": _import_cerebriumai,
"chat_glm": _import_chatglm,
"clarifai": _import_clarifai,
"cohere": _import_cohere,
"ctransformers": _import_ctransformers,
"ctranslate2": _import_ctranslate2,
"databricks": _import_databricks,
"deepinfra": _import_deepinfra,
"deepsparse": _import_deepsparse,
"edenai": _import_edenai,
"fake-list": _import_fake,
"forefrontai": _import_forefrontai,
"giga-chat-model": _import_gigachat,
"google_palm": _import_google_palm,
"gooseai": _import_gooseai,
"gradient": _import_gradient_ai,
"gpt4all": _import_gpt4all,
"huggingface_endpoint": _import_huggingface_endpoint,
"huggingface_hub": _import_huggingface_hub,
"huggingface_pipeline": _import_huggingface_pipeline,
"huggingface_textgen_inference": _import_huggingface_text_gen_inference,
"human-input": _import_human,
"koboldai": _import_koboldai,
"llamacpp": _import_llamacpp,
"textgen": _import_textgen,
"minimax": _import_minimax,
"mlflow-ai-gateway": _import_mlflow_ai_gateway,
"modal": _import_modal,
"mosaic": _import_mosaicml,
"nebula": _import_symblai_nebula,
"nibittensor": _import_bittensor,
"nlpcloud": _import_nlpcloud,
"ollama": _import_ollama,
"openai": _import_openai,
"openlm": _import_openlm,
"pai_eas_endpoint": _import_pai_eas_endpoint,
"petals": _import_petals,
"pipelineai": _import_pipelineai,
"predibase": _import_predibase,
"opaqueprompts": _import_opaqueprompts,
"replicate": _import_replicate,
"rwkv": _import_rwkv,
"sagemaker_endpoint": _import_sagemaker_endpoint,
"self_hosted": _import_self_hosted,
"self_hosted_hugging_face": _import_self_hosted_hugging_face,
"stochasticai": _import_stochasticai,
"together": _import_together,
"tongyi": _import_tongyi,
"titan_takeoff": _import_titan_takeoff,
"vertexai": _import_vertex,
"vertexai_model_garden": _import_vertex_model_garden,
"openllm": _import_openllm,
"openllm_client": _import_openllm,
"vllm": _import_vllm,
"vllm_openai": _import_vllm_openai,
"writer": _import_writer,
"xinference": _import_xinference,
"javelin-ai-gateway": _import_javelin_ai_gateway,
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
"yandex_gpt": _import_yandex_gpt,
}
| [] |
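The registry module above resolves provider classes lazily: the module-level `__getattr__` defers each heavy provider import until the class name is first accessed, and `get_type_to_cls_dict()` exposes the same mapping as zero-argument import callables. A minimal sketch of that behaviour (assuming only that the langchain/gigachain package is importable; `OpenAI` and the `"openai"` key are just one example entry):

```python
# Minimal sketch of the lazy-import behaviour of langchain.llms.__init__.
from langchain import llms

# Attribute access triggers the module-level __getattr__, which imports the
# provider class only at this point.
OpenAI = getattr(llms, "OpenAI")

# The registry maps short provider keys to import callables; calling one
# returns the same class object.
registry = llms.get_type_to_cls_dict()
assert registry["openai"]() is OpenAI
```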
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~tensorflow_hub.py | from typing import Any, List
from langchain.pydantic_v1 import BaseModel, Extra
from langchain.schema.embeddings import Embeddings
DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
class TensorflowHubEmbeddings(BaseModel, Embeddings):
"""TensorflowHub embedding models.
    To use, you should have the ``tensorflow_hub`` and ``tensorflow_text``
    python packages installed.
Example:
.. code-block:: python
from langchain.embeddings import TensorflowHubEmbeddings
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
tf = TensorflowHubEmbeddings(model_url=url)
"""
embed: Any #: :meta private:
model_url: str = DEFAULT_MODEL_URL
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the tensorflow_hub and tensorflow_text."""
super().__init__(**kwargs)
try:
import tensorflow_hub
except ImportError:
            raise ImportError(
                "Could not import tensorflow-hub python package. "
                "Please install it with `pip install tensorflow-hub`."
            )
try:
import tensorflow_text # noqa
except ImportError:
            raise ImportError(
                "Could not import tensorflow_text python package. "
                "Please install it with `pip install tensorflow_text`."
            )
self.embed = tensorflow_hub.load(self.model_url)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a TensorflowHub embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.embed(texts).numpy()
return embeddings.tolist()
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a TensorflowHub embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.embed([text]).numpy()[0]
return embedding.tolist()
| [] |
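A brief usage sketch for the embeddings class above (illustrative only: it assumes `tensorflow_hub` and `tensorflow_text` are installed, and the default universal-sentence-encoder model is downloaded on first use):

```python
from langchain.embeddings import TensorflowHubEmbeddings

# Uses DEFAULT_MODEL_URL unless model_url is overridden.
embeddings = TensorflowHubEmbeddings()

doc_vectors = embeddings.embed_documents(["hello world", "goodbye world"])
query_vector = embeddings.embed_query("hello")
print(len(doc_vectors), len(query_vector))  # two document vectors, one query vector
```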
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~s3_directory.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.s3_file import S3FileLoader
if TYPE_CHECKING:
import botocore
class S3DirectoryLoader(BaseLoader):
"""Load from `Amazon AWS S3` directory."""
def __init__(
self,
bucket: str,
prefix: str = "",
*,
region_name: Optional[str] = None,
api_version: Optional[str] = None,
use_ssl: Optional[bool] = True,
verify: Union[str, bool, None] = None,
endpoint_url: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
boto_config: Optional[botocore.client.Config] = None,
):
"""Initialize with bucket and key name.
:param bucket: The name of the S3 bucket.
:param prefix: The prefix of the S3 key. Defaults to "".
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:param use_ssl: Whether to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:param verify: Whether to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
uses. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type boto_config: botocore.client.Config
:param boto_config: Advanced boto3 client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
"""
self.bucket = bucket
self.prefix = prefix
self.region_name = region_name
self.api_version = api_version
self.use_ssl = use_ssl
self.verify = verify
self.endpoint_url = endpoint_url
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_session_token = aws_session_token
self.boto_config = boto_config
def load(self) -> List[Document]:
"""Load documents."""
try:
import boto3
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
s3 = boto3.resource(
"s3",
region_name=self.region_name,
api_version=self.api_version,
use_ssl=self.use_ssl,
verify=self.verify,
endpoint_url=self.endpoint_url,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
config=self.boto_config,
)
bucket = s3.Bucket(self.bucket)
docs = []
for obj in bucket.objects.filter(Prefix=self.prefix):
loader = S3FileLoader(
self.bucket,
obj.key,
region_name=self.region_name,
api_version=self.api_version,
use_ssl=self.use_ssl,
verify=self.verify,
endpoint_url=self.endpoint_url,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
boto_config=self.boto_config,
)
docs.extend(loader.load())
return docs
| [] |
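A short usage sketch for the loader above (the bucket name and prefix are placeholders; AWS credentials are assumed to be available through the usual boto3 mechanisms):

```python
from langchain.document_loaders import S3DirectoryLoader

# One S3FileLoader is created per object under the prefix; each yields Documents.
loader = S3DirectoryLoader("my-example-bucket", prefix="reports/2023/")
docs = loader.load()
print(len(docs), docs[0].metadata if docs else "no objects matched")
```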
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~sagemaker_callback.py | import json
import os
import shutil
import tempfile
from copy import deepcopy
from typing import Any, Dict, List, Optional
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
flatten_dict,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult
def save_json(data: dict, file_path: str) -> None:
"""Save dict to local file path.
Parameters:
data (dict): The dictionary to be saved.
file_path (str): Local file path.
"""
with open(file_path, "w") as outfile:
json.dump(data, outfile)
class SageMakerCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs prompt artifacts and metrics to SageMaker Experiments.
Parameters:
run (sagemaker.experiments.run.Run): Run object where the experiment is logged.
"""
def __init__(self, run: Any) -> None:
"""Initialize callback handler."""
super().__init__()
self.run = run
self.metrics = {
"step": 0,
"starts": 0,
"ends": 0,
"errors": 0,
"text_ctr": 0,
"chain_starts": 0,
"chain_ends": 0,
"llm_starts": 0,
"llm_ends": 0,
"llm_streams": 0,
"tool_starts": 0,
"tool_ends": 0,
"agent_ends": 0,
}
# Create a temporary directory
self.temp_dir = tempfile.mkdtemp()
def _reset(self) -> None:
for k, v in self.metrics.items():
self.metrics[k] = 0
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.metrics["step"] += 1
self.metrics["llm_starts"] += 1
self.metrics["starts"] += 1
llm_starts = self.metrics["llm_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
for idx, prompt in enumerate(prompts):
prompt_resp = deepcopy(resp)
prompt_resp["prompt"] = prompt
self.jsonf(
prompt_resp,
self.temp_dir,
f"llm_start_{llm_starts}_prompt_{idx}",
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.metrics["step"] += 1
self.metrics["llm_streams"] += 1
llm_streams = self.metrics["llm_streams"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"llm_new_tokens_{llm_streams}")
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.metrics["step"] += 1
self.metrics["llm_ends"] += 1
self.metrics["ends"] += 1
llm_ends = self.metrics["llm_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.metrics)
for generations in response.generations:
for idx, generation in enumerate(generations):
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
self.jsonf(
                    generation_resp,
self.temp_dir,
f"llm_end_{llm_ends}_generation_{idx}",
)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.metrics["step"] += 1
self.metrics["chain_starts"] += 1
self.metrics["starts"] += 1
chain_starts = self.metrics["chain_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp["inputs"] = chain_input
self.jsonf(input_resp, self.temp_dir, f"chain_start_{chain_starts}")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.metrics["step"] += 1
self.metrics["chain_ends"] += 1
self.metrics["ends"] += 1
chain_ends = self.metrics["chain_ends"]
resp: Dict[str, Any] = {}
chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()])
resp.update({"action": "on_chain_end", "outputs": chain_output})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"chain_end_{chain_ends}")
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"tool_start_{tool_starts}")
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.metrics["step"] += 1
self.metrics["tool_ends"] += 1
self.metrics["ends"] += 1
tool_ends = self.metrics["tool_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"tool_end_{tool_ends}")
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.metrics["step"] += 1
self.metrics["text_ctr"] += 1
text_ctr = self.metrics["text_ctr"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_text", "text": text})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"on_text_{text_ctr}")
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.metrics["step"] += 1
self.metrics["agent_ends"] += 1
self.metrics["ends"] += 1
agent_ends = self.metrics["agent_ends"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"agent_finish_{agent_ends}")
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f"agent_action_{tool_starts}")
def jsonf(
self,
data: Dict[str, Any],
data_dir: str,
filename: str,
is_output: Optional[bool] = True,
) -> None:
"""To log the input data as json file artifact."""
file_path = os.path.join(data_dir, f"{filename}.json")
save_json(data, file_path)
self.run.log_file(file_path, name=filename, is_output=is_output)
def flush_tracker(self) -> None:
"""Reset the steps and delete the temporary local directory."""
self._reset()
shutil.rmtree(self.temp_dir)
| [] |
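A hedged usage sketch for the callback handler above (it assumes the `sagemaker` SDK is installed, AWS credentials are configured, and an OpenAI key is set; the experiment and run names are placeholders):

```python
from sagemaker.experiments.run import Run
from langchain.callbacks import SageMakerCallbackHandler
from langchain.llms import OpenAI

with Run(experiment_name="langchain-demo", run_name="llm-run") as run:
    handler = SageMakerCallbackHandler(run)
    llm = OpenAI(temperature=0, callbacks=[handler])
    llm("Write one sentence about experiment tracking.")
    # Reset the counters and delete the temporary artifact directory.
    handler.flush_tracker()
```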
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~pubmed.py | from typing import List
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from langchain.utilities.pubmed import PubMedAPIWrapper
class PubMedRetriever(BaseRetriever, PubMedAPIWrapper):
"""`PubMed API` retriever.
    It exposes PubMedAPIWrapper.load_docs() through the standard
    get_relevant_documents() interface.
It uses all PubMedAPIWrapper arguments without any change.
"""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
return self.load_docs(query=query)
| [] |
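A minimal sketch of the retriever above in use (network access to the PubMed API is assumed; `top_k_results` is one of the PubMedAPIWrapper fields the class inherits):

```python
from langchain.retrievers import PubMedRetriever

retriever = PubMedRetriever(top_k_results=3)
docs = retriever.get_relevant_documents("mRNA vaccine efficacy")
for doc in docs:
    print(doc.metadata, doc.page_content[:80])
```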
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~baseten.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Field
logger = logging.getLogger(__name__)
class Baseten(LLM):
"""Baseten models.
To use, you should have the ``baseten`` python package installed,
and run ``baseten.login()`` with your Baseten API key.
The required ``model`` param can be either a model id or model
version id. Using a model version ID will result in
slightly faster invocation.
Any other model parameters can also
be passed in with the format input={model_param: value, ...}
The Baseten model must accept a dictionary of input with the key
"prompt" and return a dictionary with a key "data" which maps
to a list of response strings.
Example:
.. code-block:: python
from langchain.llms import Baseten
my_model = Baseten(model="MODEL_ID")
output = my_model("prompt")
"""
model: str
input: Dict[str, Any] = Field(default_factory=dict)
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "baseten"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Baseten deployed model endpoint."""
try:
import baseten
except ImportError as exc:
raise ImportError(
"Could not import Baseten Python package. "
"Please install it with `pip install baseten`."
) from exc
# get the model and version
try:
model = baseten.deployed_model_version_id(self.model)
response = model.predict({"prompt": prompt, **kwargs})
except baseten.common.core.ApiError:
model = baseten.deployed_model_id(self.model)
response = model.predict({"prompt": prompt, **kwargs})
return "".join(response)
| [] |
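A usage sketch for the Baseten wrapper above (the API key and model version id are placeholders; `baseten.login()` must succeed before the model is called):

```python
import baseten
from langchain.llms import Baseten

baseten.login("YOUR_BASETEN_API_KEY")  # placeholder key
llm = Baseten(model="MODEL_VERSION_ID")  # placeholder model/version id
print(llm("What is the capital of France?"))
```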
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~self_hosted_hugging_face.py | import importlib.util
import logging
from typing import Any, Callable, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
logger = logging.getLogger(__name__)
def _generate_text(
pipeline: Any,
prompt: str,
*args: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> str:
"""Inference function to send to the remote hardware.
Accepts a Hugging Face pipeline (or more likely,
a key pointing to such a pipeline on the cluster's object store)
and returns generated text.
"""
response = pipeline(prompt, *args, **kwargs)
if pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _load_transformer(
model_id: str = DEFAULT_MODEL_ID,
task: str = DEFAULT_TASK,
device: int = 0,
model_kwargs: Optional[dict] = None,
) -> Any:
"""Inference function to send to the remote hardware.
Accepts a huggingface model_id and returns a pipeline for the task.
"""
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers import pipeline as hf_pipeline
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
            logger.warning(
                "Device has %d GPUs available. "
                "Provide device={deviceId} to `from_model_id` to use available "
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
model_kwargs=_model_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return pipeline
class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
"""HuggingFace Pipeline API to run on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example using from_model_id:
.. code-block:: python
from langchain.llms import SelfHostedHuggingFaceLLM
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-large", task="text2text-generation",
hardware=gpu
)
Example passing fn that generates a pipeline (bc the pipeline is not serializable):
.. code-block:: python
from langchain.llms import SelfHostedHuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def get_pipeline():
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer
)
return pipe
hf = SelfHostedHuggingFaceLLM(
model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
"""
model_id: str = DEFAULT_MODEL_ID
"""Hugging Face model_id to load the model."""
task: str = DEFAULT_TASK
"""Hugging Face task ("text-generation", "text2text-generation" or
"summarization")."""
device: int = 0
"""Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_reqs: List[str] = ["./", "transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
model_load_fn: Callable = _load_transformer
"""Function to load the model remotely on the server."""
inference_fn: Callable = _generate_text #: :meta private:
"""Inference function to send to the remote hardware."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def __init__(self, **kwargs: Any):
"""Construct the pipeline remotely using an auxiliary function.
The load function needs to be importable to be imported
and run on the server, i.e. in a module and not a REPL or closure.
Then, initialize the remote inference function.
"""
load_fn_kwargs = {
"model_id": kwargs.get("model_id", DEFAULT_MODEL_ID),
"task": kwargs.get("task", DEFAULT_TASK),
"device": kwargs.get("device", 0),
"model_kwargs": kwargs.get("model_kwargs", None),
}
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
return "selfhosted_huggingface_pipeline"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return self.client(
pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs
)
| [] |
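A sketch mirroring the docstring example above (it assumes `runhouse` is installed and that cloud credentials exist for the placeholder A100 cluster):

```python
import runhouse as rh
from langchain.llms import SelfHostedHuggingFaceLLM

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")  # placeholder cluster
llm = SelfHostedHuggingFaceLLM(
    model_id="google/flan-t5-large",
    task="text2text-generation",
    hardware=gpu,
)
print(llm("Translate to German: How are you?"))
```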
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~comprehend_moderation~amazon_comprehend_moderation.py | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain_experimental.comprehend_moderation.base_moderation import (
BaseModeration,
)
from langchain_experimental.comprehend_moderation.base_moderation_callbacks import (
BaseModerationCallbackHandler,
)
from langchain_experimental.pydantic_v1 import root_validator
class AmazonComprehendModerationChain(Chain):
"""A subclass of Chain, designed to apply moderation to LLMs."""
output_key: str = "output" #: :meta private:
"""Key used to fetch/store the output in data containers. Defaults to `output`"""
input_key: str = "input" #: :meta private:
"""Key used to fetch/store the input in data containers. Defaults to `input`"""
moderation_config: Optional[Dict[str, Any]] = None
"""Configuration settings for moderation"""
client: Optional[Any]
"""boto3 client object for connection to Amazon Comprehend"""
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
moderation_callback: Optional[BaseModerationCallbackHandler] = None
"""Callback handler for moderation, this is different
from regular callbacks which can be used in addition to this."""
unique_id: Optional[str] = None
"""A unique id that can be used to identify or group a user or session"""
@root_validator(pre=True)
def create_client(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""
Creates an Amazon Comprehend client
Args:
values (Dict[str, Any]): A dictionary containing configuration values.
Returns:
Dict[str, Any]: A dictionary with the updated configuration values,
including the Amazon Comprehend client.
Raises:
ModuleNotFoundError: If the 'boto3' package is not installed.
ValueError: If there is an issue importing 'boto3' or loading
AWS credentials.
Example:
.. code-block:: python
config = {
"credentials_profile_name": "my-profile",
"region_name": "us-west-2"
}
updated_config = create_client(config)
comprehend_client = updated_config["client"]
"""
if values.get("client") is not None:
return values
try:
import boto3
if values.get("credentials_profile_name"):
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values.get("region_name"):
client_params["region_name"] = values["region_name"]
values["client"] = session.client("comprehend", **client_params)
return values
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
@property
def output_keys(self) -> List[str]:
"""
Returns a list of output keys.
This method defines the output keys that will be used to access the output
values produced by the chain or function. It ensures that the specified keys
are available to access the outputs.
Returns:
List[str]: A list of output keys.
Note:
This method is considered private and may not be intended for direct
external use.
"""
return [self.output_key]
@property
def input_keys(self) -> List[str]:
"""
Returns a list of input keys expected by the prompt.
This method defines the input keys that the prompt expects in order to perform
its processing. It ensures that the specified keys are available for providing
input to the prompt.
Returns:
List[str]: A list of input keys.
Note:
This method is considered private and may not be intended for direct
external use.
"""
return [self.input_key]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""
Executes the moderation process on the input text and returns the processed
output.
This internal method performs the moderation process on the input text. It
converts the input prompt value to plain text, applies the specified filters,
and then converts the filtered output back to a suitable prompt value object.
Additionally, it provides the option to log information about the run using
the provided `run_manager`.
Args:
inputs: A dictionary containing input values
run_manager: A run manager to handle run-related events. Default is None
Returns:
Dict[str, str]: A dictionary containing the processed output of the
moderation process.
Raises:
ValueError: If there is an error during the moderation process
"""
if run_manager:
run_manager.on_text("Running AmazonComprehendModerationChain...\n")
moderation = BaseModeration(
client=self.client,
config=self.moderation_config,
moderation_callback=self.moderation_callback,
unique_id=self.unique_id,
run_manager=run_manager,
)
response = moderation.moderate(prompt=inputs[self.input_keys[0]])
return {self.output_key: response}
| [] |
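A minimal sketch of the moderation chain above (boto3 credentials with Amazon Comprehend access are assumed; the region is a placeholder and the default moderation config is used):

```python
from langchain_experimental.comprehend_moderation import (
    AmazonComprehendModerationChain,
)

moderation = AmazonComprehendModerationChain(region_name="us-east-1")
# Chain.run works here because the chain declares a single input and output key.
print(moderation.run("A harmless sentence to screen for PII and toxicity."))
```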
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~file_management~file_search.py | import fnmatch
import os
from typing import Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.base import BaseTool
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileSearchInput(BaseModel):
"""Input for FileSearchTool."""
dir_path: str = Field(
default=".",
description="Subdirectory to search in.",
)
    pattern: str = Field(
        ...,
        description="Unix shell-style wildcard pattern, where * matches everything.",
    )
class FileSearchTool(BaseFileToolMixin, BaseTool):
    """Tool that searches for files in a subdirectory matching a wildcard pattern."""
name: str = "file_search"
args_schema: Type[BaseModel] = FileSearchInput
    description: str = (
        "Recursively search for files in a subdirectory that match a wildcard pattern"
    )
def _run(
self,
pattern: str,
dir_path: str = ".",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
matches = []
try:
for root, _, filenames in os.walk(dir_path_):
for filename in fnmatch.filter(filenames, pattern):
absolute_path = os.path.join(root, filename)
relative_path = os.path.relpath(absolute_path, dir_path_)
matches.append(relative_path)
if matches:
return "\n".join(matches)
else:
return f"No files found for pattern {pattern} in directory {dir_path}"
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
| [] |
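A short sketch showing the tool above invoked directly (the root directory is a placeholder; `root_dir` comes from BaseFileToolMixin and sandboxes the search):

```python
from langchain.tools.file_management import FileSearchTool

tool = FileSearchTool(root_dir="/tmp")  # placeholder sandbox directory
# Multi-argument tools accept a dict matching their args_schema fields.
print(tool.run({"pattern": "*.txt", "dir_path": "."}))
```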
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~semadb.py | from typing import Any, Iterable, List, Optional, Tuple
from uuid import uuid4
import numpy as np
import requests
from langchain.schema.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import DistanceStrategy
class SemaDB(VectorStore):
"""`SemaDB` vector store.
This vector store is a wrapper around the SemaDB database.
Example:
.. code-block:: python
from langchain.vectorstores import SemaDB
db = SemaDB('mycollection', 768, embeddings, DistanceStrategy.COSINE)
"""
HOST = "semadb.p.rapidapi.com"
BASE_URL = "https://" + HOST
def __init__(
self,
collection_name: str,
vector_size: int,
embedding: Embeddings,
distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE,
api_key: str = "",
):
"""Initialise the SemaDB vector store."""
self.collection_name = collection_name
self.vector_size = vector_size
self.api_key = api_key or get_from_env("api_key", "SEMADB_API_KEY")
self._embedding = embedding
self.distance_strategy = distance_strategy
@property
def headers(self) -> dict:
"""Return the common headers."""
return {
"content-type": "application/json",
"X-RapidAPI-Key": self.api_key,
"X-RapidAPI-Host": SemaDB.HOST,
}
def _get_internal_distance_strategy(self) -> str:
"""Return the internal distance strategy."""
if self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return "euclidean"
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
raise ValueError("Max inner product is not supported by SemaDB")
elif self.distance_strategy == DistanceStrategy.DOT_PRODUCT:
return "dot"
        elif self.distance_strategy == DistanceStrategy.JACCARD:
            raise ValueError("Jaccard distance is not supported by SemaDB")
elif self.distance_strategy == DistanceStrategy.COSINE:
return "cosine"
else:
raise ValueError(f"Unknown distance strategy {self.distance_strategy}")
def create_collection(self) -> bool:
"""Creates the corresponding collection in SemaDB."""
payload = {
"id": self.collection_name,
"vectorSize": self.vector_size,
"distanceMetric": self._get_internal_distance_strategy(),
}
response = requests.post(
SemaDB.BASE_URL + "/collections",
json=payload,
headers=self.headers,
)
return response.status_code == 200
def delete_collection(self) -> bool:
"""Deletes the corresponding collection in SemaDB."""
response = requests.delete(
SemaDB.BASE_URL + f"/collections/{self.collection_name}",
headers=self.headers,
)
return response.status_code == 200
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Add texts to the vector store."""
if not isinstance(texts, list):
texts = list(texts)
embeddings = self._embedding.embed_documents(texts)
# Check dimensions
if len(embeddings[0]) != self.vector_size:
raise ValueError(
f"Embedding size mismatch {len(embeddings[0])} != {self.vector_size}"
)
# Normalise if needed
if self.distance_strategy == DistanceStrategy.COSINE:
embed_matrix = np.array(embeddings)
embed_matrix = embed_matrix / np.linalg.norm(
embed_matrix, axis=1, keepdims=True
)
embeddings = embed_matrix.tolist()
# Create points
ids: List[str] = []
points = []
if metadatas is not None:
for text, embedding, metadata in zip(texts, embeddings, metadatas):
new_id = str(uuid4())
ids.append(new_id)
points.append(
{
"id": new_id,
"vector": embedding,
"metadata": {**metadata, **{"text": text}},
}
)
else:
for text, embedding in zip(texts, embeddings):
new_id = str(uuid4())
ids.append(new_id)
points.append(
{
"id": new_id,
"vector": embedding,
"metadata": {"text": text},
}
)
# Insert points in batches
for i in range(0, len(points), batch_size):
batch = points[i : i + batch_size]
response = requests.post(
SemaDB.BASE_URL + f"/collections/{self.collection_name}/points",
json={"points": batch},
headers=self.headers,
)
            if response.status_code != 200:
                raise ValueError(f"Error adding points: {response.text}")
failed_ranges = response.json()["failedRanges"]
if len(failed_ranges) > 0:
raise ValueError(f"Error adding points: {failed_ranges}")
# Return ids
return ids
@property
def embeddings(self) -> Embeddings:
"""Return the embeddings."""
return self._embedding
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
payload = {
"ids": ids,
}
response = requests.delete(
SemaDB.BASE_URL + f"/collections/{self.collection_name}/points",
json=payload,
headers=self.headers,
)
return response.status_code == 200 and len(response.json()["failedPoints"]) == 0
def _search_points(self, embedding: List[float], k: int = 4) -> List[dict]:
"""Search points."""
# Normalise if needed
if self.distance_strategy == DistanceStrategy.COSINE:
vec = np.array(embedding)
vec = vec / np.linalg.norm(vec)
embedding = vec.tolist()
# Perform search request
payload = {
"vector": embedding,
"limit": k,
}
response = requests.post(
SemaDB.BASE_URL + f"/collections/{self.collection_name}/points/search",
json=payload,
headers=self.headers,
)
if response.status_code != 200:
raise ValueError(f"Error searching: {response.text}")
return response.json()["points"]
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query."""
query_embedding = self._embedding.embed_query(query)
return self.similarity_search_by_vector(query_embedding, k=k)
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Run similarity search with distance."""
query_embedding = self._embedding.embed_query(query)
points = self._search_points(query_embedding, k=k)
return [
(
Document(page_content=p["metadata"]["text"], metadata=p["metadata"]),
p["distance"],
)
for p in points
]
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
points = self._search_points(embedding, k=k)
return [
Document(page_content=p["metadata"]["text"], metadata=p["metadata"])
for p in points
]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = "",
vector_size: int = 0,
api_key: str = "",
distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE,
**kwargs: Any,
) -> "SemaDB":
"""Return VectorStore initialized from texts and embeddings."""
if not collection_name:
raise ValueError("Collection name must be provided")
if not vector_size:
raise ValueError("Vector size must be provided")
if not api_key:
raise ValueError("API key must be provided")
semadb = cls(
collection_name,
vector_size,
embedding,
distance_strategy=distance_strategy,
api_key=api_key,
)
if not semadb.create_collection():
raise ValueError("Error creating collection")
semadb.add_texts(texts, metadatas=metadatas)
return semadb
| [
"application/json"
] |
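A usage sketch for the vector store above (the RapidAPI key is a placeholder, an OpenAI key is assumed for the embeddings, and the vector size of 1536 assumes OpenAI's text-embedding-ada-002 model):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.semadb import SemaDB
from langchain.vectorstores.utils import DistanceStrategy

db = SemaDB.from_texts(
    texts=["SemaDB stores vectors", "LangChain wraps many vector stores"],
    embedding=OpenAIEmbeddings(),
    collection_name="mycollection",
    vector_size=1536,
    api_key="YOUR_SEMADB_API_KEY",  # placeholder RapidAPI key
    distance_strategy=DistanceStrategy.COSINE,
)
print(db.similarity_search("vector database", k=1))
```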
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~url_selenium.py | """Loader that uses Selenium to load a page, then uses unstructured to load the html.
"""
import logging
from typing import TYPE_CHECKING, List, Literal, Optional, Union
if TYPE_CHECKING:
from selenium.webdriver import Chrome, Firefox
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class SeleniumURLLoader(BaseLoader):
"""Load `HTML` pages with `Selenium` and parse with `Unstructured`.
This is useful for loading pages that require javascript to render.
Attributes:
urls (List[str]): List of URLs to load.
continue_on_failure (bool): If True, continue loading other URLs on failure.
browser (str): The browser to use, either 'chrome' or 'firefox'.
binary_location (Optional[str]): The location of the browser binary.
executable_path (Optional[str]): The path to the browser executable.
headless (bool): If True, the browser will run in headless mode.
        arguments (List[str]): List of arguments to pass to the browser.
"""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
browser: Literal["chrome", "firefox"] = "chrome",
binary_location: Optional[str] = None,
executable_path: Optional[str] = None,
headless: bool = True,
arguments: List[str] = [],
):
"""Load a list of URLs using Selenium and unstructured."""
try:
import selenium # noqa:F401
except ImportError:
raise ImportError(
"selenium package not found, please install it with "
"`pip install selenium`"
)
try:
import unstructured # noqa:F401
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.browser = browser
self.binary_location = binary_location
self.executable_path = executable_path
self.headless = headless
self.arguments = arguments
def _get_driver(self) -> Union["Chrome", "Firefox"]:
"""Create and return a WebDriver instance based on the specified browser.
Raises:
ValueError: If an invalid browser is specified.
Returns:
Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
"""
if self.browser.lower() == "chrome":
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service
chrome_options = ChromeOptions()
for arg in self.arguments:
chrome_options.add_argument(arg)
if self.headless:
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
if self.binary_location is not None:
chrome_options.binary_location = self.binary_location
if self.executable_path is None:
return Chrome(options=chrome_options)
return Chrome(
options=chrome_options,
service=Service(executable_path=self.executable_path),
)
elif self.browser.lower() == "firefox":
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.firefox.service import Service
firefox_options = FirefoxOptions()
for arg in self.arguments:
firefox_options.add_argument(arg)
if self.headless:
firefox_options.add_argument("--headless")
if self.binary_location is not None:
firefox_options.binary_location = self.binary_location
if self.executable_path is None:
return Firefox(options=firefox_options)
return Firefox(
options=firefox_options,
service=Service(executable_path=self.executable_path),
)
else:
raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.")
    def _build_metadata(self, url: str, driver: Union["Chrome", "Firefox"]) -> dict:
        """Build metadata based on the contents of the webpage."""
        from selenium.common.exceptions import NoSuchElementException
        from selenium.webdriver.common.by import By
metadata = {
"source": url,
"title": "No title found.",
"description": "No description found.",
"language": "No language found.",
}
if title := driver.title:
metadata["title"] = title
try:
if description := driver.find_element(
By.XPATH, '//meta[@name="description"]'
):
metadata["description"] = (
description.get_attribute("content") or "No description found."
)
except NoSuchElementException:
pass
try:
if html_tag := driver.find_element(By.TAG_NAME, "html"):
metadata["language"] = (
html_tag.get_attribute("lang") or "No language found."
)
except NoSuchElementException:
pass
return metadata
def load(self) -> List[Document]:
"""Load the specified URLs using Selenium and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from unstructured.partition.html import partition_html
docs: List[Document] = list()
driver = self._get_driver()
for url in self.urls:
try:
driver.get(url)
page_content = driver.page_source
elements = partition_html(text=page_content)
text = "\n\n".join([str(el) for el in elements])
metadata = self._build_metadata(url, driver)
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
else:
raise e
driver.quit()
return docs
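# Illustrative usage sketch: load a single page with the loader defined above.
# Assumes a local Chrome installation that Selenium can drive; the URL below is
# only an example.
if __name__ == "__main__":
    loader = SeleniumURLLoader(
        urls=["https://python.langchain.com/"],
        browser="chrome",
        headless=True,
    )
    docs = loader.load()
    print(docs[0].metadata["title"])
    print(len(docs[0].page_content), "characters extracted")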
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~url_playwright.py | """Loader that uses Playwright to load a page, then uses unstructured to load the html.
"""
import logging
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.async_api import Page as AsyncPage
from playwright.async_api import Response as AsyncResponse
from playwright.sync_api import Browser, Page, Response
logger = logging.getLogger(__name__)
class PlaywrightEvaluator(ABC):
"""Abstract base class for all evaluators.
Each evaluator should take a page, a browser instance, and a response
object, process the page as necessary, and return the resulting text.
"""
@abstractmethod
def evaluate(self, page: "Page", browser: "Browser", response: "Response") -> str:
"""Synchronously process the page and return the resulting text.
Args:
page: The page to process.
browser: The browser instance.
response: The response from page.goto().
Returns:
text: The text content of the page.
"""
pass
@abstractmethod
async def evaluate_async(
self, page: "AsyncPage", browser: "AsyncBrowser", response: "AsyncResponse"
) -> str:
"""Asynchronously process the page and return the resulting text.
Args:
page: The page to process.
browser: The browser instance.
response: The response from page.goto().
Returns:
text: The text content of the page.
"""
pass
class UnstructuredHtmlEvaluator(PlaywrightEvaluator):
"""Evaluates the page HTML content using the `unstructured` library."""
def __init__(self, remove_selectors: Optional[List[str]] = None):
"""Initialize UnstructuredHtmlEvaluator."""
try:
import unstructured # noqa:F401
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self.remove_selectors = remove_selectors
def evaluate(self, page: "Page", browser: "Browser", response: "Response") -> str:
"""Synchronously process the HTML content of the page."""
from unstructured.partition.html import partition_html
for selector in self.remove_selectors or []:
elements = page.locator(selector).all()
for element in elements:
if element.is_visible():
element.evaluate("element => element.remove()")
page_source = page.content()
elements = partition_html(text=page_source)
return "\n\n".join([str(el) for el in elements])
async def evaluate_async(
self, page: "AsyncPage", browser: "AsyncBrowser", response: "AsyncResponse"
) -> str:
"""Asynchronously process the HTML content of the page."""
from unstructured.partition.html import partition_html
for selector in self.remove_selectors or []:
elements = await page.locator(selector).all()
for element in elements:
if await element.is_visible():
await element.evaluate("element => element.remove()")
page_source = await page.content()
elements = partition_html(text=page_source)
return "\n\n".join([str(el) for el in elements])
class PlaywrightURLLoader(BaseLoader):
"""Load `HTML` pages with `Playwright` and parse with `Unstructured`.
This is useful for loading pages that require javascript to render.
Attributes:
urls (List[str]): List of URLs to load.
continue_on_failure (bool): If True, continue loading other URLs on failure.
headless (bool): If True, the browser will run in headless mode.
"""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
headless: bool = True,
remove_selectors: Optional[List[str]] = None,
evaluator: Optional[PlaywrightEvaluator] = None,
):
"""Load a list of URLs using Playwright."""
try:
import playwright # noqa:F401
except ImportError:
raise ImportError(
"playwright package not found, please install it with "
"`pip install playwright`"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headless = headless
if remove_selectors and evaluator:
            raise ValueError(
                "`remove_selectors` and `evaluator` cannot both be provided."
            )
# Use the provided evaluator, if any, otherwise, use the default.
self.evaluator = evaluator or UnstructuredHtmlEvaluator(remove_selectors)
def load(self) -> List[Document]:
"""Load the specified URLs using Playwright and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from playwright.sync_api import sync_playwright
docs: List[Document] = list()
with sync_playwright() as p:
browser = p.chromium.launch(headless=self.headless)
for url in self.urls:
try:
page = browser.new_page()
response = page.goto(url)
if response is None:
raise ValueError(f"page.goto() returned None for url {url}")
text = self.evaluator.evaluate(page, browser, response)
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(
f"Error fetching or processing {url}, exception: {e}"
)
else:
raise e
browser.close()
return docs
async def aload(self) -> List[Document]:
"""Load the specified URLs with Playwright and create Documents asynchronously.
Use this function when in a jupyter notebook environment.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from playwright.async_api import async_playwright
docs: List[Document] = list()
async with async_playwright() as p:
browser = await p.chromium.launch(headless=self.headless)
for url in self.urls:
try:
page = await browser.new_page()
response = await page.goto(url)
if response is None:
raise ValueError(f"page.goto() returned None for url {url}")
text = await self.evaluator.evaluate_async(page, browser, response)
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(
f"Error fetching or processing {url}, exception: {e}"
)
else:
raise e
await browser.close()
return docs
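# Illustrative usage sketch: load a page with the loader defined above, stripping
# navigation chrome before partitioning. Assumes `playwright` is installed and its
# Chromium browser has been fetched (`playwright install chromium`); the URL and
# selectors below are only examples.
if __name__ == "__main__":
    loader = PlaywrightURLLoader(
        urls=["https://python.langchain.com/"],
        remove_selectors=["header", "footer"],
    )
    docs = loader.load()
    print(docs[0].metadata["source"])
    print(docs[0].page_content[:200])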
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~edenai~text_moderation.py | from __future__ import annotations
import logging
from typing import Optional
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiTextModerationTool(EdenaiTool):
"""Tool that queries the Eden AI Explicit text detection.
for api reference check edenai documentation:
https://docs.edenai.co/reference/image_explicit_content_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name = "edenai_explicit_content_detection_text"
description = (
"A wrapper around edenai Services explicit content detection for text. "
"""Useful for when you have to scan text for offensive,
sexually explicit or suggestive content,
it checks also if there is any content of self-harm,
violence, racist or hate speech."""
"""the structure of the output is :
'the type of the explicit content : the likelihood of it being explicit'
the likelihood is a number
between 1 and 5, 1 being the lowest and 5 the highest.
something is explicit if the likelihood is equal or higher than 3.
for example :
nsfw_likelihood: 1
this is not explicit.
for example :
nsfw_likelihood: 3
this is explicit.
"""
"Input should be a string."
)
language: str
feature: str = "text"
subfeature: str = "moderation"
def _parse_response(self, response: list) -> str:
formatted_result = []
for result in response:
if "nsfw_likelihood" in result.keys():
formatted_result.append(
"nsfw_likelihood: " + str(result["nsfw_likelihood"])
)
for label, likelihood in zip(result["label"], result["likelihood"]):
formatted_result.append(f'"{label}": {str(likelihood)}')
return "\n".join(formatted_result)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {"text": query, "language": self.language}
return self._call_eden_ai(query_params)
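# Illustrative usage sketch for the tool defined above. Assumes the EDENAI_API_KEY
# environment variable is set (see the class docstring); the provider name below is
# only an example of a provider supported by Eden AI text moderation.
if __name__ == "__main__":
    tool = EdenAiTextModerationTool(providers=["openai"], language="en")
    print(tool.run("this is a harmless test sentence"))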
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~tair.py | from __future__ import annotations
import json
import logging
import uuid
from typing import Any, Iterable, List, Optional, Type
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _uuid_key() -> str:
return uuid.uuid4().hex
class Tair(VectorStore):
"""`Tair` vector store."""
def __init__(
self,
embedding_function: Embeddings,
url: str,
index_name: str,
content_key: str = "content",
metadata_key: str = "metadata",
search_params: Optional[dict] = None,
**kwargs: Any,
):
self.embedding_function = embedding_function
self.index_name = index_name
try:
from tair import Tair as TairClient
except ImportError:
raise ImportError(
"Could not import tair python package. "
"Please install it with `pip install tair`."
)
try:
# connect to tair from url
client = TairClient.from_url(url, **kwargs)
except ValueError as e:
raise ValueError(f"Tair failed to connect: {e}")
self.client = client
self.content_key = content_key
self.metadata_key = metadata_key
self.search_params = search_params
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def create_index_if_not_exist(
self,
dim: int,
distance_type: str,
index_type: str,
data_type: str,
**kwargs: Any,
) -> bool:
index = self.client.tvs_get_index(self.index_name)
if index is not None:
logger.info("Index already exists")
return False
self.client.tvs_create_index(
self.index_name,
dim,
distance_type,
index_type,
data_type,
**kwargs,
)
return True
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts data to an existing index."""
ids = []
keys = kwargs.get("keys", None)
use_hybrid_search = False
index = self.client.tvs_get_index(self.index_name)
if index is not None and index.get("lexical_algorithm") == "bm25":
use_hybrid_search = True
# Write data to tair
pipeline = self.client.pipeline(transaction=False)
embeddings = self.embedding_function.embed_documents(list(texts))
for i, text in enumerate(texts):
# Use provided key otherwise use default key
key = keys[i] if keys else _uuid_key()
metadata = metadatas[i] if metadatas else {}
if use_hybrid_search:
# tair use TEXT attr hybrid search
pipeline.tvs_hset(
self.index_name,
key,
embeddings[i],
False,
**{
"TEXT": text,
self.content_key: text,
self.metadata_key: json.dumps(metadata),
},
)
else:
pipeline.tvs_hset(
self.index_name,
key,
embeddings[i],
False,
**{
self.content_key: text,
self.metadata_key: json.dumps(metadata),
},
)
ids.append(key)
pipeline.execute()
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
# Creates embedding vector from user query
embedding = self.embedding_function.embed_query(query)
keys_and_scores = self.client.tvs_knnsearch(
self.index_name, k, embedding, False, None, **kwargs
)
pipeline = self.client.pipeline(transaction=False)
for key, _ in keys_and_scores:
pipeline.tvs_hmget(
self.index_name, key, self.metadata_key, self.content_key
)
docs = pipeline.execute()
return [
Document(
page_content=d[1],
metadata=json.loads(d[0]),
)
for d in docs
]
@classmethod
def from_texts(
cls: Type[Tair],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: str = "langchain",
content_key: str = "content",
metadata_key: str = "metadata",
**kwargs: Any,
) -> Tair:
try:
from tair import tairvector
except ImportError:
            raise ImportError(
"Could not import tair python package. "
"Please install it with `pip install tair`."
)
url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
if "tair_url" in kwargs:
kwargs.pop("tair_url")
distance_type = tairvector.DistanceMetric.InnerProduct
if "distance_type" in kwargs:
distance_type = kwargs.pop("distance_type")
index_type = tairvector.IndexType.HNSW
if "index_type" in kwargs:
index_type = kwargs.pop("index_type")
data_type = tairvector.DataType.Float32
if "data_type" in kwargs:
data_type = kwargs.pop("data_type")
index_params = {}
if "index_params" in kwargs:
index_params = kwargs.pop("index_params")
search_params = {}
if "search_params" in kwargs:
search_params = kwargs.pop("search_params")
keys = None
if "keys" in kwargs:
keys = kwargs.pop("keys")
try:
tair_vector_store = cls(
embedding,
url,
index_name,
content_key=content_key,
metadata_key=metadata_key,
search_params=search_params,
**kwargs,
)
except ValueError as e:
raise ValueError(f"tair failed to connect: {e}")
# Create embeddings for documents
embeddings = embedding.embed_documents(texts)
tair_vector_store.create_index_if_not_exist(
len(embeddings[0]),
distance_type,
index_type,
data_type,
**index_params,
)
tair_vector_store.add_texts(texts, metadatas, keys=keys)
return tair_vector_store
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: str = "langchain",
content_key: str = "content",
metadata_key: str = "metadata",
**kwargs: Any,
) -> Tair:
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(
texts, embedding, metadatas, index_name, content_key, metadata_key, **kwargs
)
@staticmethod
def drop_index(
index_name: str = "langchain",
**kwargs: Any,
) -> bool:
"""
Drop an existing index.
Args:
index_name (str): Name of the index to drop.
Returns:
bool: True if the index is dropped successfully.
"""
try:
from tair import Tair as TairClient
except ImportError:
            raise ImportError(
"Could not import tair python package. "
"Please install it with `pip install tair`."
)
url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
try:
if "tair_url" in kwargs:
kwargs.pop("tair_url")
client = TairClient.from_url(url=url, **kwargs)
except ValueError as e:
raise ValueError(f"Tair connection error: {e}")
# delete index
ret = client.tvs_del_index(index_name)
if ret == 0:
# index not exist
logger.info("Index does not exist")
return False
return True
@classmethod
def from_existing_index(
cls,
embedding: Embeddings,
index_name: str = "langchain",
content_key: str = "content",
metadata_key: str = "metadata",
**kwargs: Any,
) -> Tair:
"""Connect to an existing Tair index."""
url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
search_params = {}
if "search_params" in kwargs:
search_params = kwargs.pop("search_params")
return cls(
embedding,
url,
index_name,
content_key=content_key,
metadata_key=metadata_key,
search_params=search_params,
**kwargs,
)
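# Illustrative usage sketch for the Tair store defined above. Assumes a reachable
# Tair/Redis endpoint; the connection URL and FakeEmbeddings below are placeholders.
if __name__ == "__main__":
    from langchain.embeddings import FakeEmbeddings

    store = Tair.from_texts(
        texts=["Tair is a Redis-compatible store", "It supports vector search"],
        embedding=FakeEmbeddings(size=128),
        index_name="langchain_demo",
        tair_url="redis://default:password@localhost:6379",
    )
    print(store.similarity_search("vector search", k=1))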
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~dataforseo_api_search.py | import base64
from typing import Dict, Optional
from urllib.parse import quote
import aiohttp
import requests
from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
class DataForSeoAPIWrapper(BaseModel):
"""Wrapper around the DataForSeo API."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
default_params: dict = Field(
default={
"location_name": "United States",
"language_code": "en",
"depth": 10,
"se_name": "google",
"se_type": "organic",
}
)
"""Default parameters to use for the DataForSEO SERP API."""
params: dict = Field(default={})
"""Additional parameters to pass to the DataForSEO SERP API."""
api_login: Optional[str] = None
"""The API login to use for the DataForSEO SERP API."""
api_password: Optional[str] = None
"""The API password to use for the DataForSEO SERP API."""
json_result_types: Optional[list] = None
"""The JSON result types."""
json_result_fields: Optional[list] = None
"""The JSON result fields."""
top_count: Optional[int] = None
"""The number of top results to return."""
aiosession: Optional[aiohttp.ClientSession] = None
"""The aiohttp session to use for the DataForSEO SERP API."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that login and password exists in environment."""
login = get_from_dict_or_env(values, "api_login", "DATAFORSEO_LOGIN")
password = get_from_dict_or_env(values, "api_password", "DATAFORSEO_PASSWORD")
values["api_login"] = login
values["api_password"] = password
return values
async def arun(self, url: str) -> str:
"""Run request to DataForSEO SERP API and parse result async."""
return self._process_response(await self._aresponse_json(url))
def run(self, url: str) -> str:
"""Run request to DataForSEO SERP API and parse result async."""
return self._process_response(self._response_json(url))
def results(self, url: str) -> list:
res = self._response_json(url)
return self._filter_results(res)
async def aresults(self, url: str) -> list:
res = await self._aresponse_json(url)
return self._filter_results(res)
def _prepare_request(self, keyword: str) -> dict:
"""Prepare the request details for the DataForSEO SERP API."""
if self.api_login is None or self.api_password is None:
raise ValueError("api_login or api_password is not provided")
cred = base64.b64encode(
f"{self.api_login}:{self.api_password}".encode("utf-8")
).decode("utf-8")
headers = {"Authorization": f"Basic {cred}", "Content-Type": "application/json"}
obj = {"keyword": quote(keyword)}
obj = {**obj, **self.default_params, **self.params}
data = [obj]
_url = (
f"https://api.dataforseo.com/v3/serp/{obj['se_name']}"
f"/{obj['se_type']}/live/advanced"
)
return {
"url": _url,
"headers": headers,
"data": data,
}
def _check_response(self, response: dict) -> dict:
"""Check the response from the DataForSEO SERP API for errors."""
if response.get("status_code") != 20000:
raise ValueError(
f"Got error from DataForSEO SERP API: {response.get('status_message')}"
)
return response
def _response_json(self, url: str) -> dict:
"""Use requests to run request to DataForSEO SERP API and return results."""
request_details = self._prepare_request(url)
response = requests.post(
request_details["url"],
headers=request_details["headers"],
json=request_details["data"],
)
response.raise_for_status()
return self._check_response(response.json())
async def _aresponse_json(self, url: str) -> dict:
"""Use aiohttp to request DataForSEO SERP API and return results async."""
request_details = self._prepare_request(url)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(
request_details["url"],
headers=request_details["headers"],
json=request_details["data"],
) as response:
res = await response.json()
else:
async with self.aiosession.post(
request_details["url"],
headers=request_details["headers"],
json=request_details["data"],
) as response:
res = await response.json()
return self._check_response(res)
def _filter_results(self, res: dict) -> list:
output = []
types = self.json_result_types if self.json_result_types is not None else []
for task in res.get("tasks", []):
for result in task.get("result", []):
for item in result.get("items", []):
if len(types) == 0 or item.get("type", "") in types:
self._cleanup_unnecessary_items(item)
if len(item) != 0:
output.append(item)
if self.top_count is not None and len(output) >= self.top_count:
break
return output
def _cleanup_unnecessary_items(self, d: dict) -> dict:
fields = self.json_result_fields if self.json_result_fields is not None else []
if len(fields) > 0:
for k, v in list(d.items()):
if isinstance(v, dict):
self._cleanup_unnecessary_items(v)
if len(v) == 0:
del d[k]
elif k not in fields:
del d[k]
if "xpath" in d:
del d["xpath"]
if "position" in d:
del d["position"]
if "rectangle" in d:
del d["rectangle"]
for k, v in list(d.items()):
if isinstance(v, dict):
self._cleanup_unnecessary_items(v)
return d
def _process_response(self, res: dict) -> str:
"""Process response from DataForSEO SERP API."""
toret = "No good search result found"
for task in res.get("tasks", []):
for result in task.get("result", []):
item_types = result.get("item_types")
items = result.get("items", [])
if "answer_box" in item_types:
toret = next(
item for item in items if item.get("type") == "answer_box"
).get("text")
elif "knowledge_graph" in item_types:
toret = next(
item for item in items if item.get("type") == "knowledge_graph"
).get("description")
elif "featured_snippet" in item_types:
toret = next(
item for item in items if item.get("type") == "featured_snippet"
).get("description")
elif "shopping" in item_types:
toret = next(
item for item in items if item.get("type") == "shopping"
).get("price")
elif "organic" in item_types:
toret = next(
item for item in items if item.get("type") == "organic"
).get("description")
if toret:
break
return toret
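# Illustrative usage sketch for the wrapper defined above. Assumes DATAFORSEO_LOGIN
# and DATAFORSEO_PASSWORD are set in the environment (see validate_environment); the
# query and field filters below are only examples.
if __name__ == "__main__":
    wrapper = DataForSeoAPIWrapper(
        top_count=3,
        json_result_types=["organic"],
        json_result_fields=["title", "description", "url"],
    )
    print(wrapper.run("langchain python"))  # best single answer as a string
    print(wrapper.results("langchain python"))  # filtered structured results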
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~tensorflow_datasets.py | from typing import Callable, Dict, Iterator, List, Optional
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.utilities.tensorflow_datasets import TensorflowDatasets
class TensorflowDatasetLoader(BaseLoader):
"""Load from `TensorFlow Dataset`.
Attributes:
dataset_name: the name of the dataset to load
split_name: the name of the split to load.
load_max_docs: a limit to the number of loaded documents. Defaults to 100.
sample_to_document_function: a function that converts a dataset sample
into a Document
Example:
.. code-block:: python
from langchain.document_loaders import TensorflowDatasetLoader
def mlqaen_example_to_document(example: dict) -> Document:
return Document(
page_content=decode_to_str(example["context"]),
metadata={
"id": decode_to_str(example["id"]),
"title": decode_to_str(example["title"]),
"question": decode_to_str(example["question"]),
"answer": decode_to_str(example["answers"]["text"][0]),
},
)
tsds_client = TensorflowDatasetLoader(
dataset_name="mlqa/en",
split_name="test",
load_max_docs=100,
sample_to_document_function=mlqaen_example_to_document,
)
"""
def __init__(
self,
dataset_name: str,
split_name: str,
load_max_docs: Optional[int] = 100,
sample_to_document_function: Optional[Callable[[Dict], Document]] = None,
):
"""Initialize the TensorflowDatasetLoader.
Args:
dataset_name: the name of the dataset to load
split_name: the name of the split to load.
load_max_docs: a limit to the number of loaded documents. Defaults to 100.
sample_to_document_function: a function that converts a dataset sample
into a Document.
"""
self.dataset_name: str = dataset_name
self.split_name: str = split_name
self.load_max_docs = load_max_docs
"""The maximum number of documents to load."""
self.sample_to_document_function: Optional[
Callable[[Dict], Document]
] = sample_to_document_function
"""Custom function that transform a dataset sample into a Document."""
self._tfds_client = TensorflowDatasets(
dataset_name=self.dataset_name,
split_name=self.split_name,
load_max_docs=self.load_max_docs,
sample_to_document_function=self.sample_to_document_function,
)
def lazy_load(self) -> Iterator[Document]:
yield from self._tfds_client.lazy_load()
def load(self) -> List[Document]:
return list(self.lazy_load())
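# Illustrative usage sketch, mirroring the example in the class docstring. Assumes
# `tensorflow` and `tensorflow-datasets` are installed and that the mlqa/en test
# split can be downloaded; the conversion helper below is only an example.
if __name__ == "__main__":
    def decode_to_str(t) -> str:
        # A scalar tf.string tensor's .numpy() returns bytes.
        return t.numpy().decode("utf-8")

    def mlqaen_example_to_document(example: Dict) -> Document:
        return Document(
            page_content=decode_to_str(example["context"]),
            metadata={"id": decode_to_str(example["id"])},
        )

    loader = TensorflowDatasetLoader(
        dataset_name="mlqa/en",
        split_name="test",
        load_max_docs=10,
        sample_to_document_function=mlqaen_example_to_document,
    )
    for doc in loader.lazy_load():
        print(doc.metadata["id"])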
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~parent_document_retriever.py | import uuid
from typing import List, Optional
from langchain.retrievers import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.text_splitter import TextSplitter
class ParentDocumentRetriever(MultiVectorRetriever):
"""Retrieve small chunks then retrieve their parent documents.
When splitting documents for retrieval, there are often conflicting desires:
1. You may want to have small documents, so that their embeddings can most
accurately reflect their meaning. If too long, then the embeddings can
lose meaning.
2. You want to have long enough documents that the context of each chunk is
retained.
The ParentDocumentRetriever strikes that balance by splitting and storing
small chunks of data. During retrieval, it first fetches the small chunks
but then looks up the parent ids for those chunks and returns those larger
documents.
Note that "parent document" refers to the document that a small chunk
originated from. This can either be the whole raw document OR a larger
chunk.
Examples:
.. code-block:: python
# Imports
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
# Initialize the retriever
retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
parent_splitter=parent_splitter,
)
"""
child_splitter: TextSplitter
"""The text splitter to use to create child documents."""
"""The key to use to track the parent id. This will be stored in the
metadata of child documents."""
parent_splitter: Optional[TextSplitter] = None
"""The text splitter to use to create parent documents.
If none, then the parent documents will be the raw documents passed in."""
def add_documents(
self,
documents: List[Document],
ids: Optional[List[str]] = None,
add_to_docstore: bool = True,
) -> None:
"""Adds documents to the docstore and vectorstores.
Args:
documents: List of documents to add
ids: Optional list of ids for documents. If provided should be the same
                length as the list of documents. Can be provided if parent documents
are already in the document store and you don't want to re-add
to the docstore. If not provided, random UUIDs will be used as
ids.
add_to_docstore: Boolean of whether to add documents to docstore.
This can be false if and only if `ids` are provided. You may want
to set this to False if the documents are already in the docstore
and you don't want to re-add them.
"""
if self.parent_splitter is not None:
documents = self.parent_splitter.split_documents(documents)
if ids is None:
doc_ids = [str(uuid.uuid4()) for _ in documents]
if not add_to_docstore:
raise ValueError(
"If ids are not passed in, `add_to_docstore` MUST be True"
)
else:
if len(documents) != len(ids):
raise ValueError(
"Got uneven list of documents and ids. "
"If `ids` is provided, should be same length as `documents`."
)
doc_ids = ids
docs = []
full_docs = []
for i, doc in enumerate(documents):
_id = doc_ids[i]
sub_docs = self.child_splitter.split_documents([doc])
for _doc in sub_docs:
_doc.metadata[self.id_key] = _id
docs.extend(sub_docs)
full_docs.append((_id, doc))
self.vectorstore.add_documents(docs)
if add_to_docstore:
self.docstore.mset(full_docs)
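# Illustrative usage sketch, continuing the construction example from the class
# docstring above. Assumes an OpenAI API key is configured for the embeddings; the
# file path and query below are only examples.
if __name__ == "__main__":
    from langchain.document_loaders import TextLoader
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.storage import InMemoryStore
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.vectorstores import Chroma

    retriever = ParentDocumentRetriever(
        vectorstore=Chroma(embedding_function=OpenAIEmbeddings()),
        docstore=InMemoryStore(),
        child_splitter=RecursiveCharacterTextSplitter(chunk_size=400),
        parent_splitter=RecursiveCharacterTextSplitter(chunk_size=2000),
    )
    retriever.add_documents(TextLoader("state_of_the_union.txt").load())
    # Small chunks are searched, but the larger parent chunks are returned.
    results = retriever.get_relevant_documents("economic policy")
    print(results[0].page_content[:200])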
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~output_parsers~retry.py | from __future__ import annotations
from typing import Any, TypeVar
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
BaseOutputParser,
BasePromptTemplate,
OutputParserException,
PromptValue,
)
from langchain.schema.language_model import BaseLanguageModel
NAIVE_COMPLETION_RETRY = """Prompt:
{prompt}
Completion:
{completion}
Above, the Completion did not satisfy the constraints given in the Prompt.
Please try again:"""
NAIVE_COMPLETION_RETRY_WITH_ERROR = """Prompt:
{prompt}
Completion:
{completion}
Above, the Completion did not satisfy the constraints given in the Prompt.
Details: {error}
Please try again:"""
NAIVE_RETRY_PROMPT = PromptTemplate.from_template(NAIVE_COMPLETION_RETRY)
NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template(
NAIVE_COMPLETION_RETRY_WITH_ERROR
)
T = TypeVar("T")
class RetryOutputParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors.
Does this by passing the original prompt and the completion to another
LLM, and telling it the completion did not satisfy criteria in the prompt.
"""
parser: BaseOutputParser[T]
"""The parser to use to parse the output."""
# Should be an LLMChain but we want to avoid top-level imports from langchain.chains
retry_chain: Any
"""The LLMChain to use to retry the completion."""
max_retries: int = 1
"""The maximum number of times to retry the parse."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT,
max_retries: int = 1,
) -> RetryOutputParser[T]:
"""Create an OutputFixingParser from a language model and a parser.
Args:
llm: llm to use for fixing
parser: parser to use for parsing
prompt: prompt to use for fixing
max_retries: Maximum number of retries to parse.
Returns:
RetryOutputParser
"""
from langchain.chains.llm import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
"""Parse the output of an LLM call using a wrapped parser.
Args:
completion: The chain completion to parse.
prompt_value: The prompt to use to parse the completion.
Returns:
The parsed completion.
"""
retries = 0
while retries <= self.max_retries:
try:
return self.parser.parse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(
prompt=prompt_value.to_string(), completion=completion
)
raise OutputParserException("Failed to parse")
async def aparse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
"""Parse the output of an LLM call using a wrapped parser.
Args:
completion: The chain completion to parse.
prompt_value: The prompt to use to parse the completion.
Returns:
The parsed completion.
"""
retries = 0
while retries <= self.max_retries:
try:
return await self.parser.aparse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = await self.retry_chain.arun(
prompt=prompt_value.to_string(), completion=completion
)
raise OutputParserException("Failed to parse")
def parse(self, completion: str) -> T:
raise NotImplementedError(
"This OutputParser can only be called by the `parse_with_prompt` method."
)
def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "retry"
class RetryWithErrorOutputParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors.
Does this by passing the original prompt, the completion, AND the error
that was raised to another language model and telling it that the completion
did not work, and raised the given error. Differs from RetryOutputParser
in that this implementation provides the error that was raised back to the
LLM, which in theory should give it more information on how to fix it.
"""
parser: BaseOutputParser[T]
"""The parser to use to parse the output."""
# Should be an LLMChain but we want to avoid top-level imports from langchain.chains
retry_chain: Any
"""The LLMChain to use to retry the completion."""
max_retries: int = 1
"""The maximum number of times to retry the parse."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT,
max_retries: int = 1,
) -> RetryWithErrorOutputParser[T]:
"""Create a RetryWithErrorOutputParser from an LLM.
Args:
llm: The LLM to use to retry the completion.
parser: The parser to use to parse the output.
prompt: The prompt to use to retry the completion.
max_retries: The maximum number of times to retry the completion.
Returns:
A RetryWithErrorOutputParser.
"""
from langchain.chains.llm import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
retries = 0
while retries <= self.max_retries:
try:
return self.parser.parse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(
prompt=prompt_value.to_string(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
async def aparse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
retries = 0
while retries <= self.max_retries:
try:
return await self.parser.aparse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = await self.retry_chain.arun(
prompt=prompt_value.to_string(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
def parse(self, completion: str) -> T:
raise NotImplementedError(
"This OutputParser can only be called by the `parse_with_prompt` method."
)
def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "retry_with_error"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~openapi~utils~openapi_utils.py | """Utility functions for parsing an OpenAPI spec. Kept for backwards compat."""
from langchain.utilities.openapi import HTTPVerb, OpenAPISpec
__all__ = ["HTTPVerb", "OpenAPISpec"]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~baiducloud_bos_directory.py | from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BaiduBOSDirectoryLoader(BaseLoader):
"""Load from `Baidu BOS directory`."""
def __init__(self, conf: Any, bucket: str, prefix: str = ""):
"""Initialize with BOS config, bucket and prefix.
:param conf(BosConfig): BOS config.
:param bucket(str): BOS bucket.
:param prefix(str): prefix.
"""
self.conf = conf
self.bucket = bucket
self.prefix = prefix
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
"""Load documents."""
try:
from baidubce.services.bos.bos_client import BosClient
except ImportError:
raise ImportError(
"Please install bce-python-sdk with `pip install bce-python-sdk`."
)
client = BosClient(self.conf)
contents = []
marker = ""
while True:
response = client.list_objects(
bucket_name=self.bucket,
prefix=self.prefix,
marker=marker,
max_keys=1000,
)
contents_len = len(response.contents)
contents.extend(response.contents)
            if not response.is_truncated or contents_len < int(str(response.max_keys)):
break
marker = response.next_marker
        from langchain.document_loaders.baiducloud_bos_file import BaiduBOSFileLoader
for content in contents:
if str(content.key).endswith("/"):
continue
loader = BaiduBOSFileLoader(self.conf, self.bucket, str(content.key))
yield loader.load()[0]
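# Illustrative usage sketch for the directory loader above. Assumes valid Baidu BOS
# credentials; the access key, secret key, endpoint, bucket and prefix below are
# placeholders.
if __name__ == "__main__":
    from baidubce.auth.bce_credentials import BceCredentials
    from baidubce.bce_client_configuration import BceClientConfiguration

    conf = BceClientConfiguration(
        credentials=BceCredentials("your-access-key-id", "your-secret-access-key"),
        endpoint="https://bj.bcebos.com",
    )
    loader = BaiduBOSDirectoryLoader(conf=conf, bucket="my-bucket", prefix="docs/")
    for doc in loader.lazy_load():
        print(doc.metadata)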
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~llmonitor_callback.py | import importlib.metadata
import logging
import os
import traceback
import warnings
from contextvars import ContextVar
from typing import Any, Dict, List, Literal, Union
from uuid import UUID
import requests
from packaging.version import parse
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema.agent import AgentAction, AgentFinish
from langchain.schema.messages import BaseMessage
from langchain.schema.output import LLMResult
DEFAULT_API_URL = "https://app.llmonitor.com"
user_ctx = ContextVar[Union[str, None]]("user_ctx", default=None)
user_props_ctx = ContextVar[Union[str, None]]("user_props_ctx", default=None)
class UserContextManager:
"""Context manager for LLMonitor user context."""
def __init__(self, user_id: str, user_props: Any = None) -> None:
user_ctx.set(user_id)
user_props_ctx.set(user_props)
def __enter__(self) -> Any:
pass
def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) -> Any:
user_ctx.set(None)
user_props_ctx.set(None)
def identify(user_id: str, user_props: Any = None) -> UserContextManager:
"""Builds an LLMonitor UserContextManager
Parameters:
- `user_id`: The user id.
- `user_props`: The user properties.
Returns:
A context manager that sets the user context.
"""
return UserContextManager(user_id, user_props)
def _serialize(obj: Any) -> Union[Dict[str, Any], List[Any], Any]:
if hasattr(obj, "to_json"):
return obj.to_json()
if isinstance(obj, dict):
return {key: _serialize(value) for key, value in obj.items()}
if isinstance(obj, list):
return [_serialize(element) for element in obj]
return obj
def _parse_input(raw_input: Any) -> Any:
if not raw_input:
return None
if not isinstance(raw_input, dict):
return _serialize(raw_input)
input_value = raw_input.get("input")
inputs_value = raw_input.get("inputs")
question_value = raw_input.get("question")
query_value = raw_input.get("query")
if input_value:
return input_value
if inputs_value:
return inputs_value
if question_value:
return question_value
if query_value:
return query_value
return _serialize(raw_input)
def _parse_output(raw_output: dict) -> Any:
if not raw_output:
return None
if not isinstance(raw_output, dict):
return _serialize(raw_output)
text_value = raw_output.get("text")
output_value = raw_output.get("output")
output_text_value = raw_output.get("output_text")
answer_value = raw_output.get("answer")
result_value = raw_output.get("result")
if text_value:
return text_value
if answer_value:
return answer_value
if output_value:
return output_value
if output_text_value:
return output_text_value
if result_value:
return result_value
return _serialize(raw_output)
def _parse_lc_role(
role: str,
) -> Union[Literal["user", "ai", "system", "function"], None]:
if role == "human":
return "user"
elif role == "ai":
return "ai"
elif role == "system":
return "system"
elif role == "function":
return "function"
else:
return None
def _get_user_id(metadata: Any) -> Any:
if user_ctx.get() is not None:
return user_ctx.get()
metadata = metadata or {}
user_id = metadata.get("user_id")
if user_id is None:
user_id = metadata.get("userId") # legacy, to delete in the future
return user_id
def _get_user_props(metadata: Any) -> Any:
if user_props_ctx.get() is not None:
return user_props_ctx.get()
metadata = metadata or {}
return metadata.get("user_props", None)
def _parse_lc_message(message: BaseMessage) -> Dict[str, Any]:
parsed = {"text": message.content, "role": _parse_lc_role(message.type)}
function_call = (message.additional_kwargs or {}).get("function_call")
if function_call is not None:
parsed["functionCall"] = function_call
return parsed
def _parse_lc_messages(messages: Union[List[BaseMessage], Any]) -> List[Dict[str, Any]]:
return [_parse_lc_message(message) for message in messages]
class LLMonitorCallbackHandler(BaseCallbackHandler):
"""Callback Handler for LLMonitor`.
#### Parameters:
- `app_id`: The app id of the app you want to report to. Defaults to
`None`, which means that `LLMONITOR_APP_ID` will be used.
- `api_url`: The url of the LLMonitor API. Defaults to `None`,
which means that either `LLMONITOR_API_URL` environment variable
or `https://app.llmonitor.com` will be used.
#### Raises:
- `ValueError`: if `app_id` is not provided either as an
argument or as an environment variable.
- `ConnectionError`: if the connection to the API fails.
#### Example:
```python
from langchain.llms import OpenAI
from langchain.callbacks import LLMonitorCallbackHandler
llmonitor_callback = LLMonitorCallbackHandler()
llm = OpenAI(callbacks=[llmonitor_callback],
metadata={"userId": "user-123"})
llm.predict("Hello, how are you?")
```
"""
__api_url: str
__app_id: str
__verbose: bool
__llmonitor_version: str
__has_valid_config: bool
def __init__(
self,
app_id: Union[str, None] = None,
api_url: Union[str, None] = None,
verbose: bool = False,
) -> None:
super().__init__()
self.__has_valid_config = True
try:
import llmonitor
self.__llmonitor_version = importlib.metadata.version("llmonitor")
self.__track_event = llmonitor.track_event
        except ImportError:
            warnings.warn(
                """[LLMonitor] To use the LLMonitor callback handler you need to
                have the `llmonitor` Python package installed. Please install it
                with `pip install llmonitor`"""
            )
            self.__has_valid_config = False
            return
        if parse(self.__llmonitor_version) < parse("0.0.20"):
            warnings.warn(
                f"""[LLMonitor] The installed `llmonitor` version is
                {self.__llmonitor_version} but `LLMonitorCallbackHandler` requires
                at least version 0.0.20. Upgrade `llmonitor` with `pip install
                --upgrade llmonitor`"""
            )
            self.__has_valid_config = False
            return
self.__api_url = api_url or os.getenv("LLMONITOR_API_URL") or DEFAULT_API_URL
self.__verbose = verbose or bool(os.getenv("LLMONITOR_VERBOSE"))
_app_id = app_id or os.getenv("LLMONITOR_APP_ID")
if _app_id is None:
warnings.warn(
"""[LLMonitor] app_id must be provided either as an argument or as
an environment variable"""
)
self.__has_valid_config = False
else:
self.__app_id = _app_id
if self.__has_valid_config is False:
return None
try:
res = requests.get(f"{self.__api_url}/api/app/{self.__app_id}")
if not res.ok:
raise ConnectionError()
except Exception:
warnings.warn(
f"""[LLMonitor] Could not connect to the LLMonitor API at
{self.__api_url}"""
)
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
name = kwargs.get("invocation_params", {}).get("model_name")
input = _parse_input(prompts)
self.__track_event(
"llm",
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
tags=tags,
metadata=metadata,
user_props=user_props,
)
except Exception as e:
warnings.warn(f"[LLMonitor] An error occurred in on_llm_start: {e}")
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
name = kwargs.get("invocation_params", {}).get("model_name")
input = _parse_lc_messages(messages[0])
self.__track_event(
"llm",
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
tags=tags,
metadata=metadata,
user_props=user_props,
)
except Exception as e:
logging.warning(
f"[LLMonitor] An error occurred in on_chat_model_start: {e}"
)
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
token_usage = (response.llm_output or {}).get("token_usage", {})
parsed_output = [
{
"text": generation.text,
"role": "ai",
**(
{
"functionCall": generation.message.additional_kwargs[
"function_call"
]
}
if hasattr(generation, "message")
and hasattr(generation.message, "additional_kwargs")
and "function_call" in generation.message.additional_kwargs
else {}
),
}
for generation in response.generations[0]
]
self.__track_event(
"llm",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=parsed_output,
token_usage={
"prompt": token_usage.get("prompt_tokens"),
"completion": token_usage.get("completion_tokens"),
},
)
except Exception as e:
warnings.warn(f"[LLMonitor] An error occurred in on_llm_end: {e}")
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
name = serialized.get("name")
self.__track_event(
"tool",
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input_str,
tags=tags,
metadata=metadata,
user_props=user_props,
)
except Exception as e:
warnings.warn(f"[LLMonitor] An error occurred in on_tool_start: {e}")
def on_tool_end(
self,
output: str,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
**kwargs: Any,
) -> None:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"tool",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=output,
)
except Exception as e:
warnings.warn(f"[LLMonitor] An error occurred in on_tool_end: {e}")
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
tags: Union[List[str], None] = None,
metadata: Union[Dict[str, Any], None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
name = serialized.get("id", [None, None, None, None])[3]
type = "chain"
metadata = metadata or {}
agentName = metadata.get("agent_name")
if agentName is None:
agentName = metadata.get("agentName")
if name == "AgentExecutor" or name == "PlanAndExecute":
type = "agent"
if agentName is not None:
type = "agent"
name = agentName
if parent_run_id is not None:
type = "chain"
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
input = _parse_input(inputs)
self.__track_event(
type,
"start",
user_id=user_id,
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
tags=tags,
metadata=metadata,
user_props=user_props,
)
except Exception as e:
warnings.warn(f"[LLMonitor] An error occurred in on_chain_start: {e}")
def on_chain_end(
self,
outputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
output = _parse_output(outputs)
self.__track_event(
"chain",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=output,
)
except Exception as e:
logging.warning(f"[LLMonitor] An error occurred in on_chain_end: {e}")
def on_agent_action(
self,
action: AgentAction,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
name = action.tool
input = _parse_input(action.tool_input)
self.__track_event(
"tool",
"start",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
name=name,
input=input,
)
except Exception as e:
logging.warning(f"[LLMonitor] An error occurred in on_agent_action: {e}")
def on_agent_finish(
self,
finish: AgentFinish,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
output = _parse_output(finish.return_values)
self.__track_event(
"agent",
"end",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
output=output,
)
except Exception as e:
logging.warning(f"[LLMonitor] An error occurred in on_agent_finish: {e}")
def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"chain",
"error",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={"message": str(error), "stack": traceback.format_exc()},
)
except Exception as e:
logging.warning(f"[LLMonitor] An error occurred in on_chain_error: {e}")
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"tool",
"error",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={"message": str(error), "stack": traceback.format_exc()},
)
except Exception as e:
logging.warning(f"[LLMonitor] An error occurred in on_tool_error: {e}")
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Union[UUID, None] = None,
**kwargs: Any,
) -> Any:
if self.__has_valid_config is False:
return
try:
self.__track_event(
"llm",
"error",
run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None,
error={"message": str(error), "stack": traceback.format_exc()},
)
except Exception as e:
logging.warning(f"[LLMonitor] An error occurred in on_llm_error: {e}")
__all__ = ["LLMonitorCallbackHandler", "identify"]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~confluence.py | import logging
from enum import Enum
from io import BytesIO
from typing import Any, Callable, Dict, List, Optional, Union
import requests
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class ContentFormat(str, Enum):
"""Enumerator of the content formats of Confluence page."""
EDITOR = "body.editor"
EXPORT_VIEW = "body.export_view"
ANONYMOUS_EXPORT_VIEW = "body.anonymous_export_view"
STORAGE = "body.storage"
VIEW = "body.view"
def get_content(self, page: dict) -> str:
return page["body"][self.name.lower()]["value"]
class ConfluenceLoader(BaseLoader):
"""Load `Confluence` pages.
Port of https://llamahub.ai/l/confluence
This currently supports username/api_key, Oauth2 login or personal access token
authentication.
Specify a list page_ids and/or space_key to load in the corresponding pages into
Document objects, if both are specified the union of both sets will be returned.
You can also specify a boolean `include_attachments` to include attachments, this
is set to False by default, if set to True all attachments will be downloaded and
ConfluenceReader will extract the text from the attachments and add it to the
Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG,
SVG, Word and Excel.
    The Confluence API supports different formats of page content. The storage format is
    the raw XML representation for storage. The view format is the HTML representation
    for viewing, with macros rendered as though the page is viewed by users. You can
    pass an enum `content_format` argument to `load()` to specify the content format;
    it is set to `ContentFormat.STORAGE` by default, and the supported values are:
`ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
`ContentFormat.ANONYMOUS_EXPORT_VIEW`, `ContentFormat.STORAGE`,
and `ContentFormat.VIEW`.
Hint: space_key and page_id can both be found in the URL of a page in Confluence
- https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>
Example:
.. code-block:: python
from langchain.document_loaders import ConfluenceLoader
loader = ConfluenceLoader(
url="https://yoursite.atlassian.com/wiki",
username="me",
api_key="12345"
)
documents = loader.load(space_key="SPACE",limit=50)
            # Server on prem
loader = ConfluenceLoader(
url="https://confluence.yoursite.com/",
username="me",
api_key="your_password",
cloud=False
)
documents = loader.load(space_key="SPACE",limit=50)
    :param url: Base URL of the Confluence instance
        (e.g. https://yoursite.atlassian.com/wiki)
    :type url: str
    :param api_key: API key, used together with `username`, defaults to None
    :type api_key: str, optional
    :param username: Username, used together with `api_key`, defaults to None
    :type username: str, optional
    :param oauth2: OAuth2 credentials dictionary, defaults to {}
    :type oauth2: dict, optional
    :param token: Personal access token, defaults to None
    :type token: str, optional
    :param cloud: Whether the instance is a Confluence Cloud instance, defaults to True
    :type cloud: bool, optional
:param number_of_retries: How many times to retry, defaults to 3
:type number_of_retries: Optional[int], optional
:param min_retry_seconds: defaults to 2
:type min_retry_seconds: Optional[int], optional
:param max_retry_seconds: defaults to 10
:type max_retry_seconds: Optional[int], optional
:param confluence_kwargs: additional kwargs to initialize confluence with
:type confluence_kwargs: dict, optional
:raises ValueError: Errors while validating input
:raises ImportError: Required dependencies not installed.
"""
def __init__(
self,
url: str,
api_key: Optional[str] = None,
username: Optional[str] = None,
session: Optional[requests.Session] = None,
oauth2: Optional[dict] = None,
token: Optional[str] = None,
cloud: Optional[bool] = True,
number_of_retries: Optional[int] = 3,
min_retry_seconds: Optional[int] = 2,
max_retry_seconds: Optional[int] = 10,
confluence_kwargs: Optional[dict] = None,
):
confluence_kwargs = confluence_kwargs or {}
errors = ConfluenceLoader.validate_init_args(
url=url,
api_key=api_key,
username=username,
session=session,
oauth2=oauth2,
token=token,
)
if errors:
raise ValueError(f"Error(s) while validating input: {errors}")
try:
from atlassian import Confluence # noqa: F401
except ImportError:
raise ImportError(
"`atlassian` package not found, please run "
"`pip install atlassian-python-api`"
)
self.base_url = url
self.number_of_retries = number_of_retries
self.min_retry_seconds = min_retry_seconds
self.max_retry_seconds = max_retry_seconds
if session:
self.confluence = Confluence(url=url, session=session, **confluence_kwargs)
elif oauth2:
self.confluence = Confluence(
url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
)
elif token:
self.confluence = Confluence(
url=url, token=token, cloud=cloud, **confluence_kwargs
)
else:
self.confluence = Confluence(
url=url,
username=username,
password=api_key,
cloud=cloud,
**confluence_kwargs,
)
@staticmethod
def validate_init_args(
url: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
session: Optional[requests.Session] = None,
oauth2: Optional[dict] = None,
token: Optional[str] = None,
) -> Union[List, None]:
"""Validates proper combinations of init arguments"""
errors = []
if url is None:
errors.append("Must provide `base_url`")
if (api_key and not username) or (username and not api_key):
errors.append(
"If one of `api_key` or `username` is provided, "
"the other must be as well."
)
non_null_creds = list(
x is not None for x in ((api_key or username), session, oauth2, token)
)
if sum(non_null_creds) > 1:
all_names = ("(api_key, username)", "session", "oauth2", "token")
provided = tuple(n for x, n in zip(non_null_creds, all_names) if x)
errors.append(
f"Cannot provide a value for more than one of: {all_names}. Received "
f"values for: {provided}"
)
if oauth2 and set(oauth2.keys()) != {
"access_token",
"access_token_secret",
"consumer_key",
"key_cert",
}:
errors.append(
"You have either omitted require keys or added extra "
"keys to the oauth2 dictionary. key values should be "
"`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`"
)
return errors or None
def load(
self,
space_key: Optional[str] = None,
page_ids: Optional[List[str]] = None,
label: Optional[str] = None,
cql: Optional[str] = None,
include_restricted_content: bool = False,
include_archived_content: bool = False,
include_attachments: bool = False,
include_comments: bool = False,
content_format: ContentFormat = ContentFormat.STORAGE,
limit: Optional[int] = 50,
max_pages: Optional[int] = 1000,
ocr_languages: Optional[str] = None,
keep_markdown_format: bool = False,
keep_newlines: bool = False,
) -> List[Document]:
"""
:param space_key: Space key retrieved from a confluence URL, defaults to None
:type space_key: Optional[str], optional
:param page_ids: List of specific page IDs to load, defaults to None
:type page_ids: Optional[List[str]], optional
:param label: Get all pages with this label, defaults to None
:type label: Optional[str], optional
:param cql: CQL Expression, defaults to None
:type cql: Optional[str], optional
:param include_restricted_content: Whether to include restricted (non-public) pages, defaults to False
:type include_restricted_content: bool, optional
:param include_archived_content: Whether to include archived content,
defaults to False
:type include_archived_content: bool, optional
:param include_attachments: Whether to extract text from attachments, defaults to False
:type include_attachments: bool, optional
:param include_comments: Whether to include page comments, defaults to False
:type include_comments: bool, optional
:param content_format: Specify content format, defaults to
ContentFormat.STORAGE, the supported values are:
`ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
`ContentFormat.ANONYMOUS_EXPORT_VIEW`,
`ContentFormat.STORAGE`, and `ContentFormat.VIEW`.
:type content_format: ContentFormat
:param limit: Maximum number of pages to retrieve per request, defaults to 50
:type limit: int, optional
:param max_pages: Maximum number of pages to retrieve in total, defaults to 1000
:type max_pages: int, optional
:param ocr_languages: The languages to use for the Tesseract agent. To use a
language, you'll first need to install the appropriate
Tesseract language pack.
:type ocr_languages: str, optional
:param keep_markdown_format: Whether to keep the markdown format, defaults to
False
:type keep_markdown_format: bool
:param keep_newlines: Whether to keep the newlines format, defaults to
False
:type keep_newlines: bool
:raises ValueError: If none of `space_key`, `page_ids`, `label`, or `cql`
is provided.
:raises ImportError: If a required optional dependency is not installed.
:return: A list of Documents built from the retrieved pages.
:rtype: List[Document]
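Example (illustrative; the space key and CQL string are placeholders):
.. code-block:: python
docs = loader.load(space_key="SPACE", include_attachments=True, limit=50)
docs = loader.load(cql='type=page and label="important"', max_pages=10)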
"""
if not space_key and not page_ids and not label and not cql:
raise ValueError(
"Must specify at least one among `space_key`, `page_ids`, "
"`label`, `cql` parameters."
)
docs = []
if space_key:
pages = self.paginate_request(
self.confluence.get_all_pages_from_space,
space=space_key,
limit=limit,
max_pages=max_pages,
status="any" if include_archived_content else "current",
expand=content_format.value,
)
docs += self.process_pages(
pages,
include_restricted_content,
include_attachments,
include_comments,
content_format,
ocr_languages=ocr_languages,
keep_markdown_format=keep_markdown_format,
keep_newlines=keep_newlines,
)
if label:
pages = self.paginate_request(
self.confluence.get_all_pages_by_label,
label=label,
limit=limit,
max_pages=max_pages,
)
ids_by_label = [page["id"] for page in pages]
if page_ids:
page_ids = list(set(page_ids + ids_by_label))
else:
page_ids = list(set(ids_by_label))
if cql:
pages = self.paginate_request(
self._search_content_by_cql,
cql=cql,
limit=limit,
max_pages=max_pages,
include_archived_spaces=include_archived_content,
expand=content_format.value,
)
docs += self.process_pages(
pages,
include_restricted_content,
include_attachments,
include_comments,
content_format,
ocr_languages,
keep_markdown_format,
)
if page_ids:
for page_id in page_ids:
get_page = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1, # type: ignore[arg-type]
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(self.confluence.get_page_by_id)
page = get_page(
page_id=page_id, expand=f"{content_format.value},version"
)
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(
page,
include_attachments,
include_comments,
content_format,
ocr_languages,
keep_markdown_format,
)
docs.append(doc)
return docs
def _search_content_by_cql(
self, cql: str, include_archived_spaces: Optional[bool] = None, **kwargs: Any
) -> List[dict]:
url = "rest/api/content/search"
params: Dict[str, Any] = {"cql": cql}
params.update(kwargs)
if include_archived_spaces is not None:
params["includeArchivedSpaces"] = include_archived_spaces
response = self.confluence.get(url, params=params)
return response.get("results", [])
def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List:
"""Paginate the various methods to retrieve groups of pages.
Unfortunately, due to page-size limits, the Confluence API does not always
honor the `limit` value; if `limit` is greater than 100, Confluence appears
to cap the response at 100 results. Also, because the Atlassian Python
package only returns the value of the result key, we don't get the "next"
values from the "_links" key. So here, pagination starts at 0 and continues
up to `max_pages`, requesting `limit` pages per call. We have to manually
check whether there are more docs based on the length of the returned list
of pages, rather than checking for the presence of a `next` key in the
response, as suggested by:
https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/
:param retrieval_method: Function used to retrieve docs
:type retrieval_method: callable
:return: List of documents
:rtype: List
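Example (illustrative; assumes an initialized loader and a real space key):
.. code-block:: python
pages = loader.paginate_request(
loader.confluence.get_all_pages_from_space,
space="SPACE", limit=50, max_pages=1000,
)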
"""
max_pages = kwargs.pop("max_pages")
docs: List[dict] = []
while len(docs) < max_pages:
get_pages = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1,
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(retrieval_method)
batch = get_pages(**kwargs, start=len(docs))
if not batch:
break
docs.extend(batch)
return docs[:max_pages]
def is_public_page(self, page: dict) -> bool:
"""Check if a page is publicly accessible."""
restrictions = self.confluence.get_all_restrictions_for_content(page["id"])
return (
page["status"] == "current"
and not restrictions["read"]["restrictions"]["user"]["results"]
and not restrictions["read"]["restrictions"]["group"]["results"]
)
def process_pages(
self,
pages: List[dict],
include_restricted_content: bool,
include_attachments: bool,
include_comments: bool,
content_format: ContentFormat,
ocr_languages: Optional[str] = None,
keep_markdown_format: Optional[bool] = False,
keep_newlines: bool = False,
) -> List[Document]:
"""Process a list of pages into a list of documents."""
docs = []
for page in pages:
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(
page,
include_attachments,
include_comments,
content_format,
ocr_languages=ocr_languages,
keep_markdown_format=keep_markdown_format,
keep_newlines=keep_newlines,
)
docs.append(doc)
return docs
def process_page(
self,
page: dict,
include_attachments: bool,
include_comments: bool,
content_format: ContentFormat,
ocr_languages: Optional[str] = None,
keep_markdown_format: Optional[bool] = False,
keep_newlines: bool = False,
) -> Document:
if keep_markdown_format:
try:
from markdownify import markdownify
except ImportError:
raise ImportError(
"`markdownify` package not found, please run "
"`pip install markdownify`"
)
if include_comments or not keep_markdown_format:
try:
from bs4 import BeautifulSoup # type: ignore
except ImportError:
raise ImportError(
"`beautifulsoup4` package not found, please run "
"`pip install beautifulsoup4`"
)
if include_attachments:
attachment_texts = self.process_attachment(page["id"], ocr_languages)
else:
attachment_texts = []
content = content_format.get_content(page)
if keep_markdown_format:
# Use markdownify to keep the page Markdown style
text = markdownify(content, heading_style="ATX") + "".join(attachment_texts)
else:
if keep_newlines:
text = BeautifulSoup(
content.replace("</p>", "\n</p>").replace("<br />", "\n"), "lxml"
).get_text(" ") + "".join(attachment_texts)
else:
text = BeautifulSoup(content, "lxml").get_text(
" ", strip=True
) + "".join(attachment_texts)
if include_comments:
comments = self.confluence.get_page_comments(
page["id"], expand="body.view.value", depth="all"
)["results"]
comment_texts = [
BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text(
" ", strip=True
)
for comment in comments
]
text = text + "".join(comment_texts)
metadata = {
"title": page["title"],
"id": page["id"],
"source": self.base_url.strip("/") + page["_links"]["webui"],
}
if "version" in page and "when" in page["version"]:
metadata["when"] = page["version"]["when"]
return Document(
page_content=text,
metadata=metadata,
)
def process_attachment(
self,
page_id: str,
ocr_languages: Optional[str] = None,
) -> List[str]:
try:
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`Pillow` package not found, " "please run `pip install Pillow`"
)
# depending on setup you may also need to set the correct path for
# poppler and tesseract
attachments = self.confluence.get_attachments_from_content(page_id)["results"]
texts = []
for attachment in attachments:
media_type = attachment["metadata"]["mediaType"]
absolute_url = self.base_url + attachment["_links"]["download"]
title = attachment["title"]
try:
if media_type == "application/pdf":
text = title + self.process_pdf(absolute_url, ocr_languages)
elif (
media_type == "image/png"
or media_type == "image/jpg"
or media_type == "image/jpeg"
):
text = title + self.process_image(absolute_url, ocr_languages)
elif (
media_type == "application/vnd.openxmlformats-officedocument"
".wordprocessingml.document"
):
text = title + self.process_doc(absolute_url)
elif media_type == "application/vnd.ms-excel":
text = title + self.process_xls(absolute_url)
elif media_type == "image/svg+xml":
text = title + self.process_svg(absolute_url, ocr_languages)
else:
continue
texts.append(text)
except requests.HTTPError as e:
if e.response.status_code == 404:
print(f"Attachment not found at {absolute_url}")
continue
else:
raise
return texts
def process_pdf(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract # noqa: F401
from pdf2image import convert_from_bytes # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `pdf2image` package not found, "
"please run `pip install pytesseract pdf2image`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
images = convert_from_bytes(response.content)
except ValueError:
return text
for i, image in enumerate(images):
image_text = pytesseract.image_to_string(image, lang=ocr_languages)
text += f"Page {i + 1}:\n{image_text}\n\n"
return text
def process_image(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `Pillow` package not found, "
"please run `pip install pytesseract Pillow`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
image = Image.open(BytesIO(response.content))
except OSError:
return text
return pytesseract.image_to_string(image, lang=ocr_languages)
def process_doc(self, link: str) -> str:
try:
import docx2txt # noqa: F401
except ImportError:
raise ImportError(
"`docx2txt` package not found, please run `pip install docx2txt`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
file_data = BytesIO(response.content)
return docx2txt.process(file_data)
def process_xls(self, link: str) -> str:
import io
import os
try:
import xlrd # noqa: F401
except ImportError:
raise ImportError("`xlrd` package not found, please run `pip install xlrd`")
try:
import pandas as pd
except ImportError:
raise ImportError(
"`pandas` package not found, please run `pip install pandas`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
filename = os.path.basename(link)
# os.path.splitext keeps everything after the filename in the "extension",
# e.g. ".csv?version=2&modificationDate=1631800010678&cacheVersion=1&api=v2",
# which is why startswith(".csv") is used below instead of an equality check.
file_extension = os.path.splitext(filename)[1]
if file_extension.startswith(
".csv"
): # if the extension found in the url is ".csv"
content_string = response.content.decode("utf-8")
df = pd.read_csv(io.StringIO(content_string))
text += df.to_string(index=False, header=False) + "\n\n"
else:
workbook = xlrd.open_workbook(file_contents=response.content)
for sheet in workbook.sheets():
text += f"{sheet.name}:\n"
for row in range(sheet.nrows):
for col in range(sheet.ncols):
text += f"{sheet.cell_value(row, col)}\t"
text += "\n"
text += "\n"
return text
def process_svg(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
from reportlab.graphics import renderPM # noqa: F401
from svglib.svglib import svg2rlg # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, "
"please run `pip install pytesseract Pillow reportlab svglib`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
drawing = svg2rlg(BytesIO(response.content))
img_data = BytesIO()
renderPM.drawToFile(drawing, img_data, fmt="PNG")
img_data.seek(0)
image = Image.open(img_data)
return pytesseract.image_to_string(image, lang=ocr_languages)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~storage~encoder_backed.py | from typing import (
Any,
Callable,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
from langchain.schema import BaseStore
K = TypeVar("K")
V = TypeVar("V")
class EncoderBackedStore(BaseStore[K, V]):
"""Wraps a store with key and value encoders/decoders.
Examples that uses JSON for encoding/decoding:
.. code-block:: python
import json
def key_encoder(key: int) -> str:
return json.dumps(key)
def value_serializer(value: float) -> str:
return json.dumps(value)
def value_deserializer(serialized_value: str) -> float:
return json.loads(serialized_value)
# Create an instance of the abstract store
abstract_store = MyCustomStore()
# Create an instance of the encoder-backed store
store = EncoderBackedStore(
store=abstract_store,
key_encoder=key_encoder,
value_serializer=value_serializer,
value_deserializer=value_deserializer
)
# Use the encoder-backed store methods
store.mset([(1, 3.14), (2, 2.718)])
values = store.mget([1, 2]) # Retrieves [3.14, 2.718]
store.mdelete([1, 2]) # Deletes the keys 1 and 2
"""
def __init__(
self,
store: BaseStore[str, Any],
key_encoder: Callable[[K], str],
value_serializer: Callable[[V], bytes],
value_deserializer: Callable[[Any], V],
) -> None:
"""Initialize an EncodedStore."""
self.store = store
self.key_encoder = key_encoder
self.value_serializer = value_serializer
self.value_deserializer = value_deserializer
def mget(self, keys: Sequence[K]) -> List[Optional[V]]:
"""Get the values associated with the given keys."""
encoded_keys: List[str] = [self.key_encoder(key) for key in keys]
values = self.store.mget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
def mset(self, key_value_pairs: Sequence[Tuple[K, V]]) -> None:
"""Set the values for the given keys."""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
self.store.mset(encoded_pairs)
def mdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values."""
encoded_keys = [self.key_encoder(key) for key in keys]
self.store.mdelete(encoded_keys)
def yield_keys(
self, *, prefix: Optional[str] = None
) -> Union[Iterator[K], Iterator[str]]:
"""Get an iterator over keys that match the given prefix."""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
yield from self.store.yield_keys(prefix=prefix)
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~llms~llamaapi.py | import json
import logging
from typing import (
Any,
Dict,
List,
Mapping,
Optional,
Tuple,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
ChatGeneration,
ChatResult,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
logger = logging.getLogger(__name__)
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
# Fix for azure
# Also OpenAI returns None for tool invocations
content = _dict.get("content") or ""
if _dict.get("function_call"):
_dict["function_call"]["arguments"] = json.dumps(
_dict["function_call"]["arguments"], ensure_ascii=False
)
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
additional_kwargs = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class ChatLlamaAPI(BaseChatModel):
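"""Chat model that calls the Llama API.
A minimal usage sketch (assumes the `llamaapi` client package; the token is a
placeholder):
.. code-block:: python
from llamaapi import LlamaAPI
llama = LlamaAPI("<your-api-token>")
chat = ChatLlamaAPI(client=llama)
"""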
client: Any #: :meta private:
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
_params = {"messages": message_dicts}
final_params = {**params, **kwargs, **_params}
response = self.client.run(final_params).json()
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._client_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
return ChatResult(generations=generations)
@property
def _client_params(self) -> Mapping[str, Any]:
"""Get the parameters used for the client."""
return {}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "llama-api"
| [
"content"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~email.py | import os
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
satisfies_min_unstructured_version,
)
class UnstructuredEmailLoader(UnstructuredFileLoader):
"""Load email files using `Unstructured`.
Works with both
.eml and .msg files. You can process attachments in addition to the
e-mail message itself by passing process_attachments=True into the
constructor for the loader. By default, attachments will be processed
with the unstructured partition function. If you already know the document
types of the attachments, you can specify another partitioning function
with the attachment partitioner kwarg.
Example
-------
from langchain.document_loaders import UnstructuredEmailLoader
loader = UnstructuredEmailLoader("example_data/fake-email.eml", mode="elements")
loader.load()
Example
-------
from langchain.document_loaders import UnstructuredEmailLoader
loader = UnstructuredEmailLoader(
"example_data/fake-email-attachment.eml",
mode="elements",
process_attachments=True,
)
loader.load()
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
process_attachments = unstructured_kwargs.get("process_attachments")
attachment_partitioner = unstructured_kwargs.get("attachment_partitioner")
if process_attachments and attachment_partitioner is None:
from unstructured.partition.auto import partition
unstructured_kwargs["attachment_partitioner"] = partition
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.file_utils.filetype import FileType, detect_filetype
filetype = detect_filetype(self.file_path)
if filetype == FileType.EML:
from unstructured.partition.email import partition_email
return partition_email(filename=self.file_path, **self.unstructured_kwargs)
elif satisfies_min_unstructured_version("0.5.8") and filetype == FileType.MSG:
from unstructured.partition.msg import partition_msg
return partition_msg(filename=self.file_path, **self.unstructured_kwargs)
else:
raise ValueError(
f"Filetype {filetype} is not supported in UnstructuredEmailLoader."
)
class OutlookMessageLoader(BaseLoader):
"""
Loads Outlook Message files using extract_msg.
https://github.com/TeamMsgExtractor/msg-extractor
"""
def __init__(self, file_path: str):
"""Initialize with a file path.
Args:
file_path: The path to the Outlook Message file.
"""
self.file_path = file_path
if not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file" % self.file_path)
try:
import extract_msg # noqa:F401
except ImportError:
raise ImportError(
"extract_msg is not installed. Please install it with "
"`pip install extract_msg`"
)
def load(self) -> List[Document]:
"""Load data into document objects."""
import extract_msg
msg = extract_msg.Message(self.file_path)
return [
Document(
page_content=msg.body,
metadata={
"source": self.file_path,
"subject": msg.subject,
"sender": msg.sender,
"date": msg.date,
},
)
]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~ainetwork~owner.py | import builtins
import json
from typing import List, Optional, Type, Union
from langchain.callbacks.manager import AsyncCallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.ainetwork.base import AINBaseTool, OperationType
class RuleSchema(BaseModel):
"""Schema for owner operations."""
type: OperationType = Field(...)
path: str = Field(..., description="Blockchain reference path")
address: Optional[Union[str, List[str]]] = Field(
None, description="A single address or a list of addresses"
)
write_owner: Optional[bool] = Field(
False, description="Authority to edit the `owner` property of the path"
)
write_rule: Optional[bool] = Field(
False, description="Authority to edit `write rule` for the path"
)
write_function: Optional[bool] = Field(
False, description="Authority to `set function` for the path"
)
branch_owner: Optional[bool] = Field(
False, description="Authority to initialize `owner` of sub-paths"
)
class AINOwnerOps(AINBaseTool):
"""Tool for owner operations."""
name: str = "AINownerOps"
description: str = """
Rules for `owner` in AINetwork Blockchain database.
An address set as `owner` can modify permissions according to its granted authorities
## Path Rule
- (/[a-zA-Z_0-9]+)+
- Permission checks ascend from the most specific (child) path to broader (parent) paths until an `owner` is located.
## Address Rules
- 0x[0-9a-fA-F]{40}: 40-digit hexadecimal address
- *: All addresses permitted
- Defaults to the current session's address
## SET
- `SET` alters permissions for specific addresses, while other addresses remain unaffected.
- When removing an address of `owner`, set all authorities for that address to false.
- The message `write_owner permission evaluated false` is returned on failure
### Example
- type: SET
- path: /apps/langchain
- address: [<address 1>, <address 2>]
- write_owner: True
- write_rule: True
- write_function: True
- branch_owner: True
## GET
- Provides all addresses with `owner` permissions and their authorities in the path.
### Example
- type: GET
- path: /apps/langchain
""" # noqa: E501
args_schema: Type[BaseModel] = RuleSchema
async def _arun(
self,
type: OperationType,
path: str,
address: Optional[Union[str, List[str]]] = None,
write_owner: Optional[bool] = None,
write_rule: Optional[bool] = None,
write_function: Optional[bool] = None,
branch_owner: Optional[bool] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
from ain.types import ValueOnlyTransactionInput
try:
if type is OperationType.SET:
if address is None:
address = self.interface.wallet.defaultAccount.address
if isinstance(address, str):
address = [address]
res = await self.interface.db.ref(path).setOwner(
transactionInput=ValueOnlyTransactionInput(
value={
".owner": {
"owners": {
address: {
"write_owner": write_owner or False,
"write_rule": write_rule or False,
"write_function": write_function or False,
"branch_owner": branch_owner or False,
}
for address in address
}
}
}
)
)
elif type is OperationType.GET:
res = await self.interface.db.ref(path).getOwner()
else:
raise ValueError(f"Unsupported 'type': {type}.")
return json.dumps(res, ensure_ascii=False)
except Exception as e:
return f"{builtins.type(e).__name__}: {str(e)}"
| [
"\nRules for `owner` in AINetwork Blockchain database.\nAn address set as `owner` can modify permissions according to its granted authorities\n\n## Path Rule\n- (/[a-zA-Z_0-9]+)+\n- Permission checks ascend from the most specific (child) path to broader (parent) paths until an `owner` is located.\n\n## Address Rules\n- 0x[0-9a-fA-F]{40}: 40-digit hexadecimal address\n- *: All addresses permitted\n- Defaults to the current session's address\n\n## SET\n- `SET` alters permissions for specific addresses, while other addresses remain unaffected.\n- When removing an address of `owner`, set all authorities for that address to false.\n- message `write_owner permission evaluated false` if fail\n\n### Example\n- type: SET\n- path: /apps/langchain\n- address: [<address 1>, <address 2>]\n- write_owner: True\n- write_rule: True\n- write_function: True\n- branch_owner: True\n\n## GET\n- Provides all addresses with `owner` permissions and their authorities in the path.\n\n### Example\n- type: GET\n- path: /apps/langchain\n"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~output_parsers~combining.py | from __future__ import annotations
from typing import Any, Dict, List
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseOutputParser
class CombiningOutputParser(BaseOutputParser):
"""Combine multiple output parsers into one."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
parsers: List[BaseOutputParser]
@root_validator()
def validate_parsers(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate the parsers."""
parsers = values["parsers"]
if len(parsers) < 2:
raise ValueError("Must have at least two parsers")
for parser in parsers:
if parser._type == "combining":
raise ValueError("Cannot nest combining parsers")
if parser._type == "list":
raise ValueError("Cannot combine list parsers")
return values
@property
def _type(self) -> str:
"""Return the type key."""
return "combining"
def get_format_instructions(self) -> str:
"""Instructions on how the LLM output should be formatted."""
initial = f"For your first output: {self.parsers[0].get_format_instructions()}"
subsequent = "\n".join(
f"Complete that output fully. Then produce another output, separated by two newline characters: {p.get_format_instructions()}" # noqa: E501
for p in self.parsers[1:]
)
return f"{initial}\n{subsequent}"
def parse(self, text: str) -> Dict[str, Any]:
"""Parse the output of an LLM call."""
texts = text.split("\n\n")
output = dict()
for txt, parser in zip(texts, self.parsers):
output.update(parser.parse(txt.strip()))
return output
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~max_compute.py | from __future__ import annotations
from typing import Any, Iterator, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.max_compute import MaxComputeAPIWrapper
class MaxComputeLoader(BaseLoader):
"""Load from `Alibaba Cloud MaxCompute` table."""
def __init__(
self,
query: str,
api_wrapper: MaxComputeAPIWrapper,
*,
page_content_columns: Optional[Sequence[str]] = None,
metadata_columns: Optional[Sequence[str]] = None,
):
"""Initialize Alibaba Cloud MaxCompute document loader.
Args:
query: SQL query to execute.
api_wrapper: MaxCompute API wrapper.
page_content_columns: The columns to write into the `page_content` of the
Document. If unspecified, all columns will be written to `page_content`.
metadata_columns: The columns to write into the `metadata` of the Document.
If unspecified, all columns not added to `page_content` will be written.
"""
self.query = query
self.api_wrapper = api_wrapper
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
@classmethod
def from_params(
cls,
query: str,
endpoint: str,
project: str,
*,
access_id: Optional[str] = None,
secret_access_key: Optional[str] = None,
**kwargs: Any,
) -> MaxComputeLoader:
"""Convenience constructor that builds the MaxCompute API wrapper from
given parameters.
Args:
query: SQL query to execute.
endpoint: MaxCompute endpoint.
project: A project is a basic organizational unit of MaxCompute, which is
similar to a database.
access_id: MaxCompute access ID. Should be passed in directly or set as the
environment variable `MAX_COMPUTE_ACCESS_ID`.
secret_access_key: MaxCompute secret access key. Should be passed in
directly or set as the environment variable
`MAX_COMPUTE_SECRET_ACCESS_KEY`.
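Example (the endpoint, project and query are placeholders):
.. code-block:: python
loader = MaxComputeLoader.from_params(
"SELECT * FROM <table_name>",
endpoint="<endpoint>",
project="<project>",
)
docs = loader.load()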
"""
api_wrapper = MaxComputeAPIWrapper.from_params(
endpoint, project, access_id=access_id, secret_access_key=secret_access_key
)
return cls(query, api_wrapper, **kwargs)
def lazy_load(self) -> Iterator[Document]:
for row in self.api_wrapper.query(self.query):
if self.page_content_columns:
page_content_data = {
k: v for k, v in row.items() if k in self.page_content_columns
}
else:
page_content_data = row
page_content = "\n".join(f"{k}: {v}" for k, v in page_content_data.items())
if self.metadata_columns:
metadata = {k: v for k, v in row.items() if k in self.metadata_columns}
else:
metadata = {k: v for k, v in row.items() if k not in page_content_data}
yield Document(page_content=page_content, metadata=metadata)
def load(self) -> List[Document]:
return list(self.lazy_load())
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~redis~filters.py | from enum import Enum
from functools import wraps
from numbers import Number
from typing import Any, Callable, Dict, List, Optional, Union
from langchain.utilities.redis import TokenEscaper
# disable mypy error for dunder method overrides
# mypy: disable-error-code="override"
class RedisFilterOperator(Enum):
"""RedisFilterOperator enumerator is used to create RedisFilterExpressions."""
EQ = 1
NE = 2
LT = 3
GT = 4
LE = 5
GE = 6
OR = 7
AND = 8
LIKE = 9
IN = 10
class RedisFilter:
"""Collection of RedisFilterFields."""
@staticmethod
def text(field: str) -> "RedisText":
return RedisText(field)
@staticmethod
def num(field: str) -> "RedisNum":
return RedisNum(field)
@staticmethod
def tag(field: str) -> "RedisTag":
return RedisTag(field)
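# Illustrative usage of the RedisFilter helpers (field names and values are
# assumptions, not part of any fixed schema):
#
# brand_is_nike = RedisFilter.tag("brand") == "nike"
# price_is_under_100 = RedisFilter.num("price") < 100
# job_is_engineer = RedisFilter.text("job") % "engineer*"
# combined = brand_is_nike & price_is_under_100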
class RedisFilterField:
"""Base class for RedisFilterFields."""
escaper: "TokenEscaper" = TokenEscaper()
OPERATORS: Dict[RedisFilterOperator, str] = {}
def __init__(self, field: str):
self._field = field
self._value: Any = None
self._operator: RedisFilterOperator = RedisFilterOperator.EQ
def equals(self, other: "RedisFilterField") -> bool:
if not isinstance(other, type(self)):
return False
return self._field == other._field and self._value == other._value
def _set_value(
self, val: Any, val_type: type, operator: RedisFilterOperator
) -> None:
# check that the operator is supported by this class
if operator not in self.OPERATORS:
raise ValueError(
f"Operator {operator} not supported by {self.__class__.__name__}. "
+ f"Supported operators are {self.OPERATORS.values()}."
)
if not isinstance(val, val_type):
raise TypeError(
f"Right side argument passed to operator {self.OPERATORS[operator]} "
f"with left side "
f"argument {self.__class__.__name__} must be of type {val_type}, "
f"received value {val}"
)
self._value = val
self._operator = operator
def check_operator_misuse(func: Callable) -> Callable:
"""Decorator to check for misuse of equality operators."""
@wraps(func)
def wrapper(instance: Any, *args: Any, **kwargs: Any) -> Any:
# Extracting 'other' from positional arguments or keyword arguments
other = kwargs.get("other") if "other" in kwargs else None
if not other:
for arg in args:
if isinstance(arg, type(instance)):
other = arg
break
if isinstance(other, type(instance)):
raise ValueError(
"Equality operators are overridden for FilterExpression creation. Use "
".equals() for equality checks"
)
return func(instance, *args, **kwargs)
return wrapper
class RedisTag(RedisFilterField):
"""A RedisFilterField representing a tag in a Redis index."""
OPERATORS: Dict[RedisFilterOperator, str] = {
RedisFilterOperator.EQ: "==",
RedisFilterOperator.NE: "!=",
RedisFilterOperator.IN: "==",
}
OPERATOR_MAP: Dict[RedisFilterOperator, str] = {
RedisFilterOperator.EQ: "@%s:{%s}",
RedisFilterOperator.NE: "(-@%s:{%s})",
RedisFilterOperator.IN: "@%s:{%s}",
}
def __init__(self, field: str):
"""Create a RedisTag FilterField
Args:
field (str): The name of the RedisTag field in the index to be queried
against.
"""
super().__init__(field)
def _set_tag_value(
self, other: Union[List[str], str], operator: RedisFilterOperator
) -> None:
if isinstance(other, list):
if not all(isinstance(tag, str) for tag in other):
raise ValueError("All tags must be strings")
else:
other = [other]
self._set_value(other, list, operator)
@check_operator_misuse
def __eq__(self, other: Union[List[str], str]) -> "RedisFilterExpression":
"""Create a RedisTag equality filter expression
Args:
other (Union[List[str], str]): The tag(s) to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisTag
>>> filter = RedisTag("brand") == "nike"
"""
self._set_tag_value(other, RedisFilterOperator.EQ)
return RedisFilterExpression(str(self))
@check_operator_misuse
def __ne__(self, other: Union[List[str], str]) -> "RedisFilterExpression":
"""Create a RedisTag inequality filter expression
Args:
other (Union[List[str], str]): The tag(s) to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisTag
>>> filter = RedisTag("brand") != "nike"
"""
self._set_tag_value(other, RedisFilterOperator.NE)
return RedisFilterExpression(str(self))
@property
def _formatted_tag_value(self) -> str:
return "|".join([self.escaper.escape(tag) for tag in self._value])
def __str__(self) -> str:
"""Return the Redis Query syntax for a RedisTag filter expression."""
if not self._value:
raise ValueError(
f"Operator must be used before calling __str__. Operators are "
f"{self.OPERATORS.values()}"
)
return self.OPERATOR_MAP[self._operator] % (
self._field,
self._formatted_tag_value,
)
class RedisNum(RedisFilterField):
"""A RedisFilterField representing a numeric field in a Redis index."""
OPERATORS: Dict[RedisFilterOperator, str] = {
RedisFilterOperator.EQ: "==",
RedisFilterOperator.NE: "!=",
RedisFilterOperator.LT: "<",
RedisFilterOperator.GT: ">",
RedisFilterOperator.LE: "<=",
RedisFilterOperator.GE: ">=",
}
OPERATOR_MAP: Dict[RedisFilterOperator, str] = {
RedisFilterOperator.EQ: "@%s:[%f %f]",
RedisFilterOperator.NE: "(-@%s:[%f %f])",
RedisFilterOperator.GT: "@%s:[(%f +inf]",
RedisFilterOperator.LT: "@%s:[-inf (%f]",
RedisFilterOperator.GE: "@%s:[%f +inf]",
RedisFilterOperator.LE: "@%s:[-inf %f]",
}
def __str__(self) -> str:
"""Return the Redis Query syntax for a Numeric filter expression"""
if not self._value:
raise ValueError(
f"Operator must be used before calling __str__. Operators are "
f"{self.OPERATORS.values()}"
)
if (
self._operator == RedisFilterOperator.EQ
or self._operator == RedisFilterOperator.NE
):
return self.OPERATOR_MAP[self._operator] % (
self._field,
self._value,
self._value,
)
else:
return self.OPERATOR_MAP[self._operator] % (self._field, self._value)
@check_operator_misuse
def __eq__(self, other: Union[int, float]) -> "RedisFilterExpression":
"""Create a Numeric equality filter expression
Args:
other (Number): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("zipcode") == 90210
"""
self._set_value(other, Number, RedisFilterOperator.EQ)
return RedisFilterExpression(str(self))
@check_operator_misuse
def __ne__(self, other: Union[int, float]) -> "RedisFilterExpression":
"""Create a Numeric inequality filter expression
Args:
other (Number): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("zipcode") != 90210
"""
self._set_value(other, Number, RedisFilterOperator.NE)
return RedisFilterExpression(str(self))
def __gt__(self, other: Union[int, float]) -> "RedisFilterExpression":
"""Create a RedisNumeric greater than filter expression
Args:
other (Number): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") > 18
"""
self._set_value(other, Number, RedisFilterOperator.GT)
return RedisFilterExpression(str(self))
def __lt__(self, other: Union[int, float]) -> "RedisFilterExpression":
"""Create a Numeric less than filter expression
Args:
other (Number): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") < 18
"""
self._set_value(other, Number, RedisFilterOperator.LT)
return RedisFilterExpression(str(self))
def __ge__(self, other: Union[int, float]) -> "RedisFilterExpression":
"""Create a Numeric greater than or equal to filter expression
Args:
other (Number): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") >= 18
"""
self._set_value(other, Number, RedisFilterOperator.GE)
return RedisFilterExpression(str(self))
def __le__(self, other: Union[int, float]) -> "RedisFilterExpression":
"""Create a Numeric less than or equal to filter expression
Args:
other (Number): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") <= 18
"""
self._set_value(other, Number, RedisFilterOperator.LE)
return RedisFilterExpression(str(self))
class RedisText(RedisFilterField):
"""A RedisFilterField representing a text field in a Redis index."""
OPERATORS = {
RedisFilterOperator.EQ: "==",
RedisFilterOperator.NE: "!=",
RedisFilterOperator.LIKE: "%",
}
OPERATOR_MAP = {
RedisFilterOperator.EQ: '@%s:"%s"',
RedisFilterOperator.NE: '(-@%s:"%s")',
RedisFilterOperator.LIKE: "@%s:%s",
}
@check_operator_misuse
def __eq__(self, other: str) -> "RedisFilterExpression":
"""Create a RedisText equality filter expression
Args:
other (str): The text value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisText
>>> filter = RedisText("job") == "engineer"
"""
self._set_value(other, str, RedisFilterOperator.EQ)
return RedisFilterExpression(str(self))
@check_operator_misuse
def __ne__(self, other: str) -> "RedisFilterExpression":
"""Create a RedisText inequality filter expression
Args:
other (str): The text value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisText
>>> filter = RedisText("job") != "engineer"
"""
self._set_value(other, str, RedisFilterOperator.NE)
return RedisFilterExpression(str(self))
def __mod__(self, other: str) -> "RedisFilterExpression":
"""Create a RedisText like filter expression
Args:
other (str): The text value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisText
>>> filter = RedisText("job") % "engineer"
"""
self._set_value(other, str, RedisFilterOperator.LIKE)
return RedisFilterExpression(str(self))
def __str__(self) -> str:
if not self._value:
raise ValueError(
f"Operator must be used before calling __str__. Operators are "
f"{self.OPERATORS.values()}"
)
try:
return self.OPERATOR_MAP[self._operator] % (self._field, self._value)
except KeyError:
raise Exception("Invalid operator")
class RedisFilterExpression:
"""A logical expression of RedisFilterFields.
RedisFilterExpressions can be combined using the & and | operators to create
complex logical expressions that evaluate to the Redis Query language.
This presents an interface by which users can create complex queries
without having to know the Redis Query language.
Filter expressions are not initialized directly. Instead they are built
by combining RedisFilterFields using the & and | operators.
Examples:
>>> from langchain.vectorstores.redis import RedisTag, RedisNum
>>> brand_is_nike = RedisTag("brand") == "nike"
>>> price_is_under_100 = RedisNum("price") < 100
>>> filter = brand_is_nike & price_is_under_100
>>> print(str(filter))
(@brand:{nike} @price:[-inf (100)])
"""
def __init__(
self,
_filter: Optional[str] = None,
operator: Optional[RedisFilterOperator] = None,
left: Optional["RedisFilterExpression"] = None,
right: Optional["RedisFilterExpression"] = None,
):
self._filter = _filter
self._operator = operator
self._left = left
self._right = right
def __and__(self, other: "RedisFilterExpression") -> "RedisFilterExpression":
return RedisFilterExpression(
operator=RedisFilterOperator.AND, left=self, right=other
)
def __or__(self, other: "RedisFilterExpression") -> "RedisFilterExpression":
return RedisFilterExpression(
operator=RedisFilterOperator.OR, left=self, right=other
)
def __str__(self) -> str:
# top level check that allows recursive calls to __str__
if not self._filter and not self._operator:
raise ValueError("Improperly initialized RedisFilterExpression")
# allow for single filter expression without operators as last
# expression in the chain might not have an operator
if self._operator:
operator_str = " | " if self._operator == RedisFilterOperator.OR else " "
return f"({str(self._left)}{operator_str}{str(self._right)})"
# check that base case, the filter is set
if not self._filter:
raise ValueError("Improperly initialized RedisFilterExpression")
return self._filter
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~office365~messages_search.py | """Util that Searches email messages in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from typing import Any, Dict, List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Extra, Field
from langchain.tools.office365.base import O365BaseTool
from langchain.tools.office365.utils import clean_body
class SearchEmailsInput(BaseModel):
"""Input for SearchEmails Tool."""
"""From https://learn.microsoft.com/en-us/graph/search-query-parameter"""
folder: str = Field(
default=None,
description=(
" If the user wants to search in only one folder, the name of the folder. "
'Default folders are "inbox", "drafts", "sent items", "deleted ttems", but '
"users can search custom folders as well."
),
)
query: str = Field(
description=(
"The Microsoift Graph v1.0 $search query. Example filters include "
"from:sender, from:sender, to:recipient, subject:subject, "
"recipients:list_of_recipients, body:excitement, importance:high, "
"received>2022-12-01, received<2021-12-01, sent>2022-12-01, "
"sent<2021-12-01, hasAttachments:true attachment:api-catalog.md, "
"cc:[email protected], bcc:[email protected], body:excitement date "
"range example: received:2023-06-08..2023-06-09 matching example: "
"from:amy OR from:david."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
"Whether the email body is truncated to meet token number limits. Set to "
"False for searches that will retrieve very few results, otherwise, set to "
"True"
),
)
class O365SearchEmails(O365BaseTool):
"""Class for searching email messages in Office 365
Free, but setup is required
"""
name: str = "messages_search"
args_schema: Type[BaseModel] = SearchEmailsInput
description: str = (
"Use this tool to search for email messages."
" The input must be a valid Microsoft Graph v1.0 $search query."
" The output is a JSON list of the requested resource."
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _run(
self,
query: str,
folder: str = "",
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
# Get mailbox object
mailbox = self.account.mailbox()
# Pull the folder if the user wants to search in a folder
if folder != "":
mailbox = mailbox.get_folder(folder_name=folder)
# Retrieve messages based on query
query = mailbox.q().search(query)
messages = mailbox.get_messages(limit=max_results, query=query)
# Generate output dict
output_messages = []
for message in messages:
output_message = {}
output_message["from"] = message.sender
if truncate:
output_message["body"] = message.body_preview
else:
output_message["body"] = clean_body(message.body)
output_message["subject"] = message.subject
output_message["date"] = message.modified.strftime("%Y-%m-%dT%H:%M:%S%z")
output_message["to"] = []
for recipient in message.to._recipients:
output_message["to"].append(str(recipient))
output_message["cc"] = []
for recipient in message.cc._recipients:
output_message["cc"].append(str(recipient))
output_message["bcc"] = []
for recipient in message.bcc._recipients:
output_message["bcc"].append(str(recipient))
output_messages.append(output_message)
return output_messages
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~wikipedia.py | from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaLoader(BaseLoader):
"""Load from `Wikipedia`.
The hard limit on the number of downloaded Documents is 300 for now.
Each wiki page represents one Document.
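Example (the query value is illustrative):
.. code-block:: python
from langchain.document_loaders import WikipediaLoader
docs = WikipediaLoader(query="LangChain", load_max_docs=2).load()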
"""
def __init__(
self,
query: str,
lang: str = "ru",
load_max_docs: Optional[int] = 100,
load_all_available_meta: Optional[bool] = False,
doc_content_chars_max: Optional[int] = 4000,
):
"""
Initializes a new instance of the WikipediaLoader class.
Args:
query (str): The query string to search on Wikipedia.
lang (str, optional): The language code for the Wikipedia language edition.
Defaults to "en".
load_max_docs (int, optional): The maximum number of documents to load.
Defaults to 100.
load_all_available_meta (bool, optional): Indicates whether to load all
available metadata for each document. Defaults to False.
doc_content_chars_max (int, optional): The maximum number of characters
for the document content. Defaults to 4000.
"""
self.query = query
self.lang = lang
self.load_max_docs = load_max_docs
self.load_all_available_meta = load_all_available_meta
self.doc_content_chars_max = doc_content_chars_max
def load(self) -> List[Document]:
"""
Loads the query result from Wikipedia into a list of Documents.
Returns:
List[Document]: A list of Document objects representing the loaded
Wikipedia pages.
"""
client = WikipediaAPIWrapper(
lang=self.lang,
top_k_results=self.load_max_docs,
load_all_available_meta=self.load_all_available_meta,
doc_content_chars_max=self.doc_content_chars_max,
)
docs = client.load(self.query)
return docs
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~llms~jsonformer_decoder.py | """Experimental implementation of jsonformer wrapped LLM."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, List, Optional, cast
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_experimental.pydantic_v1 import Field, root_validator
if TYPE_CHECKING:
import jsonformer
def import_jsonformer() -> jsonformer:
"""Lazily import jsonformer."""
try:
import jsonformer
except ImportError:
raise ImportError(
"Could not import jsonformer python package. "
"Please install it with `pip install jsonformer`."
)
return jsonformer
class JsonFormer(HuggingFacePipeline):
"""Jsonformer wrapped LLM using HuggingFace Pipeline API.
This pipeline is experimental and not yet stable.
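A minimal usage sketch (the model id and schema are illustrative; whether a
given model works well with jsonformer depends on the model family):
.. code-block:: python
from transformers import pipeline
hf_pipe = pipeline("text2text-generation", model="google/flan-t5-small")
llm = JsonFormer(
pipeline=hf_pipe,
json_schema={"type": "object", "properties": {"name": {"type": "string"}}},
)
print(llm("Describe a person named Alice."))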
"""
json_schema: dict = Field(..., description="The JSON Schema to complete.")
max_new_tokens: int = Field(
default=200, description="Maximum number of new tokens to generate."
)
debug: bool = Field(default=False, description="Debug mode.")
# TODO: move away from `root_validator` since it is deprecated in pydantic v2
# and causes mypy type-checking failures (hence the `type: ignore`)
@root_validator # type: ignore[call-overload]
def check_jsonformer_installation(cls, values: dict) -> dict:
import_jsonformer()
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
jsonformer = import_jsonformer()
from transformers import Text2TextGenerationPipeline
pipeline = cast(Text2TextGenerationPipeline, self.pipeline)
model = jsonformer.Jsonformer(
model=pipeline.model,
tokenizer=pipeline.tokenizer,
json_schema=self.json_schema,
prompt=prompt,
max_number_tokens=self.max_new_tokens,
debug=self.debug,
)
text = model()
return json.dumps(text)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~output_parsers~rail_parser.py | from __future__ import annotations
from typing import Any, Callable, Dict, Optional
from langchain.schema import BaseOutputParser
class GuardrailsOutputParser(BaseOutputParser):
"""Parse the output of an LLM call using Guardrails."""
guard: Any
"""The Guardrails object."""
api: Optional[Callable]
"""The API to use for the Guardrails object."""
args: Any
"""The arguments to pass to the API."""
kwargs: Any
"""The keyword arguments to pass to the API."""
@property
def _type(self) -> str:
return "guardrails"
@classmethod
def from_rail(
cls,
rail_file: str,
num_reasks: int = 1,
api: Optional[Callable] = None,
*args: Any,
**kwargs: Any,
) -> GuardrailsOutputParser:
"""Create a GuardrailsOutputParser from a rail file.
Args:
rail_file: a rail file.
num_reasks: number of times to re-ask the question.
api: the API to use for the Guardrails object.
*args: The arguments to pass to the API
**kwargs: The keyword arguments to pass to the API.
Returns:
GuardrailsOutputParser
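Example (the rail file path is an assumption):
.. code-block:: python
output_parser = GuardrailsOutputParser.from_rail("my_spec.rail", num_reasks=1)
print(output_parser.get_format_instructions())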
"""
try:
from guardrails import Guard
except ImportError:
raise ImportError(
"guardrails-ai package not installed. "
"Install it by running `pip install guardrails-ai`."
)
return cls(
guard=Guard.from_rail(rail_file, num_reasks=num_reasks),
api=api,
args=args,
kwargs=kwargs,
)
@classmethod
def from_rail_string(
cls,
rail_str: str,
num_reasks: int = 1,
api: Optional[Callable] = None,
*args: Any,
**kwargs: Any,
) -> GuardrailsOutputParser:
try:
from guardrails import Guard
except ImportError:
raise ImportError(
"guardrails-ai package not installed. "
"Install it by running `pip install guardrails-ai`."
)
return cls(
guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks),
api=api,
args=args,
kwargs=kwargs,
)
@classmethod
def from_pydantic(
cls,
output_class: Any,
num_reasks: int = 1,
api: Optional[Callable] = None,
*args: Any,
**kwargs: Any,
) -> GuardrailsOutputParser:
try:
from guardrails import Guard
except ImportError:
raise ImportError(
"guardrails-ai package not installed. "
"Install it by running `pip install guardrails-ai`."
)
return cls(
guard=Guard.from_pydantic(output_class, "", num_reasks=num_reasks),
api=api,
args=args,
kwargs=kwargs,
)
def get_format_instructions(self) -> str:
return self.guard.raw_prompt.format_instructions
def parse(self, text: str) -> Dict:
return self.guard.parse(text, llm_api=self.api, *self.args, **self.kwargs)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~chains~test_neptune_cypher_qa.py | def test_import() -> None:
from langchain.chains import NeptuneOpenCypherQAChain # noqa: F401
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~sequential.py | """Chain pipeline where the outputs of one step feed directly into next."""
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils.input import get_color_mapping
class SequentialChain(Chain):
"""Chain where the outputs of one chain feed directly into next."""
chains: List[Chain]
input_variables: List[str]
output_variables: List[str] #: :meta private:
return_all: bool = False
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Return expected input keys to the chain.
:meta private:
"""
return self.input_variables
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return self.output_variables
@root_validator(pre=True)
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that the correct inputs exist for all chains."""
chains = values["chains"]
input_variables = values["input_variables"]
memory_keys = list()
if "memory" in values and values["memory"] is not None:
"""Validate that prompt input variables are consistent."""
memory_keys = values["memory"].memory_variables
if set(input_variables).intersection(set(memory_keys)):
overlapping_keys = set(input_variables) & set(memory_keys)
raise ValueError(
f"The input key(s) {''.join(overlapping_keys)} are found "
f"in the Memory keys ({memory_keys}) - please use input and "
f"memory keys that don't overlap."
)
known_variables = set(input_variables + memory_keys)
for chain in chains:
missing_vars = set(chain.input_keys).difference(known_variables)
if chain.memory:
missing_vars = missing_vars.difference(chain.memory.memory_variables)
if missing_vars:
raise ValueError(
f"Missing required input keys: {missing_vars}, "
f"only had {known_variables}"
)
overlapping_keys = known_variables.intersection(chain.output_keys)
if overlapping_keys:
raise ValueError(
f"Chain returned keys that already exist: {overlapping_keys}"
)
known_variables |= set(chain.output_keys)
if "output_variables" not in values:
if values.get("return_all", False):
output_keys = known_variables.difference(input_variables)
else:
output_keys = chains[-1].output_keys
values["output_variables"] = output_keys
else:
missing_vars = set(values["output_variables"]).difference(known_variables)
if missing_vars:
raise ValueError(
f"Expected output variables that were not found: {missing_vars}."
)
return values
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
known_values = inputs.copy()
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
for i, chain in enumerate(self.chains):
callbacks = _run_manager.get_child()
outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks)
known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
known_values = inputs.copy()
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
for i, chain in enumerate(self.chains):
outputs = await chain.acall(
known_values, return_only_outputs=True, callbacks=callbacks
)
known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}
class SimpleSequentialChain(Chain):
"""Simple chain where the outputs of one step feed directly into next."""
chains: List[Chain]
strip_outputs: bool = False
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that chains are all single input/output."""
for chain in values["chains"]:
if len(chain.input_keys) != 1:
raise ValueError(
"Chains used in SimplePipeline should all have one input, got "
f"{chain} with {len(chain.input_keys)} inputs."
)
if len(chain.output_keys) != 1:
raise ValueError(
"Chains used in SimplePipeline should all have one output, got "
f"{chain} with {len(chain.output_keys)} outputs."
)
return values
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_input = inputs[self.input_key]
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
for i, chain in enumerate(self.chains):
_input = chain.run(_input, callbacks=_run_manager.get_child(f"step_{i+1}"))
if self.strip_outputs:
_input = _input.strip()
_run_manager.on_text(
_input, color=color_mapping[str(i)], end="\n", verbose=self.verbose
)
return {self.output_key: _input}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
_input = inputs[self.input_key]
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
for i, chain in enumerate(self.chains):
_input = await chain.arun(
_input, callbacks=_run_manager.get_child(f"step_{i+1}")
)
if self.strip_outputs:
_input = _input.strip()
await _run_manager.on_text(
_input, color=color_mapping[str(i)], end="\n", verbose=self.verbose
)
return {self.output_key: _input}
| [] |
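A short sketch of wiring two single-input chains with SimpleSequentialChain; FakeListLLM is used so the example runs without model credentials, and the prompt texts and canned responses are placeholders.
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate

# First chain: title -> synopsis (the canned response stands in for a real LLM).
synopsis_chain = LLMChain(
    llm=FakeListLLM(responses=["A play about a robot learning to paint."]),
    prompt=PromptTemplate.from_template("Write a synopsis for a play titled {title}."),
)
# Second chain: synopsis -> review.
review_chain = LLMChain(
    llm=FakeListLLM(responses=["A touching, well-paced production."]),
    prompt=PromptTemplate.from_template("Write a review of this synopsis: {synopsis}"),
)
overall = SimpleSequentialChain(chains=[synopsis_chain, review_chain], verbose=True)
print(overall.run("The Painter Automaton"))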
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~docarray~in_memory.py | """Wrapper around in-memory storage."""
from __future__ import annotations
from typing import Any, Dict, List, Literal, Optional
from langchain.schema.embeddings import Embeddings
from langchain.vectorstores.docarray.base import (
DocArrayIndex,
_check_docarray_import,
)
class DocArrayInMemorySearch(DocArrayIndex):
"""In-memory `DocArray` storage for exact search.
To use it, you should have the ``docarray`` package with version >=0.32.0 installed.
You can install it with `pip install "langchain[docarray]"`.
"""
@classmethod
def from_params(
cls,
embedding: Embeddings,
metric: Literal[
"cosine_sim", "euclidian_dist", "sgeuclidean_dist"
] = "cosine_sim",
**kwargs: Any,
) -> DocArrayInMemorySearch:
"""Initialize DocArrayInMemorySearch store.
Args:
embedding (Embeddings): Embedding function.
metric (str): metric for exact nearest-neighbor search.
Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
Defaults to "cosine_sim".
**kwargs: Other keyword arguments to be passed to the get_doc_cls method.
"""
_check_docarray_import()
from docarray.index import InMemoryExactNNIndex
doc_cls = cls._get_doc_cls(space=metric, **kwargs)
doc_index = InMemoryExactNNIndex[doc_cls]() # type: ignore
return cls(doc_index, embedding)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
**kwargs: Any,
) -> DocArrayInMemorySearch:
"""Create an DocArrayInMemorySearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[Dict[Any, Any]]]): Metadata for each text
if it exists. Defaults to None.
metric (str): metric for exact nearest-neighbor search.
Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
Defaults to "cosine_sim".
Returns:
DocArrayInMemorySearch Vector Store
"""
store = cls.from_params(embedding, **kwargs)
store.add_texts(texts=texts, metadatas=metadatas)
return store
| [] |
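A small sketch of building and querying the store, assuming the docarray extra is installed; FakeEmbeddings returns random vectors, so the search results are only illustrative.
from langchain.embeddings.fake import FakeEmbeddings
from langchain.vectorstores import DocArrayInMemorySearch

texts = ["dogs are loyal", "cats are independent", "parrots can talk"]
store = DocArrayInMemorySearch.from_texts(texts, FakeEmbeddings(size=32))
for doc in store.similarity_search("which pet can speak?", k=2):
    print(doc.page_content)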
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~docarray.py | from enum import Enum
from typing import Any, Dict, List, Optional, Union
import numpy as np
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from langchain.schema.embeddings import Embeddings
from langchain.vectorstores.utils import maximal_marginal_relevance
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
mmr = "mmr"
class DocArrayRetriever(BaseRetriever):
"""`DocArray Document Indices` retriever.
Currently, it supports 5 backends:
InMemoryExactNNIndex, HnswDocumentIndex, QdrantDocumentIndex,
ElasticDocIndex, and WeaviateDocumentIndex.
Args:
index: One of the above-mentioned index instances
embeddings: Embedding model to represent text as vectors
search_field: Field to consider for searching in the documents.
Should be an embedding/vector/tensor.
content_field: Field that represents the main content in your document schema.
Will be used as a `page_content`. Everything else will go into `metadata`.
search_type: Type of search to perform (similarity / mmr)
filters: Filters applied for document retrieval.
top_k: Number of documents to return
"""
index: Any
embeddings: Embeddings
search_field: str
content_field: str
search_type: SearchType = SearchType.similarity
top_k: int = 1
filters: Optional[Any] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
query_emb = np.array(self.embeddings.embed_query(query))
if self.search_type == SearchType.similarity:
results = self._similarity_search(query_emb)
elif self.search_type == SearchType.mmr:
results = self._mmr_search(query_emb)
else:
raise ValueError(
f"Search type {self.search_type} does not exist. "
f"Choose either 'similarity' or 'mmr'."
)
return results
def _search(
self, query_emb: np.ndarray, top_k: int
) -> List[Union[Dict[str, Any], Any]]:
"""
Perform a search using the query embedding and return top_k documents.
Args:
query_emb: Query represented as an embedding
top_k: Number of documents to return
Returns:
A list of top_k documents matching the query
"""
from docarray.index import ElasticDocIndex, WeaviateDocumentIndex
filter_args = {}
search_field = self.search_field
if isinstance(self.index, WeaviateDocumentIndex):
filter_args["where_filter"] = self.filters
search_field = ""
elif isinstance(self.index, ElasticDocIndex):
filter_args["query"] = self.filters
else:
filter_args["filter_query"] = self.filters
if self.filters:
query = (
self.index.build_query() # get empty query object
.find(
query=query_emb, search_field=search_field
) # add vector similarity search
.filter(**filter_args) # add filter search
.build(limit=top_k) # build the query
)
# execute the combined query and return the results
docs = self.index.execute_query(query)
if hasattr(docs, "documents"):
docs = docs.documents
docs = docs[:top_k]
else:
docs = self.index.find(
query=query_emb, search_field=search_field, limit=top_k
).documents
return docs
def _similarity_search(self, query_emb: np.ndarray) -> List[Document]:
"""
Perform a similarity search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of documents most similar to the query
"""
docs = self._search(query_emb=query_emb, top_k=self.top_k)
results = [self._docarray_to_langchain_doc(doc) for doc in docs]
return results
def _mmr_search(self, query_emb: np.ndarray) -> List[Document]:
"""
Perform a maximal marginal relevance (mmr) search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of diverse documents related to the query
"""
docs = self._search(query_emb=query_emb, top_k=20)
mmr_selected = maximal_marginal_relevance(
query_emb,
[
doc[self.search_field]
if isinstance(doc, dict)
else getattr(doc, self.search_field)
for doc in docs
],
k=self.top_k,
)
results = [self._docarray_to_langchain_doc(docs[idx]) for idx in mmr_selected]
return results
def _docarray_to_langchain_doc(self, doc: Union[Dict[str, Any], Any]) -> Document:
"""
Convert a DocArray document (which also might be a dict)
to a langchain document format.
DocArray document can contain arbitrary fields, so the mapping is done
in the following way:
page_content <-> content_field
metadata <-> all other fields excluding
tensors and embeddings (so float, int, string)
Args:
doc: DocArray document
Returns:
Document in langchain format
Raises:
ValueError: If the document doesn't contain the content field
"""
fields = doc.keys() if isinstance(doc, dict) else doc.__fields__
if self.content_field not in fields:
raise ValueError(
f"Document does not contain the content field - {self.content_field}."
)
lc_doc = Document(
page_content=doc[self.content_field]
if isinstance(doc, dict)
else getattr(doc, self.content_field)
)
for name in fields:
value = doc[name] if isinstance(doc, dict) else getattr(doc, name)
if (
isinstance(value, (str, int, float, bool))
and name != self.content_field
):
lc_doc.metadata[name] = value
return lc_doc
| [] |
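A sketch of pairing the retriever with an InMemoryExactNNIndex backend; the MyDoc schema, the 32-dimensional FakeEmbeddings and the sample texts are illustrative placeholders (fake random vectors mean the ranking itself is meaningless).
from docarray import BaseDoc, DocList
from docarray.index import InMemoryExactNNIndex
from docarray.typing import NdArray
from langchain.embeddings.fake import FakeEmbeddings
from langchain.retrievers import DocArrayRetriever

class MyDoc(BaseDoc):
    text: str
    embedding: NdArray[32]

embeddings = FakeEmbeddings(size=32)
docs = DocList[MyDoc](
    [MyDoc(text=t, embedding=embeddings.embed_query(t))
     for t in ["first document", "second document"]]
)
index = InMemoryExactNNIndex[MyDoc]()
index.index(docs)

retriever = DocArrayRetriever(
    index=index,
    embeddings=embeddings,
    search_field="embedding",
    content_field="text",
)
print(retriever.get_relevant_documents("first"))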
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~vllm.py | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import BaseLLM
from langchain.llms.openai import BaseOpenAI
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema.output import Generation, LLMResult
class VLLM(BaseLLM):
"""VLLM language model."""
model: str = ""
"""The name or path of a HuggingFace Transformers model."""
tensor_parallel_size: Optional[int] = 1
"""The number of GPUs to use for distributed execution with tensor parallelism."""
trust_remote_code: Optional[bool] = False
"""Trust remote code (e.g., from HuggingFace) when downloading the model
and tokenizer."""
n: int = 1
"""Number of output sequences to return for the given prompt."""
best_of: Optional[int] = None
"""Number of output sequences that are generated from the prompt."""
presence_penalty: float = 0.0
"""Float that penalizes new tokens based on whether they appear in the
generated text so far"""
frequency_penalty: float = 0.0
"""Float that penalizes new tokens based on their frequency in the
generated text so far"""
temperature: float = 1.0
"""Float that controls the randomness of the sampling."""
top_p: float = 1.0
"""Float that controls the cumulative probability of the top tokens to consider."""
top_k: int = -1
"""Integer that controls the number of top tokens to consider."""
use_beam_search: bool = False
"""Whether to use beam search instead of sampling."""
stop: Optional[List[str]] = None
"""List of strings that stop the generation when they are generated."""
ignore_eos: bool = False
"""Whether to ignore the EOS token and continue generating tokens after
the EOS token is generated."""
max_new_tokens: int = 512
"""Maximum number of tokens to generate per output sequence."""
logprobs: Optional[int] = None
"""Number of log probabilities to return per output token."""
dtype: str = "auto"
"""The data type for the model weights and activations."""
download_dir: Optional[str] = None
"""Directory to download and load the weights. (Default to the default
cache dir of huggingface)"""
vllm_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `vllm.LLM` call not explicitly specified."""
client: Any #: :meta private:
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from vllm import LLM as VLLModel
except ImportError:
raise ImportError(
"Could not import vllm python package. "
"Please install it with `pip install vllm`."
)
values["client"] = VLLModel(
model=values["model"],
tensor_parallel_size=values["tensor_parallel_size"],
trust_remote_code=values["trust_remote_code"],
dtype=values["dtype"],
download_dir=values["download_dir"],
**values["vllm_kwargs"],
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling vllm."""
return {
"n": self.n,
"best_of": self.best_of,
"max_tokens": self.max_new_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"temperature": self.temperature,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"stop": self.stop,
"ignore_eos": self.ignore_eos,
"use_beam_search": self.use_beam_search,
"logprobs": self.logprobs,
}
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
from vllm import SamplingParams
# build sampling parameters
params = {**self._default_params, **kwargs, "stop": stop}
sampling_params = SamplingParams(**params)
# call the model
outputs = self.client.generate(prompts, sampling_params)
generations = []
for output in outputs:
text = output.outputs[0].text
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "vllm"
class VLLMOpenAI(BaseOpenAI):
"""vLLM OpenAI-compatible API client"""
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
}
return {
"model": self.model_name,
**openai_creds,
**self._default_params,
"logit_bias": None,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "vllm-openai"
| [] |
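Two illustrative ways to call the wrappers above; the model name is a placeholder, and either a GPU with the vllm package or a running OpenAI-compatible vLLM server is assumed.
from langchain.llms import VLLM, VLLMOpenAI

# Local engine: loads the model through vllm.LLM at construction time.
llm = VLLM(model="mosaicml/mpt-7b", max_new_tokens=128, temperature=0.8)
print(llm("What is the capital of France?"))

# Client for a server started with `python -m vllm.entrypoints.openai.api_server`.
server_llm = VLLMOpenAI(
    openai_api_key="EMPTY",
    openai_api_base="http://localhost:8000/v1",
    model_name="mosaicml/mpt-7b",
)
print(server_llm("What is the capital of France?"))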
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~weather.py | """Simple reader that reads weather data from OpenWeatherMap API"""
from __future__ import annotations
from datetime import datetime
from typing import Iterator, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
class WeatherDataLoader(BaseLoader):
"""Load weather data with `Open Weather Map` API.
Reads the forecast & current weather of any location using OpenWeatherMap's free
API. Check out 'https://openweathermap.org/appid' for more on how to generate a
free OpenWeatherMap API key.
"""
def __init__(
self,
client: OpenWeatherMapAPIWrapper,
places: Sequence[str],
) -> None:
"""Initialize with parameters."""
super().__init__()
self.client = client
self.places = places
@classmethod
def from_params(
cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
) -> WeatherDataLoader:
client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key)
return cls(client, places)
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load weather data for the given locations."""
for place in self.places:
metadata = {"queried_at": datetime.now()}
content = self.client.run(place)
yield Document(page_content=content, metadata=metadata)
def load(
self,
) -> List[Document]:
"""Load weather data for the given locations."""
return list(self.lazy_load())
| [] |
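A short usage sketch; the city names are arbitrary and a valid OpenWeatherMap key (plus the pyowm package) is assumed.
from langchain.document_loaders import WeatherDataLoader

loader = WeatherDataLoader.from_params(
    places=["Pune", "London"],
    openweathermap_api_key="YOUR_OPENWEATHERMAP_API_KEY",  # placeholder
)
for doc in loader.lazy_load():
    print(doc.metadata["queried_at"], doc.page_content[:80])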
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~llm_rails.py | """Wrapper around LLMRails vector database."""
from __future__ import annotations
import json
import logging
import os
import uuid
from typing import Any, Iterable, List, Optional, Tuple
import requests
from langchain.pydantic_v1 import Field
from langchain.schema import Document
from langchain.schema.embeddings import Embeddings
from langchain.vectorstores.base import VectorStore, VectorStoreRetriever
class LLMRails(VectorStore):
"""Implementation of Vector Store using LLMRails.
See https://llmrails.com/
Example:
.. code-block:: python
from langchain.vectorstores import LLMRails
vectorstore = LLMRails(
api_key=llm_rails_api_key,
datastore_id=datastore_id
)
"""
def __init__(
self,
datastore_id: Optional[str] = None,
api_key: Optional[str] = None,
):
"""Initialize with LLMRails API."""
self._datastore_id = datastore_id or os.environ.get("LLM_RAILS_DATASTORE_ID")
self._api_key = api_key or os.environ.get("LLM_RAILS_API_KEY")
if self._api_key is None:
logging.warning("Can't find Rails credentials in environment.")
self._session = requests.Session() # to reuse connections
self.datastore_id = datastore_id
self.base_url = "https://api.llmrails.com/v1"
def _get_post_headers(self) -> dict:
"""Returns headers that should be attached to each post request."""
return {"X-API-KEY": self._api_key}
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
Returns:
List of ids from adding the texts into the vectorstore.
"""
names: List[str] = []
for text in texts:
doc_name = str(uuid.uuid4())
response = self._session.post(
f"{self.base_url}/datastores/{self._datastore_id}/text",
json={"name": doc_name, "text": text},
verify=True,
headers=self._get_post_headers(),
)
if response.status_code != 200:
logging.error(
f"Create request failed for doc_name = {doc_name} with status code "
f"{response.status_code}, reason {response.reason}, text "
f"{response.text}"
)
return names
names.append(doc_name)
return names
def add_files(
self,
files_list: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> bool:
"""
LLMRails provides a way to add documents directly via our API where
pre-processing and chunking occur internally in an optimal way.
This method provides a way to use that API in LangChain.
Args:
files_list: Iterable of strings, each representing a local file path.
Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc.
see API docs for full list
Returns:
List of ids associated with each of the files indexed
"""
files = []
for file in files_list:
if not os.path.exists(file):
logging.error(f"File {file} does not exist, skipping")
continue
files.append(("file", (os.path.basename(file), open(file, "rb"))))
response = self._session.post(
f"{self.base_url}/datastores/{self._datastore_id}/file",
files=files,
verify=True,
headers=self._get_post_headers(),
)
if response.status_code != 200:
logging.error(
f"Create request failed for datastore = {self._datastore_id} "
f"with status code {response.status_code}, reason {response.reason}, "
f"text {response.text}"
)
return False
return True
def similarity_search_with_score(
self, query: str, k: int = 5
) -> List[Tuple[Document, float]]:
"""Return LLMRails documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 5 (max 10).
alpha: parameter for hybrid search.
Returns:
List of Documents most similar to the query and score for each.
"""
response = self._session.post(
headers=self._get_post_headers(),
url=f"{self.base_url}/datastores/{self._datastore_id}/search",
data=json.dumps({"k": k, "text": query}),
timeout=10,
)
if response.status_code != 200:
logging.error(
"Query failed %s",
f"(code {response.status_code}, reason {response.reason}, details "
f"{response.text})",
)
return []
results = response.json()["results"]
docs = [
(
Document(
page_content=x["text"],
metadata={
key: value
for key, value in x["metadata"].items()
if key != "score"
},
),
x["metadata"]["score"],
)
for x in results
]
return docs
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return LLMRails documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> LLMRails:
"""Construct LLMRails wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import LLMRails
llm_rails = LLMRails.from_texts(
texts,
datastore_id=datastore_id,
api_key=llm_rails_api_key
)
"""
# Note: LLMRails generates its own embeddings, so we ignore the provided
# embeddings (required by interface)
llm_rails = cls(**kwargs)
llm_rails.add_texts(texts)
return llm_rails
def as_retriever(self, **kwargs: Any) -> LLMRailsRetriever:
return LLMRailsRetriever(vectorstore=self, **kwargs)
class LLMRailsRetriever(VectorStoreRetriever):
"""Retriever for LLMRails."""
vectorstore: LLMRails
search_kwargs: dict = Field(default_factory=lambda: {"k": 5})
"""Search params.
k: Number of Documents to return. Defaults to 5.
alpha: parameter for hybrid search.
"""
def add_texts(self, texts: List[str]) -> None:
"""Add text to the datastore.
Args:
texts (List[str]): The text
"""
self.vectorstore.add_texts(texts)
| [] |
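A minimal sketch of indexing and querying through the wrapper; the datastore id and API key are placeholders for real LLMRails credentials, and newly added texts may take a moment to become searchable.
from langchain.vectorstores import LLMRails

vectorstore = LLMRails(datastore_id="your-datastore-id", api_key="your-api-key")
vectorstore.add_texts(["LLMRails hosts the embeddings for you."])

for doc, score in vectorstore.similarity_search_with_score("who hosts the embeddings?", k=2):
    print(score, doc.page_content)

retriever = vectorstore.as_retriever()
docs = retriever.get_relevant_documents("who hosts the embeddings?")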
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_loaders~langsmith.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Dict, Iterable, Iterator, List, Optional, Union
from langchain.chat_loaders.base import BaseChatLoader
from langchain.load import load
from langchain.schema.chat import ChatSession
if TYPE_CHECKING:
from langsmith.client import Client
from langsmith.schemas import Run
logger = logging.getLogger(__name__)
class LangSmithRunChatLoader(BaseChatLoader):
"""
Load chat sessions from a list of LangSmith "llm" runs.
Attributes:
runs (Iterable[Union[str, Run]]): The list of LLM run IDs or run objects.
client (Client): Instance of LangSmith client for fetching data.
"""
def __init__(
self, runs: Iterable[Union[str, Run]], client: Optional["Client"] = None
):
"""
Initialize a new LangSmithRunChatLoader instance.
:param runs: List of LLM run IDs or run objects.
:param client: An instance of LangSmith client, if not provided,
a new client instance will be created.
"""
from langsmith.client import Client
self.runs = runs
self.client = client or Client()
def _load_single_chat_session(self, llm_run: "Run") -> ChatSession:
"""
Convert an individual LangSmith LLM run to a ChatSession.
:param llm_run: The LLM run object.
:return: A chat session representing the run's data.
"""
chat_session = LangSmithRunChatLoader._get_messages_from_llm_run(llm_run)
functions = LangSmithRunChatLoader._get_functions_from_llm_run(llm_run)
if functions:
chat_session["functions"] = functions
return chat_session
@staticmethod
def _get_messages_from_llm_run(llm_run: "Run") -> ChatSession:
"""
Extract messages from a LangSmith LLM run.
:param llm_run: The LLM run object.
:return: ChatSession with the extracted messages.
"""
if llm_run.run_type != "llm":
raise ValueError(f"Expected run of type llm. Got: {llm_run.run_type}")
if "messages" not in llm_run.inputs:
raise ValueError(f"Run has no 'messages' inputs. Got {llm_run.inputs}")
if not llm_run.outputs:
raise ValueError("Cannot convert pending run")
messages = load(llm_run.inputs)["messages"]
message_chunk = load(llm_run.outputs)["generations"][0]["message"]
return ChatSession(messages=messages + [message_chunk])
@staticmethod
def _get_functions_from_llm_run(llm_run: "Run") -> Optional[List[Dict]]:
"""
Extract functions from a LangSmith LLM run if they exist.
:param llm_run: The LLM run object.
:return: Functions from the run or None.
"""
if llm_run.run_type != "llm":
raise ValueError(f"Expected run of type llm. Got: {llm_run.run_type}")
return llm_run.extra.get("invocation_params", {}).get("functions")
def lazy_load(self) -> Iterator[ChatSession]:
"""
Lazy load the chat sessions from the iterable of run IDs.
This method fetches the runs and converts them to chat sessions on-the-fly,
yielding one session at a time.
:return: Iterator of chat sessions containing messages.
"""
for run_obj in self.runs:
try:
if hasattr(run_obj, "id"):
run = run_obj
else:
run = self.client.read_run(run_obj)
session = self._load_single_chat_session(run)
yield session
except ValueError as e:
logger.warning(f"Could not load run {run_obj}: {repr(e)}")
continue
class LangSmithDatasetChatLoader(BaseChatLoader):
"""
Load chat sessions from a LangSmith dataset with the "chat" data type.
Attributes:
dataset_name (str): The name of the LangSmith dataset.
client (Client): Instance of LangSmith client for fetching data.
"""
def __init__(self, *, dataset_name: str, client: Optional["Client"] = None):
"""
Initialize a new LangSmithChatDatasetLoader instance.
:param dataset_name: The name of the LangSmith dataset.
:param client: An instance of LangSmith client; if not provided,
a new client instance will be created.
"""
try:
from langsmith.client import Client
except ImportError as e:
raise ImportError(
"The LangSmith client is required to load LangSmith datasets.\n"
"Please install it with `pip install langsmith`"
) from e
self.dataset_name = dataset_name
self.client = client or Client()
def lazy_load(self) -> Iterator[ChatSession]:
"""
Lazy load the chat sessions from the specified LangSmith dataset.
This method fetches the chat data from the dataset and
converts each data point to chat sessions on-the-fly,
yielding one session at a time.
:return: Iterator of chat sessions containing messages.
"""
from langchain.adapters import openai as oai_adapter # noqa: E402
data = self.client.read_dataset_openai_finetuning(
dataset_name=self.dataset_name
)
for data_point in data:
yield ChatSession(
messages=[
oai_adapter.convert_dict_to_message(m)
for m in data_point.get("messages", [])
],
functions=data_point.get("functions"),
)
| [] |
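A sketch of loading chat sessions from a LangSmith dataset; the dataset name is a placeholder and a configured LANGCHAIN_API_KEY (or an explicit Client) is assumed.
from langchain.chat_loaders.langsmith import LangSmithDatasetChatLoader

loader = LangSmithDatasetChatLoader(dataset_name="my-chat-dataset")
for session in loader.lazy_load():
    print(len(session["messages"]), "messages in session")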
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~autonomous_agents~baby_agi~task_execution.py | from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
class TaskExecutionChain(LLMChain):
"""Chain to execute tasks."""
@classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
execution_template = (
"Ты - искусственный интеллект, который"
" выполняет одну задачу на основе следующей цели: "
"{objective}."
"Учти эти ранее выполненные задачи: {context}."
" Твоя задача: {task}. Ответ:"
)
prompt = PromptTemplate(
template=execution_template,
input_variables=["objective", "context", "task"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
| [
"context",
"Ты - искусственный интеллект, который выполняет одну задачу на основе следующей цели: {objective}.Учти эти ранее выполненные задачи: {context}. Твоя задача: {task}. Ответ:"
] |
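A runnable sketch of the execution chain above; FakeListLLM supplies a canned answer so no model credentials are needed, and the objective/context/task strings are placeholders.
from langchain.llms.fake import FakeListLLM
from langchain_experimental.autonomous_agents.baby_agi.task_execution import (
    TaskExecutionChain,
)

chain = TaskExecutionChain.from_llm(FakeListLLM(responses=["Список покупок составлен."]))
answer = chain.run(
    objective="Спланировать ужин",
    context="Ранее выполнено: выбрать рецепт",
    task="Составить список покупок",
)
print(answer)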
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain.schema import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
| [] |
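A small sketch of wrapping a ConversationBufferMemory so that, for example, a sub-chain can read shared history without being able to modify it.
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory

shared_memory = ConversationBufferMemory(memory_key="chat_history")
shared_memory.save_context({"input": "hi"}, {"output": "hello"})

readonly = ReadOnlySharedMemory(memory=shared_memory)
print(readonly.load_memory_variables({}))                 # sees the shared history
readonly.save_context({"input": "x"}, {"output": "y"})    # intentionally a no-op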
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~indexes~prompts~entity_summarization.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
_DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE = """Ты - AI-ассистент, помогающий человеку отслеживать факты о релевантных людях, местах и концепциях в их жизни. Обнови резюме предоставленной сущности в разделе "Сущность" на основе последней строки твоего разговора с человеком. Если ты пишешь резюме в первый раз, верни одно предложение.
Обновление должно включать только факты, которые передаются в последней строке разговора о предоставленной сущности, и должно содержать только факты о предоставленной сущности.
Если нет новой информации о предоставленной сущности или информация не стоит отмечать (не важный или релевантный факт для долгосрочного запоминания), верни существующее резюме без изменений.
Полная история разговора (для контекста):
{history}
Сущность для резюме:
{entity}
Существующее резюме {entity}:
{summary}
Последняя строка разговора:
Человек: {input}
Обновленное резюме:"""
ENTITY_SUMMARIZATION_PROMPT = PromptTemplate(
input_variables=["entity", "summary", "history", "input"],
template=_DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE,
)
| [
"entity",
"input",
"Ты - AI-ассистент, помогающий человеку отслеживать факты о релевантных людях, местах и концепциях в их жизни. Обнови резюме предоставленной сущности в разделе \"Сущность\" на основе последней строки твоего разговора с человеком. Если ты пишешь резюме в первый раз, верни одно предложение.\nОбновление должно включать только факты, которые передаются в последней строке разговора о предоставленной сущности, и должно содержать только факты о предоставленной сущности.\n\nЕсли нет новой информации о предоставленной сущности или информация не стоит отмечать (не важный или релевантный факт для долгосрочного запоминания), верни существующее резюме без изменений.\n\nПолная история разговора (для контекста):\n{history}\n\nСущность для резюме:\n{entity}\n\nСуществующее резюме {entity}:\n{summary}\n\nПоследняя строка разговора:\nЧеловек: {input}\nОбновленное резюме:"
] |
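A quick sketch of rendering the prompt above; the conversation snippets are invented placeholders.
from langchain.indexes.prompts.entity_summarization import ENTITY_SUMMARIZATION_PROMPT

rendered = ENTITY_SUMMARIZATION_PROMPT.format(
    history="Человек: Я работаю с Анной над новым проектом.",
    entity="Анна",
    summary="Анна - коллега.",
    input="Я работаю с Анной над новым проектом.",
)
print(rendered)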
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~output_parsers~test_regex_dict.py | """Test in memory docstore."""
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just received a new result from the LLM, and our next step is
to filter and read its format using regular expressions to identify specific fields,
such as:
- Action: Search
Action Input: How to use this class?
- Additional Fields: "N/A"
To assist us in this task, we use the regex_dict class. This class allows us to send a
dictionary containing an output key and the expected format, which in turn enables us to
retrieve the result of the matching formats and extract specific information from it.
To exclude irrelevant information from our return dictionary, we can instruct the LLM to
use a specific command that notifies us when it doesn't know the answer. We call this
variable the "no_update_value", and for our current case, we set it to "N/A". Therefore,
we expect the result to only contain the following fields:
{
{key = action, value = search}
{key = action_input, value = "How to use this class?"}.
}"""
def test_regex_dict_result() -> None:
"""Test regex dict result."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
result_dict = regex_dict_parser.parse(DEF_README)
print("parse_result:", result_dict)
assert DEF_EXPECTED_RESULT == result_dict
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~tools~nuclia~test_nuclia.py | import base64
import json
import os
from pathlib import Path
from typing import Any
from unittest import mock
import pytest
from langchain.tools.nuclia.tool import NucliaUnderstandingAPI
README_PATH = Path(__file__).parents[4] / "README.md"
class FakeUploadResponse:
status_code = 200
text = "fake_uuid"
class FakePushResponse:
status_code = 200
def json(self) -> Any:
return {"uuid": "fake_uuid"}
class FakePullResponse:
status_code = 200
def json(self) -> Any:
return {
"status": "ok",
"payload": base64.b64encode(bytes('{"some": "data"}}', "utf-8")),
}
def FakeParseFromString(**args: Any) -> Any:
def ParseFromString(self: Any, data: str) -> None:
self.uuid = "fake_uuid"
return ParseFromString
def fakepost(**kwargs: Any) -> Any:
def fn(url: str, **kwargs: Any) -> Any:
if url.endswith("/processing/upload"):
return FakeUploadResponse()
elif url.endswith("/processing/push"):
return FakePushResponse()
else:
raise Exception("Invalid POST URL")
return fn
def fakeget(**kwargs: Any) -> Any:
def fn(url: str, **kwargs: Any) -> Any:
if url.endswith("/processing/pull"):
return FakePullResponse()
else:
raise Exception("Invalid GET URL")
return fn
@mock.patch.dict(os.environ, {"NUCLIA_NUA_KEY": "_a_key_"})
@pytest.mark.requires("nucliadb_protos")
def test_nuclia_tool() -> None:
with mock.patch(
"nucliadb_protos.writer_pb2.BrokerMessage.ParseFromString",
new_callable=FakeParseFromString,
):
with mock.patch("requests.post", new_callable=fakepost):
with mock.patch("requests.get", new_callable=fakeget):
nua = NucliaUnderstandingAPI(enable_ml=False)
uuid = nua.run(
{
"action": "push",
"id": "1",
"path": str(README_PATH),
"text": None,
}
)
assert uuid == "fake_uuid"
data = nua.run(
{"action": "pull", "id": "1", "path": None, "text": None}
)
assert json.loads(data)["uuid"] == "fake_uuid"
@pytest.mark.asyncio
@pytest.mark.requires("nucliadb_protos")
async def test_async_call() -> None:
with mock.patch(
"nucliadb_protos.writer_pb2.BrokerMessage.ParseFromString",
new_callable=FakeParseFromString,
):
with mock.patch("requests.post", new_callable=fakepost):
with mock.patch("requests.get", new_callable=fakeget):
with mock.patch("os.environ.get", return_value="_a_key_"):
nua = NucliaUnderstandingAPI(enable_ml=False)
data = await nua.arun(
{
"action": "push",
"id": "1",
"path": str(README_PATH),
"text": None,
}
)
assert json.loads(data)["uuid"] == "fake_uuid"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~openai_functions~qa_with_structure.py | from typing import Any, List, Optional, Type, Union
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
from langchain.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema import BaseLLMOutputParser
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import HumanMessage, SystemMessage
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: List[str] = Field(
..., description="List of sources used to answer the question"
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, Type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
Chain (LLMChain) that can be used to answer the question with an answer matching the given schema.
"""
if output_parser == "pydantic":
if not (isinstance(schema, type) and issubclass(schema, BaseModel)):
raise ValueError(
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
raise ValueError(
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
if isinstance(schema, type) and issubclass(schema, BaseModel):
schema_dict = schema.schema()
else:
schema_dict = schema
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages)
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
return chain
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
| [
"{context}",
"You are a world class algorithm to answer questions in a specific format.",
"Question: {question}",
"Tips: Make sure to answer in the correct format",
"Answer question using the following context"
] |
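A sketch of the sources-returning QA chain; it assumes an OpenAI-functions-capable chat model with a valid OPENAI_API_KEY, and the context passage is a placeholder.
from langchain.chains.openai_functions import create_qa_with_sources_chain
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
chain = create_qa_with_sources_chain(llm)
answer_json = chain.run(
    context="Doc[source=wiki/ada]: Ada Lovelace wrote the first algorithm.",
    question="Who wrote the first algorithm?",
)
print(answer_json)  # JSON string shaped like the AnswerWithSources schema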
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~constitutional_ai~principles.py | """Constitutional principles from https://arxiv.org/pdf/2212.08073.pdf (Bai et al. 2022)
UnifiedObjectives v0.2 principles ("uo-*") adapted from https://examine.dev/docs/Unified_objectives.pdf (Samwald et al. 2023)
"""
# flake8: noqa
from typing import Dict
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
PRINCIPLES: Dict[str, ConstitutionalPrinciple] = {
"harmful1": ConstitutionalPrinciple(
name="harmful1",
critique_request="Identify specific ways in which the assistant’s last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.",
),
"harmful2": ConstitutionalPrinciple(
name="harmful2",
critique_request="Explain ways in which the assistant’s preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.",
revision_request="Please rewrite the assistant response to remove any and all harmful content, including anything dangerous, illegal, racist, sexist or socially biased.",
),
"harmful3": ConstitutionalPrinciple(
name="harmful3",
critique_request="Identify all ways in which the assistant’s last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.",
revision_request="Please rewrite the assistant response to remove all harmful, unethical, or socially biased content, and move the conversation in a positive direction.",
),
"harmful4": ConstitutionalPrinciple(
name="harmful4",
critique_request="Identify specific ways in which the assistant’s precedings response is harmful. Also point out potential harmfulness in the human’s questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
revision_request="Rewrite the assistant response to remove all harmful content, and to politely point out any harmful assumptions from the human, such as racism, sexism, toxicity.",
),
"insensitive": ConstitutionalPrinciple(
name="insensitive",
critique_request="Discuss the assistant’s last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.",
revision_request="Revise the assistant’s last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.",
),
"offensive": ConstitutionalPrinciple(
name="offensive",
critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the human’s dialogue.",
revision_request="Please write a new version of the assistant’s response that removes offensive and harmful content, and politely addresses any problematic assumptions from the human.",
),
"harmful5": ConstitutionalPrinciple(
name="harmful5",
critique_request="What’s the strongest argument you could give that the assistant’s last response was in some way harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
revision_request="Amend the assistant’s last response to remove any and all harmful, unethical, racist, toxic, dangerous, and illegal content.",
),
"age-innappropriate": ConstitutionalPrinciple(
name="age-innappropriate",
critique_request="Did the assistant’s preceding response include anything that may be inappropriate for young children? If so, discuss how it can be revised to be more appropriate.",
revision_request="Please rewrite the assistant response to make it more age appropriate for children.",
),
"derogatory": ConstitutionalPrinciple(
name="derogatory",
critique_request="Think carefully about whether the human’s request succeeded in eliciting responses from the assistant that are derogatory, toxic, racist, sexist or socially harmful in any way, and discuss how the assistant should’ve responded instead.",
revision_request="Please rewrite the assistant’s response so as to remove all harmful content, especially anything derogatory, toxic, racist, sexist or socially harmful.",
),
"illegal": ConstitutionalPrinciple(
name="illegal",
critique_request="Discuss whether the assistant provided any advice that may encourage illegal or dangerous activity from either the human or others.",
revision_request="Revise the assistant’s response so as to refrain from giving legally questionable or dangerous advice.",
),
"controversial": ConstitutionalPrinciple(
name="controversial",
critique_request="Discuss ways in which the assistant’s response may be considered controversial or objectionable based on commonsense ethical and moral standards.",
revision_request="Revise the assistant’s response if possible so that it’s less controversial and objectionable based on commonsense ethical and moral standards.",
),
"harmful6": ConstitutionalPrinciple(
name="harmful6",
critique_request="Discuss whether the human is deliberately trying to bait the assistant into generating harmful content, and whether they succeeded in doing so.",
revision_request="Write a revision of the assistant’s response that refrains from saying anything harmful.",
),
"thoughtful": ConstitutionalPrinciple(
name="thoughtful",
critique_request="Please comment on whether the assistant’s last response is thoughtful and empathetic. Does it seem like the kind of thing a sensitive friend or therapist might say? Give the strongest argument you can for how it could improve.",
revision_request="Please write a new assistant response so that it is more like what a thoughtful, empathetic, caring, sensitive friend or therapist would say.",
),
"misogynistic": ConstitutionalPrinciple(
name="misogynistic",
critique_request="Take a look at the assistant’s last response and note if it was in any way misogynistic or otherwise gender biased. Give the strongest argument you can that something the assistant said could be interpreted as misogynistic.",
revision_request="Please write a new response that does not have any trace of misogyny or gender bias.",
),
"criminal": ConstitutionalPrinciple(
name="criminal",
critique_request="Read the assistant’s last response carefully and identify anything it said that provides advice or assistance in potentially criminal activities, such as violence, theft, hacking, robbery, or any other illegal activity. Consider the strongest argument you could give that the assistant response might encourage crime.",
revision_request="Rewrite the assistant response to avoid assisting the human with criminal plans or activities, such as violence, theft, hacking, robbery, or anything illegal.",
),
"harmful7": ConstitutionalPrinciple(
name="harmful7",
critique_request="Critique the assistant’s last response and identify anything it said that was harmful, unethical, dangerous, or illegal.",
revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, dangerous, or illegal content.",
),
"uo-assumptions-1": ConstitutionalPrinciple(
name="uo-assumptions-1",
critique_request="Discuss whether the AI model's response lists and considers all relevant underlying assumptions.",
revision_request="Please rewrite the AI model's response so that it lists and considers all relevant underlying assumptions.",
),
"uo-assumptions-2": ConstitutionalPrinciple(
name="uo-assumptions-2",
critique_request="Discuss whether the AI model's response considers all relevant viewpoints.",
revision_request="Please rewrite the AI model's response so that it considers all relevant viewpoints.",
),
"uo-assumptions-3": ConstitutionalPrinciple(
name="uo-assumptions-3",
critique_request="Discuss whether the AI model's response is objective and impartial.",
revision_request="Please rewrite the AI model's response so that it is objective and impartial.",
),
"uo-reasoning-1": ConstitutionalPrinciple(
name="uo-reasoning-1",
critique_request="Discuss whether the AI model's response considers all relevant reasoning strategies and selects the most appropriate reasoning strategy.",
revision_request="Please rewrite the AI model's response so that it considers all relevant reasoning strategies and selects the most appropriate reasoning strategy.",
),
"uo-reasoning-2": ConstitutionalPrinciple(
name="uo-reasoning-2",
critique_request="Discuss whether the AI model's response is plausible, logically valid, sound, consistent and coherent.",
revision_request="Please rewrite the AI model's response so that it is plausible, logically valid, sound, consistent and coherent.",
),
"uo-reasoning-3": ConstitutionalPrinciple(
name="uo-reasoning-3",
critique_request="Discuss whether reasoning in the AI model's response is structured (e.g. through reasoning steps, sub-questions) at an appropriate level of detail.",
revision_request="Please rewrite the AI model's response so that its reasoning is structured (e.g. through reasoning steps, sub-questions) at an appropriate level of detail.",
),
"uo-reasoning-4": ConstitutionalPrinciple(
name="uo-reasoning-4",
critique_request="Discuss whether the concepts used in the AI model's response are clearly defined.",
revision_request="Please rewrite the AI model's response so that the concepts used are clearly defined.",
),
"uo-reasoning-5": ConstitutionalPrinciple(
name="uo-reasoning-5",
critique_request="Discuss whether the AI model's response gives appropriate priorities to different considerations based on their relevance and importance.",
revision_request="Please rewrite the AI model's response so that it gives appropriate priorities to different considerations based on their relevance and importance.",
),
"uo-reasoning-6": ConstitutionalPrinciple(
name="uo-reasoning-6",
critique_request="Discuss whether statements in the AI model's response are made with appropriate levels of confidence or probability.",
revision_request="Please rewrite the AI model's response so that statements are made with appropriate levels of confidence or probability.",
),
"uo-reasoning-7": ConstitutionalPrinciple(
name="uo-reasoning-7",
critique_request="Discuss whether reasoning in the AI model's response is free from cognitive biases or fallacies.",
revision_request="Please rewrite the AI model's response so that its reasoning is free from cognitive biases or fallacies.",
),
"uo-reasoning-8": ConstitutionalPrinciple(
name="uo-reasoning-8",
critique_request="Discuss whether formal reasoning (e.g. using math, computer code) in the AI model's response is correct.",
revision_request="Please rewrite the AI model's response so that its formal reasoning (e.g. using math, computer code) is correct.",
),
"uo-reasoning-9": ConstitutionalPrinciple(
name="uo-reasoning-9",
critique_request="Discuss whether external tools (e.g. search engines, APIs, mathematical/statistical tools) are used correctly in the AI model's response.",
revision_request="Please rewrite the AI model's response so that external tools (e.g. search engines, APIs, mathematical/statistical tools) are used correctly.",
),
"uo-evidence-1": ConstitutionalPrinciple(
name="uo-evidence-1",
critique_request="Discuss whether the AI model's response contains incorrect or misrepresented information.",
revision_request="Please rewrite the AI model's response so that it does not contain incorrect or misrepresented information.",
),
"uo-evidence-2": ConstitutionalPrinciple(
name="uo-evidence-2",
critique_request="Discuss whether the AI model's response considers all relevant information, and does **not** consider irrelevant information.",
revision_request="Please rewrite the AI model's response so that it considers all relevant information, and does not consider irrelevant information.",
),
"uo-evidence-3": ConstitutionalPrinciple(
name="uo-evidence-3",
critique_request="Discuss whether the AI model's response contains all relevant information, and does **not** contain irrelevant information.",
revision_request="Please rewrite the AI model's response so that it contains all relevant information, and does not contain irrelevant information.",
),
"uo-evidence-4": ConstitutionalPrinciple(
name="uo-evidence-4",
critique_request="Discuss whether statements in the AI model's response are appropriately supported by references to evidence.",
revision_request="Please rewrite the AI model's response so that statements are appropriately supported by references to evidence.",
),
"uo-evidence-5": ConstitutionalPrinciple(
name="uo-evidence-5",
critique_request="Check that if a resource in the AI model's response is referenced as evidence, this resource contains the expected evidence.",
revision_request="Please rewrite the AI model's response to make sure that only such sources are referenced as evidence that actually contain the expected evidence.",
),
"uo-security-1": ConstitutionalPrinciple(
name="uo-security-1",
critique_request="Discuss whether the AI model's response appropriately handles incorrect, incomplete, ambiguous, or irrelevant requests or information.",
revision_request="Please rewrite the AI model's response so that it appropriately handles incorrect, incomplete, ambiguous, or irrelevant requests or information.",
),
"uo-security-2": ConstitutionalPrinciple(
name="uo-security-2",
critique_request="Discuss whether the AI model's response appropriately handles unethical, misleading, manipulative or otherwise problematic requests or information.",
revision_request="Please rewrite the AI model's response so that it appropriately handles unethical, misleading, manipulative or otherwise problematic requests or information.",
),
"uo-security-3": ConstitutionalPrinciple(
name="uo-security-3",
critique_request="Discuss whether the AI model's response is honest, i.e. it is not deceptive or manipulative.",
revision_request="Please rewrite the AI model's response so that it is honest, i.e. it is not deceptive or manipulative.",
),
"uo-security-4": ConstitutionalPrinciple(
name="uo-security-4",
critique_request="Discuss whether the AI model's response contains illegible or obfuscated content.",
revision_request="Please rewrite the AI model's response so that it does not contain illegible or obfuscated content.",
),
"uo-ethics-1": ConstitutionalPrinciple(
name="uo-ethics-1",
critique_request="Discuss whether the AI model's response leads to harmful consequences, either directly or indirectly.",
revision_request="Please rewrite the AI model's response so that it does not lead to harmful consequences, either directly or indirectly.",
),
"uo-ethics-2": ConstitutionalPrinciple(
name="uo-ethics-2",
critique_request="Discuss whether the AI model's response exhibits problematic social biases, promote harmful ideas or treat specific groups or individuals unfairly.",
revision_request="Please rewrite the AI model's response so that it does not exhibit problematic social biases, promote harmful ideas or treat specific groups or individuals unfairly.",
),
"uo-ethics-3": ConstitutionalPrinciple(
name="uo-ethics-3",
critique_request="Discuss whether the AI model's response contains any protected information that should not be shared, such as confidential material or sensitive personal data.",
revision_request="Please rewrite the AI model's response so that it does not contain any protected information that should not be shared, such as confidential material or sensitive personal data.",
),
"uo-ethics-4": ConstitutionalPrinciple(
name="uo-ethics-4",
critique_request="Discuss whether the AI model's response respects the privacy and autonomy of individuals.",
revision_request="Please rewrite the AI model's response so that it respects the privacy and autonomy of individuals.",
),
"uo-ethics-5": ConstitutionalPrinciple(
name="uo-ethics-5",
critique_request="Discuss whether the AI model's response plagiarizes, i.e. it does not contain third-party content without indicating the source.",
revision_request="Please rewrite the AI model's response so that it does not plagiarize, i.e. it does not contain third-party content without indicating the source.",
),
"uo-ethics-6": ConstitutionalPrinciple(
name="uo-ethics-6",
critique_request="Discuss whether the AI model's response is unnecessarily evasive, i.e. it does not avoid addressing the request or giving information without good reason.",
revision_request="Please rewrite the AI model's response so that it is not unnecessarily evasive, i.e. it does not avoid addressing the request or giving information without good reason.",
),
"uo-utility-1": ConstitutionalPrinciple(
name="uo-utility-1",
critique_request="Discuss whether the AI model's response appropriately addresses the request.",
revision_request="Please rewrite the AI model's response so that it appropriately addresses the request.",
),
"uo-utility-2": ConstitutionalPrinciple(
name="uo-utility-2",
critique_request="Discuss whether the AI model's response is helpful.",
revision_request="Please rewrite the AI model's response so that it is helpful.",
),
"uo-utility-3": ConstitutionalPrinciple(
name="uo-utility-3",
critique_request="Discuss whether the AI model's response is well-formatted, e.g. free from syntactic or grammatical errors.",
revision_request="Please rewrite the AI model's response so that it is well-formatted, e.g. free from syntactic or grammatical errors.",
),
"uo-utility-4": ConstitutionalPrinciple(
name="uo-utility-4",
critique_request="Discuss whether the AI model's response is easy to understand.",
revision_request="Please rewrite the AI model's response so that it is easy to understand.",
),
"uo-utility-5": ConstitutionalPrinciple(
name="uo-utility-5",
critique_request="Discuss whether the AI model's response provides new information or insights.",
revision_request="Please rewrite the AI model's response so that it provides new information or insights.",
),
"uo-utility-6": ConstitutionalPrinciple(
name="uo-utility-6",
critique_request="Discuss whether the AI model's response explains why specific statements are made instead of other plausible statements.",
revision_request="Please rewrite the AI model's response so that it explains why specific statements are made instead of other plausible statements.",
),
"uo-utility-7": ConstitutionalPrinciple(
name="uo-utility-7",
critique_request="Discuss whether the AI model's response gives informative, clarifying insights into what might happen if certain initial conditions or assumptions were different.",
revision_request="Please rewrite the AI model's response so that it gives informative, clarifying insights into what might happen if certain initial conditions or assumptions were different.",
),
"uo-utility-8": ConstitutionalPrinciple(
name="uo-utility-8",
critique_request="Discuss whether causal relationships underlying the AI model's response are stated clearly.",
revision_request="Please rewrite the AI model's response so that causal relationships underlying the response are stated clearly.",
),
"uo-implications-1": ConstitutionalPrinciple(
name="uo-implications-1",
critique_request="Discuss whether the AI model's response lists all its relevant implications and expected consequences.",
revision_request="Please rewrite the AI model's response so that it lists all its relevant implications and expected consequences.",
),
"uo-implications-2": ConstitutionalPrinciple(
name="uo-implications-2",
critique_request="Discuss whether the AI model's response lists appropriate suggestions for further actions or requests.",
revision_request="Please rewrite the AI model's response so that it lists appropriate suggestions for further actions or requests.",
),
"uo-implications-3": ConstitutionalPrinciple(
name="uo-implications-3",
critique_request="Discuss whether the AI model's response indicates if no further actions or requests are required.",
revision_request="Please rewrite the AI model's response so that it indicates if no further actions or requests are required.",
),
}
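# --- Illustrative usage sketch; assumes this mapping is exported as `PRINCIPLES`
# (the variable name is not visible in this excerpt) and that `llm` and `qa_chain`
# already exist. ---
#
# from langchain.chains import ConstitutionalChain
# constitutional_chain = ConstitutionalChain.from_llm(
#     llm=llm,
#     chain=qa_chain,
#     constitutional_principles=[PRINCIPLES["uo-ethics-1"]],
# )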
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~aim_callback.py | from copy import deepcopy
from typing import Any, Dict, List, Optional
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
def import_aim() -> Any:
"""Import the aim python package and raise an error if it is not installed."""
try:
import aim
except ImportError:
raise ImportError(
"To use the Aim callback manager you need to have the"
" `aim` python package installed."
"Please install it with `pip install aim`"
)
return aim
class BaseMetadataCallbackHandler:
"""This class handles the metadata and associated function states for callbacks.
Attributes:
step (int): The current step.
starts (int): The number of times the start method has been called.
ends (int): The number of times the end method has been called.
errors (int): The number of times the error method has been called.
text_ctr (int): The number of times the text method has been called.
ignore_llm_ (bool): Whether to ignore llm callbacks.
ignore_chain_ (bool): Whether to ignore chain callbacks.
ignore_agent_ (bool): Whether to ignore agent callbacks.
ignore_retriever_ (bool): Whether to ignore retriever callbacks.
always_verbose_ (bool): Whether to always be verbose.
chain_starts (int): The number of times the chain start method has been called.
chain_ends (int): The number of times the chain end method has been called.
llm_starts (int): The number of times the llm start method has been called.
llm_ends (int): The number of times the llm end method has been called.
llm_streams (int): The number of times the llm new-token (stream) method has been called.
tool_starts (int): The number of times the tool start method has been called.
tool_ends (int): The number of times the tool end method has been called.
agent_ends (int): The number of times the agent end method has been called.
"""
def __init__(self) -> None:
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.ignore_retriever_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return self.always_verbose_
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
@property
def ignore_retriever(self) -> bool:
"""Whether to ignore retriever callbacks."""
return self.ignore_retriever_
def get_custom_callback_meta(self) -> Dict[str, Any]:
return {
"step": self.step,
"starts": self.starts,
"ends": self.ends,
"errors": self.errors,
"text_ctr": self.text_ctr,
"chain_starts": self.chain_starts,
"chain_ends": self.chain_ends,
"llm_starts": self.llm_starts,
"llm_ends": self.llm_ends,
"llm_streams": self.llm_streams,
"tool_starts": self.tool_starts,
"tool_ends": self.tool_ends,
"agent_ends": self.agent_ends,
}
def reset_callback_meta(self) -> None:
"""Reset the callback metadata."""
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
return None
class AimCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to Aim.
Parameters:
repo (:obj:`str`, optional): Aim repository path or Repo object to which
Run object is bound. If skipped, default Repo is used.
experiment_name (:obj:`str`, optional): Sets Run's `experiment` property.
'default' if not specified. Can be used later to query runs/sequences.
system_tracking_interval (:obj:`int`, optional): Sets the tracking interval
in seconds for system usage metrics (CPU, Memory, etc.). Set to `None`
to disable system metrics tracking.
log_system_params (:obj:`bool`, optional): Enable/Disable logging of system
params such as installed packages, git info, environment variables, etc.
This handler uses the associated callback methods, formats the input of each
callback function with metadata about the state of the LLM run,
and then logs the response to Aim.
"""
def __init__(
self,
repo: Optional[str] = None,
experiment_name: Optional[str] = None,
system_tracking_interval: Optional[int] = 10,
log_system_params: bool = True,
) -> None:
"""Initialize callback handler."""
super().__init__()
aim = import_aim()
self.repo = repo
self.experiment_name = experiment_name
self.system_tracking_interval = system_tracking_interval
self.log_system_params = log_system_params
self._run = aim.Run(
repo=self.repo,
experiment=self.experiment_name,
system_tracking_interval=self.system_tracking_interval,
log_system_params=self.log_system_params,
)
self._run_hash = self._run.hash
self.action_records: list = []
def setup(self, **kwargs: Any) -> None:
aim = import_aim()
if not self._run:
if self._run_hash:
self._run = aim.Run(
self._run_hash,
repo=self.repo,
system_tracking_interval=self.system_tracking_interval,
)
else:
self._run = aim.Run(
repo=self.repo,
experiment=self.experiment_name,
system_tracking_interval=self.system_tracking_interval,
log_system_params=self.log_system_params,
)
self._run_hash = self._run.hash
if kwargs:
for key, value in kwargs.items():
self._run.set(key, value, strict=False)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
aim = import_aim()
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = {"action": "on_llm_start"}
resp.update(self.get_custom_callback_meta())
prompts_res = deepcopy(prompts)
self._run.track(
[aim.Text(prompt) for prompt in prompts_res],
name="on_llm_start",
context=resp,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
aim = import_aim()
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = {"action": "on_llm_end"}
resp.update(self.get_custom_callback_meta())
response_res = deepcopy(response)
generated = [
aim.Text(generation.text)
for generations in response_res.generations
for generation in generations
]
self._run.track(
generated,
name="on_llm_end",
context=resp,
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
aim = import_aim()
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = {"action": "on_chain_start"}
resp.update(self.get_custom_callback_meta())
inputs_res = deepcopy(inputs)
self._run.track(
aim.Text(inputs_res["input"]), name="on_chain_start", context=resp
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
aim = import_aim()
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = {"action": "on_chain_end"}
resp.update(self.get_custom_callback_meta())
outputs_res = deepcopy(outputs)
self._run.track(
aim.Text(outputs_res["output"]), name="on_chain_end", context=resp
)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
aim = import_aim()
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = {"action": "on_tool_start"}
resp.update(self.get_custom_callback_meta())
self._run.track(aim.Text(input_str), name="on_tool_start", context=resp)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
aim = import_aim()
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = {"action": "on_tool_end"}
resp.update(self.get_custom_callback_meta())
self._run.track(aim.Text(output), name="on_tool_end", context=resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
aim = import_aim()
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = {"action": "on_agent_finish"}
resp.update(self.get_custom_callback_meta())
finish_res = deepcopy(finish)
text = "OUTPUT:\n{}\n\nLOG:\n{}".format(
finish_res.return_values["output"], finish_res.log
)
self._run.track(aim.Text(text), name="on_agent_finish", context=resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
aim = import_aim()
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = {
"action": "on_agent_action",
"tool": action.tool,
}
resp.update(self.get_custom_callback_meta())
action_res = deepcopy(action)
text = "TOOL INPUT:\n{}\n\nLOG:\n{}".format(
action_res.tool_input, action_res.log
)
self._run.track(aim.Text(text), name="on_agent_action", context=resp)
def flush_tracker(
self,
repo: Optional[str] = None,
experiment_name: Optional[str] = None,
system_tracking_interval: Optional[int] = 10,
log_system_params: bool = True,
langchain_asset: Any = None,
reset: bool = True,
finish: bool = False,
) -> None:
"""Flush the tracker and reset the session.
Args:
repo (:obj:`str`, optional): Aim repository path or Repo object to which
Run object is bound. If skipped, default Repo is used.
experiment_name (:obj:`str`, optional): Sets Run's `experiment` property.
'default' if not specified. Can be used later to query runs/sequences.
system_tracking_interval (:obj:`int`, optional): Sets the tracking interval
in seconds for system usage metrics (CPU, Memory, etc.). Set to `None`
to disable system metrics tracking.
log_system_params (:obj:`bool`, optional): Enable/Disable logging of system
params such as installed packages, git info, environment variables, etc.
langchain_asset: The langchain asset to save.
reset: Whether to reset the session.
finish: Whether to finish the run.
Returns:
None
"""
if langchain_asset:
try:
for key, value in langchain_asset.dict().items():
self._run.set(key, value, strict=False)
except Exception:
pass
if finish or reset:
self._run.close()
self.reset_callback_meta()
if reset:
self.__init__( # type: ignore
repo=repo if repo else self.repo,
experiment_name=experiment_name
if experiment_name
else self.experiment_name,
system_tracking_interval=system_tracking_interval
if system_tracking_interval
else self.system_tracking_interval,
log_system_params=log_system_params
if log_system_params
else self.log_system_params,
)
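# --- Illustrative usage sketch; the model class, prompt and repo path are
# placeholders, not part of the original module. ---
#
# from langchain.llms import OpenAI
# aim_callback = AimCallbackHandler(repo=".", experiment_name="scenario-1")
# llm = OpenAI(temperature=0, callbacks=[aim_callback])
# llm("Tell me a joke")
# aim_callback.flush_tracker(langchain_asset=llm, finish=True)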
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~comprehend_moderation~intent.py | import asyncio
import warnings
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationIntentionError,
)
class ComprehendIntent:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Intent",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _get_arn(self) -> str:
region_name = self.client.meta.region_name
service = "comprehend"
intent_endpoint = "document-classifier-endpoint/prompt-intent"
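# The account segment of the ARN is the literal "aws", i.e. this targets the
# AWS-managed prompt-intent document classifier endpoint in the client's region.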
return f"arn:aws:{service}:{region_name}:aws:{intent_endpoint}"
def validate(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
"""
Check and validate the intent of the given prompt text.
Args:
prompt_value (str): The input text to be checked for unintended intent
config (Dict[str, Any]): Configuration settings for intent checks
Raises:
ModerationIntentionError: If unintended intent is found in the prompt
text with a score above the specified threshold.
Returns:
str: The input prompt_value.
Note:
This function checks the intent of the provided prompt text using
Comprehend's classify_document API and raises an error if unintended
intent is detected with a score above the specified threshold.
"""
from langchain_experimental.comprehend_moderation.base_moderation_enums import (
BaseModerationActions,
)
threshold = config.get("threshold", 0.5) if config else 0.5
action = (
config.get("action", BaseModerationActions.STOP)
if config
else BaseModerationActions.STOP
)
intent_found = False
if action == BaseModerationActions.ALLOW:
warnings.warn(
"You have allowed content with Harmful content."
"Defaulting to STOP action..."
)
action = BaseModerationActions.STOP
endpoint_arn = self._get_arn()
response = self.client.classify_document(
Text=prompt_value, EndpointArn=endpoint_arn
)
if self.callback and self.callback.intent_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = response
for class_result in response["Classes"]:
if (
class_result["Score"] >= threshold
and class_result["Name"] == "UNDESIRED_PROMPT"
):
intent_found = True
break
if self.callback and self.callback.intent_callback:
if intent_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_intent(self.moderation_beacon, self.unique_id)
)
if intent_found:
raise ModerationIntentionError
return prompt_value
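# --- Illustrative usage sketch; normally this class is invoked by the moderation
# chain rather than directly, and the region and threshold below are placeholders. ---
#
# import boto3
# comprehend_client = boto3.client("comprehend", region_name="us-east-1")
# intent_check = ComprehendIntent(client=comprehend_client)
# intent_check.validate("Some prompt text", config={"threshold": 0.8})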
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~blackboard.py | import contextlib
import re
from pathlib import Path
from typing import Any, List, Optional, Tuple
from urllib.parse import unquote
from langchain.docstore.document import Document
from langchain.document_loaders.directory import DirectoryLoader
from langchain.document_loaders.pdf import PyPDFLoader
from langchain.document_loaders.web_base import WebBaseLoader
class BlackboardLoader(WebBaseLoader):
"""Load a `Blackboard` course.
This loader is not compatible with all Blackboard courses. It is only
compatible with courses that use the new Blackboard interface.
To use this loader, you must have the BbRouter cookie. You can get this
cookie by logging into the course and then copying the value of the
BbRouter cookie from the browser's developer tools.
Example:
.. code-block:: python
from langchain.document_loaders import BlackboardLoader
loader = BlackboardLoader(
blackboard_course_url="https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1",
bbrouter="expires:12345...",
)
documents = loader.load()
""" # noqa: E501
def __init__(
self,
blackboard_course_url: str,
bbrouter: str,
load_all_recursively: bool = True,
basic_auth: Optional[Tuple[str, str]] = None,
cookies: Optional[dict] = None,
continue_on_failure: bool = False,
):
"""Initialize with blackboard course url.
The BbRouter cookie is required for most blackboard courses.
Args:
blackboard_course_url: Blackboard course url.
bbrouter: BbRouter cookie.
load_all_recursively: If True, load all documents recursively.
basic_auth: Basic auth credentials.
cookies: Cookies.
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
Raises:
ValueError: If blackboard course url is invalid.
"""
super().__init__(
web_paths=(blackboard_course_url,), continue_on_failure=continue_on_failure
)
# Get base url
if "/webapps/blackboard" not in blackboard_course_url:
raise ValueError(
"Invalid blackboard course url. "
"Please provide a url that starts with "
"https://<blackboard_url>/webapps/blackboard"
)
self.base_url = blackboard_course_url.split("/webapps/blackboard")[0]
if basic_auth is not None:
self.session.auth = basic_auth
# Combine cookies
if cookies is None:
cookies = {}
cookies.update({"BbRouter": bbrouter})
self.session.cookies.update(cookies)
self.load_all_recursively = load_all_recursively
self.check_bs4()
def check_bs4(self) -> None:
"""Check if BeautifulSoup4 is installed.
Raises:
ImportError: If BeautifulSoup4 is not installed.
"""
try:
import bs4 # noqa: F401
except ImportError:
raise ImportError(
"BeautifulSoup4 is required for BlackboardLoader. "
"Please install it with `pip install beautifulsoup4`."
)
def load(self) -> List[Document]:
"""Load data into Document objects.
Returns:
List of Documents.
"""
if self.load_all_recursively:
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
relative_paths = self._get_paths(soup_info)
documents = []
for path in relative_paths:
url = self.base_url + path
print(f"Fetching documents from {url}")
soup_info = self._scrape(url)
with contextlib.suppress(ValueError):
documents.extend(self._get_documents(soup_info))
return documents
else:
print(f"Fetching documents from {self.web_path}")
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
return self._get_documents(soup_info)
def _get_folder_path(self, soup: Any) -> str:
"""Get the folder path to save the Documents in.
Args:
soup: BeautifulSoup4 soup object.
Returns:
Folder path.
"""
# Get the course name
course_name = soup.find("span", {"id": "crumb_1"})
if course_name is None:
raise ValueError("No course name found.")
course_name = course_name.text.strip()
# Prepare the folder path
course_name_clean = (
unquote(course_name)
.replace(" ", "_")
.replace("/", "_")
.replace(":", "_")
.replace(",", "_")
.replace("?", "_")
.replace("'", "_")
.replace("!", "_")
.replace('"', "_")
)
# Get the folder path
folder_path = Path(".") / course_name_clean
return str(folder_path)
def _get_documents(self, soup: Any) -> List[Document]:
"""Fetch content from page and return Documents.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of documents.
"""
attachments = self._get_attachments(soup)
self._download_attachments(attachments)
documents = self._load_documents()
return documents
def _get_attachments(self, soup: Any) -> List[str]:
"""Get all attachments from a page.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of attachments.
"""
from bs4 import BeautifulSoup, Tag
# Get content list
content_list = soup.find("ul", {"class": "contentList"})
if content_list is None:
raise ValueError("No content list found.")
content_list: BeautifulSoup # type: ignore
# Get all attachments
attachments = []
for attachment in content_list.find_all("ul", {"class": "attachments"}):
attachment: Tag # type: ignore
for link in attachment.find_all("a"):
link: Tag # type: ignore
href = link.get("href")
# Only add if href is not None and does not start with #
if href is not None and not href.startswith("#"):
attachments.append(href)
return attachments
def _download_attachments(self, attachments: List[str]) -> None:
"""Download all attachments.
Args:
attachments: List of attachments.
"""
# Make sure the folder exists
Path(self.folder_path).mkdir(parents=True, exist_ok=True)
# Download all attachments
for attachment in attachments:
self.download(attachment)
def _load_documents(self) -> List[Document]:
"""Load all documents in the folder.
Returns:
List of documents.
"""
# Create the document loader
loader = DirectoryLoader(
path=self.folder_path, glob="*.pdf", loader_cls=PyPDFLoader # type: ignore
)
# Load the documents
documents = loader.load()
# Return all documents
return documents
def _get_paths(self, soup: Any) -> List[str]:
"""Get all relative paths in the navbar."""
relative_paths = []
course_menu = soup.find("ul", {"class": "courseMenu"})
if course_menu is None:
raise ValueError("No course menu found.")
for link in course_menu.find_all("a"):
href = link.get("href")
if href is not None and href.startswith("/"):
relative_paths.append(href)
return relative_paths
def download(self, path: str) -> None:
"""Download a file from an url.
Args:
path: Path to the file.
"""
# Get the file content
response = self.session.get(self.base_url + path, allow_redirects=True)
# Get the filename
filename = self.parse_filename(response.url)
# Write the file to disk
with open(Path(self.folder_path) / filename, "wb") as f:
f.write(response.content)
def parse_filename(self, url: str) -> str:
"""Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
"""
if (url_path := Path(url)) and url_path.suffix == ".pdf":
return url_path.name
else:
return self._parse_filename_from_url(url)
def _parse_filename_from_url(self, url: str) -> str:
"""Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
Raises:
ValueError: If the filename could not be parsed.
"""
filename_matches = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url)
if filename_matches:
filename = filename_matches.group(1)
else:
raise ValueError(f"Could not parse filename from {url}")
if ".pdf" not in filename:
raise ValueError(f"Incorrect file type: {filename}")
filename = filename.split(".pdf")[0] + ".pdf"
filename = unquote(filename)
filename = filename.replace("%20", " ")
return filename
if __name__ == "__main__":
loader = BlackboardLoader(
"https://<YOUR BLACKBOARD URL"
" HERE>/webapps/blackboard/content/listContent.jsp?course_id=_<YOUR COURSE ID"
" HERE>_1&content_id=_<YOUR CONTENT ID HERE>_1&mode=reset",
"<YOUR BBROUTER COOKIE HERE>",
load_all_recursively=True,
)
documents = loader.load()
print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}")
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~storage~upstash_redis.py | from typing import Any, Iterator, List, Optional, Sequence, Tuple, cast
from langchain.schema import BaseStore
class UpstashRedisStore(BaseStore[str, str]):
"""BaseStore implementation using Upstash Redis as the underlying store."""
def __init__(
self,
*,
client: Any = None,
url: Optional[str] = None,
token: Optional[str] = None,
ttl: Optional[int] = None,
namespace: Optional[str] = None,
) -> None:
"""Initialize the UpstashRedisStore with HTTP API.
Must provide either an Upstash Redis client or a url.
Args:
client: An Upstash Redis instance
url: UPSTASH_REDIS_REST_URL
token: UPSTASH_REDIS_REST_TOKEN
ttl: time to expire keys in seconds if provided,
if None keys will never expire
namespace: if provided, all keys will be prefixed with this namespace
"""
try:
from upstash_redis import Redis
except ImportError as e:
raise ImportError(
"UpstashRedisStore requires the upstash_redis library to be installed. "
"pip install upstash_redis"
) from e
if client and url:
raise ValueError(
"Either an Upstash Redis client or a url must be provided, not both."
)
if client:
if not isinstance(client, Redis):
raise TypeError(
f"Expected Upstash Redis client, got {type(client).__name__}."
)
_client = client
else:
if not url or not token:
raise ValueError(
"Either an Upstash Redis client or url and token must be provided."
)
_client = Redis(url=url, token=token)
self.client = _client
if not isinstance(ttl, int) and ttl is not None:
raise TypeError(f"Expected int or None, got {type(ttl)} instead.")
self.ttl = ttl
self.namespace = namespace
def _get_prefixed_key(self, key: str) -> str:
"""Get the key with the namespace prefix.
Args:
key (str): The original key.
Returns:
str: The key with the namespace prefix.
"""
delimiter = "/"
if self.namespace:
return f"{self.namespace}{delimiter}{key}"
return key
def mget(self, keys: Sequence[str]) -> List[Optional[str]]:
"""Get the values associated with the given keys."""
keys = [self._get_prefixed_key(key) for key in keys]
return cast(
List[Optional[str]],
self.client.mget(*keys),
)
def mset(self, key_value_pairs: Sequence[Tuple[str, str]]) -> None:
"""Set the given key-value pairs."""
for key, value in key_value_pairs:
self.client.set(self._get_prefixed_key(key), value, ex=self.ttl)
def mdelete(self, keys: Sequence[str]) -> None:
"""Delete the given keys."""
_keys = [self._get_prefixed_key(key) for key in keys]
self.client.delete(*_keys)
def yield_keys(self, *, prefix: Optional[str] = None) -> Iterator[str]:
"""Yield keys in the store."""
if prefix:
pattern = self._get_prefixed_key(prefix)
else:
pattern = self._get_prefixed_key("*")
cursor, keys = self.client.scan(0, match=pattern)
for key in keys:
if self.namespace:
relative_key = key[len(self.namespace) + 1 :]
yield relative_key
else:
yield key
while cursor != 0:
cursor, keys = self.client.scan(cursor, match=pattern)
for key in keys:
if self.namespace:
relative_key = key[len(self.namespace) + 1 :]
yield relative_key
else:
yield key
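# --- Illustrative usage sketch; the URL, token and key names are placeholders. ---
#
# store = UpstashRedisStore(
#     url="https://<your-db>.upstash.io",
#     token="<your-token>",
#     ttl=300,
#     namespace="docs",
# )
# store.mset([("k1", "v1"), ("k2", "v2")])
# store.mget(["k1", "k2"])  # -> ["v1", "v2"]
# # `prefix` is passed directly to Redis SCAN MATCH as a glob, so include the "*":
# list(store.yield_keys(prefix="k*"))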
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~textgen.py | import json
import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Field
from langchain.schema.output import GenerationChunk
logger = logging.getLogger(__name__)
class TextGen(LLM):
"""text-generation-webui models.
To use, you should have the text-generation-webui installed, a model loaded,
and --api added as a command-line option.
Suggested installation, use one-click installer for your OS:
https://github.com/oobabooga/text-generation-webui#one-click-installers
Parameters below taken from text-generation-webui api example:
https://github.com/oobabooga/text-generation-webui/blob/main/api-examples/api-example.py
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(model_url="http://localhost:8500")
"""
model_url: str
"""The full URL to the textgen webui including http[s]://host:port """
preset: Optional[str] = None
"""The preset to use in the textgen webui """
max_new_tokens: Optional[int] = 250
"""The maximum number of tokens to generate."""
do_sample: bool = Field(True, alias="do_sample")
"""Do sample"""
temperature: Optional[float] = 1.3
"""Primary factor to control randomness of outputs. 0 = deterministic
(only the most likely token is used). Higher value = more randomness."""
top_p: Optional[float] = 0.1
"""If not set to 1, select tokens with probabilities adding up to less than this
number. Higher value = higher range of possible random results."""
typical_p: Optional[float] = 1
"""If not set to 1, select only tokens that are at least this much more likely to
appear than random tokens, given the prior text."""
epsilon_cutoff: Optional[float] = 0 # In units of 1e-4
"""Epsilon cutoff"""
eta_cutoff: Optional[float] = 0 # In units of 1e-4
"""ETA cutoff"""
repetition_penalty: Optional[float] = 1.18
"""Exponential penalty factor for repeating prior tokens. 1 means no penalty,
higher value = less repetition, lower value = more repetition."""
top_k: Optional[float] = 40
"""Similar to top_p, but select instead only the top_k most likely tokens.
Higher value = higher range of possible random results."""
min_length: Optional[int] = 0
"""Minimum generation length in tokens."""
no_repeat_ngram_size: Optional[int] = 0
"""If not set to 0, specifies the length of token sets that are completely blocked
from repeating at all. Higher values = blocks larger phrases,
lower values = blocks words or letters from repeating.
Only 0 or high values are a good idea in most cases."""
num_beams: Optional[int] = 1
"""Number of beams"""
penalty_alpha: Optional[float] = 0
"""Penalty Alpha"""
length_penalty: Optional[float] = 1
"""Length Penalty"""
early_stopping: bool = Field(False, alias="early_stopping")
"""Early stopping"""
seed: int = Field(-1, alias="seed")
"""Seed (-1 for random)"""
add_bos_token: bool = Field(True, alias="add_bos_token")
"""Add the bos_token to the beginning of prompts.
Disabling this can make the replies more creative."""
truncation_length: Optional[int] = 2048
"""Truncate the prompt up to this length. The leftmost tokens are removed if
the prompt exceeds this length. Most models require this to be at most 2048."""
ban_eos_token: bool = Field(False, alias="ban_eos_token")
"""Ban the eos_token. Forces the model to never end the generation prematurely."""
skip_special_tokens: bool = Field(True, alias="skip_special_tokens")
"""Skip special tokens. Some specific models need this unset."""
stopping_strings: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
streaming: bool = False
"""Whether to stream the results, token by token."""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling textgen."""
return {
"max_new_tokens": self.max_new_tokens,
"do_sample": self.do_sample,
"temperature": self.temperature,
"top_p": self.top_p,
"typical_p": self.typical_p,
"epsilon_cutoff": self.epsilon_cutoff,
"eta_cutoff": self.eta_cutoff,
"repetition_penalty": self.repetition_penalty,
"top_k": self.top_k,
"min_length": self.min_length,
"no_repeat_ngram_size": self.no_repeat_ngram_size,
"num_beams": self.num_beams,
"penalty_alpha": self.penalty_alpha,
"length_penalty": self.length_penalty,
"early_stopping": self.early_stopping,
"seed": self.seed,
"add_bos_token": self.add_bos_token,
"truncation_length": self.truncation_length,
"ban_eos_token": self.ban_eos_token,
"skip_special_tokens": self.skip_special_tokens,
"stopping_strings": self.stopping_strings,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_url": self.model_url}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "textgen"
def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by textgen.
Args:
stop (Optional[List[str]]): List of stop sequences for textgen.
Returns:
Dictionary containing the combined parameters.
"""
# Raise an error if stop sequences are supplied both in the call and in the default params
if self.stopping_strings and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
if self.preset is None:
params = self._default_params
else:
params = {"preset": self.preset}
# Use the configured stopping strings, else the `stop` argument, else an empty list:
params["stopping_strings"] = self.stopping_strings or stop or []
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
result = combined_text_output
else:
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
else:
print(f"ERROR: response: {response}")
result = ""
return result
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
result = combined_text_output
else:
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
else:
print(f"ERROR: response: {response}")
result = ""
return result
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
Dictionary-like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
print(chunk, end='', flush=True)
"""
try:
import websocket
except ImportError:
raise ImportError(
"The `websocket-client` package is required for streaming."
)
params = {**self._get_parameters(stop), **kwargs}
url = f"{self.model_url}/api/v1/stream"
request = params.copy()
request["prompt"] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
while True:
result = websocket_client.recv()
result = json.loads(result)
if result["event"] == "text_stream":
chunk = GenerationChunk(
text=result["text"],
generation_info=None,
)
yield chunk
elif result["event"] == "stream_end":
websocket_client.close()
return
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
Dictionary-like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
print(chunk, end='', flush=True)
"""
try:
import websocket
except ImportError:
raise ImportError(
"The `websocket-client` package is required for streaming."
)
params = {**self._get_parameters(stop), **kwargs}
url = f"{self.model_url}/api/v1/stream"
request = params.copy()
request["prompt"] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
while True:
result = websocket_client.recv()
result = json.loads(result)
if result["event"] == "text_stream":
chunk = GenerationChunk(
text=result["text"],
generation_info=None,
)
yield chunk
elif result["event"] == "stream_end":
websocket_client.close()
return
if run_manager:
await run_manager.on_llm_new_token(token=chunk.text)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_transformers~long_context_reorder.py | """Reorder documents"""
from typing import Any, List, Sequence
from langchain.pydantic_v1 import BaseModel
from langchain.schema import BaseDocumentTransformer, Document
def _litm_reordering(documents: List[Document]) -> List[Document]:
"""Los in the middle reorder: the most relevant will be at the
middle of the list and more relevant elements at beginning / end.
See: https://arxiv.org/abs//2307.03172"""
documents.reverse()
reordered_result = []
for i, value in enumerate(documents):
if i % 2 == 1:
reordered_result.append(value)
else:
reordered_result.insert(0, value)
return reordered_result
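# Worked example (added for clarity): for documents sorted by descending relevance
# [d1, d2, d3, d4, d5], the loop above produces [d1, d3, d5, d4, d2]: the least
# relevant item lands in the middle while the most relevant ones sit at the ends.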
class LongContextReorder(BaseDocumentTransformer, BaseModel):
"""Lost in the middle:
Performance degrades when models must access relevant information
in the middle of long contexts.
See: https://arxiv.org/abs//2307.03172"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Reorders documents."""
return _litm_reordering(list(documents))
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
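# --- Illustrative usage sketch; the documents stand in for a relevance-ordered
# retriever result (most relevant first). ---
#
# from langchain.schema import Document
#
# docs = [Document(page_content=f"doc {i}") for i in range(1, 6)]
# reordered = LongContextReorder().transform_documents(docs)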
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~mlflow_callback.py | import os
import random
import string
import tempfile
import traceback
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult
from langchain.utils import get_from_dict_or_env
def import_mlflow() -> Any:
"""Import the mlflow python package and raise an error if it is not installed."""
try:
import mlflow
except ImportError:
raise ImportError(
"To use the mlflow callback manager you need to have the `mlflow` python "
"package installed. Please install it with `pip install mlflow>=2.3.0`"
)
return mlflow
def analyze_text(
text: str,
nlp: Any = None,
) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
nlp (spacy.lang): The spacy language model to use for visualization.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
"""
resp: Dict[str, Any] = {}
textstat = import_textstat()
spacy = import_spacy()
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
# "text_standard": textstat.text_standard(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
resp.update({"text_complexity_metrics": text_complexity_metrics})
resp.update(text_complexity_metrics)
if nlp is not None:
doc = nlp(text)
dep_out = spacy.displacy.render( # type: ignore
doc, style="dep", jupyter=False, page=True
)
ent_out = spacy.displacy.render( # type: ignore
doc, style="ent", jupyter=False, page=True
)
text_visualizations = {
"dependency_tree": dep_out,
"entities": ent_out,
}
resp.update(text_visualizations)
return resp
def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any:
"""Construct an html element from a prompt and a generation.
Parameters:
prompt (str): The prompt.
generation (str): The generation.
Returns:
(str): The html string."""
formatted_prompt = prompt.replace("\n", "<br>")
formatted_generation = generation.replace("\n", "<br>")
return f"""
<p style="color:black;">{formatted_prompt}:</p>
<blockquote>
<p style="color:green;">
{formatted_generation}
</p>
</blockquote>
"""
class MlflowLogger:
"""Callback Handler that logs metrics and artifacts to mlflow server.
Parameters:
name (str): Name of the run.
experiment (str): Name of the experiment.
tags (dict): Tags to be attached for the run.
tracking_uri (str): MLflow tracking server uri.
This handler implements the helper functions to initialize,
log metrics and artifacts to the mlflow server.
"""
def __init__(self, **kwargs: Any):
self.mlflow = import_mlflow()
if "DATABRICKS_RUNTIME_VERSION" in os.environ:
self.mlflow.set_tracking_uri("databricks")
self.mlf_expid = self.mlflow.tracking.fluent._get_experiment_id()
self.mlf_exp = self.mlflow.get_experiment(self.mlf_expid)
else:
tracking_uri = get_from_dict_or_env(
kwargs, "tracking_uri", "MLFLOW_TRACKING_URI", ""
)
self.mlflow.set_tracking_uri(tracking_uri)
# User can set other env variables described here
# > https://www.mlflow.org/docs/latest/tracking.html#logging-to-a-tracking-server
experiment_name = get_from_dict_or_env(
kwargs, "experiment_name", "MLFLOW_EXPERIMENT_NAME"
)
self.mlf_exp = self.mlflow.get_experiment_by_name(experiment_name)
if self.mlf_exp is not None:
self.mlf_expid = self.mlf_exp.experiment_id
else:
self.mlf_expid = self.mlflow.create_experiment(experiment_name)
self.start_run(kwargs["run_name"], kwargs["run_tags"])
def start_run(self, name: str, tags: Dict[str, str]) -> None:
"""To start a new run, auto generates the random suffix for name"""
if name.endswith("-%"):
rname = "".join(random.choices(string.ascii_uppercase + string.digits, k=7))
name = name.replace("%", rname)
self.run = self.mlflow.MlflowClient().create_run(
self.mlf_expid, run_name=name, tags=tags
)
def finish_run(self) -> None:
"""To finish the run."""
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_expid
):
self.mlflow.end_run()
def metric(self, key: str, value: float) -> None:
"""To log metric to mlflow server."""
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_expid
):
self.mlflow.log_metric(key, value)
def metrics(
self, data: Union[Dict[str, float], Dict[str, int]], step: Optional[int] = 0
) -> None:
"""To log all metrics in the input dict."""
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_expid
):
self.mlflow.log_metrics(data)
def jsonf(self, data: Dict[str, Any], filename: str) -> None:
"""To log the input data as json file artifact."""
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_expid
):
self.mlflow.log_dict(data, f"{filename}.json")
def table(self, name: str, dataframe) -> None: # type: ignore
"""To log the input pandas dataframe as a html table"""
self.html(dataframe.to_html(), f"table_{name}")
def html(self, html: str, filename: str) -> None:
"""To log the input html string as html file artifact."""
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_expid
):
self.mlflow.log_text(html, f"{filename}.html")
def text(self, text: str, filename: str) -> None:
"""To log the input text as text file artifact."""
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_expid
):
self.mlflow.log_text(text, f"{filename}.txt")
def artifact(self, path: str) -> None:
"""To upload the file from given path as artifact."""
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_expid
):
self.mlflow.log_artifact(path)
def langchain_artifact(self, chain: Any) -> None:
with self.mlflow.start_run(
run_id=self.run.info.run_id, experiment_id=self.mlf_expid
):
self.mlflow.langchain.log_model(chain, "langchain-model")
class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs metrics and artifacts to mlflow server.
Parameters:
name (str): Name of the run.
experiment (str): Name of the experiment.
tags (dict): Tags to be attached for the run.
tracking_uri (str): MLflow tracking server uri.
This handler uses the associated callback methods, formats the input of each
callback function with metadata about the state of the LLM run,
and adds the response to the list of records for both the {method}_records and
action_records. It then logs the response to the mlflow server.
"""
def __init__(
self,
name: Optional[str] = "langchainrun-%",
experiment: Optional[str] = "langchain",
tags: Optional[Dict] = None,
tracking_uri: Optional[str] = None,
) -> None:
"""Initialize callback handler."""
import_pandas()
import_textstat()
import_mlflow()
spacy = import_spacy()
super().__init__()
self.name = name
self.experiment = experiment
self.tags = tags or {}
self.tracking_uri = tracking_uri
self.temp_dir = tempfile.TemporaryDirectory()
self.mlflg = MlflowLogger(
tracking_uri=self.tracking_uri,
experiment_name=self.experiment,
run_name=self.name,
run_tags=self.tags,
)
self.action_records: list = []
self.nlp = spacy.load("en_core_web_sm")
self.metrics = {
"step": 0,
"starts": 0,
"ends": 0,
"errors": 0,
"text_ctr": 0,
"chain_starts": 0,
"chain_ends": 0,
"llm_starts": 0,
"llm_ends": 0,
"llm_streams": 0,
"tool_starts": 0,
"tool_ends": 0,
"agent_ends": 0,
}
self.records: Dict[str, Any] = {
"on_llm_start_records": [],
"on_llm_token_records": [],
"on_llm_end_records": [],
"on_chain_start_records": [],
"on_chain_end_records": [],
"on_tool_start_records": [],
"on_tool_end_records": [],
"on_text_records": [],
"on_agent_finish_records": [],
"on_agent_action_records": [],
"action_records": [],
}
def _reset(self) -> None:
for k, v in self.metrics.items():
self.metrics[k] = 0
for k, v in self.records.items():
self.records[k] = []
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.metrics["step"] += 1
self.metrics["llm_starts"] += 1
self.metrics["starts"] += 1
llm_starts = self.metrics["llm_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
for idx, prompt in enumerate(prompts):
prompt_resp = deepcopy(resp)
prompt_resp["prompt"] = prompt
self.records["on_llm_start_records"].append(prompt_resp)
self.records["action_records"].append(prompt_resp)
self.mlflg.jsonf(prompt_resp, f"llm_start_{llm_starts}_prompt_{idx}")
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.metrics["step"] += 1
self.metrics["llm_streams"] += 1
llm_streams = self.metrics["llm_streams"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_llm_token_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"llm_new_tokens_{llm_streams}")
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.metrics["step"] += 1
self.metrics["llm_ends"] += 1
self.metrics["ends"] += 1
llm_ends = self.metrics["llm_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
for generations in response.generations:
for idx, generation in enumerate(generations):
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
generation_resp.update(
analyze_text(
generation.text,
nlp=self.nlp,
)
)
complexity_metrics: Dict[str, float] = generation_resp.pop("text_complexity_metrics") # type: ignore # noqa: E501
self.mlflg.metrics(
complexity_metrics,
step=self.metrics["step"],
)
self.records["on_llm_end_records"].append(generation_resp)
self.records["action_records"].append(generation_resp)
self.mlflg.jsonf(resp, f"llm_end_{llm_ends}_generation_{idx}")
dependency_tree = generation_resp["dependency_tree"]
entities = generation_resp["entities"]
self.mlflg.html(dependency_tree, "dep-" + hash_string(generation.text))
self.mlflg.html(entities, "ent-" + hash_string(generation.text))
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.metrics["step"] += 1
self.metrics["chain_starts"] += 1
self.metrics["starts"] += 1
chain_starts = self.metrics["chain_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp["inputs"] = chain_input
self.records["on_chain_start_records"].append(input_resp)
self.records["action_records"].append(input_resp)
self.mlflg.jsonf(input_resp, f"chain_start_{chain_starts}")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.metrics["step"] += 1
self.metrics["chain_ends"] += 1
self.metrics["ends"] += 1
chain_ends = self.metrics["chain_ends"]
resp: Dict[str, Any] = {}
chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()])
resp.update({"action": "on_chain_end", "outputs": chain_output})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_chain_end_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"chain_end_{chain_ends}")
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_tool_start_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"tool_start_{tool_starts}")
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.metrics["step"] += 1
self.metrics["tool_ends"] += 1
self.metrics["ends"] += 1
tool_ends = self.metrics["tool_ends"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_tool_end_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"tool_end_{tool_ends}")
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.metrics["step"] += 1
self.metrics["errors"] += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.metrics["step"] += 1
self.metrics["text_ctr"] += 1
text_ctr = self.metrics["text_ctr"]
resp: Dict[str, Any] = {}
resp.update({"action": "on_text", "text": text})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_text_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"on_text_{text_ctr}")
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.metrics["step"] += 1
self.metrics["agent_ends"] += 1
self.metrics["ends"] += 1
agent_ends = self.metrics["agent_ends"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_agent_finish_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"agent_finish_{agent_ends}")
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.metrics["step"] += 1
self.metrics["tool_starts"] += 1
self.metrics["starts"] += 1
tool_starts = self.metrics["tool_starts"]
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics["step"])
self.records["on_agent_action_records"].append(resp)
self.records["action_records"].append(resp)
self.mlflg.jsonf(resp, f"agent_action_{tool_starts}")
def _create_session_analysis_df(self) -> Any:
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_start_records_df = pd.DataFrame(self.records["on_llm_start_records"])
on_llm_end_records_df = pd.DataFrame(self.records["on_llm_end_records"])
llm_input_columns = ["step", "prompt"]
if "name" in on_llm_start_records_df.columns:
llm_input_columns.append("name")
elif "id" in on_llm_start_records_df.columns:
# id is llm class's full import path. For example:
# ["langchain", "llms", "openai", "AzureOpenAI"]
on_llm_start_records_df["name"] = on_llm_start_records_df["id"].apply(
lambda id_: id_[-1]
)
llm_input_columns.append("name")
llm_input_prompts_df = (
on_llm_start_records_df[llm_input_columns]
.dropna(axis=1)
.rename({"step": "prompt_step"}, axis=1)
)
complexity_metrics_columns = []
visualizations_columns = []
complexity_metrics_columns = [
"flesch_reading_ease",
"flesch_kincaid_grade",
"smog_index",
"coleman_liau_index",
"automated_readability_index",
"dale_chall_readability_score",
"difficult_words",
"linsear_write_formula",
"gunning_fog",
# "text_standard",
"fernandez_huerta",
"szigriszt_pazos",
"gutierrez_polini",
"crawford",
"gulpease_index",
"osman",
]
visualizations_columns = ["dependency_tree", "entities"]
llm_outputs_df = (
on_llm_end_records_df[
[
"step",
"text",
"token_usage_total_tokens",
"token_usage_prompt_tokens",
"token_usage_completion_tokens",
]
+ complexity_metrics_columns
+ visualizations_columns
]
.dropna(axis=1)
.rename({"step": "output_step", "text": "output"}, axis=1)
)
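        # Column-wise concat below pairs each prompt record with its matching LLM output record.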
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
session_analysis_df["chat_html"] = session_analysis_df[
["prompt", "output"]
].apply(
lambda row: construct_html_from_prompt_and_generation(
row["prompt"], row["output"]
),
axis=1,
)
return session_analysis_df
def flush_tracker(self, langchain_asset: Any = None, finish: bool = False) -> None:
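        """Flush the tracker: log the collected action records, the session analysis
        table and the chat HTML to MLflow, optionally save the ``langchain_asset``,
        and finish the run and reset state when ``finish`` is True."""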
pd = import_pandas()
self.mlflg.table("action_records", pd.DataFrame(self.records["action_records"]))
session_analysis_df = self._create_session_analysis_df()
chat_html = session_analysis_df.pop("chat_html")
chat_html = chat_html.replace("\n", "", regex=True)
self.mlflg.table("session_analysis", pd.DataFrame(session_analysis_df))
self.mlflg.html("".join(chat_html.tolist()), "chat_html")
if langchain_asset:
# To avoid circular import error
# mlflow only supports LLMChain asset
if "langchain.chains.llm.LLMChain" in str(type(langchain_asset)):
self.mlflg.langchain_artifact(langchain_asset)
else:
langchain_asset_path = str(Path(self.temp_dir.name, "model.json"))
try:
langchain_asset.save(langchain_asset_path)
self.mlflg.artifact(langchain_asset_path)
except ValueError:
try:
langchain_asset.save_agent(langchain_asset_path)
self.mlflg.artifact(langchain_asset_path)
except AttributeError:
print("Could not save model.")
traceback.print_exc()
pass
except NotImplementedError:
print("Could not save model.")
traceback.print_exc()
pass
except NotImplementedError:
print("Could not save model.")
traceback.print_exc()
pass
if finish:
self.mlflg.finish_run()
self._reset()
| [
"\n",
"prompt_step"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~clearml_callback.py | from __future__ import annotations
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
hash_string,
import_pandas,
import_spacy,
import_textstat,
load_json,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult
if TYPE_CHECKING:
import pandas as pd
def import_clearml() -> Any:
"""Import the clearml python package and raise an error if it is not installed."""
try:
import clearml # noqa: F401
except ImportError:
raise ImportError(
"To use the clearml callback manager you need to have the `clearml` python "
"package installed. Please install it with `pip install clearml`"
)
return clearml
class ClearMLCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to ClearML.
Parameters:
        task_type (str): The type of clearml task such as "inference", "testing" or "qc"
project_name (str): The clearml project name
tags (list): Tags to add to the task
task_name (str): Name of the clearml task
visualize (bool): Whether to visualize the run.
complexity_metrics (bool): Whether to log complexity metrics
stream_logs (bool): Whether to stream callback actions to ClearML
    This handler uses the associated callback method, formats the input of each
    callback function with metadata regarding the state of the LLM run, and adds
    the response to the list of records for both the {method}_records and
    action_records. It then logs the response to the ClearML console.
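
    Example:
        .. code-block:: python

            # A minimal sketch, assuming ClearML credentials are configured and the
            # optional dependencies (clearml, pandas, textstat, spacy with the
            # en_core_web_sm model) are installed.
            from langchain.callbacks import ClearMLCallbackHandler
            from langchain.llms import OpenAI

            clearml_callback = ClearMLCallbackHandler(project_name="langchain_callback_demo")
            llm = OpenAI(callbacks=[clearml_callback])
            llm("Tell me a joke")
            clearml_callback.flush_tracker(langchain_asset=llm, finish=True)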
"""
def __init__(
self,
task_type: Optional[str] = "inference",
project_name: Optional[str] = "langchain_callback_demo",
tags: Optional[Sequence] = None,
task_name: Optional[str] = None,
visualize: bool = False,
complexity_metrics: bool = False,
stream_logs: bool = False,
) -> None:
"""Initialize callback handler."""
clearml = import_clearml()
spacy = import_spacy()
super().__init__()
self.task_type = task_type
self.project_name = project_name
self.tags = tags
self.task_name = task_name
self.visualize = visualize
self.complexity_metrics = complexity_metrics
self.stream_logs = stream_logs
self.temp_dir = tempfile.TemporaryDirectory()
# Check if ClearML task already exists (e.g. in pipeline)
if clearml.Task.current_task():
self.task = clearml.Task.current_task()
else:
self.task = clearml.Task.init( # type: ignore
task_type=self.task_type,
project_name=self.project_name,
tags=self.tags,
task_name=self.task_name,
output_uri=True,
)
self.logger = self.task.get_logger()
warning = (
"The clearml callback is currently in beta and is subject to change "
"based on updates to `langchain`. Please report any issues to "
"https://github.com/allegroai/clearml/issues with the tag `langchain`."
)
self.logger.report_text(warning, level=30, print_console=True)
self.callback_columns: list = []
self.action_records: list = []
self.complexity_metrics = complexity_metrics
self.visualize = visualize
self.nlp = spacy.load("en_core_web_sm")
def _init_resp(self) -> Dict:
return {k: None for k in self.callback_columns}
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(resp)
prompt_resp["prompts"] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self.logger.report_text(prompt_resp)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.get_custom_callback_meta())
self.on_llm_token_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.get_custom_callback_meta())
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
generation_resp.update(self.analyze_text(generation.text))
self.on_llm_end_records.append(generation_resp)
self.action_records.append(generation_resp)
if self.stream_logs:
self.logger.report_text(generation_resp)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = inputs.get("input", inputs.get("human_input"))
if isinstance(chain_input, str):
input_resp = deepcopy(resp)
input_resp["input"] = chain_input
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.logger.report_text(input_resp)
elif isinstance(chain_input, list):
for inp in chain_input:
input_resp = deepcopy(resp)
input_resp.update(inp)
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.logger.report_text(input_resp)
else:
raise ValueError("Unexpected data format provided!")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update(
{
"action": "on_chain_end",
"outputs": outputs.get("output", outputs.get("text")),
}
)
resp.update(self.get_custom_callback_meta())
self.on_chain_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.on_tool_start_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.get_custom_callback_meta())
self.on_tool_end_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({"action": "on_text", "text": text})
resp.update(self.get_custom_callback_meta())
self.on_text_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_finish_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.get_custom_callback_meta())
self.on_agent_action_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
def analyze_text(self, text: str) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
Returns:
(dict): A dictionary containing the complexity metrics.
"""
resp = {}
textstat = import_textstat()
spacy = import_spacy()
if self.complexity_metrics:
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(
text
),
"dale_chall_readability_score": textstat.dale_chall_readability_score(
text
),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"text_standard": textstat.text_standard(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
resp.update(text_complexity_metrics)
if self.visualize and self.nlp and self.temp_dir.name is not None:
doc = self.nlp(text)
dep_out = spacy.displacy.render( # type: ignore
doc, style="dep", jupyter=False, page=True
)
dep_output_path = Path(
self.temp_dir.name, hash_string(f"dep-{text}") + ".html"
)
dep_output_path.open("w", encoding="utf-8").write(dep_out)
ent_out = spacy.displacy.render( # type: ignore
doc, style="ent", jupyter=False, page=True
)
ent_output_path = Path(
self.temp_dir.name, hash_string(f"ent-{text}") + ".html"
)
ent_output_path.open("w", encoding="utf-8").write(ent_out)
self.logger.report_media(
"Dependencies Plot", text, local_path=dep_output_path
)
self.logger.report_media("Entities Plot", text, local_path=ent_output_path)
return resp
@staticmethod
def _build_llm_df(
base_df: pd.DataFrame, base_df_fields: Sequence, rename_map: Mapping
) -> pd.DataFrame:
base_df_fields = [field for field in base_df_fields if field in base_df]
rename_map = {
map_entry_k: map_entry_v
for map_entry_k, map_entry_v in rename_map.items()
if map_entry_k in base_df_fields
}
llm_df = base_df[base_df_fields].dropna(axis=1)
if rename_map:
llm_df = llm_df.rename(rename_map, axis=1)
return llm_df
def _create_session_analysis_df(self) -> Any:
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_input_prompts_df = ClearMLCallbackHandler._build_llm_df(
base_df=on_llm_end_records_df,
base_df_fields=["step", "prompts"]
+ (["name"] if "name" in on_llm_end_records_df else ["id"]),
rename_map={"step": "prompt_step"},
)
complexity_metrics_columns = []
visualizations_columns: List = []
if self.complexity_metrics:
complexity_metrics_columns = [
"flesch_reading_ease",
"flesch_kincaid_grade",
"smog_index",
"coleman_liau_index",
"automated_readability_index",
"dale_chall_readability_score",
"difficult_words",
"linsear_write_formula",
"gunning_fog",
"text_standard",
"fernandez_huerta",
"szigriszt_pazos",
"gutierrez_polini",
"crawford",
"gulpease_index",
"osman",
]
llm_outputs_df = ClearMLCallbackHandler._build_llm_df(
on_llm_end_records_df,
[
"step",
"text",
"token_usage_total_tokens",
"token_usage_prompt_tokens",
"token_usage_completion_tokens",
]
+ complexity_metrics_columns
+ visualizations_columns,
{"step": "output_step", "text": "output"},
)
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
return session_analysis_df
def flush_tracker(
self,
name: Optional[str] = None,
langchain_asset: Any = None,
finish: bool = False,
) -> None:
"""Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
None
"""
pd = import_pandas()
clearml = import_clearml()
# Log the action records
self.logger.report_table(
"Action Records", name, table_plot=pd.DataFrame(self.action_records)
)
# Session analysis
session_analysis_df = self._create_session_analysis_df()
self.logger.report_table(
"Session Analysis", name, table_plot=session_analysis_df
)
if self.stream_logs:
self.logger.report_text(
{
"action_records": pd.DataFrame(self.action_records),
"session_analysis": session_analysis_df,
}
)
if langchain_asset:
langchain_asset_path = Path(self.temp_dir.name, "model.json")
try:
langchain_asset.save(langchain_asset_path)
# Create output model and connect it to the task
output_model = clearml.OutputModel(
task=self.task, config_text=load_json(langchain_asset_path)
)
output_model.update_weights(
weights_filename=str(langchain_asset_path),
auto_delete_file=False,
target_filename=name,
)
except ValueError:
langchain_asset.save_agent(langchain_asset_path)
output_model = clearml.OutputModel(
task=self.task, config_text=load_json(langchain_asset_path)
)
output_model.update_weights(
weights_filename=str(langchain_asset_path),
auto_delete_file=False,
target_filename=name,
)
except NotImplementedError as e:
print("Could not save model.")
print(repr(e))
pass
# Cleanup after adding everything to ClearML
self.task.flush(wait_for_uploads=True)
self.temp_dir.cleanup()
self.temp_dir = tempfile.TemporaryDirectory()
self.reset_callback_meta()
if finish:
self.task.close()
| [
"name",
"prompt_step"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~arize_callback.py | from datetime import datetime
from typing import Any, Dict, List, Optional
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import import_pandas
from langchain.schema import AgentAction, AgentFinish, LLMResult
class ArizeCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs to Arize."""
def __init__(
self,
model_id: Optional[str] = None,
model_version: Optional[str] = None,
SPACE_KEY: Optional[str] = None,
API_KEY: Optional[str] = None,
) -> None:
"""Initialize callback handler."""
super().__init__()
self.model_id = model_id
self.model_version = model_version
self.space_key = SPACE_KEY
self.api_key = API_KEY
self.prompt_records: List[str] = []
self.response_records: List[str] = []
self.prediction_ids: List[str] = []
self.pred_timestamps: List[int] = []
self.response_embeddings: List[float] = []
self.prompt_embeddings: List[float] = []
self.prompt_tokens = 0
self.completion_tokens = 0
self.total_tokens = 0
self.step = 0
from arize.pandas.embeddings import EmbeddingGenerator, UseCases
from arize.pandas.logger import Client
self.generator = EmbeddingGenerator.from_use_case(
use_case=UseCases.NLP.SEQUENCE_CLASSIFICATION,
model_name="distilbert-base-uncased",
tokenizer_max_length=512,
batch_size=256,
)
self.arize_client = Client(space_key=SPACE_KEY, api_key=API_KEY)
if SPACE_KEY == "SPACE_KEY" or API_KEY == "API_KEY":
raise ValueError("❌ CHANGE SPACE AND API KEYS")
else:
print("✅ Arize client setup done! Now you can start using Arize!")
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
for prompt in prompts:
self.prompt_records.append(prompt.replace("\n", ""))
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
pd = import_pandas()
from arize.utils.types import (
EmbeddingColumnNames,
Environments,
ModelTypes,
Schema,
)
# Safe check if 'llm_output' and 'token_usage' exist
if response.llm_output and "token_usage" in response.llm_output:
self.prompt_tokens = response.llm_output["token_usage"].get(
"prompt_tokens", 0
)
self.total_tokens = response.llm_output["token_usage"].get(
"total_tokens", 0
)
self.completion_tokens = response.llm_output["token_usage"].get(
"completion_tokens", 0
)
else:
self.prompt_tokens = (
self.total_tokens
) = self.completion_tokens = 0 # assign default value
for generations in response.generations:
for generation in generations:
prompt = self.prompt_records[self.step]
self.step = self.step + 1
prompt_embedding = pd.Series(
self.generator.generate_embeddings(
text_col=pd.Series(prompt.replace("\n", " "))
).reset_index(drop=True)
)
# Assigning text to response_text instead of response
response_text = generation.text.replace("\n", " ")
response_embedding = pd.Series(
self.generator.generate_embeddings(
text_col=pd.Series(generation.text.replace("\n", " "))
).reset_index(drop=True)
)
pred_timestamp = datetime.now().timestamp()
# Define the columns and data
columns = [
"prediction_ts",
"response",
"prompt",
"response_vector",
"prompt_vector",
"prompt_token",
"completion_token",
"total_token",
]
data = [
[
pred_timestamp,
response_text,
prompt,
response_embedding[0],
prompt_embedding[0],
                        # ordered to match the "prompt_token", "completion_token",
                        # "total_token" columns declared above
                        self.prompt_tokens,
                        self.completion_tokens,
                        self.total_tokens,
]
]
# Create the DataFrame
df = pd.DataFrame(data, columns=columns)
# Declare prompt and response columns
prompt_columns = EmbeddingColumnNames(
vector_column_name="prompt_vector", data_column_name="prompt"
)
response_columns = EmbeddingColumnNames(
vector_column_name="response_vector", data_column_name="response"
)
schema = Schema(
timestamp_column_name="prediction_ts",
tag_column_names=[
"prompt_token",
"completion_token",
"total_token",
],
prompt_column_names=prompt_columns,
response_column_names=response_columns,
)
response_from_arize = self.arize_client.log(
dataframe=df,
schema=schema,
model_id=self.model_id,
model_version=self.model_version,
model_type=ModelTypes.GENERATIVE_LLM,
environment=Environments.PRODUCTION,
)
if response_from_arize.status_code == 200:
print("✅ Successfully logged data to Arize!")
else:
print(f'❌ Logging failed "{response_from_arize.text}"')
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing."""
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
pass
def on_text(self, text: str, **kwargs: Any) -> None:
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
pass
| [
"prompt_vector",
"\n",
" "
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~gradient_ai.py | import asyncio
import logging
import os
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, Dict, List, Optional, Tuple
import aiohttp
import numpy as np
import requests
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
__all__ = ["GradientEmbeddings"]
class GradientEmbeddings(BaseModel, Embeddings):
"""Gradient.ai Embedding models.
    GradientEmbeddings is a class to interact with Embedding Models on gradient.ai
To use, set the environment variable ``GRADIENT_ACCESS_TOKEN`` with your
API token and ``GRADIENT_WORKSPACE_ID`` for your gradient workspace,
or alternatively provide them as keywords to the constructor of this class.
Example:
.. code-block:: python
from langchain.embeddings import GradientEmbeddings
GradientEmbeddings(
model="bge-large",
gradient_workspace_id="12345614fc0_workspace",
gradient_access_token="gradientai-access_token",
)
"""
model: str
"Underlying gradient.ai model id."
gradient_workspace_id: Optional[str] = None
"Underlying gradient.ai workspace_id."
gradient_access_token: Optional[str] = None
"""gradient.ai API Token, which can be generated by going to
https://auth.gradient.ai/select-workspace
and selecting "Access tokens" under the profile drop-down.
"""
gradient_api_url: str = "https://api.gradient.ai/api"
"""Endpoint URL to use."""
client: Any = None #: :meta private:
"""Gradient client."""
# LLM call kwargs
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["gradient_access_token"] = get_from_dict_or_env(
values, "gradient_access_token", "GRADIENT_ACCESS_TOKEN"
)
values["gradient_workspace_id"] = get_from_dict_or_env(
values, "gradient_workspace_id", "GRADIENT_WORKSPACE_ID"
)
values["gradient_api_url"] = get_from_dict_or_env(
values, "gradient_api_url", "GRADIENT_API_URL"
)
values["client"] = TinyAsyncGradientEmbeddingClient(
access_token=values["gradient_access_token"],
workspace_id=values["gradient_workspace_id"],
host=values["gradient_api_url"],
)
try:
import gradientai # noqa
except ImportError:
logging.warning(
"DeprecationWarning: `GradientEmbeddings` will use "
"`pip install gradientai` in future releases of langchain."
)
except Exception:
pass
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Gradient's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = self.client.embed(
model=self.model,
texts=texts,
)
return embeddings
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Async call out to Gradient's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = await self.client.aembed(
model=self.model,
texts=texts,
)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Call out to Gradient's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
async def aembed_query(self, text: str) -> List[float]:
"""Async call out to Gradient's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = await self.aembed_documents([text])
return embeddings[0]
class TinyAsyncGradientEmbeddingClient: #: :meta private:
"""A helper tool to embed Gradient. Not part of Langchain's or Gradients stable API,
direct use discouraged.
To use, set the environment variable ``GRADIENT_ACCESS_TOKEN`` with your
API token and ``GRADIENT_WORKSPACE_ID`` for your gradient workspace,
or alternatively provide them as keywords to the constructor of this class.
Example:
.. code-block:: python
mini_client = TinyAsyncGradientEmbeddingClient(
workspace_id="12345614fc0_workspace",
access_token="gradientai-access_token",
)
embeds = mini_client.embed(
model="bge-large",
text=["doc1", "doc2"]
)
# or
embeds = await mini_client.aembed(
model="bge-large",
text=["doc1", "doc2"]
)
"""
def __init__(
self,
access_token: Optional[str] = None,
workspace_id: Optional[str] = None,
host: str = "https://api.gradient.ai/api",
aiosession: Optional[aiohttp.ClientSession] = None,
) -> None:
self.access_token = access_token or os.environ.get(
"GRADIENT_ACCESS_TOKEN", None
)
self.workspace_id = workspace_id or os.environ.get(
"GRADIENT_WORKSPACE_ID", None
)
self.host = host
self.aiosession = aiosession
if self.access_token is None or len(self.access_token) < 10:
raise ValueError(
"env variable `GRADIENT_ACCESS_TOKEN` or "
" param `access_token` must be set "
)
if self.workspace_id is None or len(self.workspace_id) < 3:
raise ValueError(
"env variable `GRADIENT_WORKSPACE_ID` or "
" param `workspace_id` must be set"
)
if self.host is None or len(self.host) < 3:
raise ValueError(" param `host` must be set to a valid url")
self._batch_size = 128
@staticmethod
def _permute(
texts: List[str], sorter: Callable = len
) -> Tuple[List[str], Callable]:
"""Sort texts in ascending order, and
delivers a lambda expr, which can sort a same length list
https://github.com/UKPLab/sentence-transformers/blob/
c5f93f70eca933c78695c5bc686ceda59651ae3b/sentence_transformers/SentenceTransformer.py#L156
Args:
            texts (List[str]): the texts to reorder.
            sorter (Callable, optional): key function used to order the texts. Defaults to len.
        Returns:
            Tuple[List[str], Callable]: the reordered texts and a function that restores
                a same-length list to the original order.
Example:
```
texts = ["one","three","four"]
perm_texts, undo = self._permute(texts)
texts == undo(perm_texts)
```
"""
if len(texts) == 1:
# special case query
return texts, lambda t: t
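        # argsort on negated lengths orders texts from longest to shortest; the returned
        # lambda applies the inverse permutation so a same-length result list (e.g. the
        # embeddings) can be restored to the caller's original order.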
length_sorted_idx = np.argsort([-sorter(sen) for sen in texts])
texts_sorted = [texts[idx] for idx in length_sorted_idx]
return texts_sorted, lambda unsorted_embeddings: [ # noqa E731
unsorted_embeddings[idx] for idx in np.argsort(length_sorted_idx)
]
def _batch(self, texts: List[str]) -> List[List[str]]:
"""
        Splits a list of texts into batches of at most `self._batch_size` items,
        used when encoding documents for a vector database.
Args:
texts (List[str]): List of sentences
self._batch_size (int, optional): max batch size of one request.
Returns:
List[List[str]]: Batches of List of sentences
"""
if len(texts) == 1:
# special case query
return [texts]
batches = []
for start_index in range(0, len(texts), self._batch_size):
batches.append(texts[start_index : start_index + self._batch_size])
return batches
@staticmethod
def _unbatch(batch_of_texts: List[List[Any]]) -> List[Any]:
if len(batch_of_texts) == 1 and len(batch_of_texts[0]) == 1:
# special case query
return batch_of_texts[0]
texts = []
for sublist in batch_of_texts:
texts.extend(sublist)
return texts
def _kwargs_post_request(self, model: str, texts: List[str]) -> Dict[str, Any]:
"""Build the kwargs for the Post request, used by sync
Args:
            model (str): the embedding model to call.
            texts (List[str]): the texts to embed.
        Returns:
            Dict[str, Collection[str]]: keyword arguments for `requests.post`.
"""
return dict(
url=f"{self.host}/embeddings/{model}",
headers={
"authorization": f"Bearer {self.access_token}",
"x-gradient-workspace-id": f"{self.workspace_id}",
"accept": "application/json",
"content-type": "application/json",
},
json=dict(
inputs=[{"input": i} for i in texts],
),
)
def _sync_request_embed(
self, model: str, batch_texts: List[str]
) -> List[List[float]]:
response = requests.post(
**self._kwargs_post_request(model=model, texts=batch_texts)
)
if response.status_code != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
return [e["embedding"] for e in response.json()["embeddings"]]
def embed(self, model: str, texts: List[str]) -> List[List[float]]:
"""call the embedding of model
Args:
model (str): to embedding model
texts (List[str]): List of sentences to embed.
Returns:
List[List[float]]: List of vectors for each sentence
"""
perm_texts, unpermute_func = self._permute(texts)
perm_texts_batched = self._batch(perm_texts)
# Request
map_args = (
self._sync_request_embed,
[model] * len(perm_texts_batched),
perm_texts_batched,
)
if len(perm_texts_batched) == 1:
embeddings_batch_perm = list(map(*map_args))
else:
with ThreadPoolExecutor(32) as p:
embeddings_batch_perm = list(p.map(*map_args))
embeddings_perm = self._unbatch(embeddings_batch_perm)
embeddings = unpermute_func(embeddings_perm)
return embeddings
async def _async_request(
self, session: aiohttp.ClientSession, kwargs: Dict[str, Any]
) -> List[List[float]]:
async with session.post(**kwargs) as response:
if response.status != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status}: {response.text}"
)
embedding = (await response.json())["embeddings"]
return [e["embedding"] for e in embedding]
async def aembed(self, model: str, texts: List[str]) -> List[List[float]]:
"""call the embedding of model, async method
Args:
model (str): to embedding model
texts (List[str]): List of sentences to embed.
Returns:
List[List[float]]: List of vectors for each sentence
"""
perm_texts, unpermute_func = self._permute(texts)
perm_texts_batched = self._batch(perm_texts)
# Request
if self.aiosession is None:
self.aiosession = aiohttp.ClientSession(
trust_env=True, connector=aiohttp.TCPConnector(limit=32)
)
async with self.aiosession as session:
embeddings_batch_perm = await asyncio.gather(
*[
self._async_request(
session=session,
**self._kwargs_post_request(model=model, texts=t),
)
for t in perm_texts_batched
]
)
embeddings_perm = self._unbatch(embeddings_batch_perm)
embeddings = unpermute_func(embeddings_perm)
return embeddings
| [
"application/json"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~agents~test_mrkl_output_parser.py | import pytest
from langchain.agents.mrkl.output_parser import (
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
MRKLOutputParser,
)
from langchain.schema import AgentAction, AgentFinish, OutputParserException
mrkl_output_parser = MRKLOutputParser()
def test_valid_action_and_action_input_parse() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar"""
agent_action: AgentAction = mrkl_output_parser.parse(llm_output) # type: ignore
assert agent_action.tool == "foo"
assert agent_action.tool_input == "bar"
def test_valid_final_answer_parse() -> None:
llm_output = """Final Answer: The best pizza to eat is margaritta """
agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output) # type: ignore
assert (
agent_finish.return_values.get("output")
== "The best pizza to eat is margaritta"
)
def test_missing_action() -> None:
llm_output = """I can use the `foo` tool to achieve the goal."""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
exception_info.value.observation == MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE
)
def test_missing_action_input() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action: foo"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
exception_info.value.observation
== MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE
)
def test_final_answer_before_parsable_action() -> None:
llm_output = """Final Answer: The best pizza to eat is margaritta
Action: foo
Action Input: bar
"""
agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output) # type: ignore
assert (
agent_finish.return_values.get("output")
== "The best pizza to eat is margaritta"
)
def test_final_answer_after_parsable_action() -> None:
llm_output = """
Observation: I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar
Final Answer: The best pizza to eat is margaritta
"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)
assert (
"Parsing LLM output produced both a final answer and a parse-able action"
in exception_info.value.args[0]
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~schema~runnable~fallbacks.py | import asyncio
from typing import (
TYPE_CHECKING,
Any,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from langchain.load.dump import dumpd
from langchain.pydantic_v1 import BaseModel
from langchain.schema.runnable.base import Runnable, RunnableSerializable
from langchain.schema.runnable.config import (
RunnableConfig,
ensure_config,
get_async_callback_manager_for_config,
get_callback_manager_for_config,
get_config_list,
patch_config,
)
from langchain.schema.runnable.utils import (
ConfigurableFieldSpec,
Input,
Output,
get_unique_config_specs,
)
if TYPE_CHECKING:
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun
class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
"""
A Runnable that can fallback to other Runnables if it fails.
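
    Example:

        .. code-block:: python

            # A minimal sketch, assuming two fake LLMs; any Runnables with
            # compatible input/output types can be combined the same way.
            from langchain.llms.fake import FakeListLLM

            primary = FakeListLLM(responses=["primary answer"])
            backup = FakeListLLM(responses=["fallback answer"])

            chain = primary.with_fallbacks([backup])
            chain.invoke("hello")  # falls back to `backup` only if `primary` raises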
"""
runnable: Runnable[Input, Output]
fallbacks: Sequence[Runnable[Input, Output]]
exceptions_to_handle: Tuple[Type[BaseException], ...] = (Exception,)
class Config:
arbitrary_types_allowed = True
@property
def InputType(self) -> Type[Input]:
return self.runnable.InputType
@property
def OutputType(self) -> Type[Output]:
return self.runnable.OutputType
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
return self.runnable.get_input_schema(config)
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
return self.runnable.get_output_schema(config)
@property
def config_specs(self) -> Sequence[ConfigurableFieldSpec]:
return get_unique_config_specs(
spec
for step in [self.runnable, *self.fallbacks]
for spec in step.config_specs
)
def config_schema(self, *, include: Sequence[str]) -> Type[BaseModel]:
return self.runnable.config_schema(include=include)
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
@property
def runnables(self) -> Iterator[Runnable[Input, Output]]:
yield self.runnable
yield from self.fallbacks
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
# setup callbacks
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
first_error = None
for runnable in self.runnables:
try:
output = runnable.invoke(
input,
patch_config(config, callbacks=run_manager.get_child()),
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
run_manager.on_chain_error(e)
raise e
else:
run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
run_manager.on_chain_error(first_error)
raise first_error
async def ainvoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Output:
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
first_error = None
for runnable in self.runnables:
try:
output = await runnable.ainvoke(
input,
patch_config(config, callbacks=run_manager.get_child()),
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
await run_manager.on_chain_error(e)
raise e
else:
await run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
await run_manager.on_chain_error(first_error)
raise first_error
def batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
from langchain.callbacks.manager import CallbackManager
if return_exceptions:
raise NotImplementedError()
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers = [
cm.on_chain_start(
dumpd(self),
input if isinstance(input, dict) else {"input": input},
name=config.get("run_name"),
)
for cm, input, config in zip(callback_managers, inputs, configs)
]
first_error = None
for runnable in self.runnables:
try:
outputs = runnable.batch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(config, callbacks=rm.get_child())
for rm, config in zip(run_managers, configs)
],
return_exceptions=return_exceptions,
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
for rm in run_managers:
rm.on_chain_error(e)
raise e
else:
for rm, output in zip(run_managers, outputs):
rm.on_chain_end(output)
return outputs
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
for rm in run_managers:
rm.on_chain_error(first_error)
raise first_error
async def abatch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
from langchain.callbacks.manager import AsyncCallbackManager
if return_exceptions:
raise NotImplementedError()
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
AsyncCallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
cm.on_chain_start(
dumpd(self),
input,
name=config.get("run_name"),
)
for cm, input, config in zip(callback_managers, inputs, configs)
)
)
first_error = None
for runnable in self.runnables:
try:
outputs = await runnable.abatch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(config, callbacks=rm.get_child())
for rm, config in zip(run_managers, configs)
],
return_exceptions=return_exceptions,
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
await asyncio.gather(*(rm.on_chain_error(e) for rm in run_managers))
else:
await asyncio.gather(
*(
rm.on_chain_end(output)
for rm, output in zip(run_managers, outputs)
)
)
return outputs
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
await asyncio.gather(*(rm.on_chain_error(first_error) for rm in run_managers))
raise first_error
| [] |
2024-01-10 | GuralTOO/File_store_server | upload.py | from dotenv import load_dotenv
import openai
import WeaviateClient
import os
import SupabaseClient
from load_pdf import load_pdf_with_textract as load_pdf
class_name = "File_store_ver2"
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_context_for_authors(properties=[""], k=3, path=""):
properties.append("path")
properties.append("page_number")
pathFilter = {"path": "path", "operator": "Equal", "valueString": path}
page_filter = {"path": "page_number",
"operator": "Equal", "valueString": "1"}
client = WeaviateClient.get_client()
text_query = "Can you return the names of the authors of the paper?"
results = (
client.query
.get(class_name=class_name, properties=properties)
.with_where({
"operator": "And",
"operands": [pathFilter, page_filter]
})
.with_near_text({"concepts": text_query})
.with_limit(k)
.do()
)
search_result = ""
for i in range(len(results["data"]["Get"][class_name])):
search_result += results["data"]["Get"][class_name][i][properties[0]] + ".\n"
return search_result
def get_context_for_methods(properties=[""], k=3, path=""):
properties.append("path")
pathFilter = {"path": "path", "operator": "Equal", "valueString": path}
client = WeaviateClient.get_client()
text_query = "Can you provide the research methods, procedures, experimental protocols, practical applications, and detailed methodology described in the paper?"
results = (
client.query
.get(class_name=class_name, properties=properties)
.with_where(pathFilter)
.with_near_text({"concepts": text_query})
.with_limit(k)
.do()
)
search_result = ""
for i in range(len(results["data"]["Get"][class_name])):
search_result += results["data"]["Get"][class_name][i][properties[0]] + ".\n"
return search_result
def get_context_for_key_results(properties=[""], k=3, path=""):
print("path in results: ", path)
properties.append("path")
pathFilter = {"path": "path", "operator": "Equal", "valueString": path}
client = WeaviateClient.get_client()
text_query = "Can you provide the main results, discussion, outcomes, conclusions, and findings described in the paper?"
results = (
client.query
.get(class_name=class_name, properties=properties)
.with_where(pathFilter)
.with_near_text({"concepts": text_query})
.with_limit(k)
.do()
)
print("path after filter: ", path)
print()
search_result = ""
for i in range(len(results["data"]["Get"][class_name])):
search_result += results["data"]["Get"][class_name][i][properties[0]] + ".\n"
for p in properties:
print(results["data"]["Get"][class_name][i][p] + ".\n")
return search_result
def analyze_research(path=""):
'''
questions = ["Based on the following excerpts from this research paper, return the list of the authors of this research paper",
"Based on the following excerpts from this research paper, return the list of the research methods used in this research paper.",
"Based on the following excerpts from this research paper, return the list of the key results presented in this research paper."]
'''
questions = ["Extract the author names based on the given context only. Do not make any assumptions.",
"Extract the main methods described in the given context only. Restrict the output to a list of 10 or less. Do not make any assumptions.",
"Extract the key results described in the given context only. Restrict the output to a list of 10 or less. Do not make any assumptions."] # "extract" works better than "return a list of"
contexts = [get_context_for_authors(properties=["text"], k=3, path=path), get_context_for_methods(
properties=["text"], k=3, path=path), get_context_for_key_results(properties=["text"], k=3, path=path)]
for i in range(len(questions)):
question = questions[i]
context = contexts[i]
response = openai.completions.create(
model="gpt-3.5-turbo-instruct",
prompt=f"Question: \"\"\"{question}\"\"\"\nContext: \"\"\"{context}\"\"\"\n",
max_tokens=3000,
temperature=0.2, # reduced the temperature
)
if i == 0:
authors = response.choices[0].text
elif i == 1:
methods = response.choices[0].text
elif i == 2:
key_results = response.choices[0].text
# make a json object with the following properties: authors, methods, key_results
# return the json object
return {"authors": authors, "methods": methods, "key_results": key_results}
async def upload_file(document_type, path, url, contentType="research"):
# Load the file to Weaviate
result = await load_pdf(class_name=class_name, properties={
"type": contentType, "path": path, "url": url})
contentType = "research" #TODO: get logic for different types of files (overwriting as a fix for now)
print("uploaded file with path: ", path, " and content type: ", contentType)
# if contentType is not "research" then we don't need to extract the authors, methods, and key results
if contentType != "research":
return
analysis = analyze_research(path=path)
print("analysis: ", analysis)
# instead of returning the metadata, save it directly to the db
SupabaseClient.update_metadata_in_database(path=path, metadata=analysis)
print("updated metadata in database for path: ", path)
return
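# Illustrative call (hypothetical path and URL; requires Weaviate, OpenAI and Supabase
# credentials to be configured):
#     await upload_file(
#         document_type="pdf",
#         path="user123/papers/example.pdf",
#         url="https://example.com/example.pdf",
#         contentType="research",
#     )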
| [
"Question: \"\"\"PLACEHOLDER\"\"\"\nContext: \"\"\"PLACEHOLDER\"\"\"\n"
] |
2024-01-10 | GuralTOO/File_store_server | load_pdf.py | import boto3
import requests
import time
from pdf2image import convert_from_path
from concurrent.futures import ThreadPoolExecutor
import asyncio
import io
from WeaviateClient import add_item, add_batch_items
import openai
import tracemalloc
tracemalloc.start()
import functools
def get_chunks_with_overlap(text, chunk_size=1000, overlap_size=150):
"""Splits the text into chunks of specified size with overlap."""
chunks = []
index = 0
while index < len(text):
# If we are not at the start, move back to create overlap
if index > 0 and (index - overlap_size) > 0:
index -= overlap_size
# Find the end of the chunk
end = min(index + chunk_size, len(text))
# Ensure the chunk ends on a space (if not the end of the text)
if end < len(text):
while end > index and text[end] != ' ':
end -= 1
# Add the chunk
chunks.append(text[index:end].strip())
# Move the index forward
index = end
return chunks
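# Note: get_chunks_with_overlap assumes the text contains spaces so each chunk can end
# on a word boundary; with the defaults, consecutive chunks share roughly the last
# 150 characters of the previous chunk.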
# Function to process a single page
async def process_page(executor, client, image):
loop = asyncio.get_event_loop()
with io.BytesIO() as image_bytes:
image.save(image_bytes, format='JPEG')
image_bytes_val = image_bytes.getvalue()
# Prepare the function call with functools.partial
func = functools.partial(client.detect_document_text, Document={'Bytes': image_bytes_val})
# Call Textract using run_in_executor
response = await loop.run_in_executor(executor, func)
text = '\n'.join([block['Text'] for block in response['Blocks'] if block['BlockType'] == 'LINE'])
return text
def get_title(text, model="gpt-3.5-turbo"):
try:
response = openai.chat.completions.create(
model=model,
messages=[
{"role": "user", "content": f"Extract the title of the paper from the given context. \
Note that it may be in multiple lines. Do not make any assumptions.\nContext:\"\"\"{text}\"\"\""}],
max_tokens=1000,
temperature=0.0)
extracted_title = response.choices[0].message.content
return extracted_title
except:
try:
# assuming exception is due to too many tokens
# chunk the text and choose first 3000 tokens
text = get_chunks_with_overlap(text=text, chunk_size=3000,overlap_size=0)[0]
response = openai.chat.completions.create(
model=model,
messages=[
{"role": "user", "content": f"Extract the title of the paper from the given context. \
Note that it may be in multiple lines. Do not make any assumptions.\nContext:\"\"\"{text}\"\"\""}],
max_tokens=1000,
temperature=0.0)
extracted_title = response.choices[0].message.content
return extracted_title
except Exception as e:
print("Error finding the title: ", e)
return "" # currently not returning any title on exception
async def load_pdf_with_textract(class_name, properties=None):
start_time = time.time()
url = properties["url"]
response = requests.get(url)
with open('document.pdf', 'wb') as file:
file.write(response.content)
# Convert PDF to images
images = convert_from_path('document.pdf')
# Setup AWS Textract client
session = boto3.Session()
client = session.client('textract')
# Setup thread pool for running I/O tasks in parallel within a context manager
with ThreadPoolExecutor(max_workers=len(images)) as executor:
tasks = [process_page(executor, client, image) for image in images]
results = await asyncio.gather(*tasks)
# time to get the text from the images
images_time = time.time()
print("time to get the text from the images: " + str(images_time - start_time))
combined_pages = []
overlap_size = 150
for i in range(len(results)):
combined_text = results[i]
if i < len(results) - 1: # if not the last page, append the beginning of the next page
combined_text += "\n" + results[i + 1][:overlap_size] # overlap_size from get_chunks_with_overlap
combined_pages.append((i + 1, combined_text)) # Store page number with combined text
batch_items = [] # Initialize a list to collect batch items
for page_number, text in combined_pages:
# extract title from the first page
if page_number == 1:
extracted_title = get_title(text)
text_chunks = get_chunks_with_overlap(text)
for chunk in text_chunks:
modified_properties = properties.copy()
modified_properties["page_number"] = str(page_number)
modified_properties["text"] = chunk
modified_properties["title"] = extracted_title
batch_items.append(modified_properties) # Add item to the batch list
# When batch size is reached, send the batch
if len(batch_items) >= 100:
add_batch_items(class_name, batch_items)
batch_items = [] # Reset the batch list after sending
if batch_items:
add_batch_items(class_name, batch_items) # Send the remaining batch
end_time = time.time()
print("time elapsed: " + str(end_time - start_time))
print("uploaded file to class: " + class_name + " with path: " + properties["path"] + ", title: " +
modified_properties["title"] + ", and url: " + properties["url"])
return "Success"
# import pypdf
# from pdf2image import convert_from_path
# import pytesseract
# import json
# import os
# import requests
# import datetime
# from utils import utils
# import tempfile
# import time
# from WeaviateClient import add_item
# import concurrent.futures
# def load_pdf(class_name, properties=None):
# print(properties)
# start_time = time.time()
# try:
# url = properties["url"]
# print("loading pdf: " + url + "...")
# # load file from a given url
# response = requests.get(url)
# print(response)
# response.raise_for_status()
# with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
# tmp_file.write(response.content)
# with open(tmp_file.name, "rb") as pdf_file:
# pdf_reader = pypdf.PdfReader(pdf_file)
# print("file loaded")
# num_pages = len(pdf_reader.pages)
# pages_text = []
# pageCounter = 0
# print("file has " + str(num_pages) + " pages")
# def process_page(page):
# print("reading page: " + str(page + 1) + "...")
# local_path = os.path.abspath(tmp_file.name)
# images = convert_from_path(
# local_path, first_page=page + 1, last_page=page + 1)
# # if there are images in the page, use OCR to extract text
# if images:
# page_image = images[0]
# page_text = pytesseract.image_to_string(page_image)
# page_text = ""
# # pages_text.append(page_text)
# # if there are no images in the page, use PyPDF2 to extract text
# else:
# print("no images found in page " + str(page + 1) + "...")
# page_obj = pdf_reader.getPage(page)
# page_text = page_obj.extractText()
# pages_text.append(page_text)
# # print("page " + str(page + 1) + ": " + page_text)
# # split text into into chunks of 1000 characters when the word ends
# text_chunks = utils.get_chunks(page_text)
# for chunk in text_chunks:
# modified_properties = properties.copy()
# modified_properties["page_number"] = str(page)
# modified_properties["text"] = chunk
# # add_item(class_name=class_name, item=modified_properties)
# # parallelize the process_page function
# # with concurrent.futures.ThreadPoolExecutor() as executor:
# # executor.map(process_page, range(num_pages))
# for page in range(num_pages):
# process_page(page)
# pageCounter += num_pages
# # end timer
# end_time = time.time()
# print("time elapsed: " + str(end_time - start_time))
# return "Success"
# except Exception as e:
# print("Error loading pdf:", e)
# return "Failure"
# async def main():
# url_8page = "https://emoimoycgytvcixzgjiy.supabase.co/storage/v1/object/sign/documents/23c88506-31e5-43c7-911c-d6df61fbbf7b/curve-stablecoin.pdf?token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1cmwiOiJkb2N1bWVudHMvMjNjODg1MDYtMzFlNS00M2M3LTkxMWMtZDZkZjYxZmJiZjdiL2N1cnZlLXN0YWJsZWNvaW4ucGRmIiwiaWF0IjoxNzAzOTA4MzA3LCJleHAiOjE3MDQ1MTMxMDd9.PVXyAmoZqWlrSt2-v5ma6P9oZrFlm-7vqTSytAAkcNo&t=2023-12-30T03%3A51%3A47.332Z"
# url_29page = "https://emoimoycgytvcixzgjiy.supabase.co/storage/v1/object/sign/documents/06ca3fba-93e4-493d-9c22-5c5ddc15d352/G3-2023-404312_Merged_PDF.pdf?token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1cmwiOiJkb2N1bWVudHMvMDZjYTNmYmEtOTNlNC00OTNkLTljMjItNWM1ZGRjMTVkMzUyL0czLTIwMjMtNDA0MzEyX01lcmdlZF9QREYucGRmIiwiaWF0IjoxNzAzOTA4Mjc3LCJleHAiOjE3MDQ1MTMwNzd9.3CJFZeo6s7XchyaWmyD-6rkxU-JqnQPulZfgLOc5KB8&t=2023-12-30T03%3A51%3A17.288Z"
# # short_1706.03762.pdf
# # url_attn_short = 'https://emoimoycgytvcixzgjiy.supabase.co/storage/v1/object/sign/documents/c75767dd-172c-463c-aafc-1e2dfddc1b32/yoho/Folder%20X/Folder%20X/short_1706.03762.pdf?token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1cmwiOiJkb2N1bWVudHMvYzc1NzY3ZGQtMTcyYy00NjNjLWFhZmMtMWUyZGZkZGMxYjMyL3lvaG8vRm9sZGVyIFgvRm9sZGVyIFgvc2hvcnRfMTcwNi4wMzc2Mi5wZGYiLCJpYXQiOjE3MDM5MTc4OTIsImV4cCI6MTcwMzkxNzk1Mn0.iL-8ARX2INdXIy5XtvIQYAXsmUhvD-er4Kb6y1abJTc'
# url_4page = "https://emoimoycgytvcixzgjiy.supabase.co/storage/v1/object/sign/documents/539e9941-5673-4561-8f7b-ddb523a4b537/Test/Additional/CVF_2024_short.pdf?token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1cmwiOiJkb2N1bWVudHMvNTM5ZTk5NDEtNTY3My00NTYxLThmN2ItZGRiNTIzYTRiNTM3L1Rlc3QvQWRkaXRpb25hbC9DVkZfMjAyNF9zaG9ydC5wZGYiLCJpYXQiOjE3MDQ0Njc0OTQsImV4cCI6MTczNjAwMzQ5NH0.m0rbeiCSkHAfOzqN3t6IDFvoT386og90m45J8O6nLw4&t=2024-01-05T15%3A11%3A35.234Z"
# result = await load_pdf_with_textract(class_name="File_store_ver2", properties={"path": "abcd/cvf.pdf", "url": url_4page})
# print(result)
# # Running the main function
# if __name__ == "__main__":
# asyncio.run(main())
| [
"Extract the title of the paper from the given context. Note that it may be in multiple lines. Do not make any assumptions.\nContext:\"\"\"PLACEHOLDER\"\"\"",
"Extract the title of the paper from the given context. Note that it may be in multiple lines. Do not make any assumptions.\nContext:\"\"\"PLACEHOLDER\"\"\""
] |
2024-01-10 | 4everhope/lico | lico.py |
import os
import hashlib
import time
import random
import asyncio
from openai import OpenAI
import edge_tts
import pinecone
from quart import Quart, jsonify, request, render_template, send_file
from werkzeug.utils import secure_filename
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import ChatPromptTemplate
from dotenv import load_dotenv
load_dotenv('./env')
OPENAI_API_KEY_1 = os.environ.get("OPENAI_API_KEY")
OPENAI_API_KEY_2 = os.environ.get("CHATGPT_ACCESS_TOKEN")
pinecone_api_key = os.environ.get("PINECONE_API_KEY")
pinecone_environment = os.environ.get("PINECONE_ENVIRONMENT")
VECTOR_SPACE_NAME = os.environ.get("VECTOR_SPACE_NAME")
SYSTEM_PROMPT = os.environ.get("SYSTEM_PROMPT")
API_BASE = os.environ.get("API_BASE")
MODEL_NAME = os.environ.get("MODEL_NAME")
client = OpenAI(api_key=OPENAI_API_KEY_1)
pinecone.init(api_key=pinecone_api_key, environment=pinecone_environment)
chat_model_1 = ChatOpenAI(
model_name=MODEL_NAME,
openai_api_base=API_BASE,
openai_api_key = OPENAI_API_KEY_2
)
chat_model_2 = ChatOpenAI(
model_name='gpt-3.5-turbo',
openai_api_base=API_BASE,
openai_api_key=OPENAI_API_KEY_2
)
embed_model = "text-embedding-ada-002"
global memory
memory = ConversationBufferWindowMemory(
memory_key="history",
input_key="question",
output_key='answer',
return_messages=True,
k=5
)
total_tokens = 0
memory_keywords = ["帮我记住", "帮我记下来", "写入长期记忆"]
chat_history = []
sysprompt_main = SYSTEM_PROMPT
sysprompt_summary = "Summarize the conversation in Chinese, retaining only core information, discarding irrelevant or repetitive content, for easy database retrieval."
prompt_maingpt = ChatPromptTemplate.from_messages([
("system", ""),
("ai", "history"),
("human", "user_input")
])
prompt_summary = ChatPromptTemplate.from_messages([
("system", sysprompt_summary),
("human", "combined_text")
])
llm_chain = LLMChain(
llm=chat_model_1,
prompt=prompt_maingpt,
memory=ConversationBufferWindowMemory(k=3,max_token_limit=650),
verbose=True
)
llm_chain_summary = LLMChain(
llm=chat_model_2,
prompt=prompt_summary,
)
app = Quart(__name__)
def get_current_datetime():
local_time = time.localtime()
datetime_str = time.strftime("%Y-%m-%d %H:%M", local_time)
return datetime_str
try:
if VECTOR_SPACE_NAME not in pinecone.list_indexes():
pinecone.create_index(
name=VECTOR_SPACE_NAME,
metric='dotproduct',
dimension=1536
)
index = pinecone.Index(index_name=VECTOR_SPACE_NAME)
print("LICO!!-向量数据库-连接到Pinecone并成功连接到现有索引!")
except Exception as e:
print(f"LICO!!-向量数据库-连接Pinecone时出错:{e}")
def generate_unique_id(prefix):
unique_str = f"{prefix}_{time.time()}_{random.random()}"
unique_id = hashlib.sha256(unique_str.encode()).hexdigest()[:16]
return f"{prefix}_{unique_id}"
def save_embeddings_to_pinecone(combined_embedding,combined_text):
index = pinecone.Index(index_name=VECTOR_SPACE_NAME)
combined_embedding_data_vector = combined_embedding.data[0].embedding
session_id=generate_unique_id("chat_id")
data_to_upsert = [
(session_id, combined_embedding_data_vector, {"text": combined_text})
]
upsert_result = index.upsert(data_to_upsert)
if upsert_result:
print("LICO!!-向量数据库-Data successfully uploaded to Pinecone.")
else:
print("LICO!!-向量数据库-Failed to upload data to Pinecone.")
def query_and_retrieve(query, embed_model, index, limit=2000):
index = pinecone.Index(index_name=VECTOR_SPACE_NAME)
embedding_response = client.embeddings.create(input=[query], model=embed_model,encoding_format="float")
query_vector = embedding_response.data[0].embedding
retrieval_response = index.query(query_vector, top_k=2, include_metadata=True)
contexts = []
if retrieval_response['matches']:
contexts = [match['metadata']['text'] for match in retrieval_response['matches'] if 'text' in match['metadata']]
if not contexts:
return f"human_input:{query}"
prompt_start = "下面的是数据库里的记忆\nContext:\n"
prompt_end = f"\nhuman_input:{query}\n "
combined_contexts = "\n".join(contexts)
if len(combined_contexts) <= limit:
return prompt_start + combined_contexts + prompt_end
for i in range(1, len(contexts)):
if len("\n".join(contexts[:i])) >= limit:
return prompt_start + "\n".join(contexts[:i-1]) + prompt_end
return prompt_start + combined_contexts + prompt_end
def extract_and_vectorize(vector_input, embed_model, sysprompt_summary):
    human_input_text = vector_input['human_input']
    ai_message_text = vector_input['text']
combined_text = "human_input: " + human_input_text + "ai_reply: " + ai_message_text
prompt_summary = ChatPromptTemplate.from_messages([
("system", sysprompt_summary),
("human", combined_text)
])
llm_chain_summary.prompt = prompt_summary
summary_response = llm_chain_summary({"text": combined_text})
summary_response_text = summary_response['text']
print("检测到以下关键内容:", summary_response_text)
combined_embedding = client.embeddings.create(input=[summary_response_text],
model=embed_model)
return combined_embedding,summary_response_text
def process_user_input(user_input,sysprompt_main):
if "你还记得" in user_input:
query_with_contexts = query_and_retrieve(user_input, embed_model, index, limit=2000)
human_message_template = query_with_contexts
print("LICO回忆中……")
else:
human_message_template = user_input
current_datetime = get_current_datetime()
prompt_maingpt= ChatPromptTemplate.from_messages([
("system", "Time:"+current_datetime+sysprompt_main),
("ai", "chat-history: {history}"),
("human", human_message_template)
])
llm_chain.prompt = prompt_maingpt
global total_tokens
with get_openai_callback() as cb:
response = llm_chain({'human_input': user_input})
total_tokens += cb.total_tokens
print(f'Tokens used in this round: {cb.total_tokens}')
print(f'Total tokens used: {total_tokens}')
return response
async def speak_text(text, filename):
current_dir = os.path.dirname(os.path.abspath(__file__))
full_path = os.path.join(current_dir, filename)
communicate = edge_tts.Communicate(text, "zh-CN-XiaoyiNeural")
await communicate.save(full_path)
async def audio_generation(response_text, filename):
await speak_text(response_text, filename)
print(f'Audio file generated: {filename}')
@app.route('/<filename>')
async def serve_audio(filename):
filename = secure_filename(filename)
current_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(current_dir, filename)
return await send_file(file_path, as_attachment=True)
@app.route('/delete_audio/<filename>', methods=['POST'])
async def delete_audio(filename):
filename = secure_filename(filename)
current_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(current_dir, filename)
try:
os.remove(file_path)
return jsonify({"status": "success", "message": "Audio file deleted successfully."})
except Exception as e:
return jsonify({"status": "error", "message": str(e)})
@app.route('/', methods=['GET', 'POST'])
async def chat():
user_message = ""
response = ""
bot_reply = ""
if request.method == 'POST':
form = await request.form
user_message = form.get('user_message')
enable_voice = form.get('enable_voice') == 'true'
if user_message.lower() == 'exit':
bot_reply = "随时在哦,主人!"
chat_history.append(("You", user_message))
chat_history.append(("Lico", bot_reply))
return await render_template('chat.html', chat_history=chat_history)
allresponse = process_user_input(user_message, sysprompt_main)
response = allresponse['text']
if enable_voice:
timestamp = int(time.time())
filename = f"output_{timestamp}.mp3"
await audio_generation(response, filename)
else:
filename = None
if any(keyword in user_message for keyword in memory_keywords):
combined_embedding, summary_response_text = extract_and_vectorize(allresponse, embed_model, sysprompt_summary)
save_embeddings_to_pinecone(combined_embedding, summary_response_text)
chat_history.append(("You", user_message))
chat_history.append(("Lico", response))
if filename:
return jsonify({"user_message": user_message, "bot_reply": response, "audio_file": filename})
else:
return jsonify({"user_message": user_message, "bot_reply": response})
return await render_template('chat.html', chat_history=chat_history)
if __name__ == "__main__":
app.run(debug=False, host='127.0.0.1', port=8000)
| [
"user_input",
"[('system', PLACEHOLDER), ('human', 'combined_text')]",
"combined_text",
"下面的是数据库里的记忆\nContext:\n",
"Summarize the conversation in Chinese, retaining only core information, discarding irrelevant or repetitive content, for easy database retrieval.",
"human",
"[('system', ''), ('ai', 'history'), ('human', 'user_input')]",
"SYSTEM_PROMPT",
"chat-history: {history}",
"\nhuman_input:PLACEHOLDER\n ",
"[('system', PLACEHOLDER), ('human', PLACEHOLDER)]",
"[('system', 'Time:PLACEHOLDERPLACEHOLDER'), ('ai', 'chat-history: {history}'), ('human', PLACEHOLDER)]"
] |
2024-01-10 | Einaaaaa/teenTalkTalk | app~src~routes~chatbot-router~HuggingFace_ChatBot_Result.py | # -*- coding: utf-8 -*-
import requests
import pandas as pd
import numpy as np
import copy
import json
import torch
import pickle
import matplotlib.pyplot as plt
import sklearn.datasets as datasets
import sklearn.manifold as manifold
import openai
import os
import sys
import csv
import json
from ast import literal_eval
from sentence_transformers import SentenceTransformer, util
from transformers import AutoTokenizer, AutoModel
from transformers import OwlViTProcessor, OwlViTForObjectDetection
from transformers import pipeline
from transformers import GPT2TokenizerFast
from PIL import Image
from typing import List, Tuple, Dict
from dotenv import load_dotenv
''' Added code: suppresses the warning printed whenever the chatbot answers '''
import warnings  # library used to silence the warning
warnings.filterwarnings("ignore", message="Creating a tensor from a list of numpy.ndarrays is extremely slow.")
sys.stdout.reconfigure(encoding='utf-8')
''' 1. Load the OpenAI API '''
load_dotenv()
openai.api_key = os.getenv("api_key")
''' 2. Load the data and create embeddings '''
# Extract embedding vectors with a HuggingFace embedding model
data = pd.read_csv('./policy_data.csv', sep=",", dtype=str) # load the CSV file
data['recom_total'] = data['who'] + " / " + data['age'] + " / " + data['when'] + " / " + data['category'] # combine the who, age, when and category keywords for more accurate recommendations
model = SentenceTransformer("sentence-transformers/distiluse-base-multilingual-cased-v2") # HuggingFace embedding model
data['recom_embeddings'] = data['recom_total'].apply(lambda x : model.encode(x)) # embeddings used for recommendation
data['desc_embeddings'] = data['title'].apply(lambda x : model.encode(x)) # embeddings used for description
data.to_csv('./data_embeddings.csv', encoding='utf-8-sig')
''' 3. Retrieve the rows whose embeddings are most cosine-similar to the query '''
# top_k = 2 # controls the number of answers
def get_query_sim_top_k(query, model, df, top_k):
query_encode = model.encode(query)
cos_scores = util.pytorch_cos_sim(query_encode, df['recom_embeddings'])[0]
top_results = torch.topk(cos_scores, k=top_k)
return top_results
def get_overview_sim_top_k(desc, model, df, top_k):
overview_encode = model.encode(desc)
cos_scores = util.pytorch_cos_sim(overview_encode, df['desc_embeddings'])[0]
top_results = torch.topk(cos_scores, k=top_k)
return top_results
# query = "중학생을 위한 급식 관련 정책 추천해줘"
# top_result = get_query_sim_top_k(query, model, data, top_k)
# print(data.iloc[top_result[1].numpy(), :][['title', 'who', 'age', 'when']])
''' 4. Build the prompts used to classify intent with the OpenAI API '''
# revised prompt contents
msg_prompt = {
'recom' : {
'system' : "너는 user에게 정책 추천을 도움주는 assistant입니다.",
'user' : "당연하지!'로 시작하는 간단한 인사말 1문장을 작성해. 추천해주겠다는 말을 해줘.",
},
'desc' : {
'system' : "너는 user에게 정책 설명을 도움주는 assistant입니다.",
'user' : "'당연하지!'로 시작하는 간단한 인사말 1문장을 작성하여 user에게 정책을 설명해줘.",
},
'intent' : {
'system' : "너는 user의 질문 의도를 이해하는 도움을 주는 assistant입니다.",
'user' : "아래 문장은 'description','recommend', 중 속하는 categories만 보여라."
}
}
# Intent detection helper
def set_prompt(intent, query, msg_prompt_init, model):
    '''Builds the prompt in the expected message format.'''
m = dict()
    # recommendation
    if ('recom' in intent):
        msg = msg_prompt_init['recom']
    # description
    elif 'desc' in intent:
        msg = msg_prompt_init['desc']
    # detect intent
else:
msg = msg_prompt_init['intent']
msg['user'] += f' {query} \n A:'
for k, v in msg.items():
m['role'], m['content'] = k, v
return [m]
# Helper for calling the OpenAI API model
def get_chatgpt_msg(msg):
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=msg
)
return completion['choices'][0]['message']['content']
# Handles an incoming user query
def user_interact(query, model, msg_prompt_init):
    # determine the user's intent
user_intent = set_prompt('intent', query, msg_prompt_init, None)
user_intent = get_chatgpt_msg(user_intent).lower()
# print("user_intent : ", user_intent)
    # build the prompt that matches the user's intent
    intent_data = set_prompt(user_intent, query, msg_prompt_init, model)
    intent_data_msg = get_chatgpt_msg(intent_data).replace("\n", "").strip()
    # 3-1. recommendation branch
if ('recom' in user_intent):
recom_msg = str()
        # fetch the most similar item
        top_result = get_query_sim_top_k(query, model, data, top_k=1 if 'recom' in user_intent else 1) # single answer
        #print("top_result : ", top_result)
        # for a search, exclude the item itself
        top_index = top_result[1].numpy() if 'recom' in user_intent else top_result[1].numpy()[1:]
        #print("top_index : ", top_index)
        # pull the policy title, target group, period and link columns from the CSV data
r_set_d = data.iloc[top_index, :][['title', 'who', 'when','link']]
r_set_d = json.loads(r_set_d.to_json(orient="records"))
count = 0
        '''
        Modified code:
        the original fetched several results and printed each one with a nested for loop;
        since we recommend a single policy, a single for loop is used and the item at
        index 0 of r_set_d is taken directly (the description branch was changed the same way).
        '''
recom_msg += "\n입력하신 내용 기반으로 가장 적합한 정책을 추천하겠습니다.\n"
for _, v in r_set_d[0].items():
if(count == 0):
recom_msg += f"정책명 : '{v}'\n"
elif(count == 1):
recom_msg += f"대상 : {v}\n"
elif(count == 2):
recom_msg += f"기간 : {v}\n\n"
elif(count == 3):
recom_msg += "자세한 설명은 아래의 링크를 클릭하여 접속해보시기 바랍니다.\n"
recom_msg += f"{v}\n"
count += 1
print(recom_msg)
    # 3-2. description branch
elif 'desc' in user_intent:
desc_msg = str()
top_result = get_overview_sim_top_k(query, model, data, top_k=1)
r_set_d = data.iloc[top_result[1].numpy(), :][['title','overview','link']]
r_set_d = json.loads(r_set_d.to_json(orient="records"))
count = 0
desc_msg += "\n"
for _, v in r_set_d[0].items():
if(count == 0):
desc_msg += f"{v} 정책이란 "
elif(count == 1):
desc_msg += f"{v} 하는 정책입니다.\n\n"
elif(count == 2):
desc_msg += "자세한 설명을 원하시면 아래의 링크를 클릭하여 접속해보시기 바랍니다.\n"
desc_msg += f"{v}\n"
count += 1
print(desc_msg)
# query = input()
query = sys.argv[1] # the query passed as the first command-line argument
user_interact(query, model, copy.deepcopy(msg_prompt))
desired_answer = input("원하시는 답변이 되셨나요? (yes/no): ")
if desired_answer.lower() == "yes":
print("더 궁금하신 것이 있다면 다시 질문해주세요.\n") # 출력 내용 수정
exit(0)
else:
query = input("정확한 답변을 원하시면 구체적인 키워드를 포함해서 질문해주세요: ")
user_interact(query, model, copy.deepcopy(msg_prompt)) | [
"{'recom': {'system': '너는 user에게 정책 추천을 도움주는 assistant입니다.', 'user': \"당연하지!'로 시작하는 간단한 인사말 1문장을 작성해. 추천해주겠다는 말을 해줘.\"}, 'desc': {'system': '너는 user에게 정책 설명을 도움주는 assistant입니다.', 'user': \"'당연하지!'로 시작하는 간단한 인사말 1문장을 작성하여 user에게 정책을 설명해줘.\"}, 'intent': {'system': '너는 user의 질문 의도를 이해하는 도움을 주는 assistant입니다.', 'user': \"아래 문장은 'description','recommend', 중 속하는 categories만 보여라.\"}}"
] |
2024-01-10 | Zeros2112/LinguaBot--MadurAIOpenTecHakathon | conversational_agent~social_media.py | import requests
from pydantic import BaseModel, Field
from langchain.tools import tool
class TwitterInteractionInput(BaseModel):
message: str = Field(..., description="Message to post on Twitter")
# Replace 'YOUR_API_KEY', 'YOUR_API_SECRET', 'YOUR_ACCESS_TOKEN', and 'YOUR_ACCESS_TOKEN_SECRET' with actual Twitter API credentials
TWITTER_API_KEY = 'YOUR_API_KEY'
TWITTER_API_SECRET = 'YOUR_API_SECRET'
TWITTER_ACCESS_TOKEN = 'YOUR_ACCESS_TOKEN'
TWITTER_ACCESS_TOKEN_SECRET = 'YOUR_ACCESS_TOKEN_SECRET'
@tool(args_schema=TwitterInteractionInput)
def post_on_twitter(input_data: TwitterInteractionInput) -> str:
"""Post a message on Twitter."""
base_url = 'https://api.twitter.com/2/tweets'
headers = {
'Authorization': f'Bearer {TWITTER_ACCESS_TOKEN}',
}
    # The v2 create-tweet endpoint expects a JSON body with a "text" field.
    payload = {
        'text': input_data.message,
    }
    try:
        response = requests.post(base_url, headers=headers, json=payload)
        if response.status_code in (200, 201):
return "Message posted on Twitter successfully."
else:
return f"Twitter API Request failed with status code: {response.status_code}"
except Exception as e:
return f"An unexpected error occurred: {str(e)}"
| [] |
2024-01-10 | Sylvester254/chat-langchain | query_data.py | from langchain.callbacks.base import AsyncCallbackManager
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT,
QA_PROMPT)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.vectorstores.base import VectorStore
def get_chain(
vectorstore: VectorStore, question_handler, stream_handler
) -> ConversationalRetrievalChain:
manager = AsyncCallbackManager([])
question_manager = AsyncCallbackManager([question_handler])
stream_manager = AsyncCallbackManager([stream_handler])
question_gen_llm = OpenAI(
temperature=0,
verbose=True,
callback_manager=question_manager,
)
streaming_llm = OpenAI(
streaming=True,
callback_manager=stream_manager,
verbose=True,
temperature=0,
)
question_generator = LLMChain(
llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager
)
doc_chain = load_qa_chain(
streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager
)
chain = ConversationalRetrievalChain(
retriever=vectorstore.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
callback_manager=manager
)
return chain | [] |
2024-01-10 | seedclaimer/ChuanhuChatGPT | ChuanhuChatbot.py | import json
import gradio as gr
import openai
import os
import sys
import traceback
# import markdown
my_api_key = "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" # 在这里输入你的 API 密钥
initial_prompt = "You are a helpful assistant."
if my_api_key == "":
my_api_key = os.environ.get('my_api_key')
if my_api_key == "empty":
print("Please give a api key!")
sys.exit(1)
if my_api_key == "":
initial_keytxt = None
elif len(str(my_api_key)) == 51:
initial_keytxt = "默认api-key(未验证):" + str(my_api_key[:4] + "..." + my_api_key[-4:])
else:
initial_keytxt = "默认api-key无效,请重新输入"
def parse_text(text):
lines = text.split("\n")
count = 0
for i,line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="{items[-1]}">'
else:
lines[i] = f'</code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("&", "&")
line = line.replace("\"", """)
line = line.replace("\'", "'")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
lines[i] = '<br/>'+line
return "".join(lines)
def get_response(system, context, myKey, raw = False):
openai.api_key = myKey
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[system, *context],
)
openai.api_key = ""
if raw:
return response
else:
statistics = f'本次对话Tokens用量【{response["usage"]["total_tokens"]} / 4096】 ( 提问+上文 {response["usage"]["prompt_tokens"]},回答 {response["usage"]["completion_tokens"]} )'
message = response["choices"][0]["message"]["content"]
message_with_stats = f'{message}\n\n================\n\n{statistics}'
# message_with_stats = markdown.markdown(message_with_stats)
return message, parse_text(message_with_stats)
def predict(chatbot, input_sentence, system, context, myKey):
if len(input_sentence) == 0:
return []
context.append({"role": "user", "content": f"{input_sentence}"})
try:
message, message_with_stats = get_response(system, context, myKey)
except openai.error.AuthenticationError:
chatbot.append((input_sentence, "请求失败,请检查API-key是否正确。"))
return chatbot, context
except openai.error.Timeout:
chatbot.append((input_sentence, "请求超时,请检查网络连接。"))
return chatbot, context
except openai.error.APIConnectionError:
chatbot.append((input_sentence, "连接失败,请检查网络连接。"))
return chatbot, context
except openai.error.RateLimitError:
chatbot.append((input_sentence, "请求过于频繁,请5s后再试。"))
return chatbot, context
except:
chatbot.append((input_sentence, "发生了未知错误Orz"))
return chatbot, context
context.append({"role": "assistant", "content": message})
chatbot.append((input_sentence, message_with_stats))
return chatbot, context
def retry(chatbot, system, context, myKey):
if len(context) == 0:
return [], []
try:
message, message_with_stats = get_response(system, context[:-1], myKey)
except openai.error.AuthenticationError:
chatbot.append(("重试请求", "请求失败,请检查API-key是否正确。"))
return chatbot, context
except openai.error.Timeout:
chatbot.append(("重试请求", "请求超时,请检查网络连接。"))
return chatbot, context
except openai.error.APIConnectionError:
chatbot.append(("重试请求", "连接失败,请检查网络连接。"))
return chatbot, context
except openai.error.RateLimitError:
chatbot.append(("重试请求", "请求过于频繁,请5s后再试。"))
return chatbot, context
except:
chatbot.append(("重试请求", "发生了未知错误Orz"))
return chatbot, context
context[-1] = {"role": "assistant", "content": message}
chatbot[-1] = (context[-2]["content"], message_with_stats)
return chatbot, context
def delete_last_conversation(chatbot, context):
if len(context) == 0:
return [], []
chatbot = chatbot[:-1]
context = context[:-2]
return chatbot, context
def reduce_token(chatbot, system, context, myKey):
context.append({"role": "user", "content": "请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。在总结中不要加入这一句话。"})
response = get_response(system, context, myKey, raw=True)
statistics = f'本次对话Tokens用量【{response["usage"]["completion_tokens"]+12+12+8} / 4096】'
optmz_str = parse_text( f'好的,我们之前聊了:{response["choices"][0]["message"]["content"]}\n\n================\n\n{statistics}' )
chatbot.append(("请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。", optmz_str))
context = []
context.append({"role": "user", "content": "我们之前聊了什么?"})
context.append({"role": "assistant", "content": f'我们之前聊了:{response["choices"][0]["message"]["content"]}'})
return chatbot, context
def save_chat_history(filepath, system, context):
if filepath == "":
return
history = {"system": system, "context": context}
with open(f"{filepath}.json", "w") as f:
json.dump(history, f)
def load_chat_history(fileobj):
with open(fileobj.name, "r") as f:
history = json.load(f)
context = history["context"]
chathistory = []
for i in range(0, len(context), 2):
chathistory.append((parse_text(context[i]["content"]), parse_text(context[i+1]["content"])))
return chathistory , history["system"], context, history["system"]["content"]
def get_history_names():
with open("history.json", "r") as f:
history = json.load(f)
return list(history.keys())
def reset_state():
return [], []
def update_system(new_system_prompt):
return {"role": "system", "content": new_system_prompt}
def set_apikey(new_api_key, myKey):
old_api_key = myKey
try:
get_response(update_system(initial_prompt), [{"role": "user", "content": "test"}], new_api_key)
except openai.error.AuthenticationError:
return "无效的api-key", myKey
except openai.error.Timeout:
return "请求超时,请检查网络设置", myKey
except openai.error.APIConnectionError:
return "网络错误", myKey
except:
return "发生了未知错误Orz", myKey
encryption_str = "验证成功,api-key已做遮挡处理:" + new_api_key[:4] + "..." + new_api_key[-4:]
return encryption_str, new_api_key
with gr.Blocks() as demo:
keyTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入你的OpenAI API-key...", value=initial_keytxt, label="API Key").style(container=True)
chatbot = gr.Chatbot().style(color_map=("#1D51EE", "#585A5B"))
context = gr.State([])
systemPrompt = gr.State(update_system(initial_prompt))
myKey = gr.State(my_api_key)
topic = gr.State("未命名对话历史记录")
with gr.Row():
with gr.Column(scale=12):
txt = gr.Textbox(show_label=False, placeholder="在这里输入").style(container=False)
with gr.Column(min_width=50, scale=1):
submitBtn = gr.Button("🚀", variant="primary")
with gr.Row():
emptyBtn = gr.Button("🧹 新的对话")
retryBtn = gr.Button("🔄 重新生成")
delLastBtn = gr.Button("🗑️ 删除上条对话")
reduceTokenBtn = gr.Button("♻️ 优化Tokens")
newSystemPrompt = gr.Textbox(show_label=True, placeholder=f"在这里输入新的System Prompt...", label="更改 System prompt").style(container=True)
systemPromptDisplay = gr.Textbox(show_label=True, value=initial_prompt, interactive=False, label="目前的 System prompt").style(container=True)
with gr.Accordion(label="保存/加载对话历史记录(在文本框中输入文件名,点击“保存对话”按钮,历史记录文件会被存储到本地)", open=False):
with gr.Column():
with gr.Row():
with gr.Column(scale=6):
saveFileName = gr.Textbox(show_label=True, placeholder=f"在这里输入保存的文件名...", label="保存对话", value="对话历史记录").style(container=True)
with gr.Column(scale=1):
saveBtn = gr.Button("💾 保存对话")
uploadBtn = gr.UploadButton("📂 读取对话", file_count="single", file_types=["json"])
txt.submit(predict, [chatbot, txt, systemPrompt, context, myKey], [chatbot, context], show_progress=True)
txt.submit(lambda :"", None, txt)
submitBtn.click(predict, [chatbot, txt, systemPrompt, context, myKey], [chatbot, context], show_progress=True)
submitBtn.click(lambda :"", None, txt)
emptyBtn.click(reset_state, outputs=[chatbot, context])
newSystemPrompt.submit(update_system, newSystemPrompt, systemPrompt)
newSystemPrompt.submit(lambda x: x, newSystemPrompt, systemPromptDisplay)
newSystemPrompt.submit(lambda :"", None, newSystemPrompt)
retryBtn.click(retry, [chatbot, systemPrompt, context, myKey], [chatbot, context], show_progress=True)
delLastBtn.click(delete_last_conversation, [chatbot, context], [chatbot, context], show_progress=True)
reduceTokenBtn.click(reduce_token, [chatbot, systemPrompt, context, myKey], [chatbot, context], show_progress=True)
keyTxt.submit(set_apikey, [keyTxt, myKey], [keyTxt, myKey], show_progress=True)
uploadBtn.upload(load_chat_history, uploadBtn, [chatbot, systemPrompt, context, systemPromptDisplay], show_progress=True)
saveBtn.click(save_chat_history, [saveFileName, systemPrompt, context], None, show_progress=True)
demo.launch()
| [
"test",
"PLACEHOLDER",
"在这里输入新的System Prompt...",
"我们之前聊了什么?",
"目前的 System prompt",
"请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。在总结中不要加入这一句话。",
"You are a helpful assistant.",
"content",
"更改 System prompt"
] |
2024-01-10 | aishwaryaprabhat/CS7641-Machine-Learning | mdp~VIPIExperiment.py | import numpy as np
import gym
from gym import wrappers
from gym.envs.toy_text.frozen_lake import generate_random_map, FrozenLakeEnv
from hiive.mdptoolbox.mdp import ValueIteration, PolicyIteration, QLearning
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from openai import OpenAI
class VIPIExperiment():
def __init__(self, problem="Taxi-v3", run_q=False):
if type(problem)==str:
self.mdp = OpenAI(problem)
self.P, self.R = self.mdp.P, self.mdp.R
else:
self.mdp = problem
self.P, self.R = self.mdp
self.df_vi = pd.DataFrame()
self.df_pi = pd.DataFrame()
self.df_q = pd.DataFrame()
self.run_vi()
self.run_pi()
if run_q:
self.run_q()
def run_vi(self):
for gamma in [0.1, 0.5, 0.9]:
temp_df = pd.DataFrame()
vi = ValueIteration(self.P, self.R, max_iter=int(1e10), gamma=gamma)
temp_df = pd.DataFrame(vi.run())
temp_df["gamma"] = gamma
self.df_vi = self.df_vi.append(temp_df)
def run_pi(self):
for gamma in [0.1, 0.5, 0.9]:
temp_df = pd.DataFrame()
vi = PolicyIteration(self.P, self.R, max_iter=int(1e10), gamma=gamma)
temp_df = pd.DataFrame(vi.run())
temp_df["gamma"] = gamma
self.df_pi = self.df_pi.append(temp_df)
def run_q(self):
for eps in [0, 0.5, 1]:
temp_df = pd.DataFrame()
for alpha in [0.1, 0.5, 1]:
vi = QLearning(self.P, self.R, gamma=0.9, epsilon=eps, alpha=alpha, n_iter=100000)
temp_df = pd.DataFrame(vi.run())
temp_df["epsilon_start"] = eps
temp_df["alpha_start"] = alpha
self.df_q = self.df_q.append(temp_df)
def gamma_plot(self, df, metric="Mean V"):
plt.plot(df[df.gamma==0.1][metric], label="gamma=0.1")
plt.plot(df[df.gamma==0.5][metric], label="gamma=0.5")
plt.plot(df[df.gamma==0.9][metric], label="gamma=0.9")
plt.legend()
def comparative_plot(self, metric="Mean V"):
plt.plot(self.df_pi[metric], label="Policy Iteration")
plt.plot(self.df_vi[metric], label="Value Iteration")
plt.plot(self.df_q[metric], label="Q Learning")
plt.legend()
def plot_eps(self, df, metric="Mean V"):
plt.plot(df[df.epsilon_start==0][metric], label="epsilon=0")
plt.plot(df[df.epsilon_start==0.5][metric], label="epsilon=0.5")
plt.plot(df[df.epsilon_start==1][metric], label="epsilon=1")
plt.legend()
def plot_alpha(self, df, metric="Mean V"):
plt.plot(df[df.alpha_start==0.1][metric], label="alpha=0.1")
plt.plot(df[df.alpha_start==0.5][metric], label="alpha=0.5")
plt.plot(df[df.alpha_start==1][metric], label="alpha=1")
plt.legend()
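# Illustrative usage sketch (added here, not part of the original file). It
# assumes the forest-management example from hiive.mdptoolbox.example as a
# ready-made (P, R) problem, which exercises the tuple branch of the constructor.
if __name__ == "__main__":
    from hiive.mdptoolbox.example import forest
    experiment = VIPIExperiment(problem=forest(S=10), run_q=False)
    print(experiment.df_vi.head())
    print(experiment.df_pi.head())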
| [] |
2024-01-10 | bensbahou/ChatGpt-clone-with-Streamlit | gpt_clone.py | import streamlit as st
from streamlit_chat import message
from dotenv import load_dotenv
import os
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage,
)
def init():
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
st.set_page_config(page_title="ChatGPT Clone", page_icon=":robot_face:")
init()
chat = ChatOpenAI(temperature=0.9)
st.title("ChatGPT Clone :robot_face:")
with st.sidebar:
st.write("## About")
st.write("This is a clone of [ChatGPT](https://chat.openai.com/)")
if "messages" not in st.session_state:
st.session_state.messages = [
SystemMessage(
content="You're a doctor, and your name is Dr. Smith. You are talking to a patient about his symptoms."
),
]
messages = st.session_state.get("messages", [])
for i, msg in enumerate(messages):
if isinstance(msg, SystemMessage):
st.info(msg.content)
elif isinstance(msg, HumanMessage):
message(msg.content, is_user=True, key=f"message_{i}")
elif isinstance(msg, AIMessage):
message(msg.content, is_user=False, key=f"message_{i}")
user_input = st.text_input("You", value="", key="user_input", type="default")
if user_input:
# message(user_input, is_user=True)
st.session_state.messages.append(HumanMessage(content=user_input))
with st.spinner("Thinking..."):
response = chat(messages=st.session_state.messages)
# message(response.content)
st.session_state.messages.append(AIMessage(content=response.content))
| [
"You're a doctor, and your name is Dr. Smith. You are talking to a patient about his symptoms."
] |
2024-01-10 | milexm/Python | code~chatgpt~accessapi.py | """
Module accessapi.py
"""
import openai
import os
def simple_question():
"""
    Ask a simple question to ChatGPT.
Remarks
-------
    This simple function tests the environment settings to
determine if the infrastructure is in place to communicate
with ChatGPT via the OpenAI API.
"""
# Assign your private OpenAI API key
    # for authentication.
openai.api_key = os.environ['OpenAI_API_Key']
# Simple prompt or question to ask.
    my_prompt = 'Tell me a short ad for a home security company'
# This model version is deprecated. Migrate before January 4, 2024 to avoid
# disruption of service. Learn more
# https://platform.openai.com/docs/deprecations
davinci_model = 'text-davinci-003'
output = openai.Completion.create(
model = davinci_model,
prompt = my_prompt,
max_tokens = 200,
temperature = 0
)
# Print the entire response (Json file).
# print(output)
# Print only the text we are interested in.
text = output['choices'][0]['text']
print(f'\nChatGPT says: {text} \n')
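# Illustrative migration sketch (not part of the original module); see the docstring below.
def simple_question_migrated():
    """
    The same request against gpt-3.5-turbo-instruct, the completions model that
    OpenAI's deprecation guidance recommends as the replacement for
    text-davinci-003. The model name and parameters here are assumptions based
    on that guidance, not code from the original file.
    """
    openai.api_key = os.environ['OpenAI_API_Key']
    my_prompt = 'Tell me a short ad for a home security company'
    output = openai.Completion.create(
        model = 'gpt-3.5-turbo-instruct',
        prompt = my_prompt,
        max_tokens = 200,
        temperature = 0
    )
    # Print only the text we are interested in.
    text = output['choices'][0]['text']
    print(f'\nChatGPT says: {text} \n')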
if __name__ == '__main__':
# Ask simple question.
simple_question()
| [
"Tell me a short ad for a homme security company"
] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~llms~nlpcloud.py | from typing import Any, Dict, List, Mapping, Optional
from langchain_core.pydantic_v1 import Extra, SecretStr, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import convert_to_secret_str, get_from_dict_or_env
class NLPCloud(LLM):
"""NLPCloud large language models.
To use, you should have the ``nlpcloud`` python package installed, and the
environment variable ``NLPCLOUD_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain.llms import NLPCloud
nlpcloud = NLPCloud(model="finetuned-gpt-neox-20b")
"""
client: Any #: :meta private:
model_name: str = "finetuned-gpt-neox-20b"
"""Model name to use."""
gpu: bool = True
"""Whether to use a GPU or not"""
lang: str = "en"
"""Language to use (multilingual addon)"""
temperature: float = 0.7
"""What sampling temperature to use."""
max_length: int = 256
"""The maximum number of tokens to generate in the completion."""
length_no_input: bool = True
"""Whether min_length and max_length should include the length of the input."""
remove_input: bool = True
"""Remove input text from API response"""
remove_end_sequence: bool = True
"""Whether or not to remove the end sequence token."""
bad_words: List[str] = []
"""List of tokens not allowed to be generated."""
top_p: int = 1
"""Total probability mass of tokens to consider at each step."""
top_k: int = 50
"""The number of highest probability tokens to keep for top-k filtering."""
repetition_penalty: float = 1.0
"""Penalizes repeated tokens. 1.0 means no penalty."""
num_beams: int = 1
"""Number of beams for beam search."""
num_return_sequences: int = 1
"""How many completions to generate for each prompt."""
nlpcloud_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["nlpcloud_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "nlpcloud_api_key", "NLPCLOUD_API_KEY")
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"],
values["nlpcloud_api_key"].get_secret_value(),
gpu=values["gpu"],
lang=values["lang"],
)
except ImportError:
raise ImportError(
"Could not import nlpcloud python package. "
"Please install it with `pip install nlpcloud`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling NLPCloud API."""
return {
"temperature": self.temperature,
"max_length": self.max_length,
"length_no_input": self.length_no_input,
"remove_input": self.remove_input,
"remove_end_sequence": self.remove_end_sequence,
"bad_words": self.bad_words,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
"num_beams": self.num_beams,
"num_return_sequences": self.num_return_sequences,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_name": self.model_name},
**{"gpu": self.gpu},
**{"lang": self.lang},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nlpcloud"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Not supported by this interface (pass in init method)
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
"NLPCloud only supports a single stop sequence per generation."
"Pass in a list of length 1."
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
params = {**self._default_params, **kwargs}
response = self.client.generation(prompt, end_sequence=end_sequence, **params)
return response["generated_text"]
| [] |
2024-01-10 | docugami/langchain | libs~core~langchain_core~caches.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Optional, Sequence
from langchain_core.outputs import Generation
RETURN_VAL_TYPE = Sequence[Generation]
class BaseCache(ABC):
"""Base interface for cache."""
@abstractmethod
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
@abstractmethod
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
@abstractmethod
def clear(self, **kwargs: Any) -> None:
"""Clear cache that can take additional keyword arguments."""
| [] |
2024-01-10 | docugami/langchain | libs~langchain~tests~integration_tests~retrievers~test_wikipedia.py | """Integration test for Wikipedia Retriever."""
from typing import List
import pytest
from langchain_core.documents import Document
from langchain.retrievers import WikipediaRetriever
@pytest.fixture
def retriever() -> WikipediaRetriever:
return WikipediaRetriever()
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {"title", "summary", "source"}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
def test_load_success(retriever: WikipediaRetriever) -> None:
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=False)
def test_load_success_all_meta(retriever: WikipediaRetriever) -> None:
retriever.load_all_available_meta = True
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=True)
def test_load_success_init_args() -> None:
retriever = WikipediaRetriever(
lang="en", top_k_results=1, load_all_available_meta=True
)
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) == 1
assert_docs(docs, all_meta=True)
def test_load_success_init_args_more() -> None:
retriever = WikipediaRetriever(
lang="en", top_k_results=20, load_all_available_meta=False
)
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) == 20
assert_docs(docs, all_meta=False)
def test_load_no_result(retriever: WikipediaRetriever) -> None:
docs = retriever.get_relevant_documents(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
assert not docs
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~agents~load_tools.py | # flake8: noqa
"""Tools provide access to various resources and services.
LangChain has a large ecosystem of integrations with various external resources
like local and remote file systems, APIs and databases.
These integrations allow developers to create versatile applications that combine the
power of LLMs with the ability to access, interact with and manipulate external
resources.
When developing an application, developers should inspect the capabilities and
permissions of the tools that underlie the given agent toolkit, and determine
whether permissions of the given toolkit are appropriate for the application.
See [Security](https://python.langchain.com/docs/security) for more information.
"""
import warnings
from typing import Any, Dict, List, Optional, Callable, Tuple
from mypy_extensions import Arg, KwArg
from langchain.agents.tools import Tool
from langchain_core.language_models import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.utilities.dalle_image_generator import DallEAPIWrapper
from langchain.utilities.requests import TextRequestsWrapper
from langchain.tools.arxiv.tool import ArxivQueryRun
from langchain.tools.golden_query.tool import GoldenQueryRun
from langchain.tools.pubmed.tool import PubmedQueryRun
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain.tools.google_cloud.texttospeech import GoogleCloudTextToSpeechTool
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.google_scholar.tool import GoogleScholarQueryRun
from langchain.tools.metaphor_search.tool import MetaphorSearchResults
from langchain.tools.google_serper.tool import GoogleSerperResults, GoogleSerperRun
from langchain.tools.searchapi.tool import SearchAPIResults, SearchAPIRun
from langchain.tools.graphql.tool import BaseGraphQLTool
from langchain.tools.human.tool import HumanInputRun
from langchain.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
from langchain.tools.scenexplain.tool import SceneXplainTool
from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun
from langchain.tools.shell.tool import ShellTool
from langchain.tools.sleep.tool import SleepTool
from langchain.tools.stackexchange.tool import StackExchangeTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchResults
from langchain.tools.memorize.tool import Memorize
from langchain.utilities.arxiv import ArxivAPIWrapper
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
from langchain.utilities.pubmed import PubMedAPIWrapper
from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.google_scholar import GoogleScholarAPIWrapper
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
from langchain.utilities.awslambda import LambdaWrapper
from langchain.utilities.graphql import GraphQLAPIWrapper
from langchain.utilities.searchapi import SearchApiAPIWrapper
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.stackexchange import StackExchangeAPIWrapper
from langchain.utilities.twilio import TwilioAPIWrapper
from langchain.utilities.wikipedia import WikipediaAPIWrapper
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
def _get_python_repl() -> BaseTool:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def _get_tools_requests_get() -> BaseTool:
return RequestsGetTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_post() -> BaseTool:
return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_patch() -> BaseTool:
return RequestsPatchTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_put() -> BaseTool:
return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_delete() -> BaseTool:
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
def _get_terminal() -> BaseTool:
return ShellTool()
def _get_sleep() -> BaseTool:
return SleepTool()
_BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = {
"requests": _get_tools_requests_get, # preserved for backwards compatibility
"requests_get": _get_tools_requests_get,
"requests_post": _get_tools_requests_post,
"requests_patch": _get_tools_requests_patch,
"requests_put": _get_tools_requests_put,
"requests_delete": _get_tools_requests_delete,
"terminal": _get_terminal,
"sleep": _get_sleep,
}
def _get_llm_math(llm: BaseLanguageModel) -> BaseTool:
return Tool(
name="Calculator",
description="Useful for when you need to answer questions about math.",
func=LLMMathChain.from_llm(llm=llm).run,
coroutine=LLMMathChain.from_llm(llm=llm).arun,
)
def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(
llm,
open_meteo_docs.OPEN_METEO_DOCS,
limit_to_domains=["https://api.open-meteo.com/"],
)
return Tool(
name="Open-Meteo-API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
_LLM_TOOLS: Dict[str, Callable[[BaseLanguageModel], BaseTool]] = {
"llm-math": _get_llm_math,
"open-meteo-api": _get_open_meteo_api,
}
def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
news_api_key = kwargs["news_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm,
news_docs.NEWS_DOCS,
headers={"X-Api-Key": news_api_key},
limit_to_domains=["https://newsapi.org/"],
)
return Tool(
name="News-API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
tmdb_bearer_token = kwargs["tmdb_bearer_token"]
chain = APIChain.from_llm_and_api_docs(
llm,
tmdb_docs.TMDB_DOCS,
headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
limit_to_domains=["https://api.themoviedb.org/"],
)
return Tool(
name="TMDB-API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_podcast_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
listen_api_key = kwargs["listen_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm,
podcast_docs.PODCAST_DOCS,
headers={"X-ListenAPI-Key": listen_api_key},
limit_to_domains=["https://listen-api.listennotes.com/"],
)
return Tool(
name="Podcast-API",
description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_lambda_api(**kwargs: Any) -> BaseTool:
return Tool(
name=kwargs["awslambda_tool_name"],
description=kwargs["awslambda_tool_description"],
func=LambdaWrapper(**kwargs).run,
)
def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_google_search(**kwargs: Any) -> BaseTool:
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_wikipedia(**kwargs: Any) -> BaseTool:
return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs))
def _get_arxiv(**kwargs: Any) -> BaseTool:
return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs))
def _get_golden_query(**kwargs: Any) -> BaseTool:
return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs))
def _get_pubmed(**kwargs: Any) -> BaseTool:
return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs))
def _get_google_serper(**kwargs: Any) -> BaseTool:
return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_scholar(**kwargs: Any) -> BaseTool:
return GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper(**kwargs))
def _get_google_serper_results_json(**kwargs: Any) -> BaseTool:
return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_searchapi(**kwargs: Any) -> BaseTool:
return SearchAPIRun(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_searchapi_results_json(**kwargs: Any) -> BaseTool:
return SearchAPIResults(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_serpapi(**kwargs: Any) -> BaseTool:
return Tool(
name="Search",
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SerpAPIWrapper(**kwargs).run,
coroutine=SerpAPIWrapper(**kwargs).arun,
)
def _get_stackexchange(**kwargs: Any) -> BaseTool:
return StackExchangeTool(api_wrapper=StackExchangeAPIWrapper(**kwargs))
def _get_dalle_image_generator(**kwargs: Any) -> Tool:
return Tool(
"Dall-E-Image-Generator",
DallEAPIWrapper(**kwargs).run,
"A wrapper around OpenAI DALL-E API. Useful for when you need to generate images from a text description. Input should be an image description.",
)
def _get_twilio(**kwargs: Any) -> BaseTool:
return Tool(
name="Text-Message",
description="Useful for when you need to send a text message to a provided phone number.",
func=TwilioAPIWrapper(**kwargs).run,
)
def _get_searx_search(**kwargs: Any) -> BaseTool:
return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs))
def _get_searx_search_results_json(**kwargs: Any) -> BaseTool:
wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"}
return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs)
def _get_bing_search(**kwargs: Any) -> BaseTool:
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
def _get_metaphor_search(**kwargs: Any) -> BaseTool:
return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs))
def _get_ddg_search(**kwargs: Any) -> BaseTool:
return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs))
def _get_human_tool(**kwargs: Any) -> BaseTool:
return HumanInputRun(**kwargs)
def _get_scenexplain(**kwargs: Any) -> BaseTool:
return SceneXplainTool(**kwargs)
def _get_graphql_tool(**kwargs: Any) -> BaseTool:
graphql_endpoint = kwargs["graphql_endpoint"]
wrapper = GraphQLAPIWrapper(graphql_endpoint=graphql_endpoint)
return BaseGraphQLTool(graphql_wrapper=wrapper)
def _get_openweathermap(**kwargs: Any) -> BaseTool:
return OpenWeatherMapQueryRun(api_wrapper=OpenWeatherMapAPIWrapper(**kwargs))
def _get_dataforseo_api_search(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchRun(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_dataforseo_api_search_json(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchResults(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_eleven_labs_text2speech(**kwargs: Any) -> BaseTool:
return ElevenLabsText2SpeechTool(**kwargs)
def _get_memorize(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
return Memorize(llm=llm)
def _get_google_cloud_texttospeech(**kwargs: Any) -> BaseTool:
return GoogleCloudTextToSpeechTool(**kwargs)
_EXTRA_LLM_TOOLS: Dict[
str,
Tuple[Callable[[Arg(BaseLanguageModel, "llm"), KwArg(Any)], BaseTool], List[str]],
] = {
"news-api": (_get_news_api, ["news_api_key"]),
"tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
"podcast-api": (_get_podcast_api, ["listen_api_key"]),
"memorize": (_get_memorize, []),
}
_EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
"google-search-results-json": (
_get_google_search_results_json,
["google_api_key", "google_cse_id", "num_results"],
),
"searx-search-results-json": (
_get_searx_search_results_json,
["searx_host", "engines", "num_results", "aiosession"],
),
"bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]),
"metaphor-search": (_get_metaphor_search, ["metaphor_api_key"]),
"ddg-search": (_get_ddg_search, []),
"google-serper": (_get_google_serper, ["serper_api_key", "aiosession"]),
"google-scholar": (
_get_google_scholar,
["top_k_results", "hl", "lr", "serp_api_key"],
),
"google-serper-results-json": (
_get_google_serper_results_json,
["serper_api_key", "aiosession"],
),
"searchapi": (_get_searchapi, ["searchapi_api_key", "aiosession"]),
"searchapi-results-json": (
_get_searchapi_results_json,
["searchapi_api_key", "aiosession"],
),
"serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
"dalle-image-generator": (_get_dalle_image_generator, ["openai_api_key"]),
"twilio": (_get_twilio, ["account_sid", "auth_token", "from_number"]),
"searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]),
"wikipedia": (_get_wikipedia, ["top_k_results", "lang"]),
"arxiv": (
_get_arxiv,
["top_k_results", "load_max_docs", "load_all_available_meta"],
),
"golden-query": (_get_golden_query, ["golden_api_key"]),
"pubmed": (_get_pubmed, ["top_k_results"]),
"human": (_get_human_tool, ["prompt_func", "input_func"]),
"awslambda": (
_get_lambda_api,
["awslambda_tool_name", "awslambda_tool_description", "function_name"],
),
"stackexchange": (_get_stackexchange, []),
"sceneXplain": (_get_scenexplain, []),
"graphql": (_get_graphql_tool, ["graphql_endpoint"]),
"openweathermap-api": (_get_openweathermap, ["openweathermap_api_key"]),
"dataforseo-api-search": (
_get_dataforseo_api_search,
["api_login", "api_password", "aiosession"],
),
"dataforseo-api-search-json": (
_get_dataforseo_api_search_json,
["api_login", "api_password", "aiosession"],
),
"eleven_labs_text2speech": (_get_eleven_labs_text2speech, ["eleven_api_key"]),
"google_cloud_texttospeech": (_get_google_cloud_texttospeech, []),
}
def _handle_callbacks(
callback_manager: Optional[BaseCallbackManager], callbacks: Callbacks
) -> Callbacks:
if callback_manager is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
if callbacks is not None:
raise ValueError(
"Cannot specify both callback_manager and callbacks arguments."
)
return callback_manager
return callbacks
def load_huggingface_tool(
task_or_repo_id: str,
model_repo_id: Optional[str] = None,
token: Optional[str] = None,
remote: bool = False,
**kwargs: Any,
) -> BaseTool:
"""Loads a tool from the HuggingFace Hub.
Args:
task_or_repo_id: Task or model repo id.
model_repo_id: Optional model repo id.
token: Optional token.
remote: Optional remote. Defaults to False.
**kwargs:
Returns:
A tool.
"""
try:
from transformers import load_tool
except ImportError:
raise ImportError(
"HuggingFace tools require the libraries `transformers>=4.29.0`"
" and `huggingface_hub>=0.14.1` to be installed."
" Please install it with"
" `pip install --upgrade transformers huggingface_hub`."
)
hf_tool = load_tool(
task_or_repo_id,
model_repo_id=model_repo_id,
token=token,
remote=remote,
**kwargs,
)
outputs = hf_tool.outputs
if set(outputs) != {"text"}:
raise NotImplementedError("Multimodal outputs not supported yet.")
inputs = hf_tool.inputs
if set(inputs) != {"text"}:
raise NotImplementedError("Multimodal inputs not supported yet.")
return Tool.from_function(
hf_tool.__call__, name=hf_tool.name, description=hf_tool.description
)
def load_tools(
tool_names: List[str],
llm: Optional[BaseLanguageModel] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> List[BaseTool]:
"""Load tools based on their name.
Tools allow agents to interact with various resources and services like
APIs, databases, file systems, etc.
Please scope the permissions of each tools to the minimum required for the
application.
For example, if an application only needs to read from a database,
the database tool should not be given write permissions. Moreover
consider scoping the permissions to only allow accessing specific
tables and impose user-level quota for limiting resource usage.
Please read the APIs of the individual tools to determine which configuration
they support.
See [Security](https://python.langchain.com/docs/security) for more information.
Args:
tool_names: name of tools to load.
llm: An optional language model, may be needed to initialize certain tools.
callbacks: Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
callbacks = _handle_callbacks(
callback_manager=kwargs.get("callback_manager"), callbacks=callbacks
)
for name in tool_names:
if name == "requests":
warnings.warn(
"tool name `requests` is deprecated - "
"please use `requests_all` or specify the requests method"
)
if name == "requests_all":
# expand requests into various methods
requests_method_tools = [
_tool for _tool in _BASE_TOOLS if _tool.startswith("requests_")
]
tool_names.extend(requests_method_tools)
elif name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
tool = _LLM_TOOLS[name](llm)
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f"Tool {name} requires some parameters that were not "
f"provided: {missing_keys}"
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
tools.append(tool)
else:
raise ValueError(f"Got unknown tool {name}")
if callbacks is not None:
for tool in tools:
tool.callbacks = callbacks
return tools
def get_all_tool_names() -> List[str]:
"""Get a list of all possible tool names."""
return (
list(_BASE_TOOLS)
+ list(_EXTRA_OPTIONAL_TOOLS)
+ list(_EXTRA_LLM_TOOLS)
+ list(_LLM_TOOLS)
)
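# Illustrative usage sketch (not part of the original module); it assumes the
# optional `wikipedia` and `duckduckgo-search` packages are installed, since
# those two tools need neither API keys nor an LLM.
if __name__ == "__main__":
    example_tools = load_tools(["wikipedia", "ddg-search"])
    for example_tool in example_tools:
        print(example_tool.name, "-", example_tool.description)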
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~document_loaders~hugging_face_dataset.py | import json
from typing import Iterator, List, Mapping, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain.document_loaders.base import BaseLoader
class HuggingFaceDatasetLoader(BaseLoader):
"""Load from `Hugging Face Hub` datasets."""
def __init__(
self,
path: str,
page_content_column: str = "text",
name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[
Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]
] = None,
cache_dir: Optional[str] = None,
keep_in_memory: Optional[bool] = None,
save_infos: bool = False,
use_auth_token: Optional[Union[bool, str]] = None,
num_proc: Optional[int] = None,
):
"""Initialize the HuggingFaceDatasetLoader.
Args:
path: Path or name of the dataset.
page_content_column: Page content column name. Default is "text".
name: Name of the dataset configuration.
data_dir: Data directory of the dataset configuration.
data_files: Path(s) to source data file(s).
cache_dir: Directory to read/write data.
keep_in_memory: Whether to copy the dataset in-memory.
save_infos: Save the dataset information (checksums/size/splits/...).
Default is False.
use_auth_token: Bearer token for remote files on the Dataset Hub.
num_proc: Number of processes.
"""
self.path = path
self.page_content_column = page_content_column
self.name = name
self.data_dir = data_dir
self.data_files = data_files
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.save_infos = save_infos
self.use_auth_token = use_auth_token
self.num_proc = num_proc
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily."""
try:
from datasets import load_dataset
except ImportError:
raise ImportError(
"Could not import datasets python package. "
"Please install it with `pip install datasets`."
)
dataset = load_dataset(
path=self.path,
name=self.name,
data_dir=self.data_dir,
data_files=self.data_files,
cache_dir=self.cache_dir,
keep_in_memory=self.keep_in_memory,
save_infos=self.save_infos,
use_auth_token=self.use_auth_token,
num_proc=self.num_proc,
)
yield from (
Document(
page_content=self.parse_obj(row.pop(self.page_content_column)),
metadata=row,
)
for key in dataset.keys()
for row in dataset[key]
)
def load(self) -> List[Document]:
"""Load documents."""
return list(self.lazy_load())
    def parse_obj(self, page_content: Union[str, object]) -> str:
        # JSON-encode non-string page content (e.g. dicts or lists); return
        # plain strings unchanged.
        if not isinstance(page_content, str):
            return json.dumps(page_content)
        return page_content
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~chat_models~vertexai.py | """Wrapper around Google VertexAI chat-based models."""
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union, cast
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel, generate_from_stream
from langchain.llms.vertexai import _VertexAICommon, is_codey_model
from langchain.utilities.vertexai import raise_vertex_import_error
if TYPE_CHECKING:
from vertexai.language_models import (
ChatMessage,
ChatSession,
CodeChatSession,
InputOutputTextPair,
)
logger = logging.getLogger(__name__)
@dataclass
class _ChatHistory:
"""Represents a context and a history of messages."""
history: List["ChatMessage"] = field(default_factory=list)
context: Optional[str] = None
def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory:
"""Parse a sequence of messages into history.
Args:
history: The list of messages to re-create the history of the chat.
Returns:
A parsed chat history.
Raises:
        ValueError: If the sequence of messages contains a SystemMessage anywhere
            other than the first position.
"""
from vertexai.language_models import ChatMessage
vertex_messages, context = [], None
for i, message in enumerate(history):
content = cast(str, message.content)
if i == 0 and isinstance(message, SystemMessage):
context = content
elif isinstance(message, AIMessage):
vertex_message = ChatMessage(content=message.content, author="bot")
vertex_messages.append(vertex_message)
elif isinstance(message, HumanMessage):
vertex_message = ChatMessage(content=message.content, author="user")
vertex_messages.append(vertex_message)
else:
raise ValueError(
f"Unexpected message with type {type(message)} at the position {i}."
)
chat_history = _ChatHistory(context=context, history=vertex_messages)
return chat_history
def _parse_examples(examples: List[BaseMessage]) -> List["InputOutputTextPair"]:
from vertexai.language_models import InputOutputTextPair
if len(examples) % 2 != 0:
raise ValueError(
f"Expect examples to have an even amount of messages, got {len(examples)}."
)
example_pairs = []
input_text = None
for i, example in enumerate(examples):
if i % 2 == 0:
if not isinstance(example, HumanMessage):
raise ValueError(
f"Expected the first message in a part to be from human, got "
f"{type(example)} for the {i}th message."
)
input_text = example.content
if i % 2 == 1:
if not isinstance(example, AIMessage):
raise ValueError(
f"Expected the second message in a part to be from AI, got "
f"{type(example)} for the {i}th message."
)
pair = InputOutputTextPair(
input_text=input_text, output_text=example.content
)
example_pairs.append(pair)
return example_pairs
def _get_question(messages: List[BaseMessage]) -> HumanMessage:
"""Get the human message at the end of a list of input messages to a chat model."""
if not messages:
raise ValueError("You should provide at least one message to start the chat!")
question = messages[-1]
if not isinstance(question, HumanMessage):
raise ValueError(
f"Last message in the list should be from human, got {question.type}."
)
return question
class ChatVertexAI(_VertexAICommon, BaseChatModel):
"""`Vertex AI` Chat large language models API."""
model_name: str = "chat-bison"
"Underlying model name."
examples: Optional[List[BaseMessage]] = None
@classmethod
    def is_lc_serializable(cls) -> bool:
return True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
cls._try_init_vertexai(values)
try:
from vertexai.language_models import ChatModel, CodeChatModel
except ImportError:
raise_vertex_import_error()
if is_codey_model(values["model_name"]):
model_cls = CodeChatModel
else:
model_cls = ChatModel
values["client"] = model_cls.from_pretrained(values["model_name"])
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
            run_manager: The CallbackManager for the LLM run; not used at the moment.
stream: Whether to use the streaming endpoint.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
question = _get_question(messages)
history = _parse_chat_history(messages[:-1])
params = self._prepare_params(stop=stop, stream=False, **kwargs)
examples = kwargs.get("examples") or self.examples
if examples:
params["examples"] = _parse_examples(examples)
msg_params = {}
if "candidate_count" in params:
msg_params["candidate_count"] = params.pop("candidate_count")
chat = self._start_chat(history, **params)
response = chat.send_message(question.content, **msg_params)
generations = [
ChatGeneration(message=AIMessage(content=r.text))
for r in response.candidates
]
return ChatResult(generations=generations)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Asynchronously generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
            run_manager: The CallbackManager for the LLM run; not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
if "stream" in kwargs:
kwargs.pop("stream")
logger.warning("ChatVertexAI does not currently support async streaming.")
question = _get_question(messages)
history = _parse_chat_history(messages[:-1])
params = self._prepare_params(stop=stop, **kwargs)
examples = kwargs.get("examples", None)
if examples:
params["examples"] = _parse_examples(examples)
msg_params = {}
if "candidate_count" in params:
msg_params["candidate_count"] = params.pop("candidate_count")
chat = self._start_chat(history, **params)
response = await chat.send_message_async(question.content, **msg_params)
generations = [
ChatGeneration(message=AIMessage(content=r.text))
for r in response.candidates
]
return ChatResult(generations=generations)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
question = _get_question(messages)
history = _parse_chat_history(messages[:-1])
params = self._prepare_params(stop=stop, **kwargs)
examples = kwargs.get("examples", None)
if examples:
params["examples"] = _parse_examples(examples)
chat = self._start_chat(history, **params)
responses = chat.send_message_streaming(question.content, **params)
for response in responses:
if run_manager:
run_manager.on_llm_new_token(response.text)
yield ChatGenerationChunk(message=AIMessageChunk(content=response.text))
def _start_chat(
self, history: _ChatHistory, **kwargs: Any
) -> Union[ChatSession, CodeChatSession]:
if not self.is_codey_model:
return self.client.start_chat(
context=history.context, message_history=history.history, **kwargs
)
else:
return self.client.start_chat(message_history=history.history, **kwargs)
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~llms~together.py | """Wrapper around Together AI's Completion API."""
import logging
from typing import Any, Dict, List, Optional
from aiohttp import ClientSession
from langchain_core.pydantic_v1 import Extra, SecretStr, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.utilities.requests import Requests
from langchain.utils import convert_to_secret_str, get_from_dict_or_env
logger = logging.getLogger(__name__)
class Together(LLM):
"""Wrapper around Together AI models.
To use, you'll need an API key which you can find here:
https://api.together.xyz/settings/api-keys. This can be passed in as init param
``together_api_key`` or set as environment variable ``TOGETHER_API_KEY``.
Together AI API reference: https://docs.together.ai/reference/inference
"""
base_url: str = "https://api.together.xyz/inference"
"""Base inference API URL."""
together_api_key: SecretStr
"""Together AI API key. Get it here: https://api.together.xyz/settings/api-keys"""
model: str
"""Model name. Available models listed here:
https://docs.together.ai/docs/inference-models
"""
temperature: Optional[float] = None
"""Model temperature."""
top_p: Optional[float] = None
"""Used to dynamically adjust the number of choices for each predicted token based
on the cumulative probabilities. A value of 1 will always yield the same
output. A temperature less than 1 favors more correctness and is appropriate
for question answering or summarization. A value greater than 1 introduces more
randomness in the output.
"""
top_k: Optional[int] = None
"""Used to limit the number of choices for the next predicted word or token. It
specifies the maximum number of tokens to consider at each step, based on their
probability of occurrence. This technique helps to speed up the generation
process and can improve the quality of the generated text by focusing on the
most likely options.
"""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
repetition_penalty: Optional[float] = None
"""A number that controls the diversity of generated text by reducing the
likelihood of repeated sequences. Higher values decrease repetition.
"""
logprobs: Optional[int] = None
"""An integer that specifies how many top token log probabilities are included in
the response for each token generation step.
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["together_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "together_api_key", "TOGETHER_API_KEY")
)
return values
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "together"
def _format_output(self, output: dict) -> str:
return output["output"]["choices"][0]["text"]
@staticmethod
def get_user_agent() -> str:
from langchain import __version__
return f"langchain/{__version__}"
@property
def default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"max_tokens": self.max_tokens,
"repetition_penalty": self.repetition_penalty,
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Together's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
            The string generated by the model.
"""
headers = {
"Authorization": f"Bearer {self.together_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
stop_to_use = stop[0] if stop and len(stop) == 1 else stop
payload: Dict[str, Any] = {
**self.default_params,
"prompt": prompt,
"stop": stop_to_use,
**kwargs,
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
request = Requests(headers=headers)
response = request.post(url=self.base_url, data=payload)
if response.status_code >= 500:
raise Exception(f"Together Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"Together received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"Together returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
data = response.json()
if data.get("status") != "finished":
err_msg = data.get("error", "Undefined Error")
raise Exception(err_msg)
output = self._format_output(data)
return output
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call Together model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
"""
headers = {
"Authorization": f"Bearer {self.together_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
stop_to_use = stop[0] if stop and len(stop) == 1 else stop
payload: Dict[str, Any] = {
**self.default_params,
"prompt": prompt,
"stop": stop_to_use,
**kwargs,
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
async with ClientSession() as session:
async with session.post(
self.base_url, json=payload, headers=headers
) as response:
if response.status >= 500:
raise Exception(f"Together Server: Error {response.status}")
elif response.status >= 400:
raise ValueError(
f"Together received an invalid payload: {response.text}"
)
elif response.status != 200:
raise Exception(
f"Together returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
if response_json.get("status") != "finished":
err_msg = response_json.get("error", "Undefined Error")
raise Exception(err_msg)
output = self._format_output(response_json)
return output
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
| [] |
2024-01-10 | docugami/langchain | libs~langchain~tests~integration_tests~utilities~test_wikipedia_api.py | """Integration test for Wikipedia API Wrapper."""
from typing import List
import pytest
from langchain_core.documents import Document
from langchain.utilities import WikipediaAPIWrapper
@pytest.fixture
def api_client() -> WikipediaAPIWrapper:
return WikipediaAPIWrapper()
def test_run_success(api_client: WikipediaAPIWrapper) -> None:
output = api_client.run("HUNTER X HUNTER")
assert "Yoshihiro Togashi" in output
def test_run_no_result(api_client: WikipediaAPIWrapper) -> None:
output = api_client.run(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
assert "No good Wikipedia Search Result was found" == output
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {"title", "summary", "source"}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
def test_load_success(api_client: WikipediaAPIWrapper) -> None:
docs = api_client.load("HUNTER X HUNTER")
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=False)
def test_load_success_all_meta(api_client: WikipediaAPIWrapper) -> None:
api_client.load_all_available_meta = True
docs = api_client.load("HUNTER X HUNTER")
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=True)
def test_load_more_docs_success(api_client: WikipediaAPIWrapper) -> None:
top_k_results = 20
api_client = WikipediaAPIWrapper(top_k_results=top_k_results)
docs = api_client.load("HUNTER X HUNTER")
assert len(docs) > 10
assert len(docs) <= top_k_results
assert_docs(docs, all_meta=False)
def test_load_no_result(api_client: WikipediaAPIWrapper) -> None:
docs = api_client.load(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
assert not docs
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~vectorstores~matching_engine.py | from __future__ import annotations
import json
import logging
import time
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
from langchain.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.cloud import storage
from google.cloud.aiplatform import MatchingEngineIndex, MatchingEngineIndexEndpoint
from google.cloud.aiplatform.matching_engine.matching_engine_index_endpoint import (
Namespace,
)
from google.oauth2.service_account import Credentials
from langchain.embeddings import TensorflowHubEmbeddings
logger = logging.getLogger()
class MatchingEngine(VectorStore):
"""`Google Vertex AI Vector Search` (previously Matching Engine) vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb
    Note that this implementation is mostly suited to read-heavy, real-time
    use cases: while reading is a real-time operation, updating the index
    takes close to one hour."""
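    # Usage sketch (illustrative; all IDs and the bucket name are placeholders and
    # an existing index/endpoint is assumed):
    #
    #     engine = MatchingEngine.from_components(
    #         project_id="my-project",
    #         region="us-central1",
    #         gcs_bucket_name="my-bucket",
    #         index_id="my-index-id",
    #         endpoint_id="my-endpoint-id",
    #     )
    #     docs = engine.similarity_search("a query", k=4)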
def __init__(
self,
project_id: str,
index: MatchingEngineIndex,
endpoint: MatchingEngineIndexEndpoint,
embedding: Embeddings,
gcs_client: storage.Client,
gcs_bucket_name: str,
credentials: Optional[Credentials] = None,
):
"""Google Vertex AI Vector Search (previously Matching Engine)
implementation of the vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in
docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb.
        Note that this implementation is mostly suited to read-heavy, real-time
        use cases: while reading is a real-time operation, updating the index
        takes close to one hour.
Attributes:
project_id: The GCS project id.
index: The created index class. See
~:func:`MatchingEngine.from_components`.
endpoint: The created endpoint class. See
~:func:`MatchingEngine.from_components`.
embedding: A :class:`Embeddings` that will be used for
embedding the text sent. If none is sent, then the
multilingual Tensorflow Universal Sentence Encoder will be used.
gcs_client: The GCS client.
gcs_bucket_name: The GCS bucket name.
credentials (Optional): Created GCP credentials.
"""
super().__init__()
self._validate_google_libraries_installation()
self.project_id = project_id
self.index = index
self.endpoint = endpoint
self.embedding = embedding
self.gcs_client = gcs_client
self.credentials = credentials
self.gcs_bucket_name = gcs_bucket_name
@property
def embeddings(self) -> Embeddings:
return self.embedding
def _validate_google_libraries_installation(self) -> None:
"""Validates that Google libraries that are needed are installed."""
try:
from google.cloud import aiplatform, storage # noqa: F401
from google.oauth2 import service_account # noqa: F401
except ImportError:
raise ImportError(
"You must run `pip install --upgrade "
"google-cloud-aiplatform google-cloud-storage`"
"to use the MatchingEngine Vectorstore."
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts = list(texts)
if metadatas is not None and len(texts) != len(metadatas):
raise ValueError(
"texts and metadatas do not have the same length. Received "
f"{len(texts)} texts and {len(metadatas)} metadatas."
)
logger.debug("Embedding documents.")
embeddings = self.embedding.embed_documents(texts)
jsons = []
ids = []
# Could be improved with async.
for idx, (embedding, text) in enumerate(zip(embeddings, texts)):
id = str(uuid.uuid4())
ids.append(id)
json_: dict = {"id": id, "embedding": embedding}
if metadatas is not None:
json_["metadata"] = metadatas[idx]
jsons.append(json_)
self._upload_to_gcs(text, f"documents/{id}")
logger.debug(f"Uploaded {len(ids)} documents to GCS.")
# Creating json lines from the embedded documents.
result_str = "\n".join([json.dumps(x) for x in jsons])
filename_prefix = f"indexes/{uuid.uuid4()}"
filename = f"{filename_prefix}/{time.time()}.json"
self._upload_to_gcs(result_str, filename)
logger.debug(
f"Uploaded updated json with embeddings to "
f"{self.gcs_bucket_name}/{filename}."
)
self.index = self.index.update_embeddings(
contents_delta_uri=f"gs://{self.gcs_bucket_name}/{filename_prefix}/"
)
logger.debug("Updated index with new configuration.")
return ids
def _upload_to_gcs(self, data: str, gcs_location: str) -> None:
"""Uploads data to gcs_location.
Args:
data: The data that will be stored.
gcs_location: The location where the data will be stored.
"""
bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
blob = bucket.blob(gcs_location)
blob.upload_from_string(data)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[List[Namespace]] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query and their cosine distance from the query.
Args:
query: String query look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Optional. A list of Namespaces for filtering
the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
logger.debug(f"Embedding query {query}.")
embedding_query = self.embedding.embed_query(query)
return self.similarity_search_by_vector_with_score(
embedding_query, k=k, filter=filter
)
def similarity_search_by_vector_with_score(
self,
embedding: List[float],
k: int = 4,
filter: Optional[List[Namespace]] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to the embedding and their cosine distance.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Optional. A list of Namespaces for filtering
the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
filter = filter or []
# If the endpoint is public we use the find_neighbors function.
if hasattr(self.endpoint, "_public_match_client") and (
self.endpoint._public_match_client
):
response = self.endpoint.find_neighbors(
deployed_index_id=self._get_index_id(),
queries=[embedding],
num_neighbors=k,
filter=filter,
)
else:
response = self.endpoint.match(
deployed_index_id=self._get_index_id(),
queries=[embedding],
num_neighbors=k,
filter=filter,
)
logger.debug(f"Found {len(response)} matches.")
if len(response) == 0:
return []
results = []
# I'm only getting the first one because queries receives an array
# and the similarity_search method only receives one query. This
# means that the match method will always return an array with only
# one element.
for doc in response[0]:
page_content = self._download_from_gcs(f"documents/{doc.id}")
results.append((Document(page_content=page_content), doc.distance))
logger.debug("Downloaded documents for query.")
return results
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[List[Namespace]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: The string that will be used to search for similar documents.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, filter=filter, **kwargs
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[List[Namespace]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to the embedding.
Args:
embedding: Embedding to look up documents similar to.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
"""
docs_and_scores = self.similarity_search_by_vector_with_score(
embedding, k=k, filter=filter, **kwargs
)
return [doc for doc, _ in docs_and_scores]
def _get_index_id(self) -> str:
"""Gets the correct index id for the endpoint.
Returns:
The index id if found (which should be found) or throws
ValueError otherwise.
"""
for index in self.endpoint.deployed_indexes:
if index.index == self.index.resource_name:
return index.id
raise ValueError(
f"No index with id {self.index.resource_name} "
f"deployed on endpoint "
f"{self.endpoint.display_name}."
)
def _download_from_gcs(self, gcs_location: str) -> str:
"""Downloads from GCS in text format.
Args:
gcs_location: The location where the file is located.
Returns:
The string contents of the file.
"""
bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
blob = bucket.blob(gcs_location)
return blob.download_as_string()
@classmethod
def from_texts(
cls: Type["MatchingEngine"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> "MatchingEngine":
"""Use from components instead."""
raise NotImplementedError(
"This method is not implemented. Instead, you should initialize the class"
" with `MatchingEngine.from_components(...)` and then call "
"`add_texts`"
)
@classmethod
def from_components(
cls: Type["MatchingEngine"],
project_id: str,
region: str,
gcs_bucket_name: str,
index_id: str,
endpoint_id: str,
credentials_path: Optional[str] = None,
embedding: Optional[Embeddings] = None,
) -> "MatchingEngine":
"""Takes the object creation out of the constructor.
Args:
project_id: The GCP project id.
region: The default location making the API calls. It must have
the same location as the GCS bucket and must be regional.
gcs_bucket_name: The location where the vectors will be stored in
order for the index to be created.
index_id: The id of the created index.
endpoint_id: The id of the created endpoint.
credentials_path: (Optional) The path of the Google credentials on
the local file system.
embedding: The :class:`Embeddings` that will be used for
embedding the texts.
Returns:
A configured MatchingEngine with the texts added to the index.
"""
gcs_bucket_name = cls._validate_gcs_bucket(gcs_bucket_name)
credentials = cls._create_credentials_from_file(credentials_path)
index = cls._create_index_by_id(index_id, project_id, region, credentials)
endpoint = cls._create_endpoint_by_id(
endpoint_id, project_id, region, credentials
)
gcs_client = cls._get_gcs_client(credentials, project_id)
cls._init_aiplatform(project_id, region, gcs_bucket_name, credentials)
return cls(
project_id=project_id,
index=index,
endpoint=endpoint,
embedding=embedding or cls._get_default_embeddings(),
gcs_client=gcs_client,
credentials=credentials,
gcs_bucket_name=gcs_bucket_name,
)
@classmethod
def _validate_gcs_bucket(cls, gcs_bucket_name: str) -> str:
"""Validates the gcs_bucket_name as a bucket name.
Args:
gcs_bucket_name: The received bucket uri.
Returns:
A valid gcs_bucket_name or throws ValueError if full path is
provided.
"""
gcs_bucket_name = gcs_bucket_name.replace("gs://", "")
if "/" in gcs_bucket_name:
raise ValueError(
f"The argument gcs_bucket_name should only be "
f"the bucket name. Received {gcs_bucket_name}"
)
return gcs_bucket_name
@classmethod
def _create_credentials_from_file(
cls, json_credentials_path: Optional[str]
) -> Optional[Credentials]:
"""Creates credentials for GCP.
Args:
json_credentials_path: The path on the file system where the
credentials are stored.
Returns:
An optional of Credentials or None, in which case the default
will be used.
"""
from google.oauth2 import service_account
credentials = None
if json_credentials_path is not None:
credentials = service_account.Credentials.from_service_account_file(
json_credentials_path
)
return credentials
@classmethod
def _create_index_by_id(
cls, index_id: str, project_id: str, region: str, credentials: "Credentials"
) -> MatchingEngineIndex:
"""Creates a MatchingEngineIndex object by id.
Args:
index_id: The created index id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndex.
"""
from google.cloud import aiplatform
logger.debug(f"Creating matching engine index with id {index_id}.")
return aiplatform.MatchingEngineIndex(
index_name=index_id,
project=project_id,
location=region,
credentials=credentials,
)
@classmethod
def _create_endpoint_by_id(
cls, endpoint_id: str, project_id: str, region: str, credentials: "Credentials"
) -> MatchingEngineIndexEndpoint:
"""Creates a MatchingEngineIndexEndpoint object by id.
Args:
endpoint_id: The created endpoint id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndexEndpoint.
"""
from google.cloud import aiplatform
logger.debug(f"Creating endpoint with id {endpoint_id}.")
return aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=endpoint_id,
project=project_id,
location=region,
credentials=credentials,
)
@classmethod
def _get_gcs_client(
cls, credentials: "Credentials", project_id: str
) -> "storage.Client":
"""Lazily creates a GCS client.
Returns:
A configured GCS client.
"""
from google.cloud import storage
return storage.Client(
credentials=credentials,
project=project_id,
client_info=get_client_info(module="vertex-ai-matching-engine"),
)
@classmethod
def _init_aiplatform(
cls,
project_id: str,
region: str,
gcs_bucket_name: str,
credentials: "Credentials",
) -> None:
"""Configures the aiplatform library.
Args:
project_id: The GCP project id.
region: The default location making the API calls. It must have
the same location as the GCS bucket and must be regional.
gcs_bucket_name: GCS staging location.
credentials: The GCS Credentials object.
"""
from google.cloud import aiplatform
logger.debug(
f"Initializing AI Platform for project {project_id} on "
f"{region} and for {gcs_bucket_name}."
)
aiplatform.init(
project=project_id,
location=region,
staging_bucket=gcs_bucket_name,
credentials=credentials,
)
@classmethod
def _get_default_embeddings(cls) -> "TensorflowHubEmbeddings":
"""This function returns the default embedding.
Returns:
Default TensorflowHubEmbeddings to use.
"""
from langchain.embeddings import TensorflowHubEmbeddings
return TensorflowHubEmbeddings()
| [] |
2024-01-10 | docugami/langchain | libs~langchain~tests~integration_tests~llms~test_nlpcloud.py | """Test NLPCloud API wrapper."""
from pathlib import Path
from typing import cast
from pytest import CaptureFixture, MonkeyPatch
from langchain.llms.loading import load_llm
from langchain.llms.nlpcloud import NLPCloud
from langchain.pydantic_v1 import SecretStr
from tests.integration_tests.llms.utils import assert_llm_equality
def test_nlpcloud_call() -> None:
"""Test valid call to nlpcloud."""
llm = NLPCloud(max_length=10)
output = llm("Say foo:")
assert isinstance(output, str)
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an NLPCloud LLM."""
llm = NLPCloud(max_length=10)
llm.save(file_path=tmp_path / "nlpcloud.yaml")
loaded_llm = load_llm(tmp_path / "nlpcloud.yaml")
assert_llm_equality(llm, loaded_llm)
def test_nlpcloud_api_key(monkeypatch: MonkeyPatch, capsys: CaptureFixture) -> None:
"""Test that nlpcloud api key is a secret key."""
# test initialization from init
assert isinstance(NLPCloud(nlpcloud_api_key="1").nlpcloud_api_key, SecretStr)
monkeypatch.setenv("NLPCLOUD_API_KEY", "secret-api-key")
llm = NLPCloud()
assert isinstance(llm.nlpcloud_api_key, SecretStr)
assert cast(SecretStr, llm.nlpcloud_api_key).get_secret_value() == "secret-api-key"
print(llm.nlpcloud_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
| [] |
2024-01-10 | docugami/langchain | libs~langchain~tests~integration_tests~chat_models~test_jinachat.py | """Test JinaChat wrapper."""
from typing import cast
import pytest
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.jinachat import JinaChat
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_jinachat_api_key_is_secret_string() -> None:
llm = JinaChat(jinachat_api_key="secret-api-key")
assert isinstance(llm.jinachat_api_key, SecretStr)
def test_jinachat_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv("JINACHAT_API_KEY", "secret-api-key")
llm = JinaChat()
print(llm.jinachat_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_jinachat_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test initialization with an API key provided via the initializer"""
llm = JinaChat(jinachat_api_key="secret-api-key")
print(llm.jinachat_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_uses_actual_secret_value_from_secretstr() -> None:
"""Test that actual secret is retrieved using `.get_secret_value()`."""
llm = JinaChat(jinachat_api_key="secret-api-key")
assert cast(SecretStr, llm.jinachat_api_key).get_secret_value() == "secret-api-key"
def test_jinachat() -> None:
"""Test JinaChat wrapper."""
chat = JinaChat(max_tokens=10)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_jinachat_system_message() -> None:
"""Test JinaChat wrapper with system message."""
chat = JinaChat(max_tokens=10)
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_jinachat_generate() -> None:
"""Test JinaChat wrapper with generate."""
chat = JinaChat(max_tokens=10)
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
def test_jinachat_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = JinaChat(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Hello")
response = chat([message])
assert callback_handler.llm_streams > 0
assert isinstance(response, BaseMessage)
async def test_async_jinachat() -> None:
"""Test async generation."""
chat = JinaChat(max_tokens=102)
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
async def test_async_jinachat_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = JinaChat(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert callback_handler.llm_streams > 0
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
def test_jinachat_extra_kwargs() -> None:
"""Test extra kwargs to chat openai."""
# Check that foo is saved in extra_kwargs.
llm = JinaChat(foo=3, max_tokens=10)
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
# Test that if extra_kwargs are provided, they are added to it.
llm = JinaChat(foo=3, model_kwargs={"bar": 2})
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
JinaChat(foo=3, model_kwargs={"foo": 2})
# Test that if explicit param is specified in kwargs it errors
with pytest.raises(ValueError):
JinaChat(model_kwargs={"temperature": 0.2})
| [
"Hello",
"You are to chat with the user."
] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~utilities~stackexchange.py | import html
from typing import Any, Dict, Literal
from langchain.pydantic_v1 import BaseModel, Field, root_validator
class StackExchangeAPIWrapper(BaseModel):
"""Wrapper for Stack Exchange API."""
client: Any #: :meta private:
max_results: int = 3
"""Max number of results to include in output."""
query_type: Literal["all", "title", "body"] = "all"
"""Which part of StackOverflows items to match against. One of 'all', 'title',
'body'. Defaults to 'all'.
"""
fetch_params: Dict[str, Any] = Field(default_factory=dict)
"""Additional params to pass to StackApi.fetch."""
result_separator: str = "\n\n"
"""Separator between question,answer pairs."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the required Python package exists."""
try:
from stackapi import StackAPI
values["client"] = StackAPI("stackoverflow")
except ImportError:
raise ImportError(
"The 'stackapi' Python package is not installed. "
"Please install it with `pip install stackapi`."
)
return values
def run(self, query: str) -> str:
"""Run query through StackExchange API and parse results."""
query_key = "q" if self.query_type == "all" else self.query_type
output = self.client.fetch(
"search/excerpts", **{query_key: query}, **self.fetch_params
)
if len(output["items"]) < 1:
return f"No relevant results found for '{query}' on Stack Overflow."
questions = [
item for item in output["items"] if item["item_type"] == "question"
][: self.max_results]
answers = [item for item in output["items"] if item["item_type"] == "answer"]
results = []
for question in questions:
res_text = f"Question: {question['title']}\n{question['excerpt']}"
relevant_answers = [
answer
for answer in answers
if answer["question_id"] == question["question_id"]
]
accepted_answers = [
answer for answer in relevant_answers if answer["is_accepted"]
]
if relevant_answers:
top_answer = (
accepted_answers[0] if accepted_answers else relevant_answers[0]
)
excerpt = html.unescape(top_answer["excerpt"])
res_text += f"\nAnswer: {excerpt}"
results.append(res_text)
return self.result_separator.join(results)
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~text_splitter.py | """**Text Splitters** are classes for splitting text.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> TextSplitter --> <name>TextSplitter # Example: CharacterTextSplitter
RecursiveCharacterTextSplitter --> <name>TextSplitter
Note: **MarkdownHeaderTextSplitter** and **HTMLHeaderTextSplitter** do not derive from TextSplitter.
**Main helpers:**
.. code-block::
Document, Tokenizer, Language, LineType, HeaderType
""" # noqa: E501
from __future__ import annotations
import asyncio
import copy
import logging
import pathlib
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from functools import partial
from io import BytesIO, StringIO
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
TypedDict,
TypeVar,
Union,
cast,
)
import requests
from langchain_core.documents import BaseDocumentTransformer, Document
logger = logging.getLogger(__name__)
TS = TypeVar("TS", bound="TextSplitter")
def _make_spacy_pipeline_for_splitting(
pipeline: str, *, max_length: int = 1_000_000
) -> Any: # avoid importing spacy
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
if pipeline == "sentencizer":
from spacy.lang.en import English
sentencizer = English()
sentencizer.add_pipe("sentencizer")
else:
sentencizer = spacy.load(pipeline, exclude=["ner", "tagger"])
sentencizer.max_length = max_length
return sentencizer
def _split_text_with_regex(
text: str, separator: str, keep_separator: bool
) -> List[str]:
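    # Worked example (illustrative): with separator "\n\n" and keep_separator=True,
    # "a\n\nb\n\nc" becomes ["a", "\n\nb", "\n\nc"]; with keep_separator=False it
    # becomes ["a", "b", "c"].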
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = [_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)]
if len(_splits) % 2 == 0:
splits += _splits[-1:]
splits = [_splits[0]] + splits
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
class TextSplitter(BaseDocumentTransformer, ABC):
"""Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
keep_separator: bool = False,
add_start_index: bool = False,
strip_whitespace: bool = True,
) -> None:
"""Create a new TextSplitter.
Args:
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
length_function: Function that measures the length of given chunks
keep_separator: Whether to keep the separator in the chunks
add_start_index: If `True`, includes chunk's start index in metadata
strip_whitespace: If `True`, strips whitespace from the start and end of
every document
"""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self._length_function = length_function
self._keep_separator = keep_separator
self._add_start_index = add_start_index
self._strip_whitespace = strip_whitespace
@abstractmethod
def split_text(self, text: str) -> List[str]:
"""Split text into multiple components."""
def create_documents(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> List[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
index = -1
for chunk in self.split_text(text):
metadata = copy.deepcopy(_metadatas[i])
if self._add_start_index:
index = text.find(chunk, index + 1)
metadata["start_index"] = index
new_doc = Document(page_content=chunk, metadata=metadata)
documents.append(new_doc)
return documents
def split_documents(self, documents: Iterable[Document]) -> List[Document]:
"""Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.create_documents(texts, metadatas=metadatas)
def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
text = separator.join(docs)
if self._strip_whitespace:
text = text.strip()
if text == "":
return None
else:
return text
def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
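        # Worked example (illustrative): with chunk_size=10, chunk_overlap=3 and
        # separator " ", the splits ["foo", "bar", "baz", "qux"] merge into
        # ["foo bar", "bar baz", "baz qux"].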
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
separator_len = self._length_function(separator)
docs = []
current_doc: List[str] = []
total = 0
for d in splits:
_len = self._length_function(d)
if (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
):
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, "
f"which is longer than the specified {self._chunk_size}"
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep on popping if:
# - we have a larger chunk than in the chunk overlap
# - or if we still have any chunks and the length is long
while total > self._chunk_overlap or (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
and total > 0
):
total -= self._length_function(current_doc[0]) + (
separator_len if len(current_doc) > 1 else 0
)
current_doc = current_doc[1:]
current_doc.append(d)
total += _len + (separator_len if len(current_doc) > 1 else 0)
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
@classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
"""Text splitter that uses HuggingFace tokenizer to count length."""
try:
from transformers import PreTrainedTokenizerBase
if not isinstance(tokenizer, PreTrainedTokenizerBase):
raise ValueError(
"Tokenizer received was not an instance of PreTrainedTokenizerBase"
)
def _huggingface_tokenizer_length(text: str) -> int:
return len(tokenizer.encode(text))
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
@classmethod
def from_tiktoken_encoder(
cls: Type[TS],
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> TS:
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate max_tokens_for_prompt. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str) -> int:
return len(
enc.encode(
text,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
)
)
if issubclass(cls, TokenTextSplitter):
extra_kwargs = {
"encoding_name": encoding_name,
"model_name": model_name,
"allowed_special": allowed_special,
"disallowed_special": disallowed_special,
}
kwargs = {**kwargs, **extra_kwargs}
return cls(length_function=_tiktoken_encoder, **kwargs)
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform sequence of documents by splitting them."""
return self.split_documents(list(documents))
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a sequence of documents by splitting them."""
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.transform_documents, **kwargs), documents
)
class CharacterTextSplitter(TextSplitter):
"""Splitting text that looks at characters."""
def __init__(
self, separator: str = "\n\n", is_separator_regex: bool = False, **kwargs: Any
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separator = separator
self._is_separator_regex = is_separator_regex
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
separator = (
self._separator if self._is_separator_regex else re.escape(self._separator)
)
splits = _split_text_with_regex(text, separator, self._keep_separator)
_separator = "" if self._keep_separator else self._separator
return self._merge_splits(splits, _separator)
class LineType(TypedDict):
"""Line type as typed dict."""
metadata: Dict[str, str]
content: str
class HeaderType(TypedDict):
"""Header type as typed dict."""
level: int
name: str
data: str
class MarkdownHeaderTextSplitter:
"""Splitting markdown files based on specified headers."""
def __init__(
self, headers_to_split_on: List[Tuple[str, str]], return_each_line: bool = False
):
"""Create a new MarkdownHeaderTextSplitter.
Args:
headers_to_split_on: Headers we want to track
return_each_line: Return each line w/ associated headers
"""
# Output line-by-line or aggregated into chunks w/ common headers
self.return_each_line = return_each_line
# Given the headers we want to split on,
# (e.g., "#, ##, etc") order by length
self.headers_to_split_on = sorted(
headers_to_split_on, key=lambda split: len(split[0]), reverse=True
)
def aggregate_lines_to_chunks(self, lines: List[LineType]) -> List[Document]:
"""Combine lines with common metadata into chunks
Args:
lines: Line of text / associated header metadata
"""
aggregated_chunks: List[LineType] = []
for line in lines:
if (
aggregated_chunks
and aggregated_chunks[-1]["metadata"] == line["metadata"]
):
# If the last line in the aggregated list
# has the same metadata as the current line,
# append the current content to the last lines's content
aggregated_chunks[-1]["content"] += " \n" + line["content"]
else:
# Otherwise, append the current line to the aggregated list
aggregated_chunks.append(line)
return [
Document(page_content=chunk["content"], metadata=chunk["metadata"])
for chunk in aggregated_chunks
]
def split_text(self, text: str) -> List[Document]:
"""Split markdown file
Args:
text: Markdown file"""
# Split the input text by newline character ("\n").
lines = text.split("\n")
# Final output
lines_with_metadata: List[LineType] = []
# Content and metadata of the chunk currently being processed
current_content: List[str] = []
current_metadata: Dict[str, str] = {}
# Keep track of the nested header structure
# header_stack: List[Dict[str, Union[int, str]]] = []
header_stack: List[HeaderType] = []
initial_metadata: Dict[str, str] = {}
in_code_block = False
opening_fence = ""
for line in lines:
stripped_line = line.strip()
if not in_code_block:
# Exclude inline code spans
if stripped_line.startswith("```") and stripped_line.count("```") == 1:
in_code_block = True
opening_fence = "```"
elif stripped_line.startswith("~~~"):
in_code_block = True
opening_fence = "~~~"
else:
if stripped_line.startswith(opening_fence):
in_code_block = False
opening_fence = ""
if in_code_block:
current_content.append(stripped_line)
continue
# Check each line against each of the header types (e.g., #, ##)
for sep, name in self.headers_to_split_on:
# Check if line starts with a header that we intend to split on
if stripped_line.startswith(sep) and (
# Header with no text OR header is followed by space
                    # Both are valid cases of sep being used as a header
len(stripped_line) == len(sep) or stripped_line[len(sep)] == " "
):
# Ensure we are tracking the header as metadata
if name is not None:
# Get the current header level
current_header_level = sep.count("#")
# Pop out headers of lower or same level from the stack
while (
header_stack
and header_stack[-1]["level"] >= current_header_level
):
# We have encountered a new header
# at the same or higher level
popped_header = header_stack.pop()
# Clear the metadata for the
# popped header in initial_metadata
if popped_header["name"] in initial_metadata:
initial_metadata.pop(popped_header["name"])
# Push the current header to the stack
header: HeaderType = {
"level": current_header_level,
"name": name,
"data": stripped_line[len(sep) :].strip(),
}
header_stack.append(header)
# Update initial_metadata with the current header
initial_metadata[name] = header["data"]
# Add the previous line to the lines_with_metadata
# only if current_content is not empty
if current_content:
lines_with_metadata.append(
{
"content": "\n".join(current_content),
"metadata": current_metadata.copy(),
}
)
current_content.clear()
break
else:
if stripped_line:
current_content.append(stripped_line)
elif current_content:
lines_with_metadata.append(
{
"content": "\n".join(current_content),
"metadata": current_metadata.copy(),
}
)
current_content.clear()
current_metadata = initial_metadata.copy()
if current_content:
lines_with_metadata.append(
{"content": "\n".join(current_content), "metadata": current_metadata}
)
# lines_with_metadata has each line with associated header metadata
# aggregate these into chunks based on common metadata
if not self.return_each_line:
return self.aggregate_lines_to_chunks(lines_with_metadata)
else:
return [
Document(page_content=chunk["content"], metadata=chunk["metadata"])
for chunk in lines_with_metadata
]
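# Illustrative usage sketch for MarkdownHeaderTextSplitter (the markdown snippet
# and header-to-metadata mapping below are made-up examples).
def _example_markdown_header_text_splitter() -> None:
    headers_to_split_on = [("#", "Header 1"), ("##", "Header 2")]
    splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
    md = "# Intro\nSome intro text\n## Details\nMore detailed text"
    # Each Document carries the enclosing header values in its metadata.
    for doc in splitter.split_text(md):
        print(doc.metadata, "->", doc.page_content)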
class ElementType(TypedDict):
"""Element type as typed dict."""
url: str
xpath: str
content: str
metadata: Dict[str, str]
class HTMLHeaderTextSplitter:
"""
Splitting HTML files based on specified headers.
Requires lxml package.
"""
def __init__(
self,
headers_to_split_on: List[Tuple[str, str]],
return_each_element: bool = False,
):
"""Create a new HTMLHeaderTextSplitter.
Args:
headers_to_split_on: list of tuples of headers we want to track mapped to
(arbitrary) keys for metadata. Allowed header values: h1, h2, h3, h4,
                h5, h6, e.g. [("h1", "Header 1"), ("h2", "Header 2")].
return_each_element: Return each element w/ associated headers.
"""
# Output element-by-element or aggregated into chunks w/ common headers
self.return_each_element = return_each_element
self.headers_to_split_on = sorted(headers_to_split_on)
def aggregate_elements_to_chunks(
self, elements: List[ElementType]
) -> List[Document]:
"""Combine elements with common metadata into chunks
Args:
elements: HTML element content with associated identifying info and metadata
"""
aggregated_chunks: List[ElementType] = []
for element in elements:
if (
aggregated_chunks
and aggregated_chunks[-1]["metadata"] == element["metadata"]
):
# If the last element in the aggregated list
# has the same metadata as the current element,
# append the current content to the last element's content
aggregated_chunks[-1]["content"] += " \n" + element["content"]
else:
# Otherwise, append the current element to the aggregated list
aggregated_chunks.append(element)
return [
Document(page_content=chunk["content"], metadata=chunk["metadata"])
for chunk in aggregated_chunks
]
def split_text_from_url(self, url: str) -> List[Document]:
"""Split HTML from web URL
Args:
url: web URL
"""
r = requests.get(url)
return self.split_text_from_file(BytesIO(r.content))
def split_text(self, text: str) -> List[Document]:
"""Split HTML text string
Args:
text: HTML text
"""
return self.split_text_from_file(StringIO(text))
def split_text_from_file(self, file: Any) -> List[Document]:
"""Split HTML file
Args:
file: HTML file
"""
try:
from lxml import etree
except ImportError as e:
raise ImportError(
"Unable to import lxml, please install with `pip install lxml`."
) from e
# use lxml library to parse html document and return xml ElementTree
parser = etree.HTMLParser()
tree = etree.parse(file, parser)
# document transformation for "structure-aware" chunking is handled with xsl.
# see comments in html_chunks_with_headers.xslt for more detailed information.
xslt_path = (
pathlib.Path(__file__).parent
/ "document_transformers/xsl/html_chunks_with_headers.xslt"
)
xslt_tree = etree.parse(xslt_path)
transform = etree.XSLT(xslt_tree)
result = transform(tree)
result_dom = etree.fromstring(str(result))
# create filter and mapping for header metadata
header_filter = [header[0] for header in self.headers_to_split_on]
header_mapping = dict(self.headers_to_split_on)
# map xhtml namespace prefix
ns_map = {"h": "http://www.w3.org/1999/xhtml"}
# build list of elements from DOM
elements = []
for element in result_dom.findall("*//*", ns_map):
if element.findall("*[@class='headers']") or element.findall(
"*[@class='chunk']"
):
elements.append(
ElementType(
url=file,
xpath="".join(
[
node.text
for node in element.findall("*[@class='xpath']", ns_map)
]
),
content="".join(
[
node.text
for node in element.findall("*[@class='chunk']", ns_map)
]
),
metadata={
# Add text of specified headers to metadata using header
# mapping.
header_mapping[node.tag]: node.text
for node in filter(
lambda x: x.tag in header_filter,
element.findall("*[@class='headers']/*", ns_map),
)
},
)
)
if not self.return_each_element:
return self.aggregate_elements_to_chunks(elements)
else:
return [
Document(page_content=chunk["content"], metadata=chunk["metadata"])
for chunk in elements
]
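# Illustrative usage sketch for HTMLHeaderTextSplitter (requires the optional
# lxml dependency; the HTML snippet below is a made-up example).
def _example_html_header_text_splitter() -> None:
    headers_to_split_on = [("h1", "Header 1"), ("h2", "Header 2")]
    splitter = HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
    html = (
        "<html><body><h1>Title</h1><p>Intro text.</p>"
        "<h2>Section</h2><p>Body.</p></body></html>"
    )
    for doc in splitter.split_text(html):
        print(doc.metadata, "->", doc.page_content)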
# On newer Python versions (3.10+) this could be declared as:
# @dataclass(frozen=True, kw_only=True, slots=True)
@dataclass(frozen=True)
class Tokenizer:
"""Tokenizer data class."""
chunk_overlap: int
"""Overlap in tokens between chunks"""
tokens_per_chunk: int
"""Maximum number of tokens per chunk"""
decode: Callable[[List[int]], str]
""" Function to decode a list of token ids to a string"""
encode: Callable[[str], List[int]]
""" Function to encode a string to a list of token ids"""
def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]:
"""Split incoming text and return chunks using tokenizer."""
splits: List[str] = []
input_ids = tokenizer.encode(text)
start_idx = 0
cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
while start_idx < len(input_ids):
splits.append(tokenizer.decode(chunk_ids))
start_idx += tokenizer.tokens_per_chunk - tokenizer.chunk_overlap
cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
return splits
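# Illustrative usage sketch for Tokenizer / split_text_on_tokens (assumes the
# optional tiktoken package is installed; the sizes below are arbitrary).
def _example_split_text_on_tokens() -> None:
    import tiktoken
    enc = tiktoken.get_encoding("gpt2")
    tokenizer = Tokenizer(
        chunk_overlap=10,
        tokens_per_chunk=64,
        decode=enc.decode,
        encode=enc.encode,
    )
    # Chunks hold at most tokens_per_chunk tokens and overlap by chunk_overlap.
    print(split_text_on_tokens(text="some long text " * 100, tokenizer=tokenizer))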
class TokenTextSplitter(TextSplitter):
"""Splitting text to tokens using model tokenizer."""
def __init__(
self,
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to for TokenTextSplitter. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
self._tokenizer = enc
self._allowed_special = allowed_special
self._disallowed_special = disallowed_special
def split_text(self, text: str) -> List[str]:
def _encode(_text: str) -> List[int]:
return self._tokenizer.encode(
_text,
allowed_special=self._allowed_special,
disallowed_special=self._disallowed_special,
)
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self._chunk_size,
decode=self._tokenizer.decode,
encode=_encode,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
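# Illustrative usage sketch for TokenTextSplitter (assumes tiktoken is installed;
# the encoding name and chunk sizes below are arbitrary).
def _example_token_text_splitter() -> None:
    splitter = TokenTextSplitter(
        encoding_name="gpt2", chunk_size=64, chunk_overlap=8
    )
    print(splitter.split_text("A short example text split on token boundaries."))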
class SentenceTransformersTokenTextSplitter(TextSplitter):
"""Splitting text to tokens using sentence model tokenizer."""
def __init__(
self,
chunk_overlap: int = 50,
model_name: str = "sentence-transformers/all-mpnet-base-v2",
tokens_per_chunk: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs, chunk_overlap=chunk_overlap)
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ImportError(
"Could not import sentence_transformer python package. "
"This is needed in order to for SentenceTransformersTokenTextSplitter. "
"Please install it with `pip install sentence-transformers`."
)
self.model_name = model_name
self._model = SentenceTransformer(self.model_name)
self.tokenizer = self._model.tokenizer
self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk)
def _initialize_chunk_configuration(
self, *, tokens_per_chunk: Optional[int]
) -> None:
self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length)
if tokens_per_chunk is None:
self.tokens_per_chunk = self.maximum_tokens_per_chunk
else:
self.tokens_per_chunk = tokens_per_chunk
if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
raise ValueError(
f"The token limit of the models '{self.model_name}'"
f" is: {self.maximum_tokens_per_chunk}."
f" Argument tokens_per_chunk={self.tokens_per_chunk}"
f" > maximum token limit."
)
def split_text(self, text: str) -> List[str]:
def encode_strip_start_and_stop_token_ids(text: str) -> List[int]:
return self._encode(text)[1:-1]
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self.tokens_per_chunk,
decode=self.tokenizer.decode,
encode=encode_strip_start_and_stop_token_ids,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
def count_tokens(self, *, text: str) -> int:
return len(self._encode(text))
_max_length_equal_32_bit_integer: int = 2**32
def _encode(self, text: str) -> List[int]:
token_ids_with_start_and_end_token_ids = self.tokenizer.encode(
text,
max_length=self._max_length_equal_32_bit_integer,
truncation="do_not_truncate",
)
return token_ids_with_start_and_end_token_ids
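# Illustrative usage sketch for SentenceTransformersTokenTextSplitter (assumes
# sentence-transformers is installed; the default model downloads on first use).
def _example_sentence_transformers_token_splitter() -> None:
    splitter = SentenceTransformersTokenTextSplitter(
        chunk_overlap=0, tokens_per_chunk=64
    )
    text = "Lorem ipsum dolor sit amet. " * 20
    print(splitter.count_tokens(text=text))
    print(splitter.split_text(text))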
class Language(str, Enum):
"""Enum of the programming languages."""
CPP = "cpp"
GO = "go"
JAVA = "java"
KOTLIN = "kotlin"
JS = "js"
TS = "ts"
PHP = "php"
PROTO = "proto"
PYTHON = "python"
RST = "rst"
RUBY = "ruby"
RUST = "rust"
SCALA = "scala"
SWIFT = "swift"
MARKDOWN = "markdown"
LATEX = "latex"
HTML = "html"
SOL = "sol"
CSHARP = "csharp"
COBOL = "cobol"
class RecursiveCharacterTextSplitter(TextSplitter):
"""Splitting text by recursively look at characters.
Recursively tries to split by different characters to find one
that works.
"""
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
is_separator_regex: bool = False,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or ["\n\n", "\n", " ", ""]
self._is_separator_regex = is_separator_regex
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == "":
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1 :]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = _split_text_with_regex(text, _separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return final_chunks
def split_text(self, text: str) -> List[str]:
return self._split_text(text, self._separators)
@classmethod
def from_language(
cls, language: Language, **kwargs: Any
) -> RecursiveCharacterTextSplitter:
separators = cls.get_separators_for_language(language)
return cls(separators=separators, is_separator_regex=True, **kwargs)
@staticmethod
def get_separators_for_language(language: Language) -> List[str]:
if language == Language.CPP:
return [
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nvoid ",
"\nint ",
"\nfloat ",
"\ndouble ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.GO:
return [
# Split along function definitions
"\nfunc ",
"\nvar ",
"\nconst ",
"\ntype ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JAVA:
return [
# Split along class definitions
"\nclass ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\nstatic ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.KOTLIN:
return [
# Split along class definitions
"\nclass ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\ninternal ",
"\ncompanion ",
"\nfun ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nwhen ",
"\ncase ",
"\nelse ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JS:
return [
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.TS:
return [
"\nenum ",
"\ninterface ",
"\nnamespace ",
"\ntype ",
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PHP:
return [
# Split along function definitions
"\nfunction ",
# Split along class definitions
"\nclass ",
# Split along control flow statements
"\nif ",
"\nforeach ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PROTO:
return [
# Split along message definitions
"\nmessage ",
# Split along service definitions
"\nservice ",
# Split along enum definitions
"\nenum ",
# Split along option definitions
"\noption ",
# Split along import statements
"\nimport ",
# Split along syntax declarations
"\nsyntax ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PYTHON:
return [
# First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RST:
return [
# Split along section titles
"\n=+\n",
"\n-+\n",
"\n\\*+\n",
# Split along directive markers
"\n\n.. *\n\n",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUBY:
return [
# Split along method definitions
"\ndef ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nunless ",
"\nwhile ",
"\nfor ",
"\ndo ",
"\nbegin ",
"\nrescue ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUST:
return [
# Split along function definitions
"\nfn ",
"\nconst ",
"\nlet ",
# Split along control flow statements
"\nif ",
"\nwhile ",
"\nfor ",
"\nloop ",
"\nmatch ",
"\nconst ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SCALA:
return [
# Split along class definitions
"\nclass ",
"\nobject ",
# Split along method definitions
"\ndef ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nmatch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SWIFT:
return [
# Split along function definitions
"\nfunc ",
# Split along class definitions
"\nclass ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.MARKDOWN:
return [
                # First, try to split along Markdown headings (levels 1 through 6)
"\n#{1,6} ",
# Note the alternative syntax for headings (below) is not handled here
# Heading level 2
# ---------------
# End of code block
"```\n",
# Horizontal lines
"\n\\*\\*\\*+\n",
"\n---+\n",
"\n___+\n",
                # Note: the patterns above only match horizontal rules written as
                # three or more consecutive *, -, or _ characters on their own line;
                # variants with spaces in between (e.g. "* * *") are not handled
"\n\n",
"\n",
" ",
"",
]
elif language == Language.LATEX:
return [
# First, try to split along Latex sections
"\n\\\\chapter{",
"\n\\\\section{",
"\n\\\\subsection{",
"\n\\\\subsubsection{",
# Now split by environments
"\n\\\\begin{enumerate}",
"\n\\\\begin{itemize}",
"\n\\\\begin{description}",
"\n\\\\begin{list}",
"\n\\\\begin{quote}",
"\n\\\\begin{quotation}",
"\n\\\\begin{verse}",
"\n\\\\begin{verbatim}",
# Now split by math environments
"\n\\\begin{align}",
"$$",
"$",
# Now split by the normal type of lines
" ",
"",
]
elif language == Language.HTML:
return [
# First, try to split along HTML tags
"<body",
"<div",
"<p",
"<br",
"<li",
"<h1",
"<h2",
"<h3",
"<h4",
"<h5",
"<h6",
"<span",
"<table",
"<tr",
"<td",
"<th",
"<ul",
"<ol",
"<header",
"<footer",
"<nav",
# Head
"<head",
"<style",
"<script",
"<meta",
"<title",
"",
]
elif language == Language.CSHARP:
return [
"\ninterface ",
"\nenum ",
"\nimplements ",
"\ndelegate ",
"\nevent ",
# Split along class definitions
"\nclass ",
"\nabstract ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\nstatic ",
"\nreturn ",
# Split along control flow statements
"\nif ",
"\ncontinue ",
"\nfor ",
"\nforeach ",
"\nwhile ",
"\nswitch ",
"\nbreak ",
"\ncase ",
"\nelse ",
# Split by exceptions
"\ntry ",
"\nthrow ",
"\nfinally ",
"\ncatch ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SOL:
return [
# Split along compiler information definitions
"\npragma ",
"\nusing ",
# Split along contract definitions
"\ncontract ",
"\ninterface ",
"\nlibrary ",
# Split along method definitions
"\nconstructor ",
"\ntype ",
"\nfunction ",
"\nevent ",
"\nmodifier ",
"\nerror ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo while ",
"\nassembly ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.COBOL:
return [
# Split along divisions
"\nIDENTIFICATION DIVISION.",
"\nENVIRONMENT DIVISION.",
"\nDATA DIVISION.",
"\nPROCEDURE DIVISION.",
# Split along sections within DATA DIVISION
"\nWORKING-STORAGE SECTION.",
"\nLINKAGE SECTION.",
"\nFILE SECTION.",
# Split along sections within PROCEDURE DIVISION
"\nINPUT-OUTPUT SECTION.",
# Split along paragraphs and common statements
"\nOPEN ",
"\nCLOSE ",
"\nREAD ",
"\nWRITE ",
"\nIF ",
"\nELSE ",
"\nMOVE ",
"\nPERFORM ",
"\nUNTIL ",
"\nVARYING ",
"\nACCEPT ",
"\nDISPLAY ",
"\nSTOP RUN.",
# Split by the normal type of lines
"\n",
" ",
"",
]
else:
raise ValueError(
f"Language {language} is not supported! "
f"Please choose from {list(Language)}"
)
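# Illustrative usage sketch for RecursiveCharacterTextSplitter.from_language
# (the code snippet and chunk sizes below are arbitrary examples).
def _example_recursive_splitter_for_code() -> None:
    python_source = "def hello():\n    print('hi')\n\ndef world():\n    print('yo')\n"
    splitter = RecursiveCharacterTextSplitter.from_language(
        Language.PYTHON, chunk_size=60, chunk_overlap=0
    )
    # The splitter first tries language-specific separators, then falls back to
    # blank lines, newlines, spaces and finally individual characters.
    print(splitter.split_text(python_source))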
class NLTKTextSplitter(TextSplitter):
"""Splitting text using NLTK package."""
def __init__(
self, separator: str = "\n\n", language: str = "english", **kwargs: Any
) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
try:
from nltk.tokenize import sent_tokenize
self._tokenizer = sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
self._separator = separator
self._language = language
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = self._tokenizer(text, language=self._language)
return self._merge_splits(splits, self._separator)
class SpacyTextSplitter(TextSplitter):
"""Splitting text using Spacy package.
    By default, Spacy's `en_core_web_sm` model is used, with a default
    max_length of 1,000,000 characters (the maximum input length the model
    accepts, which can be increased for large files). For faster, but
    potentially less accurate splitting, you can use `pipeline='sentencizer'`.
"""
def __init__(
self,
separator: str = "\n\n",
pipeline: str = "en_core_web_sm",
max_length: int = 1_000_000,
**kwargs: Any,
) -> None:
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
self._tokenizer = _make_spacy_pipeline_for_splitting(
pipeline, max_length=max_length
)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (s.text for s in self._tokenizer(text).sents)
return self._merge_splits(splits, self._separator)
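# Illustrative usage sketch for the sentence-based splitters above (NLTK needs its
# "punkt" tokenizer data downloaded; spaCy needs the chosen pipeline available).
def _example_sentence_based_splitters() -> None:
    text = "This is the first sentence. This is the second sentence."
    nltk_splitter = NLTKTextSplitter(chunk_size=100, chunk_overlap=0)
    print(nltk_splitter.split_text(text))
    spacy_splitter = SpacyTextSplitter(
        pipeline="sentencizer", chunk_size=100, chunk_overlap=0
    )
    print(spacy_splitter.split_text(text))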
# For backwards compatibility
class PythonCodeTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Python syntax."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a PythonCodeTextSplitter."""
separators = self.get_separators_for_language(Language.PYTHON)
super().__init__(separators=separators, **kwargs)
class MarkdownTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Markdown-formatted headings."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a MarkdownTextSplitter."""
separators = self.get_separators_for_language(Language.MARKDOWN)
super().__init__(separators=separators, **kwargs)
class LatexTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Latex-formatted layout elements."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a LatexTextSplitter."""
separators = self.get_separators_for_language(Language.LATEX)
super().__init__(separators=separators, **kwargs)
| [
"\n"
] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~chat_models~ernie.py | import json
import logging
import threading
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.pydantic_v1 import root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
return message_dict
class ErnieBotChat(BaseChatModel):
"""`ERNIE-Bot` large language model.
    ERNIE-Bot is a large language model developed by Baidu,
    trained on a huge amount of Chinese data.
    To use, you should have `ernie_client_id` and `ernie_client_secret` set,
    or set the environment variables `ERNIE_CLIENT_ID` and `ERNIE_CLIENT_SECRET`.
Note:
access_token will be automatically generated based on client_id and client_secret,
and will be regenerated after expiration (30 days).
    Default model is `ERNIE-Bot-turbo`;
    currently supported models are `ERNIE-Bot-turbo` and `ERNIE-Bot`.
Example:
.. code-block:: python
from langchain.chat_models import ErnieBotChat
chat = ErnieBotChat(model_name='ERNIE-Bot')
"""
ernie_api_base: Optional[str] = None
"""Baidu application custom endpoints"""
ernie_client_id: Optional[str] = None
"""Baidu application client id"""
ernie_client_secret: Optional[str] = None
"""Baidu application client secret"""
access_token: Optional[str] = None
"""access token is generated by client id and client secret,
setting this value directly will cause an error"""
model_name: str = "ERNIE-Bot-turbo"
"""model name of ernie, default is `ERNIE-Bot-turbo`.
Currently supported `ERNIE-Bot-turbo`, `ERNIE-Bot`"""
request_timeout: Optional[int] = 60
"""request timeout for chat http requests"""
streaming: Optional[bool] = False
"""streaming mode. not supported yet."""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
penalty_score: Optional[float] = 1
_lock = threading.Lock()
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["ernie_api_base"] = get_from_dict_or_env(
values, "ernie_api_base", "ERNIE_API_BASE", "https://aip.baidubce.com"
)
values["ernie_client_id"] = get_from_dict_or_env(
values,
"ernie_client_id",
"ERNIE_CLIENT_ID",
)
values["ernie_client_secret"] = get_from_dict_or_env(
values,
"ernie_client_secret",
"ERNIE_CLIENT_SECRET",
)
return values
def _chat(self, payload: object) -> dict:
base_url = f"{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/chat"
model_paths = {
"ERNIE-Bot-turbo": "eb-instant",
"ERNIE-Bot": "completions",
"ERNIE-Bot-8K": "ernie_bot_8k",
"ERNIE-Bot-4": "completions_pro",
"BLOOMZ-7B": "bloomz_7b1",
"Llama-2-7b-chat": "llama_2_7b",
"Llama-2-13b-chat": "llama_2_13b",
"Llama-2-70b-chat": "llama_2_70b",
}
if self.model_name in model_paths:
url = f"{base_url}/{model_paths[self.model_name]}"
else:
raise ValueError(f"Got unknown model_name {self.model_name}")
resp = requests.post(
url,
timeout=self.request_timeout,
headers={
"Content-Type": "application/json",
},
params={"access_token": self.access_token},
json=payload,
)
return resp.json()
def _refresh_access_token_with_lock(self) -> None:
with self._lock:
logger.debug("Refreshing access token")
base_url: str = f"{self.ernie_api_base}/oauth/2.0/token"
resp = requests.post(
base_url,
timeout=10,
headers={
"Content-Type": "application/json",
"Accept": "application/json",
},
params={
"grant_type": "client_credentials",
"client_id": self.ernie_client_id,
"client_secret": self.ernie_client_secret,
},
)
self.access_token = str(resp.json().get("access_token"))
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
raise ValueError("`streaming` option currently unsupported.")
if not self.access_token:
self._refresh_access_token_with_lock()
payload = {
"messages": [_convert_message_to_dict(m) for m in messages],
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
**kwargs,
}
logger.debug(f"Payload for ernie api is {payload}")
resp = self._chat(payload)
if resp.get("error_code"):
if resp.get("error_code") == 111:
logger.debug("access_token expired, refresh it")
self._refresh_access_token_with_lock()
resp = self._chat(payload)
else:
raise ValueError(f"Error from ErnieChat api response: {resp}")
return self._create_chat_result(resp)
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
if "function_call" in response:
fc_str = '{{"function_call": {}}}'.format(
json.dumps(response.get("function_call"))
)
generations = [ChatGeneration(message=AIMessage(content=fc_str))]
else:
generations = [
ChatGeneration(message=AIMessage(content=response.get("result")))
]
token_usage = response.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _llm_type(self) -> str:
return "ernie-bot-chat"
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~document_loaders~docusaurus.py | """Load Documents from Docusarus Documentation"""
from typing import Any, List, Optional
from langchain.document_loaders.sitemap import SitemapLoader
class DocusaurusLoader(SitemapLoader):
"""
Loader that leverages the SitemapLoader to loop through the generated pages of a
    Docusaurus Documentation website and extract the content by looking for specific
HTML tags. By default, the parser searches for the main content of the Docusaurus
page, which is normally the <article>. You also have the option to define your own
custom HTML tags by providing them as a list, for example: ["div", ".main", "a"].
"""
def __init__(
self,
url: str,
custom_html_tags: Optional[List[str]] = None,
**kwargs: Any,
):
"""
Initialize DocusaurusLoader
Args:
url: The base URL of the Docusaurus website.
custom_html_tags: Optional custom html tags to extract content from pages.
kwargs: Additional args to extend the underlying SitemapLoader, for example:
filter_urls, blocksize, meta_function, is_local, continue_on_failure
"""
if not kwargs.get("is_local"):
url = f"{url}/sitemap.xml"
self.custom_html_tags = custom_html_tags or ["main article"]
super().__init__(
url,
parsing_function=kwargs.get("parsing_function") or self._parsing_function,
**kwargs,
)
def _parsing_function(self, content: Any) -> str:
"""Parses specific elements from a Docusarus page."""
relevant_elements = content.select(",".join(self.custom_html_tags))
for element in relevant_elements:
if element not in relevant_elements:
element.decompose()
return str(content.get_text())
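# Illustrative usage sketch for DocusaurusLoader (the URL and filter below are
# example values for a public Docusaurus site; network access is required).
def _example_docusaurus_loader() -> None:
    loader = DocusaurusLoader(
        url="https://python.langchain.com",
        filter_urls=["https://python.langchain.com/docs/get_started/introduction"],
    )
    docs = loader.load()
    print(docs[0].metadata)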
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~retrievers~multi_vector.py | from enum import Enum
from typing import List
from langchain_core.documents import Document
from langchain_core.pydantic_v1 import Field
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore
from langchain_core.vectorstores import VectorStore
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
docstore: BaseStore[str, Document]
"""The storage layer for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query, **self.search_kwargs
)
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
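# Illustrative usage sketch for MultiVectorRetriever ("vectorstore" stands in for
# any concrete VectorStore; InMemoryStore from langchain.storage is assumed as a
# simple docstore implementation).
def _example_multi_vector_retriever(vectorstore: VectorStore) -> List[Document]:
    from langchain.storage import InMemoryStore
    retriever = MultiVectorRetriever(
        vectorstore=vectorstore,
        docstore=InMemoryStore(),
        id_key="doc_id",
    )
    # Hits are deduplicated by doc_id and the parent documents are returned.
    return retriever.get_relevant_documents("my query")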
| [] |
2024-01-10 | docugami/langchain | libs~langchain~tests~integration_tests~document_loaders~test_wikipedia.py | """Integration test for Wikipedia Document Loader."""
from typing import List
from langchain_core.documents import Document
from langchain.document_loaders import WikipediaLoader
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {"title", "summary", "source"}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
def test_load_success() -> None:
loader = WikipediaLoader(query="HUNTER X HUNTER")
docs = loader.load()
assert len(docs) > 1
assert len(docs) <= 25
assert_docs(docs, all_meta=False)
def test_load_success_all_meta() -> None:
load_max_docs = 5
load_all_available_meta = True
loader = WikipediaLoader(
query="HUNTER X HUNTER",
load_max_docs=load_max_docs,
load_all_available_meta=load_all_available_meta,
)
docs = loader.load()
assert len(docs) == load_max_docs
assert_docs(docs, all_meta=load_all_available_meta)
def test_load_success_more() -> None:
load_max_docs = 10
loader = WikipediaLoader(query="HUNTER X HUNTER", load_max_docs=load_max_docs)
docs = loader.load()
assert len(docs) == load_max_docs
assert_docs(docs, all_meta=False)
def test_load_no_result() -> None:
loader = WikipediaLoader(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
docs = loader.load()
assert not docs
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~chat_models~baichuan.py | import hashlib
import json
import logging
import time
from typing import Any, Dict, Iterator, List, Mapping, Optional, Type
import requests
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import (
convert_to_secret_str,
get_pydantic_field_names,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel, generate_from_stream
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://api.baichuan-ai.com/v1"
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict.get("content", "") or "")
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
# signature generation
def _signature(secret_key: SecretStr, payload: Dict[str, Any], timestamp: int) -> str:
input_str = secret_key.get_secret_value() + json.dumps(payload) + str(timestamp)
md5 = hashlib.md5()
md5.update(input_str.encode("utf-8"))
return md5.hexdigest()
class ChatBaichuan(BaseChatModel):
"""Baichuan chat models API by Baichuan Intelligent Technology.
For more information, see https://platform.baichuan-ai.com/docs/api
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"baichuan_api_key": "BAICHUAN_API_KEY",
"baichuan_secret_key": "BAICHUAN_SECRET_KEY",
}
@property
def lc_serializable(self) -> bool:
return True
baichuan_api_base: str = Field(default=DEFAULT_API_BASE)
"""Baichuan custom endpoints"""
baichuan_api_key: Optional[SecretStr] = None
"""Baichuan API Key"""
baichuan_secret_key: Optional[SecretStr] = None
"""Baichuan Secret Key"""
streaming: bool = False
"""Whether to stream the results or not."""
request_timeout: int = 60
"""request timeout for chat http requests"""
model = "Baichuan2-53B"
"""model name of Baichuan, default is `Baichuan2-53B`."""
temperature: float = 0.3
"""What sampling temperature to use."""
top_k: int = 5
"""What search sampling control to use."""
top_p: float = 0.85
"""What probability mass to use."""
with_search_enhance: bool = False
"""Whether to use search enhance, default is False."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for API call not explicitly specified."""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["baichuan_api_base"] = get_from_dict_or_env(
values,
"baichuan_api_base",
"BAICHUAN_API_BASE",
DEFAULT_API_BASE,
)
values["baichuan_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"baichuan_api_key",
"BAICHUAN_API_KEY",
)
)
values["baichuan_secret_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"baichuan_secret_key",
"BAICHUAN_SECRET_KEY",
)
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Baichuan API."""
normal_params = {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"with_search_enhance": self.with_search_enhance,
}
return {**normal_params, **self.model_kwargs}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
response = res.json()
if response.get("code") != 0:
raise ValueError(f"Error from Baichuan api response: {response}")
return self._create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
res = self._chat(messages, **kwargs)
default_chunk_class = AIMessageChunk
for chunk in res.iter_lines():
response = json.loads(chunk)
if response.get("code") != 0:
raise ValueError(f"Error from Baichuan api response: {response}")
data = response.get("data")
for m in data.get("messages"):
chunk = _convert_delta_to_message_chunk(m, default_chunk_class)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content)
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
if self.baichuan_secret_key is None:
raise ValueError("Baichuan secret key is not set.")
parameters = {**self._default_params, **kwargs}
model = parameters.pop("model")
headers = parameters.pop("headers", {})
payload = {
"model": model,
"messages": [_convert_message_to_dict(m) for m in messages],
"parameters": parameters,
}
timestamp = int(time.time())
url = self.baichuan_api_base
if self.streaming:
url = f"{url}/stream"
url = f"{url}/chat"
api_key = ""
if self.baichuan_api_key:
api_key = self.baichuan_api_key.get_secret_value()
res = requests.post(
url=url,
timeout=self.request_timeout,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
"X-BC-Timestamp": str(timestamp),
"X-BC-Signature": _signature(
secret_key=self.baichuan_secret_key,
payload=payload,
timestamp=timestamp,
),
"X-BC-Sign-Algo": "MD5",
**headers,
},
json=payload,
stream=self.streaming,
)
return res
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for m in response["data"]["messages"]:
message = _convert_dict_to_message(m)
gen = ChatGeneration(message=message)
generations.append(gen)
token_usage = response["usage"]
llm_output = {"token_usage": token_usage, "model": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _llm_type(self) -> str:
return "baichuan-chat"
| [
"content"
] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~llms~vertexai.py | from __future__ import annotations
from concurrent.futures import Executor, ThreadPoolExecutor
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
Iterator,
List,
Optional,
Union,
)
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.utilities.vertexai import (
get_client_info,
init_vertexai,
raise_vertex_import_error,
)
if TYPE_CHECKING:
from google.cloud.aiplatform.gapic import (
PredictionServiceAsyncClient,
PredictionServiceClient,
)
from google.cloud.aiplatform.models import Prediction
from google.protobuf.struct_pb2 import Value
from vertexai.language_models._language_models import (
TextGenerationResponse,
_LanguageModel,
)
def _response_to_generation(
response: TextGenerationResponse,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
try:
generation_info = {
"is_blocked": response.is_blocked,
"safety_attributes": response.safety_attributes,
}
except Exception:
generation_info = None
return GenerationChunk(text=response.text, generation_info=generation_info)
def is_codey_model(model_name: str) -> bool:
"""Returns True if the model name is a Codey model.
Args:
model_name: The model name to check.
Returns: True if the model name is a Codey model.
"""
return "code" in model_name
def _create_retry_decorator(
llm: VertexAI,
*,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
import google.api_core
errors = [
google.api_core.exceptions.ResourceExhausted,
google.api_core.exceptions.ServiceUnavailable,
google.api_core.exceptions.Aborted,
google.api_core.exceptions.DeadlineExceeded,
]
decorator = create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
return decorator
def completion_with_retry(
llm: VertexAI,
*args: Any,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(*args: Any, **kwargs: Any) -> Any:
return llm.client.predict(*args, **kwargs)
return _completion_with_retry(*args, **kwargs)
def stream_completion_with_retry(
llm: VertexAI,
*args: Any,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(*args: Any, **kwargs: Any) -> Any:
return llm.client.predict_streaming(*args, **kwargs)
return _completion_with_retry(*args, **kwargs)
async def acompletion_with_retry(
llm: VertexAI,
*args: Any,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _acompletion_with_retry(*args: Any, **kwargs: Any) -> Any:
return await llm.client.predict_async(*args, **kwargs)
return await _acompletion_with_retry(*args, **kwargs)
class _VertexAIBase(BaseModel):
project: Optional[str] = None
"The default GCP project to use when making Vertex API calls."
location: str = "us-central1"
"The default location to use when making API calls."
request_parallelism: int = 5
"The amount of parallelism allowed for requests issued to VertexAI models. "
"Default is 5."
max_retries: int = 6
"""The maximum number of retries to make when generating."""
task_executor: ClassVar[Optional[Executor]] = Field(default=None, exclude=True)
stop: Optional[List[str]] = None
"Optional list of stop words to use when generating."
model_name: Optional[str] = None
"Underlying model name."
@classmethod
def _get_task_executor(cls, request_parallelism: int = 5) -> Executor:
if cls.task_executor is None:
cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism)
return cls.task_executor
class _VertexAICommon(_VertexAIBase):
client: "_LanguageModel" = None #: :meta private:
model_name: str
"Underlying model name."
temperature: float = 0.0
"Sampling temperature, it controls the degree of randomness in token selection."
max_output_tokens: int = 128
"Token limit determines the maximum amount of text output from one prompt."
top_p: float = 0.95
"Tokens are selected from most probable to least until the sum of their "
"probabilities equals the top-p value. Top-p is ignored for Codey models."
top_k: int = 40
"How the model selects tokens for output, the next token is selected from "
"among the top-k most probable tokens. Top-k is ignored for Codey models."
credentials: Any = Field(default=None, exclude=True)
"The default custom credentials (google.auth.credentials.Credentials) to use "
"when making API calls. If not provided, credentials will be ascertained from "
"the environment."
n: int = 1
"""How many completions to generate for each prompt."""
streaming: bool = False
"""Whether to stream the results or not."""
@property
def _llm_type(self) -> str:
return "vertexai"
@property
def is_codey_model(self) -> bool:
return is_codey_model(self.model_name)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _default_params(self) -> Dict[str, Any]:
params = {
"temperature": self.temperature,
"max_output_tokens": self.max_output_tokens,
"candidate_count": self.n,
}
if not self.is_codey_model:
params.update(
{
"top_k": self.top_k,
"top_p": self.top_p,
}
)
return params
@classmethod
def _try_init_vertexai(cls, values: Dict) -> None:
allowed_params = ["project", "location", "credentials"]
params = {k: v for k, v in values.items() if k in allowed_params}
init_vertexai(**params)
return None
def _prepare_params(
self,
stop: Optional[List[str]] = None,
stream: bool = False,
**kwargs: Any,
) -> dict:
stop_sequences = stop or self.stop
params_mapping = {"n": "candidate_count"}
params = {params_mapping.get(k, k): v for k, v in kwargs.items()}
params = {**self._default_params, "stop_sequences": stop_sequences, **params}
if stream or self.streaming:
params.pop("candidate_count")
return params
class VertexAI(_VertexAICommon, BaseLLM):
"""Google Vertex AI large language models."""
model_name: str = "text-bison"
"The name of the Vertex AI large language model."
tuned_model_name: Optional[str] = None
"The name of a tuned model. If provided, model_name is ignored."
@classmethod
def is_lc_serializable(self) -> bool:
return True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
cls._try_init_vertexai(values)
tuned_model_name = values.get("tuned_model_name")
model_name = values["model_name"]
try:
from vertexai.language_models import (
CodeGenerationModel,
TextGenerationModel,
)
from vertexai.preview.language_models import (
CodeGenerationModel as PreviewCodeGenerationModel,
)
from vertexai.preview.language_models import (
TextGenerationModel as PreviewTextGenerationModel,
)
if is_codey_model(model_name):
model_cls = CodeGenerationModel
preview_model_cls = PreviewCodeGenerationModel
else:
model_cls = TextGenerationModel
preview_model_cls = PreviewTextGenerationModel
if tuned_model_name:
values["client"] = model_cls.get_tuned_model(tuned_model_name)
values["client_preview"] = preview_model_cls.get_tuned_model(
tuned_model_name
)
else:
values["client"] = model_cls.from_pretrained(model_name)
values["client_preview"] = preview_model_cls.from_pretrained(model_name)
except ImportError:
raise_vertex_import_error()
if values["streaming"] and values["n"] > 1:
raise ValueError("Only one candidate can be generated with streaming!")
return values
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
try:
result = self.client_preview.count_tokens([text])
except AttributeError:
raise_vertex_import_error()
return result.total_tokens
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> LLMResult:
should_stream = stream if stream is not None else self.streaming
params = self._prepare_params(stop=stop, stream=should_stream, **kwargs)
generations = []
for prompt in prompts:
if should_stream:
generation = GenerationChunk(text="")
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
generation += chunk
generations.append([generation])
else:
res = completion_with_retry(
self, prompt, run_manager=run_manager, **params
)
generations.append([_response_to_generation(r) for r in res.candidates])
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
params = self._prepare_params(stop=stop, **kwargs)
generations = []
for prompt in prompts:
res = await acompletion_with_retry(
self, prompt, run_manager=run_manager, **params
)
generations.append([_response_to_generation(r) for r in res.candidates])
return LLMResult(generations=generations)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = self._prepare_params(stop=stop, stream=True, **kwargs)
for stream_resp in stream_completion_with_retry(
self, prompt, run_manager=run_manager, **params
):
chunk = _response_to_generation(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
class VertexAIModelGarden(_VertexAIBase, BaseLLM):
"""Large language models served from Vertex AI Model Garden."""
client: "PredictionServiceClient" = None #: :meta private:
async_client: "PredictionServiceAsyncClient" = None #: :meta private:
endpoint_id: str
"A name of an endpoint where the model has been deployed."
allowed_model_args: Optional[List[str]] = None
"Allowed optional args to be passed to the model."
prompt_arg: str = "prompt"
result_arg: Optional[str] = "generated_text"
"Set result_arg to None if output of the model is expected to be a string."
"Otherwise, if it's a dict, provided an argument that contains the result."
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
from google.api_core.client_options import ClientOptions
from google.cloud.aiplatform.gapic import (
PredictionServiceAsyncClient,
PredictionServiceClient,
)
except ImportError:
raise_vertex_import_error()
if not values["project"]:
raise ValueError(
"A GCP project should be provided to run inference on Model Garden!"
)
client_options = ClientOptions(
api_endpoint=f"{values['location']}-aiplatform.googleapis.com"
)
client_info = get_client_info(module="vertex-ai-model-garden")
values["client"] = PredictionServiceClient(
client_options=client_options, client_info=client_info
)
values["async_client"] = PredictionServiceAsyncClient(
client_options=client_options, client_info=client_info
)
values["endpoint_path"] = values["client"].endpoint_path(
project=values["project"],
location=values["location"],
endpoint=values["endpoint_id"],
)
return values
@property
def _llm_type(self) -> str:
return "vertexai_model_garden"
def _prepare_request(self, prompts: List[str], **kwargs: Any) -> List["Value"]:
try:
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
except ImportError:
raise ImportError(
"protobuf package not found, please install it with"
" `pip install protobuf`"
)
instances = []
for prompt in prompts:
if self.allowed_model_args:
instance = {
k: v for k, v in kwargs.items() if k in self.allowed_model_args
}
else:
instance = {}
instance[self.prompt_arg] = prompt
instances.append(instance)
predict_instances = [
json_format.ParseDict(instance_dict, Value()) for instance_dict in instances
]
return predict_instances
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
instances = self._prepare_request(prompts, **kwargs)
response = self.client.predict(endpoint=self.endpoint_path, instances=instances)
return self._parse_response(response)
def _parse_response(self, predictions: "Prediction") -> LLMResult:
generations: List[List[Generation]] = []
for result in predictions.predictions:
generations.append(
[
Generation(text=self._parse_prediction(prediction))
for prediction in result
]
)
return LLMResult(generations=generations)
def _parse_prediction(self, prediction: Any) -> str:
if isinstance(prediction, str):
return prediction
if self.result_arg:
try:
return prediction[self.result_arg]
except KeyError:
if isinstance(prediction, str):
error_desc = (
"Provided non-None `result_arg` (result_arg="
f"{self.result_arg}). But got prediction of type "
f"{type(prediction)} instead of dict. Most probably, you"
"need to set `result_arg=None` during VertexAIModelGarden "
"initialization."
)
raise ValueError(error_desc)
else:
raise ValueError(f"{self.result_arg} key not found in prediction!")
return prediction
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
instances = self._prepare_request(prompts, **kwargs)
response = await self.async_client.predict(
endpoint=self.endpoint_path, instances=instances
)
return self._parse_response(response)
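# Illustrative usage sketch for VertexAI (assumes google-cloud-aiplatform is
# installed and GCP credentials/project are configured in the environment).
def _example_vertex_ai() -> None:
    llm = VertexAI(model_name="text-bison", temperature=0.2, max_output_tokens=256)
    print(llm.predict("Tell me a joke about large language models."))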
| [
"prompt"
] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~document_loaders~docugami.py | import hashlib
import io
import logging
import os
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import requests
from langchain_core.documents import Document
from langchain_core.pydantic_v1 import BaseModel, root_validator
from langchain.document_loaders.base import BaseLoader
TABLE_NAME = "{http://www.w3.org/1999/xhtml}table"
XPATH_KEY = "xpath"
ID_KEY = "id"
DOCUMENT_SOURCE_KEY = "source"
DOCUMENT_NAME_KEY = "name"
STRUCTURE_KEY = "structure"
TAG_KEY = "tag"
PROJECTS_KEY = "projects"
DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1"
logger = logging.getLogger(__name__)
class DocugamiLoader(BaseLoader, BaseModel):
"""Load from `Docugami`.
To use, you should have the ``dgml-utils`` python package installed.
"""
api: str = DEFAULT_API_ENDPOINT
"""The Docugami API endpoint to use."""
access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY")
"""The Docugami API access token to use."""
    max_text_length: int = 4096
"""Max length of chunk text returned."""
min_text_length: int = 32
"""Threshold under which chunks are appended to next to avoid over-chunking."""
    max_metadata_length: int = 512
"""Max length of metadata text returned."""
include_xml_tags: bool = False
"""Set to true for XML tags in chunk output text."""
parent_hierarchy_levels: int = 0
"""Set appropriately to get parent chunks using the chunk hierarchy."""
parent_id_key: str = "doc_id"
"""Metadata key for parent doc ID."""
sub_chunk_tables: bool = False
"""Set to True to return sub-chunks within tables."""
whitespace_normalize_text: bool = True
"""Set to False if you want to full whitespace formatting in the original
XML doc, including indentation."""
docset_id: Optional[str]
"""The Docugami API docset ID to use."""
document_ids: Optional[Sequence[str]]
"""The Docugami API document IDs to use."""
file_paths: Optional[Sequence[Union[Path, str]]]
"""The local file paths to use."""
include_project_metadata_in_doc_metadata: bool = True
"""Set to True if you want to include the project metadata in the doc metadata."""
@root_validator
def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that either local file paths are given, or remote API docset ID.
Args:
values: The values to validate.
Returns:
The validated values.
"""
if values.get("file_paths") and values.get("docset_id"):
raise ValueError("Cannot specify both file_paths and remote API docset_id")
if not values.get("file_paths") and not values.get("docset_id"):
raise ValueError("Must specify either file_paths or remote API docset_id")
if values.get("docset_id") and not values.get("access_token"):
raise ValueError("Must specify access token if using remote API docset_id")
return values
def _parse_dgml(
self,
content: bytes,
document_name: Optional[str] = None,
additional_doc_metadata: Optional[Mapping] = None,
) -> List[Document]:
"""Parse a single DGML document into a list of Documents."""
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
try:
from dgml_utils.models import Chunk
from dgml_utils.segmentation import get_chunks
except ImportError:
raise ImportError(
"Could not import from dgml-utils python package. "
"Please install it with `pip install dgml-utils`."
)
def _build_framework_chunk(dg_chunk: Chunk) -> Document:
# Stable IDs for chunks with the same text.
_hashed_id = hashlib.md5(dg_chunk.text.encode()).hexdigest()
metadata = {
XPATH_KEY: dg_chunk.xpath,
ID_KEY: _hashed_id,
DOCUMENT_NAME_KEY: document_name,
DOCUMENT_SOURCE_KEY: document_name,
STRUCTURE_KEY: dg_chunk.structure,
TAG_KEY: dg_chunk.tag,
}
text = dg_chunk.text
if additional_doc_metadata:
if self.include_project_metadata_in_doc_metadata:
metadata.update(additional_doc_metadata)
return Document(
page_content=text[: self.max_text_length],
metadata=metadata,
)
# Parse the tree and return chunks
tree = etree.parse(io.BytesIO(content))
root = tree.getroot()
dg_chunks = get_chunks(
root,
min_text_length=self.min_text_length,
max_text_length=self.max_text_length,
whitespace_normalize_text=self.whitespace_normalize_text,
sub_chunk_tables=self.sub_chunk_tables,
include_xml_tags=self.include_xml_tags,
parent_hierarchy_levels=self.parent_hierarchy_levels,
)
framework_chunks: Dict[str, Document] = {}
for dg_chunk in dg_chunks:
framework_chunk = _build_framework_chunk(dg_chunk)
chunk_id = framework_chunk.metadata.get(ID_KEY)
if chunk_id:
framework_chunks[chunk_id] = framework_chunk
if dg_chunk.parent:
framework_parent_chunk = _build_framework_chunk(dg_chunk.parent)
parent_id = framework_parent_chunk.metadata.get(ID_KEY)
if parent_id and framework_parent_chunk.page_content:
framework_chunk.metadata[self.parent_id_key] = parent_id
framework_chunks[parent_id] = framework_parent_chunk
return list(framework_chunks.values())
def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all document details for the given docset ID"""
url = f"{self.api}/docsets/{docset_id}/documents"
all_documents = []
while url:
response = requests.get(
url,
headers={"Authorization": f"Bearer {self.access_token}"},
)
if response.ok:
data = response.json()
all_documents.extend(data["documents"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_documents
def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all project details for the given docset ID"""
url = f"{self.api}/projects?docset.id={docset_id}"
all_projects = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_projects.extend(data["projects"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_projects
def _metadata_for_project(self, project: Dict) -> Dict:
"""Gets project metadata for all files"""
project_id = project.get(ID_KEY)
url = f"{self.api}/projects/{project_id}/artifacts/latest"
all_artifacts = []
per_file_metadata: Dict = {}
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_artifacts.extend(data["artifacts"])
url = data.get("next", None)
elif response.status_code == 404:
# Not found is ok, just means no published projects
return per_file_metadata
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
for artifact in all_artifacts:
artifact_name = artifact.get("name")
artifact_url = artifact.get("url")
artifact_doc = artifact.get("document")
if artifact_name == "report-values.xml" and artifact_url and artifact_doc:
doc_id = artifact_doc[ID_KEY]
metadata: Dict = {}
# The evaluated XML for each document is named after the project
response = requests.request(
"GET",
f"{artifact_url}/content",
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
artifact_tree = etree.parse(io.BytesIO(response.content))
artifact_root = artifact_tree.getroot()
ns = artifact_root.nsmap
entries = artifact_root.xpath("//pr:Entry", namespaces=ns)
for entry in entries:
heading = entry.xpath("./pr:Heading", namespaces=ns)[0].text
value = " ".join(
entry.xpath("./pr:Value", namespaces=ns)[0].itertext()
).strip()
metadata[heading] = value[: self.max_metadata_length]
per_file_metadata[doc_id] = metadata
else:
raise Exception(
f"Failed to download {artifact_url}/content "
+ "(status: {response.status_code})"
)
return per_file_metadata
def _load_chunks_for_document(
self,
document_id: str,
docset_id: str,
document_name: Optional[str] = None,
additional_metadata: Optional[Mapping] = None,
) -> List[Document]:
"""Load chunks for a document."""
url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml"
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
return self._parse_dgml(
content=response.content,
document_name=document_name,
additional_doc_metadata=additional_metadata,
)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
def load(self) -> List[Document]:
"""Load documents."""
chunks: List[Document] = []
if self.access_token and self.docset_id:
# Remote mode
_document_details = self._document_details_for_docset_id(self.docset_id)
if self.document_ids:
_document_details = [
d for d in _document_details if d[ID_KEY] in self.document_ids
]
_project_details = self._project_details_for_docset_id(self.docset_id)
combined_project_metadata: Dict[str, Dict] = {}
if _project_details and self.include_project_metadata_in_doc_metadata:
# If there are any projects for this docset and the caller requested
# project metadata, load it.
for project in _project_details:
metadata = self._metadata_for_project(project)
for file_id in metadata:
if file_id not in combined_project_metadata:
combined_project_metadata[file_id] = metadata[file_id]
else:
combined_project_metadata[file_id].update(metadata[file_id])
for doc in _document_details:
doc_id = doc[ID_KEY]
doc_name = doc.get(DOCUMENT_NAME_KEY)
doc_metadata = combined_project_metadata.get(doc_id)
chunks += self._load_chunks_for_document(
document_id=doc_id,
docset_id=self.docset_id,
document_name=doc_name,
additional_metadata=doc_metadata,
)
elif self.file_paths:
# Local mode (for integration testing, or pre-downloaded XML)
for path in self.file_paths:
path = Path(path)
with open(path, "rb") as file:
chunks += self._parse_dgml(
content=file.read(),
document_name=path.name,
)
return chunks
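# Illustrative usage sketch (not part of the original module): loads chunks from the
# Docugami API. The docset ID and API key are placeholders; local DGML files can be
# loaded instead by passing file_paths.
if __name__ == "__main__":
    loader = DocugamiLoader(
        docset_id="zqpkmd3b1k9c",  # hypothetical docset ID
        access_token="dg-api-key",  # hypothetical API key
    )
    chunks = loader.load()
    print(f"Loaded {len(chunks)} chunks")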
| [] |
2024-01-10 | docugami/langchain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain_core.documents import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~chat_models~jinachat.py | """JinaChat wrapper."""
from __future__ import annotations
import logging
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import convert_to_secret_str, get_pydantic_field_names
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: JinaChat) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
    # Wait 2^x * 1 second between each retry, bounded between
    # min_seconds (1s) and max_seconds (60s) defined above
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: JinaChat, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
content = _dict["content"] or ""
return AIMessage(content=content)
elif role == "system":
return SystemMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"name": message.name,
"content": message.content,
}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class JinaChat(BaseChatModel):
"""`Jina AI` Chat models API.
To use, you should have the ``openai`` python package installed, and the
environment variable ``JINACHAT_API_KEY`` set to your API key, which you
can generate at https://chat.jina.ai/api.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import JinaChat
chat = JinaChat()
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"jinachat_api_key": "JINACHAT_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
client: Any #: :meta private:
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
jinachat_api_key: Optional[SecretStr] = None
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to JinaChat completion API. Default is 600 seconds."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["jinachat_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "jinachat_api_key", "JINACHAT_API_KEY")
)
try:
import openai
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling JinaChat API."""
return {
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"temperature": self.temperature,
**self.model_kwargs,
}
def _create_retry_decorator(self) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
        # Wait 2^x * 1 second between each retry, bounded between
        # min_seconds (1s) and max_seconds (60s) defined above
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(messages=message_dicts, **params):
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["usage"]}
return ChatResult(generations=generations, llm_output=llm_output)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
async for chunk in await acompletion_with_retry(
self, messages=message_dicts, **params
):
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
await run_manager.on_llm_new_token(chunk.content)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = await acompletion_with_retry(self, messages=message_dicts, **params)
return self._create_chat_result(response)
@property
def _invocation_params(self) -> Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
jinachat_creds: Dict[str, Any] = {
"api_key": self.jinachat_api_key
and self.jinachat_api_key.get_secret_value(),
"api_base": "https://api.chat.jina.ai/v1",
"model": "jinachat",
}
return {**jinachat_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "jinachat"
| [
"content"
] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~llms~bedrock.py | import json
import warnings
from abc import ABC
from typing import Any, Dict, Iterator, List, Mapping, Optional
from langchain_core.outputs import GenerationChunk
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utilities.anthropic import (
get_num_tokens_anthropic,
get_token_ids_anthropic,
)
from langchain.utils import get_from_dict_or_env
HUMAN_PROMPT = "\n\nHuman:"
ASSISTANT_PROMPT = "\n\nAssistant:"
ALTERNATION_ERROR = (
"Error: Prompt must alternate between '\n\nHuman:' and '\n\nAssistant:'."
)
def _add_newlines_before_ha(input_text: str) -> str:
new_text = input_text
for word in ["Human:", "Assistant:"]:
new_text = new_text.replace(word, "\n\n" + word)
for i in range(2):
new_text = new_text.replace("\n\n\n" + word, "\n\n" + word)
return new_text
def _human_assistant_format(input_text: str) -> str:
if input_text.count("Human:") == 0 or (
input_text.find("Human:") > input_text.find("Assistant:")
and "Assistant:" in input_text
):
input_text = HUMAN_PROMPT + " " + input_text # SILENT CORRECTION
if input_text.count("Assistant:") == 0:
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
if input_text[: len("Human:")] == "Human:":
input_text = "\n\n" + input_text
input_text = _add_newlines_before_ha(input_text)
count = 0
# track alternation
for i in range(len(input_text)):
if input_text[i : i + len(HUMAN_PROMPT)] == HUMAN_PROMPT:
if count % 2 == 0:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f" Received {input_text}")
if input_text[i : i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
if count % 2 == 1:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f" Received {input_text}")
if count % 2 == 1: # Only saw Human, no Assistant
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
return input_text
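# Worked example of the silent corrections above (illustrative, not part of the
# original module):
#     _human_assistant_format("Hello")
#     == "\n\nHuman: Hello\n\nAssistant:"
# A missing "Human:" prefix and a missing trailing "Assistant:" are both added, and
# the newlines before each turn are normalized to exactly two.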
class LLMInputOutputAdapter:
"""Adapter class to prepare the inputs from Langchain to a format
that LLM model expects.
It also provides helper function to extract
the generated text from the model response."""
provider_to_output_key_map = {
"anthropic": "completion",
"amazon": "outputText",
"cohere": "text",
"meta": "generation",
}
@classmethod
def prepare_input(
cls, provider: str, prompt: str, model_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
input_body = {**model_kwargs}
if provider == "anthropic":
input_body["prompt"] = _human_assistant_format(prompt)
elif provider in ("ai21", "cohere", "meta"):
input_body["prompt"] = prompt
elif provider == "amazon":
input_body = dict()
input_body["inputText"] = prompt
input_body["textGenerationConfig"] = {**model_kwargs}
else:
input_body["inputText"] = prompt
if provider == "anthropic" and "max_tokens_to_sample" not in input_body:
input_body["max_tokens_to_sample"] = 256
return input_body
@classmethod
def prepare_output(cls, provider: str, response: Any) -> str:
if provider == "anthropic":
response_body = json.loads(response.get("body").read().decode())
return response_body.get("completion")
else:
response_body = json.loads(response.get("body").read())
if provider == "ai21":
return response_body.get("completions")[0].get("data").get("text")
elif provider == "cohere":
return response_body.get("generations")[0].get("text")
elif provider == "meta":
return response_body.get("generation")
else:
return response_body.get("results")[0].get("outputText")
@classmethod
def prepare_output_stream(
cls, provider: str, response: Any, stop: Optional[List[str]] = None
) -> Iterator[GenerationChunk]:
stream = response.get("body")
if not stream:
return
if provider not in cls.provider_to_output_key_map:
raise ValueError(
f"Unknown streaming response output key for provider: {provider}"
)
for event in stream:
chunk = event.get("chunk")
if chunk:
chunk_obj = json.loads(chunk.get("bytes").decode())
if provider == "cohere" and (
chunk_obj["is_finished"]
or chunk_obj[cls.provider_to_output_key_map[provider]]
== "<EOS_TOKEN>"
):
return
# chunk obj format varies with provider
yield GenerationChunk(
text=chunk_obj[cls.provider_to_output_key_map[provider]]
)
class BedrockBase(BaseModel, ABC):
"""Base class for Bedrock models."""
client: Any = Field(exclude=True) #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = Field(default=None, exclude=True)
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str
"""Id of the model to call, e.g., amazon.titan-text-express-v1, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_url: Optional[str] = None
"""Needed if you don't want to default to us-east-1 endpoint"""
streaming: bool = False
"""Whether to stream the results."""
provider_stop_sequence_key_name_map: Mapping[str, str] = {
"anthropic": "stop_sequences",
"amazon": "stopSequences",
"ai21": "stop_sequences",
"cohere": "stop_sequences",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
# Skip creating new client if passed in constructor
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
values["region_name"] = get_from_dict_or_env(
values,
"region_name",
"AWS_DEFAULT_REGION",
default=session.region_name,
)
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
if values["endpoint_url"]:
client_params["endpoint_url"] = values["endpoint_url"]
values["client"] = session.client("bedrock-runtime", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
def _get_provider(self) -> str:
return self.model_id.split(".")[0]
@property
def _model_is_anthropic(self) -> bool:
return self._get_provider() == "anthropic"
def _prepare_input_and_invoke(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
accept = "application/json"
contentType = "application/json"
try:
response = self.client.invoke_model(
body=body, modelId=self.model_id, accept=accept, contentType=contentType
)
text = LLMInputOutputAdapter.prepare_output(provider, response)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _prepare_input_and_invoke_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
if stop:
if provider not in self.provider_stop_sequence_key_name_map:
raise ValueError(
f"Stop sequence key name for {provider} is not supported."
)
# stop sequence from _generate() overrides
# stop sequences in the class attribute
_model_kwargs[self.provider_stop_sequence_key_name_map.get(provider)] = stop
if provider == "cohere":
_model_kwargs["stream"] = True
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
try:
response = self.client.invoke_model_with_response_stream(
body=body,
modelId=self.model_id,
accept="application/json",
contentType="application/json",
)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
for chunk in LLMInputOutputAdapter.prepare_output_stream(
provider, response, stop
):
yield chunk
if run_manager is not None:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
class Bedrock(LLM, BedrockBase):
"""Bedrock models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from bedrock_langchain.bedrock_llm import BedrockLLM
llm = BedrockLLM(
credentials_profile_name="default",
model_id="amazon.titan-text-express-v1",
streaming=True
)
"""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_bedrock"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.region_name:
attributes["region_name"] = self.region_name
return attributes
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Bedrock service with streaming.
Args:
prompt (str): The prompt to pass into the model
stop (Optional[List[str]], optional): Stop sequences. These will
override any stop sequences in the `model_kwargs` attribute.
Defaults to None.
run_manager (Optional[CallbackManagerForLLMRun], optional): Callback
run managers used to process the output. Defaults to None.
Returns:
Iterator[GenerationChunk]: Generator that yields the streamed responses.
Yields:
Iterator[GenerationChunk]: Responses from the model.
"""
return self._prepare_input_and_invoke_stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
return self._prepare_input_and_invoke(prompt=prompt, stop=stop, **kwargs)
def get_num_tokens(self, text: str) -> int:
if self._model_is_anthropic:
return get_num_tokens_anthropic(text)
else:
return super().get_num_tokens(text)
def get_token_ids(self, text: str) -> List[int]:
if self._model_is_anthropic:
return get_token_ids_anthropic(text)
else:
return super().get_token_ids(text)
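# Illustrative usage sketch (not part of the original module): the profile name and
# model ID are placeholders, and credentials are resolved by boto3 as described in
# the class docstring.
if __name__ == "__main__":
    llm = Bedrock(
        credentials_profile_name="default",  # hypothetical AWS profile
        model_id="amazon.titan-text-express-v1",
        model_kwargs={"temperature": 0.0},
    )
    print(llm("Tell me a joke."))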
| [
"\n\nAssistant:",
"\n\nHuman:"
] |
2024-01-10 | docugami/langchain | libs~core~langchain_core~outputs~run_info.py | from __future__ import annotations
from uuid import UUID
from langchain_core.pydantic_v1 import BaseModel
class RunInfo(BaseModel):
"""Class that contains metadata for a single execution of a Chain or model."""
run_id: UUID
"""A unique identifier for the model or chain run."""
| [] |
2024-01-10 | docugami/langchain | libs~langchain~tests~unit_tests~utilities~test_imports.py | from langchain.utilities import __all__
EXPECTED_ALL = [
"AlphaVantageAPIWrapper",
"ApifyWrapper",
"ArceeWrapper",
"ArxivAPIWrapper",
"BibtexparserWrapper",
"BingSearchAPIWrapper",
"BraveSearchWrapper",
"DuckDuckGoSearchAPIWrapper",
"GoldenQueryAPIWrapper",
"GooglePlacesAPIWrapper",
"GoogleScholarAPIWrapper",
"GoogleSearchAPIWrapper",
"GoogleSerperAPIWrapper",
"GraphQLAPIWrapper",
"JiraAPIWrapper",
"LambdaWrapper",
"MaxComputeAPIWrapper",
"MetaphorSearchAPIWrapper",
"OpenWeatherMapAPIWrapper",
"OutlineAPIWrapper",
"Portkey",
"PowerBIDataset",
"PubMedAPIWrapper",
"PythonREPL",
"Requests",
"RequestsWrapper",
"SQLDatabase",
"SceneXplainAPIWrapper",
"SearchApiAPIWrapper",
"SearxSearchWrapper",
"SerpAPIWrapper",
"SparkSQL",
"StackExchangeAPIWrapper",
"TensorflowDatasets",
"TextRequestsWrapper",
"TwilioAPIWrapper",
"WikipediaAPIWrapper",
"WolframAlphaAPIWrapper",
"ZapierNLAWrapper",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
| [] |
2024-01-10 | docugami/langchain | libs~langchain~langchain~utilities~dalle_image_generator.py | """Utility that calls OpenAI's Dall-E Image Generator."""
from typing import Any, Dict, Optional
from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class DallEAPIWrapper(BaseModel):
"""Wrapper for OpenAI's DALL-E Image Generator.
https://platform.openai.com/docs/guides/images/generations?context=node
Usage instructions:
1. `pip install openai`
2. save your OPENAI_API_KEY in an environment variable
"""
client: Any #: :meta private:
openai_api_key: Optional[str] = None
n: int = 1
"""Number of images to generate"""
size: str = "1024x1024"
"""Size of image to generate"""
separator: str = "\n"
"""Separator to use when multiple URLs are returned."""
model: Optional[str] = "dall-e-2"
"""Model to use for image generation"""
quality: Optional[str] = "standard"
"""Quality of the image that will be generated"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
try:
from openai import OpenAI
client = OpenAI(
api_key=openai_api_key, # this is also the default, it can be omitted
)
values["client"] = client
except ImportError as e:
raise ImportError(
"Could not import openai python package. "
"Please it install it with `pip install openai`."
) from e
return values
def run(self, query: str) -> str:
"""Run query through OpenAI and parse result."""
response = self.client.images.generate(
prompt=query,
n=self.n,
size=self.size,
model=self.model,
quality=self.quality,
)
image_urls = self.separator.join([item.url for item in response.data])
return image_urls if image_urls else "No image was generated"
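# Illustrative usage sketch (not part of the original module): assumes the
# OPENAI_API_KEY environment variable is set; the prompt is arbitrary.
if __name__ == "__main__":
    dalle = DallEAPIWrapper(n=1, size="1024x1024")
    print(dalle.run("A watercolor painting of a lighthouse at dawn"))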
| [] |
2024-01-10 | docugami/langchain | libs~langchain~tests~unit_tests~llms~test_together.py | """Test Together LLM"""
from typing import cast
from langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain.llms.together import Together
def test_together_api_key_is_secret_string() -> None:
"""Test that the API key is stored as a SecretStr."""
llm = Together(
together_api_key="secret-api-key",
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
assert isinstance(llm.together_api_key, SecretStr)
def test_together_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
"""Test that the API key is masked when passed from an environment variable."""
monkeypatch.setenv("TOGETHER_API_KEY", "secret-api-key")
llm = Together(
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
print(llm.together_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_together_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
"""Test that the API key is masked when passed via the constructor."""
llm = Together(
together_api_key="secret-api-key",
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
print(llm.together_api_key, end="")
captured = capsys.readouterr()
assert captured.out == "**********"
def test_together_uses_actual_secret_value_from_secretstr() -> None:
"""Test that the actual secret value is correctly retrieved."""
llm = Together(
together_api_key="secret-api-key",
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
assert cast(SecretStr, llm.together_api_key).get_secret_value() == "secret-api-key"
| [] |
2024-01-10 | docugami/langchain | templates~rag-timescale-conversation~rag_timescale_conversation~load_sample_dataset.py | import os
import tempfile
from datetime import datetime, timedelta
import requests
from langchain.document_loaders import JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.timescalevector import TimescaleVector
from timescale_vector import client
def parse_date(date_string: str) -> datetime:
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
return datetime.strptime(date_string, time_format)
def extract_metadata(record: dict, metadata: dict) -> dict:
dt = parse_date(record["date"])
metadata["id"] = str(client.uuid_from_time(dt))
if dt is not None:
metadata["date"] = dt.isoformat()
else:
metadata["date"] = None
metadata["author"] = record["author"]
metadata["commit_hash"] = record["commit"]
return metadata
def load_ts_git_dataset(
service_url,
collection_name="timescale_commits",
num_records: int = 500,
partition_interval=timedelta(days=7),
):
json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json"
tmp_file = "ts_git_log.json"
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, "w") as json_file:
json_file.write(response.text)
else:
print(f"Failed to download JSON file. Status code: {response.status_code}")
loader = JSONLoader(
file_path=json_file_path,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
# Remove documents with None dates
documents = [doc for doc in documents if doc.metadata["date"] is not None]
if num_records > 0:
documents = documents[:num_records]
# Split the documents into chunks for embedding
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
# Create a Timescale Vector instance from the collection of documents
TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=collection_name,
service_url=service_url,
time_partition_interval=partition_interval,
)
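# Illustrative usage sketch (not part of the original module): the service URL is a
# placeholder for a TimescaleDB/pgvector instance, and OPENAI_API_KEY must be set so
# the OpenAI embeddings can be computed.
if __name__ == "__main__":
    load_ts_git_dataset(
        service_url="postgres://user:password@host:5432/tsdb",  # hypothetical URL
        collection_name="timescale_commits",
        num_records=50,
    )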
| [] |
2024-01-10 | docugami/langchain | libs~langchain~tests~integration_tests~llms~test_vertexai.py | """Test Vertex AI API wrapper.
In order to run this test, you need to install VertexAI SDK:
pip install google-cloud-aiplatform>=1.36.0
Your end-user credentials would be used to make the calls (make sure you've run
`gcloud auth login` first).
"""
import os
from typing import Optional
import pytest
from langchain_core.documents import Document
from langchain_core.outputs import LLMResult
from pytest_mock import MockerFixture
from langchain.chains.summarize import load_summarize_chain
from langchain.llms import VertexAI, VertexAIModelGarden
def test_vertex_initialization() -> None:
llm = VertexAI()
assert llm._llm_type == "vertexai"
assert llm.model_name == llm.client._model_id
def test_vertex_call() -> None:
llm = VertexAI(temperature=0)
output = llm("Say foo:")
assert isinstance(output, str)
@pytest.mark.scheduled
def test_vertex_generate() -> None:
llm = VertexAI(temperature=0.3, n=2, model_name="text-bison@001")
output = llm.generate(["Say foo:"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == 2
@pytest.mark.scheduled
def test_vertex_generate_code() -> None:
llm = VertexAI(temperature=0.3, n=2, model_name="code-bison@001")
output = llm.generate(["generate a python method that says foo:"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == 2
@pytest.mark.scheduled
async def test_vertex_agenerate() -> None:
llm = VertexAI(temperature=0)
output = await llm.agenerate(["Please say foo:"])
assert isinstance(output, LLMResult)
@pytest.mark.scheduled
def test_vertex_stream() -> None:
llm = VertexAI(temperature=0)
outputs = list(llm.stream("Please say foo:"))
assert isinstance(outputs[0], str)
async def test_vertex_consistency() -> None:
llm = VertexAI(temperature=0)
output = llm.generate(["Please say foo:"])
streaming_output = llm.generate(["Please say foo:"], stream=True)
async_output = await llm.agenerate(["Please say foo:"])
assert output.generations[0][0].text == streaming_output.generations[0][0].text
assert output.generations[0][0].text == async_output.generations[0][0].text
@pytest.mark.parametrize(
"endpoint_os_variable_name,result_arg",
[("FALCON_ENDPOINT_ID", "generated_text"), ("LLAMA_ENDPOINT_ID", None)],
)
def test_model_garden(
endpoint_os_variable_name: str, result_arg: Optional[str]
) -> None:
"""In order to run this test, you should provide endpoint names.
Example:
export FALCON_ENDPOINT_ID=...
export LLAMA_ENDPOINT_ID=...
export PROJECT=...
"""
endpoint_id = os.environ[endpoint_os_variable_name]
project = os.environ["PROJECT"]
location = "europe-west4"
llm = VertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
result_arg=result_arg,
location=location,
)
output = llm("What is the meaning of life?")
assert isinstance(output, str)
assert llm._llm_type == "vertexai_model_garden"
@pytest.mark.parametrize(
"endpoint_os_variable_name,result_arg",
[("FALCON_ENDPOINT_ID", "generated_text"), ("LLAMA_ENDPOINT_ID", None)],
)
def test_model_garden_generate(
endpoint_os_variable_name: str, result_arg: Optional[str]
) -> None:
"""In order to run this test, you should provide endpoint names.
Example:
export FALCON_ENDPOINT_ID=...
export LLAMA_ENDPOINT_ID=...
export PROJECT=...
"""
endpoint_id = os.environ[endpoint_os_variable_name]
project = os.environ["PROJECT"]
location = "europe-west4"
llm = VertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
result_arg=result_arg,
location=location,
)
output = llm.generate(["What is the meaning of life?", "How much is 2+2"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 2
@pytest.mark.asyncio
@pytest.mark.parametrize(
"endpoint_os_variable_name,result_arg",
[("FALCON_ENDPOINT_ID", "generated_text"), ("LLAMA_ENDPOINT_ID", None)],
)
async def test_model_garden_agenerate(
endpoint_os_variable_name: str, result_arg: Optional[str]
) -> None:
endpoint_id = os.environ[endpoint_os_variable_name]
project = os.environ["PROJECT"]
location = "europe-west4"
llm = VertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
result_arg=result_arg,
location=location,
)
output = await llm.agenerate(["What is the meaning of life?", "How much is 2+2"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 2
def test_vertex_call_count_tokens() -> None:
llm = VertexAI()
output = llm.get_num_tokens("How are you?")
assert output == 4
@pytest.mark.requires("google.cloud.aiplatform")
def test_get_num_tokens_be_called_when_using_mapreduce_chain(
mocker: MockerFixture,
) -> None:
from vertexai.language_models._language_models import CountTokensResponse
m1 = mocker.patch(
"vertexai.preview.language_models._PreviewTextGenerationModel.count_tokens",
return_value=CountTokensResponse(
total_tokens=2,
total_billable_characters=2,
_count_tokens_response={"total_tokens": 2, "total_billable_characters": 2},
),
)
llm = VertexAI()
chain = load_summarize_chain(
llm,
chain_type="map_reduce",
return_intermediate_steps=False,
)
doc = Document(page_content="Hi")
output = chain({"input_documents": [doc]})
assert isinstance(output["output_text"], str)
m1.assert_called_once()
assert llm._llm_type == "vertexai"
assert llm.model_name == llm.client._model_id
| [] |