date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~agents~test_initialize.py | """Test the initialize module."""
from langchain.agents.agent_types import AgentType
from langchain.agents.initialize import initialize_agent
from langchain.tools.base import tool
from tests.unit_tests.llms.fake_llm import FakeLLM
@tool
def my_tool(query: str) -> str:
"""A fake tool."""
return "fake tool"
def test_initialize_agent_with_str_agent_type() -> None:
"""Test initialize_agent with a string."""
fake_llm = FakeLLM()
agent_executor = initialize_agent(
[my_tool], fake_llm, "zero-shot-react-description" # type: ignore
)
assert agent_executor.agent._agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION
assert isinstance(agent_executor.tags, list)
assert "zero-shot-react-description" in agent_executor.tags
| [
"A fake tool."
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_replicate.py | """Test Replicate API wrapper."""
from langchain.callbacks.manager import CallbackManager
from langchain.llms.replicate import Replicate
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
TEST_MODEL = "replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5" # noqa: E501
def test_replicate_call() -> None:
"""Test simple non-streaming call to Replicate."""
llm = Replicate(model=TEST_MODEL)
output = llm("What is LangChain")
assert output
assert isinstance(output, str)
def test_replicate_streaming_call() -> None:
"""Test streaming call to Replicate."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Replicate(streaming=True, callback_manager=callback_manager, model=TEST_MODEL)
output = llm("What is LangChain")
assert output
assert isinstance(output, str)
def test_replicate_model_kwargs() -> None:
"""Test simple non-streaming call to Replicate."""
llm = Replicate(
model=TEST_MODEL, model_kwargs={"max_length": 100, "temperature": 0.01}
)
long_output = llm("What is LangChain")
llm = Replicate(
model=TEST_MODEL, model_kwargs={"max_length": 10, "temperature": 0.01}
)
short_output = llm("What is LangChain")
assert len(short_output) < len(long_output)
assert llm.model_kwargs == {"max_length": 10, "temperature": 0.01}
def test_replicate_input() -> None:
llm = Replicate(model=TEST_MODEL, input={"max_length": 10})
assert llm.model_kwargs == {"max_length": 10}
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~embeddings~gpt4all.py | from typing import Any, Dict, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.embeddings import Embeddings
class GPT4AllEmbeddings(BaseModel, Embeddings):
"""GPT4All embedding models.
To use, you should have the gpt4all python package installed
Example:
.. code-block:: python
from langchain.embeddings import GPT4AllEmbeddings
embeddings = GPT4AllEmbeddings()
"""
client: Any #: :meta private:
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that GPT4All library is installed."""
try:
from gpt4all import Embed4All
values["client"] = Embed4All()
except ImportError:
raise ImportError(
"Could not import gpt4all library. "
"Please install the gpt4all library to "
"use this embedding model: pip install gpt4all"
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using GPT4All.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = [self.client.embed(text) for text in texts]
return [list(map(float, e)) for e in embeddings]
def embed_query(self, text: str) -> List[float]:
"""Embed a query using GPT4All.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
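# Example usage (a minimal sketch; requires the optional gpt4all package and may
# download the embedding model on first use):
#   embeddings = GPT4AllEmbeddings()
#   doc_vectors = embeddings.embed_documents(["hello world", "goodbye world"])
#   query_vector = embeddings.embed_query("hello world")
#   # doc_vectors holds one list of floats per input text; query_vector is a single list.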
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~chat_loaders~test_telegram.py | """Test the telegram chat loader."""
import pathlib
import tempfile
import zipfile
from typing import Sequence
import pytest
from langchain.chat_loaders import telegram, utils
from langchain.schema import AIMessage, BaseMessage, HumanMessage
def _assert_messages_are_equal(
actual_messages: Sequence[BaseMessage],
expected_messages: Sequence[BaseMessage],
) -> None:
assert len(actual_messages) == len(expected_messages)
for actual, expected in zip(actual_messages, expected_messages):
assert actual.content == expected.content
assert (
actual.additional_kwargs["sender"] == expected.additional_kwargs["sender"]
)
def _check_telegram_chat_loader(path: str) -> None:
_data_dir = pathlib.Path(__file__).parent / "data"
source_path = _data_dir / path
# Create a zip file from the directory in a temp directory
with tempfile.TemporaryDirectory() as temp_dir_:
temp_dir = pathlib.Path(temp_dir_)
if path.endswith(".zip"):
# Make a new zip file
zip_path = temp_dir / "telegram_chat.zip"
with zipfile.ZipFile(zip_path, "w") as zip_file:
original_path = _data_dir / path.replace(".zip", "")
for file_path in original_path.iterdir():
zip_file.write(file_path, arcname=file_path.name)
source_path = zip_path
loader = telegram.TelegramChatLoader(str(source_path))
chat_sessions_ = loader.lazy_load()
chat_sessions_ = utils.merge_chat_runs(chat_sessions_)
chat_sessions = list(
utils.map_ai_messages(chat_sessions_, sender="Batman & Robin")
)
assert len(chat_sessions) == 1
session = chat_sessions[0]
assert len(session["messages"]) > 0
assert session["messages"][0].content == "i refuse to converse with you"
expected_content = [
HumanMessage(
content="i refuse to converse with you",
additional_kwargs={
"sender": "Jimmeny Marvelton",
"events": [{"message_time": "23.08.2023 13:11:23 UTC-08:00"}],
},
),
AIMessage(
content="Hi nemesis",
additional_kwargs={
"sender": "Batman & Robin",
"events": [{"message_time": "23.08.2023 13:13:20 UTC-08:00"}],
},
),
HumanMessage(
content="we meet again\n\nyou will not trick me this time",
additional_kwargs={
"sender": "Jimmeny Marvelton",
"events": [{"message_time": "23.08.2023 13:15:35 UTC-08:00"}],
},
),
]
_assert_messages_are_equal(session["messages"], expected_content)
@pytest.mark.parametrize(
"path",
[
"telegram_chat_json",
"telegram_chat_json.zip",
"telegram_chat_json/result.json",
],
)
def test_telegram_chat_loader(path: str) -> None:
_check_telegram_chat_loader(path)
@pytest.mark.skip(reason="requires bs4 but marking it as such doesn't seem to work")
@pytest.mark.parametrize(
"path",
[
"telegram_chat_json",
"telegram_chat_json.zip",
"telegram_chat_json/result.json",
],
)
def test_telegram_chat_loader_html(path: str) -> None:
_check_telegram_chat_loader(path)
| [
"we meet again\n\nyou will not trick me this time",
"i refuse to converse with you",
"Hi nemesis"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~retrievers~self_query~test_redis.py | from typing import Dict, Tuple
import pytest
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
)
from langchain.retrievers.self_query.redis import RedisTranslator
from langchain.vectorstores.redis.filters import (
RedisFilterExpression,
RedisNum,
RedisTag,
RedisText,
)
from langchain.vectorstores.redis.schema import (
NumericFieldSchema,
RedisModel,
TagFieldSchema,
TextFieldSchema,
)
@pytest.fixture
def translator() -> RedisTranslator:
schema = RedisModel(
text=[TextFieldSchema(name="bar")],
numeric=[NumericFieldSchema(name="foo")],
tag=[TagFieldSchema(name="tag")],
)
return RedisTranslator(schema)
@pytest.mark.parametrize(
("comp", "expected"),
[
(
Comparison(comparator=Comparator.LT, attribute="foo", value=1),
RedisNum("foo") < 1,
),
(
Comparison(comparator=Comparator.LIKE, attribute="bar", value="baz*"),
RedisText("bar") % "baz*",
),
(
Comparison(
comparator=Comparator.CONTAIN, attribute="tag", value=["blue", "green"]
),
RedisTag("tag") == ["blue", "green"],
),
],
)
def test_visit_comparison(
translator: RedisTranslator, comp: Comparison, expected: RedisFilterExpression
) -> None:
actual = translator.visit_comparison(comp)
assert str(expected) == str(actual)
def test_visit_operation(translator: RedisTranslator) -> None:
op = Operation(
operator=Operator.AND,
arguments=[
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
Comparison(comparator=Comparator.EQ, attribute="tag", value="high"),
],
)
expected = (RedisNum("foo") < 2) & (
(RedisText("bar") == "baz") & (RedisTag("tag") == "high")
)
actual = translator.visit_operation(op)
assert str(expected) == str(actual)
def test_visit_structured_query_no_filter(translator: RedisTranslator) -> None:
query = "What is the capital of France?"
structured_query = StructuredQuery(
query=query,
filter=None,
)
expected: Tuple[str, Dict] = (query, {})
actual = translator.visit_structured_query(structured_query)
assert expected == actual
def test_visit_structured_query_comparison(translator: RedisTranslator) -> None:
query = "What is the capital of France?"
comp = Comparison(comparator=Comparator.GTE, attribute="foo", value=2)
structured_query = StructuredQuery(
query=query,
filter=comp,
)
expected_filter = RedisNum("foo") >= 2
actual_query, actual_filter = translator.visit_structured_query(structured_query)
assert actual_query == query
assert str(actual_filter["filter"]) == str(expected_filter)
def test_visit_structured_query_operation(translator: RedisTranslator) -> None:
query = "What is the capital of France?"
op = Operation(
operator=Operator.OR,
arguments=[
Comparison(comparator=Comparator.EQ, attribute="foo", value=2),
Comparison(comparator=Comparator.CONTAIN, attribute="bar", value="baz"),
],
)
structured_query = StructuredQuery(
query=query,
filter=op,
)
expected_filter = (RedisNum("foo") == 2) | (RedisText("bar") == "baz")
actual_query, actual_filter = translator.visit_structured_query(structured_query)
assert actual_query == query
assert str(actual_filter["filter"]) == str(expected_filter)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~output_parsers~test_combining_parser.py | """Test in memory docstore."""
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
DEF_EXPECTED_RESULT = {
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France",
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_combining_dict_result() -> None:
"""Test combining result."""
parsers = [
StructuredOutputParser(
response_schemas=[
ResponseSchema(
name="answer", description="answer to the user's question"
),
ResponseSchema(
name="source",
description="source used to answer the user's question",
),
]
),
RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
),
]
combining_parser = CombiningOutputParser(parsers=parsers)
result_dict = combining_parser.parse(DEF_README)
assert DEF_EXPECTED_RESULT == result_dict
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_larksuite.py | from langchain.document_loaders.larksuite import LarkSuiteDocLoader
DOMAIN = ""
ACCESS_TOKEN = ""
DOCUMENT_ID = ""
def test_larksuite_doc_loader() -> None:
"""Test LarkSuite (FeiShu) document loader."""
loader = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content is not None
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_petals.py | """Test Petals API wrapper."""
from langchain.llms.petals import Petals
def test_petals_call() -> None:
"""Test valid call to Petals."""
llm = Petals(max_new_tokens=10)
output = llm("Say foo:")
assert isinstance(output, str)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~embeddings~test_embaas.py | """Test embaas embeddings."""
import responses
from langchain.embeddings.embaas import EMBAAS_API_URL, EmbaasEmbeddings
def test_embaas_embed_documents() -> None:
"""Test embaas embeddings with multiple texts."""
texts = ["foo bar", "bar foo", "foo"]
embedding = EmbaasEmbeddings()
output = embedding.embed_documents(texts)
assert len(output) == 3
assert len(output[0]) == 1024
assert len(output[1]) == 1024
assert len(output[2]) == 1024
def test_embaas_embed_query() -> None:
"""Test embaas embeddings with multiple texts."""
text = "foo"
embeddings = EmbaasEmbeddings()
output = embeddings.embed_query(text)
assert len(output) == 1024
def test_embaas_embed_query_instruction() -> None:
"""Test embaas embeddings with a different instruction."""
text = "Test"
instruction = "query"
embeddings = EmbaasEmbeddings(instruction=instruction)
output = embeddings.embed_query(text)
assert len(output) == 1024
def test_embaas_embed_query_model() -> None:
"""Test embaas embeddings with a different model."""
text = "Test"
model = "instructor-large"
instruction = "Represent the query for retrieval"
embeddings = EmbaasEmbeddings(model=model, instruction=instruction)
output = embeddings.embed_query(text)
assert len(output) == 768
@responses.activate
def test_embaas_embed_documents_response() -> None:
"""Test embaas embeddings with multiple texts."""
responses.add(
responses.POST,
EMBAAS_API_URL,
json={"data": [{"embedding": [0.0] * 1024}]},
status=200,
)
text = "asd"
embeddings = EmbaasEmbeddings()
output = embeddings.embed_query(text)
assert len(output) == 1024
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_huggingface_text_gen_inference.py | from langchain.llms import HuggingFaceTextGenInference
def test_invocation_params_stop_sequences() -> None:
llm = HuggingFaceTextGenInference()
assert llm._default_params["stop_sequences"] == []
runtime_stop = None
assert llm._invocation_params(runtime_stop)["stop_sequences"] == []
assert llm._default_params["stop_sequences"] == []
runtime_stop = ["stop"]
assert llm._invocation_params(runtime_stop)["stop_sequences"] == ["stop"]
assert llm._default_params["stop_sequences"] == []
llm = HuggingFaceTextGenInference(stop_sequences=["."])
runtime_stop = ["stop"]
assert llm._invocation_params(runtime_stop)["stop_sequences"] == [".", "stop"]
assert llm._default_params["stop_sequences"] == ["."]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chat_models~test_vertexai.py | """Test Vertex AI API wrapper.
In order to run this test, you need to install the Vertex AI SDK (which is in
private preview) and be whitelisted to list the models themselves:
pip install google-cloud-aiplatform>=1.25.0
Your end-user credentials would be used to make the calls (make sure you've run
`gcloud auth login` first).
"""
from typing import Optional
from unittest.mock import MagicMock, Mock, patch
import pytest
from langchain.chat_models import ChatVertexAI
from langchain.chat_models.vertexai import _parse_chat_history, _parse_examples
from langchain.schema import LLMResult
from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
def test_vertexai_instantiation(model_name: str) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name)
else:
model = ChatVertexAI()
assert model._llm_type == "vertexai"
assert model.model_name == model.client._model_id
@pytest.mark.scheduled
@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
def test_vertexai_single_call(model_name: str) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name)
else:
model = ChatVertexAI()
message = HumanMessage(content="Hello")
response = model([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_candidates() -> None:
model = ChatVertexAI(model_name="chat-bison@001", temperature=0.3, n=2)
message = HumanMessage(content="Hello")
response = model.generate(messages=[[message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 1
assert len(response.generations[0]) == 2
@pytest.mark.scheduled
@pytest.mark.asyncio
async def test_vertexai_agenerate() -> None:
model = ChatVertexAI(temperature=0)
message = HumanMessage(content="Hello")
response = await model.agenerate([[message]])
assert isinstance(response, LLMResult)
assert isinstance(response.generations[0][0].message, AIMessage) # type: ignore
sync_response = model.generate([[message]])
assert response.generations[0][0] == sync_response.generations[0][0]
@pytest.mark.scheduled
def test_vertexai_single_call_with_context() -> None:
model = ChatVertexAI()
raw_context = (
"My name is Ned. You are my personal assistant. My favorite movies "
"are Lord of the Rings and Hobbit."
)
question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
context = SystemMessage(content=raw_context)
message = HumanMessage(content=question)
response = model([context, message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_vertexai_single_call_with_examples() -> None:
model = ChatVertexAI()
raw_context = "My name is Ned. You are my personal assistant."
question = "2+2"
text_question, text_answer = "4+4", "8"
inp = HumanMessage(content=text_question)
output = AIMessage(content=text_answer)
context = SystemMessage(content=raw_context)
message = HumanMessage(content=question)
response = model([context, message], examples=[inp, output])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
def test_vertexai_single_call_with_history(model_name: str) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name)
else:
model = ChatVertexAI()
text_question1, text_answer1 = "How much is 2+2?", "4"
text_question2 = "How much is 3+3?"
message1 = HumanMessage(content=text_question1)
message2 = AIMessage(content=text_answer1)
message3 = HumanMessage(content=text_question2)
response = model([message1, message2, message3])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_parse_chat_history_correct() -> None:
from vertexai.language_models import ChatMessage
text_context = (
"My name is Ned. You are my personal assistant. My "
"favorite movies are Lord of the Rings and Hobbit."
)
context = SystemMessage(content=text_context)
text_question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
question = HumanMessage(content=text_question)
text_answer = (
"Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring "
"(2001): This is the first movie in the Lord of the Rings trilogy."
)
answer = AIMessage(content=text_answer)
history = _parse_chat_history([context, question, answer, question, answer])
assert history.context == context.content
assert len(history.history) == 4
assert history.history == [
ChatMessage(content=text_question, author="user"),
ChatMessage(content=text_answer, author="bot"),
ChatMessage(content=text_question, author="user"),
ChatMessage(content=text_answer, author="bot"),
]
def test_vertexai_single_call_fails_no_message() -> None:
chat = ChatVertexAI()
with pytest.raises(ValueError) as exc_info:
_ = chat([])
assert (
str(exc_info.value)
== "You should provide at least one message to start the chat!"
)
@pytest.mark.parametrize("stop", [None, "stop1"])
def test_vertexai_args_passed(stop: Optional[str]) -> None:
response_text = "Goodbye"
user_prompt = "Hello"
prompt_params = {
"max_output_tokens": 1,
"temperature": 10000.0,
"top_k": 10,
"top_p": 0.5,
}
# Mock the library to ensure the args are passed correctly
with patch(
"vertexai.language_models._language_models.ChatModel.start_chat"
) as start_chat:
mock_response = MagicMock()
mock_response.candidates = [Mock(text=response_text)]
mock_chat = MagicMock()
start_chat.return_value = mock_chat
mock_send_message = MagicMock(return_value=mock_response)
mock_chat.send_message = mock_send_message
model = ChatVertexAI(**prompt_params)
message = HumanMessage(content=user_prompt)
if stop:
response = model([message], stop=[stop])
else:
response = model([message])
assert response.content == response_text
mock_send_message.assert_called_once_with(user_prompt, candidate_count=1)
expected_stop_sequence = [stop] if stop else None
start_chat.assert_called_once_with(
context=None,
message_history=[],
**prompt_params,
stop_sequences=expected_stop_sequence
)
def test_parse_examples_correct() -> None:
from vertexai.language_models import InputOutputTextPair
text_question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
question = HumanMessage(content=text_question)
text_answer = (
"Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring "
"(2001): This is the first movie in the Lord of the Rings trilogy."
)
answer = AIMessage(content=text_answer)
examples = _parse_examples([question, answer, question, answer])
assert len(examples) == 2
assert examples == [
InputOutputTextPair(input_text=text_question, output_text=text_answer),
InputOutputTextPair(input_text=text_question, output_text=text_answer),
]
def test_parse_examples_fails_wrong_sequence() -> None:
with pytest.raises(ValueError) as exc_info:
_ = _parse_examples([AIMessage(content="a")])
print(str(exc_info.value))
assert (
str(exc_info.value)
== "Expect examples to have an even amount of messages, got 1."
)
| [
"{'max_output_tokens': 1, 'temperature': 10000.0, 'top_k': 10, 'top_p': 0.5}",
"a",
"My name is Ned. You are my personal assistant.",
"2+2",
"Hello",
"How much is 3+3?"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chat_models~test_google_palm.py | """Test Google PaLM Chat API wrapper.
Note: This test must be run with the GOOGLE_API_KEY environment variable set to a
valid API key.
"""
import pytest
from langchain.chat_models import ChatGooglePalm
from langchain.schema import (
ChatGeneration,
ChatResult,
LLMResult,
)
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
def test_chat_google_palm() -> None:
"""Test Google PaLM Chat API wrapper."""
chat = ChatGooglePalm()
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_google_palm_system_message() -> None:
"""Test Google PaLM Chat API wrapper with system message."""
chat = ChatGooglePalm()
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_google_palm_generate() -> None:
"""Test Google PaLM Chat API wrapper with generate."""
chat = ChatGooglePalm(n=2, temperature=1.0)
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
def test_chat_google_palm_multiple_completions() -> None:
"""Test Google PaLM Chat API wrapper with multiple completions."""
# The API de-dupes duplicate responses, so set temperature higher. This
# could be a flakey test though...
chat = ChatGooglePalm(n=5, temperature=1.0)
message = HumanMessage(content="Hello")
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
@pytest.mark.asyncio
async def test_async_chat_google_palm() -> None:
"""Test async generation."""
chat = ChatGooglePalm(n=2, temperature=1.0)
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
| [
"Hello",
"You are to chat with the user."
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~google_vertex_ai_search.py | """Retriever wrapper for Google Vertex AI Search."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utilities.vertexai import get_client_info
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from google.api_core.client_options import ClientOptions
from google.cloud.discoveryengine_v1beta import (
ConversationalSearchServiceClient,
SearchRequest,
SearchResult,
SearchServiceClient,
)
class _BaseGoogleVertexAISearchRetriever(BaseModel):
project_id: str
"""Google Cloud Project ID."""
data_store_id: str
"""Vertex AI Search data store ID."""
location_id: str = "global"
"""Vertex AI Search data store location."""
serving_config_id: str = "default_config"
"""Vertex AI Search serving config ID."""
credentials: Any = None
"""The default custom credentials (google.auth.credentials.Credentials) to use
when making API calls. If not provided, credentials will be ascertained from
the environment."""
engine_data_type: int = Field(default=0, ge=0, le=2)
""" Defines the Vertex AI Search data type
0 - Unstructured data
1 - Structured data
2 - Website data (with Advanced Website Indexing)
"""
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validates the environment."""
try:
from google.cloud import discoveryengine_v1beta # noqa: F401
except ImportError as exc:
raise ImportError(
"google.cloud.discoveryengine is not installed."
"Please install it with pip install "
"google-cloud-discoveryengine>=0.11.0"
) from exc
try:
from google.api_core.exceptions import InvalidArgument # noqa: F401
except ImportError as exc:
raise ImportError(
"google.api_core.exceptions is not installed. "
"Please install it with pip install google-api-core"
) from exc
values["project_id"] = get_from_dict_or_env(values, "project_id", "PROJECT_ID")
try:
# For backwards compatibility
search_engine_id = get_from_dict_or_env(
values, "search_engine_id", "SEARCH_ENGINE_ID"
)
if search_engine_id:
import warnings
warnings.warn(
"The `search_engine_id` parameter is deprecated. Use `data_store_id` instead.", # noqa: E501
DeprecationWarning,
)
values["data_store_id"] = search_engine_id
except: # noqa: E722
pass
values["data_store_id"] = get_from_dict_or_env(
values, "data_store_id", "DATA_STORE_ID"
)
return values
@property
def client_options(self) -> "ClientOptions":
from google.api_core.client_options import ClientOptions
return ClientOptions(
api_endpoint=f"{self.location_id}-discoveryengine.googleapis.com"
if self.location_id != "global"
else None
)
def _convert_structured_search_response(
self, results: Sequence[SearchResult]
) -> List[Document]:
"""Converts a sequence of search results to a list of LangChain documents."""
import json
from google.protobuf.json_format import MessageToDict
documents: List[Document] = []
for result in results:
document_dict = MessageToDict(
result.document._pb, preserving_proto_field_name=True
)
documents.append(
Document(
page_content=json.dumps(document_dict.get("struct_data", {})),
metadata={"id": document_dict["id"], "name": document_dict["name"]},
)
)
return documents
def _convert_unstructured_search_response(
self, results: Sequence[SearchResult], chunk_type: str
) -> List[Document]:
"""Converts a sequence of search results to a list of LangChain documents."""
from google.protobuf.json_format import MessageToDict
documents: List[Document] = []
for result in results:
document_dict = MessageToDict(
result.document._pb, preserving_proto_field_name=True
)
derived_struct_data = document_dict.get("derived_struct_data")
if not derived_struct_data:
continue
doc_metadata = document_dict.get("struct_data", {})
doc_metadata["id"] = document_dict["id"]
if chunk_type not in derived_struct_data:
continue
for chunk in derived_struct_data[chunk_type]:
doc_metadata["source"] = derived_struct_data.get("link", "")
if chunk_type == "extractive_answers":
doc_metadata["source"] += f":{chunk.get('pageNumber', '')}"
documents.append(
Document(
page_content=chunk.get("content", ""), metadata=doc_metadata
)
)
return documents
def _convert_website_search_response(
self, results: Sequence[SearchResult]
) -> List[Document]:
"""Converts a sequence of search results to a list of LangChain documents."""
from google.protobuf.json_format import MessageToDict
documents: List[Document] = []
for result in results:
document_dict = MessageToDict(
result.document._pb, preserving_proto_field_name=True
)
derived_struct_data = document_dict.get("derived_struct_data")
if not derived_struct_data:
continue
doc_metadata = document_dict.get("struct_data", {})
doc_metadata["id"] = document_dict["id"]
doc_metadata["source"] = derived_struct_data.get("link", "")
chunk_type = "extractive_answers"
if chunk_type not in derived_struct_data:
continue
for chunk in derived_struct_data[chunk_type]:
documents.append(
Document(
page_content=chunk.get("content", ""), metadata=doc_metadata
)
)
if not documents:
print(
f"No {chunk_type} could be found.\n"
"Make sure that your data store is using Advanced Website Indexing.\n"
"https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing" # noqa: E501
)
return documents
class GoogleVertexAISearchRetriever(BaseRetriever, _BaseGoogleVertexAISearchRetriever):
"""`Google Vertex AI Search` retriever.
For a detailed explanation of the Vertex AI Search concepts
and configuration parameters, refer to the product documentation.
https://cloud.google.com/generative-ai-app-builder/docs/enterprise-search-introduction
"""
filter: Optional[str] = None
"""Filter expression."""
get_extractive_answers: bool = False
"""If True return Extractive Answers, otherwise return Extractive Segments."""
max_documents: int = Field(default=5, ge=1, le=100)
"""The maximum number of documents to return."""
max_extractive_answer_count: int = Field(default=1, ge=1, le=5)
"""The maximum number of extractive answers returned in each search result.
At most 5 answers will be returned for each SearchResult.
"""
max_extractive_segment_count: int = Field(default=1, ge=1, le=1)
"""The maximum number of extractive segments returned in each search result.
Currently one segment will be returned for each SearchResult.
"""
query_expansion_condition: int = Field(default=1, ge=0, le=2)
"""Specification to determine under which conditions query expansion should occur.
0 - Unspecified query expansion condition. In this case, server behavior defaults
to disabled
1 - Disabled query expansion. Only the exact search query is used, even if
SearchResponse.total_size is zero.
2 - Automatic query expansion built by the Search API.
"""
spell_correction_mode: int = Field(default=2, ge=0, le=2)
"""Specification to determine under which conditions query expansion should occur.
0 - Unspecified spell correction mode. In this case, server behavior defaults
to auto.
1 - Suggestion only. Search API will try to find a spell suggestion if there is any
and put in the `SearchResponse.corrected_query`.
The spell suggestion will not be used as the search query.
2 - Automatic spell correction built by the Search API.
Search will be based on the corrected query if found.
"""
_client: SearchServiceClient
_serving_config: str
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
arbitrary_types_allowed = True
underscore_attrs_are_private = True
def __init__(self, **kwargs: Any) -> None:
"""Initializes private fields."""
try:
from google.cloud.discoveryengine_v1beta import SearchServiceClient
except ImportError as exc:
raise ImportError(
"google.cloud.discoveryengine is not installed."
"Please install it with pip install google-cloud-discoveryengine"
) from exc
super().__init__(**kwargs)
# For more information, refer to:
# https://cloud.google.com/generative-ai-app-builder/docs/locations#specify_a_multi-region_for_your_data_store
self._client = SearchServiceClient(
credentials=self.credentials,
client_options=self.client_options,
client_info=get_client_info(module="vertex-ai-search"),
)
self._serving_config = self._client.serving_config_path(
project=self.project_id,
location=self.location_id,
data_store=self.data_store_id,
serving_config=self.serving_config_id,
)
def _create_search_request(self, query: str) -> SearchRequest:
"""Prepares a SearchRequest object."""
from google.cloud.discoveryengine_v1beta import SearchRequest
query_expansion_spec = SearchRequest.QueryExpansionSpec(
condition=self.query_expansion_condition,
)
spell_correction_spec = SearchRequest.SpellCorrectionSpec(
mode=self.spell_correction_mode
)
if self.engine_data_type == 0:
if self.get_extractive_answers:
extractive_content_spec = (
SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_answer_count=self.max_extractive_answer_count,
)
)
else:
extractive_content_spec = (
SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_segment_count=self.max_extractive_segment_count,
)
)
content_search_spec = SearchRequest.ContentSearchSpec(
extractive_content_spec=extractive_content_spec
)
elif self.engine_data_type == 1:
content_search_spec = None
elif self.engine_data_type == 2:
content_search_spec = SearchRequest.ContentSearchSpec(
extractive_content_spec=SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_answer_count=self.max_extractive_answer_count,
)
)
else:
raise NotImplementedError(
"Only data store type 0 (Unstructured), 1 (Structured),"
"or 2 (Website with Advanced Indexing) are supported currently."
+ f" Got {self.engine_data_type}"
)
return SearchRequest(
query=query,
filter=self.filter,
serving_config=self._serving_config,
page_size=self.max_documents,
content_search_spec=content_search_spec,
query_expansion_spec=query_expansion_spec,
spell_correction_spec=spell_correction_spec,
)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
from google.api_core.exceptions import InvalidArgument
search_request = self._create_search_request(query)
try:
response = self._client.search(search_request)
except InvalidArgument as exc:
raise type(exc)(
exc.message
+ " This might be due to engine_data_type not set correctly."
)
if self.engine_data_type == 0:
chunk_type = (
"extractive_answers"
if self.get_extractive_answers
else "extractive_segments"
)
documents = self._convert_unstructured_search_response(
response.results, chunk_type
)
elif self.engine_data_type == 1:
documents = self._convert_structured_search_response(response.results)
elif self.engine_data_type == 2:
documents = self._convert_website_search_response(response.results)
else:
raise NotImplementedError(
"Only data store type 0 (Unstructured), 1 (Structured),"
"or 2 (Website with Advanced Indexing) are supported currently."
+ f" Got {self.engine_data_type}"
)
return documents
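# Example usage (a minimal sketch; the project and data store IDs below are
# illustrative placeholders, not real resources):
#   retriever = GoogleVertexAISearchRetriever(
#       project_id="my-gcp-project",
#       data_store_id="my-data-store",
#       max_documents=3,
#       get_extractive_answers=True,
#   )
#   docs = retriever.get_relevant_documents("What is Vertex AI Search?")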
class GoogleVertexAIMultiTurnSearchRetriever(
BaseRetriever, _BaseGoogleVertexAISearchRetriever
):
"""`Google Vertex AI Search` retriever for multi-turn conversations."""
conversation_id: str = "-"
"""Vertex AI Search Conversation ID."""
_client: ConversationalSearchServiceClient
_serving_config: str
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
arbitrary_types_allowed = True
underscore_attrs_are_private = True
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
from google.cloud.discoveryengine_v1beta import (
ConversationalSearchServiceClient,
)
self._client = ConversationalSearchServiceClient(
credentials=self.credentials,
client_options=self.client_options,
client_info=get_client_info(module="vertex-ai-search"),
)
self._serving_config = self._client.serving_config_path(
project=self.project_id,
location=self.location_id,
data_store=self.data_store_id,
serving_config=self.serving_config_id,
)
if self.engine_data_type == 1:
raise NotImplementedError(
"Data store type 1 (Structured)"
"is not currently supported for multi-turn search."
+ f" Got {self.engine_data_type}"
)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
from google.cloud.discoveryengine_v1beta import (
ConverseConversationRequest,
TextInput,
)
request = ConverseConversationRequest(
name=self._client.conversation_path(
self.project_id,
self.location_id,
self.data_store_id,
self.conversation_id,
),
serving_config=self._serving_config,
query=TextInput(input=query),
)
response = self._client.converse_conversation(request)
if self.engine_data_type == 2:
return self._convert_website_search_response(response.search_results)
return self._convert_unstructured_search_response(
response.search_results, "extractive_answers"
)
class GoogleCloudEnterpriseSearchRetriever(GoogleVertexAISearchRetriever):
"""`Google Vertex Search API` retriever alias for backwards compatibility.
DEPRECATED: Use `GoogleVertexAISearchRetriever` instead.
"""
def __init__(self, **data: Any):
import warnings
warnings.warn(
"GoogleCloudEnterpriseSearchRetriever is deprecated, use GoogleVertexAISearchRetriever", # noqa: E501
DeprecationWarning,
)
super().__init__(**data)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~example_generator.py | """Keep here for backwards compatibility."""
from langchain.chains.example_generator import generate_example
__all__ = ["generate_example"]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~utilities~loading.py | from langchain.utils.loading import try_load_from_hub
# For backwards compatibility
__all__ = ["try_load_from_hub"]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chains~test_retrieval_qa_with_sources.py | """Test RetrievalQA functionality."""
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.loading import load_chain
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
def test_retrieval_qa_with_sources_chain_saving_loading(tmp_path: str) -> None:
"""Test saving and loading."""
loader = DirectoryLoader("docs/extras/modules/", glob="*.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_documents(texts, embeddings)
qa = RetrievalQAWithSourcesChain.from_llm(
llm=OpenAI(), retriever=docsearch.as_retriever()
)
result = qa("What did the president say about Ketanji Brown Jackson?")
assert "question" in result.keys()
assert "answer" in result.keys()
assert "sources" in result.keys()
file_path = str(tmp_path) + "/RetrievalQAWithSourcesChain.yaml"
qa.save(file_path=file_path)
qa_loaded = load_chain(file_path, retriever=docsearch.as_retriever())
assert qa_loaded == qa
qa2 = RetrievalQAWithSourcesChain.from_chain_type(
llm=OpenAI(), retriever=docsearch.as_retriever(), chain_type="stuff"
)
result2 = qa2("What did the president say about Ketanji Brown Jackson?")
assert "question" in result2.keys()
assert "answer" in result2.keys()
assert "sources" in result2.keys()
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_tair.py | """Test tair functionality."""
from langchain.docstore.document import Document
from langchain.vectorstores.tair import Tair
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_tair() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = Tair.from_texts(
texts, FakeEmbeddings(), tair_url="redis://localhost:6379"
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~langchain_experimental~autonomous_agents~hugginggpt~hugginggpt.py | from typing import List
from langchain.base_language import BaseLanguageModel
from langchain.tools.base import BaseTool
from langchain_experimental.autonomous_agents.hugginggpt.repsonse_generator import (
load_response_generator,
)
from langchain_experimental.autonomous_agents.hugginggpt.task_executor import (
TaskExecutor,
)
from langchain_experimental.autonomous_agents.hugginggpt.task_planner import (
load_chat_planner,
)
class HuggingGPT:
def __init__(self, llm: BaseLanguageModel, tools: List[BaseTool]):
self.llm = llm
self.tools = tools
self.chat_planner = load_chat_planner(llm)
self.response_generator = load_response_generator(llm)
self.task_executor: TaskExecutor
def run(self, input: str) -> str:
plan = self.chat_planner.plan(inputs={"input": input, "hf_tools": self.tools})
self.task_executor = TaskExecutor(plan)
self.task_executor.run()
response = self.response_generator.generate(
{"task_execution": self.task_executor}
)
return response
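# Example usage (a minimal sketch; `llm` and `hf_tools` are assumed to be an
# already-configured language model and a list of LangChain tools):
#   agent = HuggingGPT(llm, hf_tools)
#   answer = agent.run("Describe the objects in the provided image.")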
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
from typing import Any, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the
configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded
if not already present.
Returns:
The imported nltk module, with the punkt tokenizer available.
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
except LookupError:
nltk.download("punkt")
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
prompt_value (str): The input paragraph to be split into chunks.
max_size (int, optional): The maximum size limit in bytes for
each chunk. Defaults to 1024 * 4 (4 KB).
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the 'toxicity_init_validate' function. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = split_paragraph(paragraph, max_size=2048)
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = list() # type: ignore
current_chunk = list() # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size
# or current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(self, prompt_value: str, config: Any = None) -> str:
"""
Check the toxicity of a given text prompt using AWS
Comprehend service and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
toxicity_found = False
threshold = config.get("threshold")
toxicity_labels = config.get("labels")
if not toxicity_labels:
for item in response["ResultList"]:
for label in item["Labels"]:
if label["Score"] >= threshold:
toxicity_found = True
break
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
return prompt_value
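# Example usage (a minimal sketch; the boto3 Comprehend client, threshold, and
# label names below are illustrative placeholders):
#   toxicity = ComprehendToxicity(client=comprehend_client)
#   checked = toxicity.validate(
#       "some user-supplied text",
#       config={"threshold": 0.5, "labels": ["HATE_SPEECH", "INSULT"]},
#   )
#   # Raises ModerationToxicityError if any configured label scores at or above
#   # the threshold; otherwise returns the original text unchanged.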
| [] |
2024-01-10 | RohanDey02/langchain | libs~cli~tests~test_utils.py | import pytest
from langchain_cli.utils.git import parse_dependency_string, DependencySource
from langchain_cli.constants import (
DEFAULT_GIT_REPO,
DEFAULT_GIT_SUBDIRECTORY,
DEFAULT_GIT_REF,
)
def test_dependency_string() -> None:
assert parse_dependency_string(
"git+ssh://[email protected]/efriis/myrepo.git"
) == DependencySource(
git="ssh://[email protected]/efriis/myrepo.git",
ref=None,
subdirectory=None,
)
assert parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src"
) == DependencySource(
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref=None,
)
assert parse_dependency_string(
"git+ssh://[email protected]:efriis/myrepo.git#develop"
) == DependencySource(
git="ssh://[email protected]:efriis/myrepo.git", ref="develop", subdirectory=None
)
# also support a slash in ssh
assert parse_dependency_string(
"git+ssh://[email protected]/efriis/myrepo.git#develop"
) == DependencySource(
git="ssh://[email protected]/efriis/myrepo.git", ref="develop", subdirectory=None
)
# looks like poetry supports both an @ and a #
assert parse_dependency_string(
"git+ssh://[email protected]:efriis/myrepo.git@develop"
) == DependencySource(
git="ssh://[email protected]:efriis/myrepo.git", ref="develop", subdirectory=None
)
assert parse_dependency_string("simple-pirate") == DependencySource(
git=DEFAULT_GIT_REPO,
subdirectory=f"{DEFAULT_GIT_SUBDIRECTORY}/simple-pirate",
ref=DEFAULT_GIT_REF,
)
def test_dependency_string_both() -> None:
assert parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@branch#subdirectory=src"
) == DependencySource(
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref="branch",
)
def test_dependency_string_invalids() -> None:
# expect error for wrong order
with pytest.raises(ValueError):
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src@branch"
)
# expect error for @subdirectory
def test_dependency_string_edge_case() -> None:
# weird unsolvable edge case of
# git+ssh://a@b
# this could be a ssh dep with user=a, and default ref
# or a ssh dep at a with ref=b.
# in this case, assume the first case (be greedy with the '@')
assert parse_dependency_string("git+ssh://a@b") == DependencySource(
git="ssh://a@b",
subdirectory=None,
ref=None,
)
# weird one that is actually valid
assert parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@subdirectory=src"
) == DependencySource(
git="https://github.com/efriis/myrepo.git",
subdirectory=None,
ref="subdirectory=src",
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~chains~test_hyde.py | """Test HyDE."""
from typing import Any, List, Optional
import numpy as np
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.llms.base import BaseLLM
from langchain.schema import Generation, LLMResult
from langchain.schema.embeddings import Embeddings
class FakeEmbeddings(Embeddings):
"""Fake embedding class for tests."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return random floats."""
return [list(np.random.uniform(0, 1, 10)) for _ in texts]
def embed_query(self, text: str) -> List[float]:
"""Return random floats."""
return list(np.random.uniform(0, 1, 10))
class FakeLLM(BaseLLM):
"""Fake LLM wrapper for testing purposes."""
n: int = 1
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def test_hyde_from_llm() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(), FakeEmbeddings(), key
)
embedding.embed_query("foo")
def test_hyde_from_llm_with_multiple_n() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(n=8), FakeEmbeddings(), key
)
embedding.embed_query("foo")
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~indexes~test_hashed_document.py | import pytest
from langchain.indexes._api import _HashedDocument
from langchain.schema import Document
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument(
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert isinstance(hashed_document.hash_, str)
def test_hashing_with_missing_content() -> None:
"""Check that ValueError is raised if page_content is missing."""
with pytest.raises(ValueError):
_HashedDocument(
metadata={"key": "value"},
)
def test_uid_auto_assigned_to_hash() -> None:
"""Test uid is auto-assigned to the hashed_document hash."""
hashed_document = _HashedDocument(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert hashed_document.uid == hashed_document.hash_
def test_to_document() -> None:
"""Test to_document method."""
hashed_document = _HashedDocument(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
doc = hashed_document.to_document()
assert isinstance(doc, Document)
assert doc.page_content == "Lorem ipsum dolor sit amet"
assert doc.metadata == {"key": "value"}
def test_from_document() -> None:
"""Test from document class method."""
document = Document(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
hashed_document = _HashedDocument.from_document(document)
# hash should be deterministic
assert hashed_document.hash_ == "fd1dc827-051b-537d-a1fe-1fa043e8b276"
assert hashed_document.uid == hashed_document.hash_
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_etherscan.py | import os
import pytest
from langchain.document_loaders import EtherscanLoader
if "ETHERSCAN_API_KEY" in os.environ:
etherscan_key_set = True
api_key = os.environ["ETHERSCAN_API_KEY"]
else:
etherscan_key_set = False
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_normal_transaction() -> None:
account_address = "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b"
loader = EtherscanLoader(account_address)
result = loader.load()
assert len(result) > 0, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_internal_transaction() -> None:
account_address = "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b"
loader = EtherscanLoader(account_address, filter="internal_transaction")
result = loader.load()
assert len(result) > 0, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_erc20_transaction() -> None:
account_address = "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b"
loader = EtherscanLoader(account_address, filter="erc20_transaction")
result = loader.load()
assert len(result) > 0, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_erc721_transaction() -> None:
account_address = "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b"
loader = EtherscanLoader(account_address, filter="erc721_transaction")
result = loader.load()
assert len(result) > 0, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_erc1155_transaction() -> None:
account_address = "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b"
loader = EtherscanLoader(account_address, filter="erc1155_transaction")
result = loader.load()
assert len(result) == 1, "Wrong transactions returned"
assert result[0].page_content == "", "Wrong transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_get_eth_balance() -> None:
account_address = "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b"
loader = EtherscanLoader(account_address, filter="eth_balance")
result = loader.load()
assert len(result) > 0, "No transactions returned"
@pytest.mark.skipif(not etherscan_key_set, reason="Etherscan API key not provided.")
def test_invalid_filter() -> None:
account_address = "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b"
with pytest.raises(ValueError) as error_invalid_filter:
EtherscanLoader(account_address, filter="internal_saction")
assert str(error_invalid_filter.value) == "Invalid filter internal_saction"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~memory~summary_buffer.py | from typing import Any, Dict, List
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.summary import SummarizerMixin
from langchain.pydantic_v1 import root_validator
from langchain.schema.messages import BaseMessage, get_buffer_string
class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
"""Buffer with summarizer for storing conversation memory."""
max_token_limit: int = 2000
moving_summary_buffer: str = ""
memory_key: str = "history"
@property
def buffer(self) -> List[BaseMessage]:
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
buffer = self.buffer
if self.moving_summary_buffer != "":
first_messages: List[BaseMessage] = [
self.summary_message_cls(content=self.moving_summary_buffer)
]
buffer = first_messages + buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(
buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
)
return {self.memory_key: final_buffer}
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
return values
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.prune()
def prune(self) -> None:
"""Prune buffer if it exceeds max token limit"""
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
self.moving_summary_buffer = self.predict_new_summary(
pruned_memory, self.moving_summary_buffer
)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.moving_summary_buffer = ""
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~load~test_load.py | """Test for Serializable base class"""
import pytest
from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.load.dump import dumpd, dumps
from langchain.load.load import load, loads
from langchain.prompts.prompt import PromptTemplate
class NotSerializable:
pass
@pytest.mark.requires("openai")
def test_loads_openai_llm() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
llm_string = dumps(llm)
llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2 == llm
assert dumps(llm2) == llm_string
assert isinstance(llm2, OpenAI)
@pytest.mark.requires("openai")
def test_loads_llmchain() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_string = dumps(chain)
chain2 = loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert chain2 == chain
assert dumps(chain2) == chain_string
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
@pytest.mark.requires("openai")
def test_loads_llmchain_env() -> None:
import os
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm = OpenAI(model="davinci", temperature=0.5)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_string = dumps(chain)
chain2 = loads(chain_string)
assert chain2 == chain
assert dumps(chain2) == chain_string
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.requires("openai")
def test_loads_llmchain_with_non_serializable_arg() -> None:
llm = OpenAI(
model="davinci",
temperature=0.5,
openai_api_key="hello",
client=NotSerializable,
)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_string = dumps(chain, pretty=True)
with pytest.raises(NotImplementedError):
loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"})
@pytest.mark.requires("openai")
def test_load_openai_llm() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
llm_obj = dumpd(llm)
llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2 == llm
assert dumpd(llm2) == llm_obj
assert isinstance(llm2, OpenAI)
@pytest.mark.requires("openai")
def test_load_llmchain() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_obj = dumpd(chain)
chain2 = load(chain_obj, secrets_map={"OPENAI_API_KEY": "hello"})
assert chain2 == chain
assert dumpd(chain2) == chain_obj
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
@pytest.mark.requires("openai")
def test_load_llmchain_env() -> None:
import os
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm = OpenAI(model="davinci", temperature=0.5)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_obj = dumpd(chain)
chain2 = load(chain_obj)
assert chain2 == chain
assert dumpd(chain2) == chain_obj
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.requires("openai")
def test_load_llmchain_with_non_serializable_arg() -> None:
llm = OpenAI(
model="davinci",
temperature=0.5,
openai_api_key="hello",
client=NotSerializable,
)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_obj = dumpd(chain)
with pytest.raises(NotImplementedError):
load(chain_obj, secrets_map={"OPENAI_API_KEY": "hello"})
| [
"hello {name}!"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~anthropic.py | import re
import warnings
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Union,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Field, SecretStr, root_validator
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.output import GenerationChunk
from langchain.schema.prompt import PromptValue
from langchain.utils import (
check_package_version,
get_from_dict_or_env,
get_pydantic_field_names,
)
from langchain.utils.utils import build_extra_kwargs
def _to_secret(value: Union[SecretStr, str]) -> SecretStr:
"""Convert a string to a SecretStr if needed."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)
class _AnthropicCommon(BaseLanguageModel):
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model: str = Field(default="claude-2", alias="model_name")
"""Model name to use."""
max_tokens_to_sample: int = Field(default=256, alias="max_tokens")
"""Denotes the number of tokens to predict per generation."""
temperature: Optional[float] = None
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: Optional[int] = None
"""Number of most likely tokens to consider at each step."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
streaming: bool = False
"""Whether to stream the results."""
default_request_timeout: Optional[float] = None
"""Timeout for requests to Anthropic Completion API. Default is 600 seconds."""
anthropic_api_url: Optional[str] = None
anthropic_api_key: Optional[SecretStr] = None
HUMAN_PROMPT: Optional[str] = None
AI_PROMPT: Optional[str] = None
count_tokens: Optional[Callable[[str], int]] = None
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@root_validator(pre=True)
def build_extra(cls, values: Dict) -> Dict:
extra = values.get("model_kwargs", {})
all_required_field_names = get_pydantic_field_names(cls)
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["anthropic_api_key"] = _to_secret(
get_from_dict_or_env(values, "anthropic_api_key", "ANTHROPIC_API_KEY")
)
# Get custom api url from environment.
values["anthropic_api_url"] = get_from_dict_or_env(
values,
"anthropic_api_url",
"ANTHROPIC_API_URL",
default="https://api.anthropic.com",
)
try:
import anthropic
check_package_version("anthropic", gte_version="0.3")
values["client"] = anthropic.Anthropic(
base_url=values["anthropic_api_url"],
api_key=values["anthropic_api_key"].get_secret_value(),
timeout=values["default_request_timeout"],
)
values["async_client"] = anthropic.AsyncAnthropic(
base_url=values["anthropic_api_url"],
api_key=values["anthropic_api_key"].get_secret_value(),
timeout=values["default_request_timeout"],
)
values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT
values["AI_PROMPT"] = anthropic.AI_PROMPT
values["count_tokens"] = values["client"].count_tokens
except ImportError:
raise ImportError(
"Could not import anthropic python package. "
"Please it install it with `pip install anthropic`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Anthropic API."""
d = {
"max_tokens_to_sample": self.max_tokens_to_sample,
"model": self.model,
}
if self.temperature is not None:
d["temperature"] = self.temperature
if self.top_k is not None:
d["top_k"] = self.top_k
if self.top_p is not None:
d["top_p"] = self.top_p
return {**d, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{}, **self._default_params}
def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if stop is None:
stop = []
# Never want model to invent new turns of Human / Assistant dialog.
stop.extend([self.HUMAN_PROMPT])
return stop
class Anthropic(LLM, _AnthropicCommon):
"""Anthropic large language models.
To use, you should have the ``anthropic`` python package installed, and the
environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
import anthropic
from langchain.llms import Anthropic
model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")
# Simplest invocation, automatically wrapped with HUMAN_PROMPT
# and AI_PROMPT.
response = model("What are the biggest risks facing humanity?")
# Or if you want to use the chat mode, build a few-shot-prompt, or
# put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
raw_prompt = "What are the biggest risks facing humanity?"
prompt = f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}"
response = model(prompt)
"""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
arbitrary_types_allowed = True
@root_validator()
def raise_warning(cls, values: Dict) -> Dict:
"""Raise warning that this class is deprecated."""
warnings.warn(
"This Anthropic LLM is deprecated. "
"Please use `from langchain.chat_models import ChatAnthropic` instead"
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anthropic-llm"
def _wrap_prompt(self, prompt: str) -> str:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if prompt.startswith(self.HUMAN_PROMPT):
return prompt # Already wrapped.
# Guard against common errors in specifying wrong number of newlines.
corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt)
if n_subs == 1:
return corrected_prompt
# As a last resort, wrap the prompt ourselves to emulate instruct-style.
return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to Anthropic's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What are the biggest risks facing humanity?"
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
response = model(prompt)
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
response = self.client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**params,
)
return response.completion
def convert_prompt(self, prompt: PromptValue) -> str:
return self._wrap_prompt(prompt.to_string())
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Anthropic's completion endpoint asynchronously."""
if self.streaming:
completion = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
response = await self.async_client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
**params,
)
return response.completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
for token in self.client.completions.create(
prompt=self._wrap_prompt(prompt), stop_sequences=stop, stream=True, **params
):
chunk = GenerationChunk(text=token.completion)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
.. code-block:: python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
async for token in await self.async_client.completions.create(
prompt=self._wrap_prompt(prompt),
stop_sequences=stop,
stream=True,
**params,
):
chunk = GenerationChunk(text=token.completion)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
if not self.count_tokens:
raise NameError("Please ensure the anthropic package is loaded")
return self.count_tokens(text)
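if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: exercises the
    # streaming path implemented by _stream above via a stdout callback.
    # The model name and prompt are assumptions for illustration; running this
    # needs the `anthropic` package and ANTHROPIC_API_KEY in the environment.
    from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
    llm = Anthropic(
        model="claude-2",
        max_tokens_to_sample=64,
        streaming=True,
        callbacks=[StreamingStdOutCallbackHandler()],
    )
    llm("\n\nHuman: Name three oceans.\n\nAssistant:")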
| [
"None"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_excel.py | import os
from pathlib import Path
from langchain.document_loaders import UnstructuredExcelLoader
EXAMPLE_DIRECTORY = Path(__file__).parent.parent / "examples"
def test_unstructured_excel_loader() -> None:
"""Test unstructured loader."""
file_path = os.path.join(EXAMPLE_DIRECTORY, "stanley-cups.xlsx")
loader = UnstructuredExcelLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~utilities~test_graphql.py | import json
import pytest
import responses
from langchain.utilities.graphql import GraphQLAPIWrapper
TEST_ENDPOINT = "http://testserver/graphql"
# Mock GraphQL response for testing
MOCK_RESPONSE = {
"data": {
"allUsers": [{"name": "Alice"}],
"__schema": {
"queryType": {"name": "Query"},
"types": [
{
"kind": "OBJECT",
"name": "Query",
"fields": [
{
"name": "allUsers",
"args": [],
"type": {
"kind": "NON_NULL",
"name": None,
"ofType": {
"kind": "OBJECT",
"name": "allUsers",
"ofType": None,
},
},
}
],
"inputFields": None,
"interfaces": [],
"enumValues": None,
"possibleTypes": None,
},
{
"kind": "SCALAR",
"name": "String",
},
{
"kind": "OBJECT",
"name": "allUsers",
"description": None,
"fields": [
{
"name": "name",
"description": None,
"args": [],
"type": {
"kind": "NON_NULL",
"name": None,
"ofType": {
"kind": "SCALAR",
"name": "String",
"ofType": None,
},
},
},
],
"inputFields": None,
"interfaces": [],
"enumValues": None,
"possibleTypes": None,
},
{
"kind": "SCALAR",
"name": "Boolean",
},
],
},
}
}
@pytest.mark.requires("gql", "requests_toolbelt")
@responses.activate
def test_run() -> None:
responses.add(responses.POST, TEST_ENDPOINT, json=MOCK_RESPONSE, status=200)
query = "query { allUsers { name } }"
graphql_wrapper = GraphQLAPIWrapper(
graphql_endpoint=TEST_ENDPOINT,
custom_headers={"Authorization": "Bearer testtoken"},
)
result = graphql_wrapper.run(query)
expected_result = json.dumps(MOCK_RESPONSE["data"], indent=2)
assert result == expected_result
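# Hedged usage sketch, not part of the test above: the same wrapper pointed at a
# real endpoint instead of the mocked one. The public countries API URL is an
# assumption for illustration; running it needs network access plus the
# `gql` and `requests_toolbelt` extras.
#
#     wrapper = GraphQLAPIWrapper(graphql_endpoint="https://countries.trevorblades.com/")
#     print(wrapper.run("query { countries { code name } }"))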
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_elastic_vector_search.py | """Test ElasticSearch functionality."""
import logging
import os
import uuid
from typing import Generator, List, Union
import pytest
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
logging.basicConfig(level=logging.DEBUG)
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f elasticsearch.yml up
"""
class TestElasticsearch:
@classmethod
def setup_class(cls) -> None:
if not os.getenv("OPENAI_API_KEY"):
raise ValueError("OPENAI_API_KEY environment variable is not set")
@pytest.fixture(scope="class", autouse=True)
def elasticsearch_url(self) -> Union[str, Generator[str, None, None]]:
"""Return the elasticsearch url."""
from elasticsearch import Elasticsearch
url = "http://localhost:9200"
yield url
es = Elasticsearch(hosts=url)
# Clear all indexes
index_names = es.indices.get(index="_all").keys()
for index_name in index_names:
# print(index_name)
es.indices.delete(index=index_name)
def test_similarity_search_without_metadata(self, elasticsearch_url: str) -> None:
"""Test end to end construction and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = ElasticVectorSearch.from_texts(
texts, FakeEmbeddings(), elasticsearch_url=elasticsearch_url
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.skip(
reason="Docker build has no ssl certs. Enable this test when testing with ssl."
)
def test_similarity_search_with_ssl_verify(self, elasticsearch_url: str) -> None:
"""Test end to end construction and search with ssl verify."""
ssl_verify = {
"verify_certs": True,
"basic_auth": ("ES_USER", "ES_PASSWORD"),
"ca_certs": "ES_CA_CERTS_PATH",
}
texts = ["foo", "bar", "baz"]
docsearch = ElasticVectorSearch.from_texts(
texts,
FakeEmbeddings(),
elasticsearch_url="http://localhost:9200",
ssl_verify=ssl_verify,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_similarity_search_with_metadata(self, elasticsearch_url: str) -> None:
"""Test end to end construction and search with metadata."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = ElasticVectorSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
elasticsearch_url=elasticsearch_url,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
@pytest.mark.vcr(ignore_localhost=True)
def test_default_index_from_documents(
self,
documents: List[Document],
embedding_openai: OpenAIEmbeddings,
elasticsearch_url: str,
) -> None:
"""This test checks the construction of a default
ElasticSearch index using the 'from_documents'."""
elastic_vector_search = ElasticVectorSearch.from_documents(
documents=documents,
embedding=embedding_openai,
elasticsearch_url=elasticsearch_url,
)
search_result = elastic_vector_search.similarity_search("sharks")
assert len(search_result) != 0
@pytest.mark.vcr(ignore_localhost=True)
def test_custom_index_from_documents(
self,
documents: List[Document],
embedding_openai: OpenAIEmbeddings,
elasticsearch_url: str,
) -> None:
"""This test checks the construction of a custom
ElasticSearch index using the 'from_documents'."""
from elasticsearch import Elasticsearch
index_name = f"custom_index_{uuid.uuid4().hex}"
elastic_vector_search = ElasticVectorSearch.from_documents(
documents=documents,
embedding=embedding_openai,
elasticsearch_url=elasticsearch_url,
index_name=index_name,
)
es = Elasticsearch(hosts=elasticsearch_url)
index_names = es.indices.get(index="_all").keys()
assert index_name in index_names
search_result = elastic_vector_search.similarity_search("sharks")
assert len(search_result) != 0
@pytest.mark.vcr(ignore_localhost=True)
def test_custom_index_add_documents(
self,
documents: List[Document],
embedding_openai: OpenAIEmbeddings,
elasticsearch_url: str,
) -> None:
"""This test checks the construction of a custom
ElasticSearch index using the 'add_documents'."""
from elasticsearch import Elasticsearch
index_name = f"custom_index_{uuid.uuid4().hex}"
elastic_vector_search = ElasticVectorSearch(
embedding=embedding_openai,
elasticsearch_url=elasticsearch_url,
index_name=index_name,
)
es = Elasticsearch(hosts=elasticsearch_url)
elastic_vector_search.add_documents(documents)
index_names = es.indices.get(index="_all").keys()
assert index_name in index_names
search_result = elastic_vector_search.similarity_search("sharks")
assert len(search_result) != 0
def test_custom_index_add_documents_to_exists_store(self) -> None:
# TODO: implement it
pass
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~callbacks~test_run_collector.py | """Test the run collector."""
import uuid
from langchain.callbacks import collect_runs
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_collect_runs() -> None:
llm = FakeLLM(queries={"hi": "hello"}, sequential_responses=True)
with collect_runs() as cb:
llm.predict("hi")
assert cb.traced_runs
assert len(cb.traced_runs) == 1
assert isinstance(cb.traced_runs[0].id, uuid.UUID)
assert cb.traced_runs[0].inputs == {"prompts": ["hi"]}
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~smith~evaluation~test_string_run_evaluator.py | """Tests for the string run evaluator."""
from unittest.mock import MagicMock
from langchain.evaluation import criteria
from langchain.smith.evaluation.string_run_evaluator import (
ChainStringRunMapper,
StringRunEvaluatorChain,
)
from tests.unit_tests.llms import fake_llm
def test_evaluate_run() -> None:
run_mapper = ChainStringRunMapper()
example_mapper = MagicMock()
string_evaluator = criteria.CriteriaEvalChain.from_llm(fake_llm.FakeLLM())
evaluator = StringRunEvaluatorChain(
run_mapper=run_mapper,
example_mapper=example_mapper,
name="test_evaluator",
string_evaluator=string_evaluator,
)
run = MagicMock()
example = MagicMock()
res = evaluator.evaluate_run(run, example)
assert str(res.comment).startswith("Error evaluating run ")
assert res.key == string_evaluator.evaluation_name
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_gitbook.py | from typing import Optional
import pytest
from langchain.document_loaders.gitbook import GitbookLoader
class TestGitbookLoader:
@pytest.mark.parametrize(
"web_page, load_all_paths, base_url, expected_web_path",
[
("https://example.com/page1", False, None, "https://example.com/page1"),
(
"https://example.com/",
True,
"https://example.com",
"https://example.com/sitemap.xml",
),
],
)
def test_init(
self,
web_page: str,
load_all_paths: bool,
base_url: Optional[str],
expected_web_path: str,
) -> None:
loader = GitbookLoader(
web_page, load_all_paths=load_all_paths, base_url=base_url
)
print(loader.__dict__)
        expected_base_url = (
            (base_url or web_page)[:-1]
            if (base_url or web_page).endswith("/")
            else (base_url or web_page)
        )
        assert loader.base_url == expected_base_url
assert loader.web_path == expected_web_path
assert loader.load_all_paths == load_all_paths
@pytest.mark.parametrize(
"web_page, expected_number_results",
[("https://platform-docs.opentargets.org/getting-started", 1)],
)
def test_load_single_page(
self, web_page: str, expected_number_results: int
) -> None:
loader = GitbookLoader(web_page)
result = loader.load()
assert len(result) == expected_number_results
@pytest.mark.parametrize("web_page", [("https://platform-docs.opentargets.org/")])
def test_load_multiple_pages(self, web_page: str) -> None:
loader = GitbookLoader(web_page, load_all_paths=True)
result = loader.load()
print(len(result))
assert len(result) > 10
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~gpt4all.py | from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
class GPT4All(LLM):
"""GPT4All language models.
To use, you should have the ``gpt4all`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_threads=8)
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained GPT4All model file."""
backend: Optional[str] = Field(None, alias="backend")
max_tokens: int = Field(200, alias="max_tokens")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(0, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
embedding: bool = Field(False, alias="embedding")
"""Use embedding mode only."""
n_threads: Optional[int] = Field(4, alias="n_threads")
"""Number of threads to use."""
n_predict: Optional[int] = 256
"""The maximum number of tokens to generate."""
temp: Optional[float] = 0.7
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.1
"""The top-p value to use for sampling."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.18
"""The penalty to apply to repeated tokens."""
n_batch: int = Field(8, alias="n_batch")
"""Batch size for prompt processing."""
streaming: bool = False
"""Whether to stream the results or not."""
allow_download: bool = False
"""If model does not exist in ~/.cache/gpt4all/, download it."""
device: Optional[str] = Field("cpu", alias="device")
"""Device name: cpu, gpu, nvidia, intel, amd or DeviceName."""
client: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@staticmethod
def _model_param_names() -> Set[str]:
return {
"max_tokens",
"n_predict",
"top_k",
"top_p",
"temp",
"n_batch",
"repeat_penalty",
"repeat_last_n",
}
def _default_params(self) -> Dict[str, Any]:
return {
"max_tokens": self.max_tokens,
"n_predict": self.n_predict,
"top_k": self.top_k,
"top_p": self.top_p,
"temp": self.temp,
"n_batch": self.n_batch,
"repeat_penalty": self.repeat_penalty,
"repeat_last_n": self.repeat_last_n,
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gpt4all import GPT4All as GPT4AllModel
except ImportError:
raise ImportError(
"Could not import gpt4all python package. "
"Please install it with `pip install gpt4all`."
)
full_path = values["model"]
model_path, delimiter, model_name = full_path.rpartition("/")
model_path += delimiter
values["client"] = GPT4AllModel(
model_name,
model_path=model_path or None,
model_type=values["backend"],
allow_download=values["allow_download"],
device=values["device"],
)
if values["n_threads"] is not None:
# set n_threads
values["client"].model.set_thread_count(values["n_threads"])
try:
values["backend"] = values["client"].model_type
except AttributeError:
# The below is for compatibility with GPT4All Python bindings <= 0.2.3.
values["backend"] = values["client"].model.model_type
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params(),
**{
k: v for k, v in self.__dict__.items() if k in self._model_param_names()
},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "gpt4all"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
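if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. The model path is an
    # assumption: it must point at a local GPT4All-compatible weights file, or
    # `allow_download=True` can be set to fetch one into ~/.cache/gpt4all/.
    llm = GPT4All(model="./models/gpt4all-model.bin", max_tokens=64, temp=0.2)
    print(llm("Name three primary colors:"))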
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~retrievers~document_compressors~test_embeddings_filter.py | """Integration test for embedding-based relevant doc filtering."""
import numpy as np
from langchain.document_transformers.embeddings_redundant_filter import (
_DocumentWithState,
)
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.schema import Document
def test_embeddings_filter() -> None:
texts = [
"What happened to all of my cookies?",
"I wish there were better Italian restaurants in my neighborhood.",
"My favorite color is green",
]
docs = [Document(page_content=t) for t in texts]
embeddings = OpenAIEmbeddings()
relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75)
actual = relevant_filter.compress_documents(docs, "What did I say about food?")
assert len(actual) == 2
assert len(set(texts[:2]).intersection([d.page_content for d in actual])) == 2
def test_embeddings_filter_with_state() -> None:
texts = [
"What happened to all of my cookies?",
"I wish there were better Italian restaurants in my neighborhood.",
"My favorite color is green",
]
query = "What did I say about food?"
embeddings = OpenAIEmbeddings()
embedded_query = embeddings.embed_query(query)
state = {"embedded_doc": np.zeros(len(embedded_query))}
docs = [_DocumentWithState(page_content=t, state=state) for t in texts]
docs[-1].state = {"embedded_doc": embedded_query}
relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75)
actual = relevant_filter.compress_documents(docs, query)
assert len(actual) == 1
assert texts[-1] == actual[0].page_content
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~retrievers~self_query~test_milvus.py | from typing import Dict, Tuple
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
)
from langchain.retrievers.self_query.milvus import MilvusTranslator
DEFAULT_TRANSLATOR = MilvusTranslator()
def test_visit_comparison() -> None:
comp = Comparison(comparator=Comparator.LT, attribute="foo", value=4)
expected = "( foo < 4 )"
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
def test_visit_operation() -> None:
# Non-Unary operator
op = Operation(
operator=Operator.AND,
arguments=[
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
Comparison(comparator=Comparator.LT, attribute="abc", value="4"),
],
)
    expected = '(( foo < 2 ) and ( bar == "baz" ) and ( abc < "4" ))'
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
# Unary operator: normal execution
op = Operation(
operator=Operator.NOT,
arguments=[
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
],
)
expected = "not(( foo < 2 ))"
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
# Unary operator: error
op = Operation(
operator=Operator.NOT,
arguments=[
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
Comparison(comparator=Comparator.LT, attribute="abc", value="4"),
],
)
try:
DEFAULT_TRANSLATOR.visit_operation(op)
except ValueError as e:
assert str(e) == '"not" can have only one argument in Milvus'
else:
assert False, "Expected exception not raised" # No exception -> test failed
def test_visit_structured_query() -> None:
query = "What is the capital of France?"
structured_query = StructuredQuery(
query=query,
filter=None,
)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
comp = Comparison(comparator=Comparator.LT, attribute="foo", value=454)
structured_query = StructuredQuery(
query=query,
filter=comp,
)
expected = (
query,
{"expr": "( foo < 454 )"},
)
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
op = Operation(
operator=Operator.AND,
arguments=[
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
Comparison(comparator=Comparator.LT, attribute="abc", value=50),
],
)
structured_query = StructuredQuery(
query=query,
filter=op,
)
expected = (
query,
{"expr": "(( foo < 2 ) " 'and ( bar == "baz" ) ' "and ( abc < 50 ))"},
)
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
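# Hedged usage sketch, not part of the tests above: MilvusTranslator is normally
# passed to a SelfQueryRetriever, which uses it to turn LLM-produced structured
# queries into Milvus `expr` filters. The LLM, vector store and metadata schema
# below are assumptions for illustration.
#
#     from langchain.retrievers.self_query.base import SelfQueryRetriever
#     retriever = SelfQueryRetriever.from_llm(
#         llm,
#         vectorstore,
#         "Brief summary of a movie",
#         metadata_field_info,
#         structured_query_translator=MilvusTranslator(),
#     )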
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~vectorstores~azure_cosmos_db.py | from __future__ import annotations
import logging
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from pymongo.collection import Collection
from langchain.schema.embeddings import Embeddings
# Before Python 3.11 native StrEnum is not available
class CosmosDBSimilarityType(str, Enum):
"""Cosmos DB Similarity Type as enumerator."""
COS = "COS"
"""CosineSimilarity"""
IP = "IP"
"""inner - product"""
L2 = "L2"
"""Euclidean distance"""
CosmosDBDocumentType = TypeVar("CosmosDBDocumentType", bound=Dict[str, Any])
logger = logging.getLogger(__name__)
DEFAULT_INSERT_BATCH_SIZE = 128
class AzureCosmosDBVectorSearch(VectorStore):
"""`Azure Cosmos DB for MongoDB vCore` vector store.
To use, you should have both:
- the ``pymongo`` python package installed
- a connection string associated with a MongoDB VCore Cluster
Example:
        .. code-block:: python
from langchain.vectorstores import AzureCosmosDBVectorSearch
from langchain.embeddings.openai import OpenAIEmbeddings
from pymongo import MongoClient
mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = AzureCosmosDBVectorSearch(collection, embeddings)
"""
def __init__(
self,
collection: Collection[CosmosDBDocumentType],
embedding: Embeddings,
*,
index_name: str = "vectorSearchIndex",
text_key: str = "textContent",
embedding_key: str = "vectorContent",
):
"""Constructor for AzureCosmosDBVectorSearch
Args:
collection: MongoDB collection to add the texts to.
embedding: Text embedding model to use.
            index_name: Name of the vector search index.
text_key: MongoDB field that will contain the text
for each document.
embedding_key: MongoDB field that will contain the embedding
for each document.
"""
self._collection = collection
self._embedding = embedding
self._index_name = index_name
self._text_key = text_key
self._embedding_key = embedding_key
@property
def embeddings(self) -> Embeddings:
return self._embedding
def get_index_name(self) -> str:
"""Returns the index name
Returns:
Returns the index name
"""
return self._index_name
@classmethod
def from_connection_string(
cls,
connection_string: str,
namespace: str,
embedding: Embeddings,
**kwargs: Any,
) -> AzureCosmosDBVectorSearch:
"""Creates an Instance of AzureCosmosDBVectorSearch from a Connection String
Args:
connection_string: The MongoDB vCore instance connection string
namespace: The namespace (database.collection)
embedding: The embedding utility
**kwargs: Dynamic keyword arguments
Returns:
an instance of the vector store
"""
try:
from pymongo import MongoClient
except ImportError:
raise ImportError(
"Could not import pymongo, please install it with "
"`pip install pymongo`."
)
client: MongoClient = MongoClient(connection_string)
db_name, collection_name = namespace.split(".")
collection = client[db_name][collection_name]
return cls(collection, embedding, **kwargs)
def index_exists(self) -> bool:
"""Verifies if the specified index name during instance
construction exists on the collection
Returns:
            True if the index exists on the collection, False otherwise.
"""
cursor = self._collection.list_indexes()
index_name = self._index_name
for res in cursor:
current_index_name = res.pop("name")
if current_index_name == index_name:
return True
return False
def delete_index(self) -> None:
"""Deletes the index specified during instance construction if it exists"""
if self.index_exists():
self._collection.drop_index(self._index_name)
# Raises OperationFailure on an error (e.g. trying to drop
# an index that does not exist)
def create_index(
self,
num_lists: int = 100,
dimensions: int = 1536,
similarity: CosmosDBSimilarityType = CosmosDBSimilarityType.COS,
) -> dict[str, Any]:
"""Creates an index using the index name specified at
instance construction
Setting the numLists parameter correctly is important for achieving
good accuracy and performance.
Since the vector store uses IVF as the indexing strategy,
you should create the index only after you
        have loaded a large enough sample of documents to ensure that the
        centroids for the respective buckets are fairly distributed.
We recommend that numLists is set to documentCount/1000 for up
to 1 million documents
and to sqrt(documentCount) for more than 1 million documents.
As the number of items in your database grows, you should
tune numLists to be larger
in order to achieve good latency performance for vector search.
If you're experimenting with a new scenario or creating a
small demo, you can start with numLists
set to 1 to perform a brute-force search across all vectors.
This should provide you with the most
accurate results from the vector search, however be aware that
the search speed and latency will be slow.
After your initial setup, you should go ahead and tune
the numLists parameter using the above guidance.
Args:
num_lists: This integer is the number of clusters that the
inverted file (IVF) index uses to group the vector data.
We recommend that numLists is set to documentCount/1000
for up to 1 million documents and to sqrt(documentCount)
for more than 1 million documents.
Using a numLists value of 1 is akin to performing
brute-force search, which has limited performance
dimensions: Number of dimensions for vector similarity.
The maximum number of supported dimensions is 2000
similarity: Similarity metric to use with the IVF index.
Possible options are:
- CosmosDBSimilarityType.COS (cosine distance),
- CosmosDBSimilarityType.L2 (Euclidean distance), and
- CosmosDBSimilarityType.IP (inner product).
Returns:
An object describing the created index
"""
# prepare the command
create_index_commands = {
"createIndexes": self._collection.name,
"indexes": [
{
"name": self._index_name,
"key": {"vectorContent": "cosmosSearch"},
"cosmosSearchOptions": {
"kind": "vector-ivf",
"numLists": num_lists,
"similarity": similarity,
"dimensions": dimensions,
},
}
],
}
# retrieve the database object
current_database = self._collection.database
# invoke the command from the database object
create_index_responses: dict[str, Any] = current_database.command(
create_index_commands
)
return create_index_responses
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[str, Any]]] = None,
**kwargs: Any,
) -> List:
batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE)
_metadatas: Union[List, Generator] = metadatas or ({} for _ in texts)
texts_batch = []
metadatas_batch = []
result_ids = []
for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
texts_batch.append(text)
metadatas_batch.append(metadata)
if (i + 1) % batch_size == 0:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
texts_batch = []
metadatas_batch = []
if texts_batch:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
return result_ids
def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List:
"""Used to Load Documents into the collection
Args:
texts: The list of documents strings to load
metadatas: The list of metadata objects associated with each document
        Returns:
            The list of ids of the inserted documents.
"""
# If the text is empty, then exit early
if not texts:
return []
# Embed and create the documents
embeddings = self._embedding.embed_documents(texts)
to_insert = [
{self._text_key: t, self._embedding_key: embedding, **m}
for t, m, embedding in zip(texts, metadatas, embeddings)
]
# insert the documents in Cosmos DB
insert_result = self._collection.insert_many(to_insert) # type: ignore
return insert_result.inserted_ids
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection: Optional[Collection[CosmosDBDocumentType]] = None,
**kwargs: Any,
) -> AzureCosmosDBVectorSearch:
if collection is None:
raise ValueError("Must provide 'collection' named parameter.")
vectorstore = cls(collection, embedding, **kwargs)
vectorstore.add_texts(texts, metadatas=metadatas)
return vectorstore
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
if ids is None:
raise ValueError("No document ids provided to delete.")
for document_id in ids:
self.delete_document_by_id(document_id)
return True
def delete_document_by_id(self, document_id: Optional[str] = None) -> None:
"""Removes a Specific Document by Id
Args:
document_id: The document identifier
"""
try:
from bson.objectid import ObjectId
except ImportError as e:
raise ImportError(
"Unable to import bson, please install with `pip install bson`."
) from e
if document_id is None:
raise ValueError("No document id provided to delete.")
self._collection.delete_one({"_id": ObjectId(document_id)})
def _similarity_search_with_score(
self, embeddings: List[float], k: int = 4
) -> List[Tuple[Document, float]]:
"""Returns a list of documents with their scores
Args:
embeddings: The query vector
k: the number of documents to return
Returns:
A list of documents closest to the query vector
"""
pipeline: List[dict[str, Any]] = [
{
"$search": {
"cosmosSearch": {
"vector": embeddings,
"path": self._embedding_key,
"k": k,
},
"returnStoredSource": True,
}
},
{
"$project": {
"similarityScore": {"$meta": "searchScore"},
"document": "$$ROOT",
}
},
]
cursor = self._collection.aggregate(pipeline)
docs = []
for res in cursor:
score = res.pop("similarityScore")
document_object_field = res.pop("document")
text = document_object_field.pop(self._text_key)
docs.append(
(Document(page_content=text, metadata=document_object_field), score)
)
return docs
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
embeddings = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(embeddings=embeddings, k=k)
return docs
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
# Retrieves the docs with similarity scores
# sorted by similarity scores in DESC order
docs = self._similarity_search_with_score(embedding, k=fetch_k)
# Re-ranks the docs using MMR
mmr_doc_indexes = maximal_marginal_relevance(
np.array(embedding),
[doc.metadata[self._embedding_key] for doc, _ in docs],
k=k,
lambda_mult=lambda_mult,
)
mmr_docs = [docs[i][0] for i in mmr_doc_indexes]
return mmr_docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
# compute the embeddings vector from the query string
embeddings = self._embedding.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
embeddings, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult
)
return docs
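if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. The connection
    # string, namespace and embedding model are assumptions for illustration;
    # running this requires a Cosmos DB for MongoDB vCore cluster, `pymongo`,
    # and OpenAI credentials for the embeddings.
    from langchain.embeddings import OpenAIEmbeddings
    store = AzureCosmosDBVectorSearch.from_connection_string(
        connection_string="<YOUR-CONNECTION-STRING>",
        namespace="testdb.testcollection",
        embedding=OpenAIEmbeddings(),
    )
    store.add_texts(["Cosmos DB vCore supports IVF vector indexes."])
    # Per the create_index docstring above, build the index only after enough
    # documents are loaded; num_lists=1 is a brute-force setting for small demos.
    if not store.index_exists():
        store.create_index(
            num_lists=1, dimensions=1536, similarity=CosmosDBSimilarityType.COS
        )
    print(store.similarity_search("vector index", k=1))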
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~embeddings~bedrock.py | import asyncio
import json
import os
from functools import partial
from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
class BedrockEmbeddings(BaseModel, Embeddings):
"""Bedrock embedding models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
            from langchain.embeddings import BedrockEmbeddings
region_name ="us-east-1"
credentials_profile_name = "default"
model_id = "amazon.titan-embed-text-v1"
be = BedrockEmbeddings(
credentials_profile_name=credentials_profile_name,
region_name=region_name,
model_id=model_id
)
"""
client: Any #: :meta private:
"""Bedrock client."""
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str = "amazon.titan-embed-text-v1"
"""Id of the model to call, e.g., amazon.titan-embed-text-v1, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_url: Optional[str] = None
"""Needed if you don't want to default to us-east-1 endpoint"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
if values["endpoint_url"]:
client_params["endpoint_url"] = values["endpoint_url"]
values["client"] = session.client("bedrock-runtime", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
def _embedding_func(self, text: str) -> List[float]:
"""Call out to Bedrock embedding endpoint."""
# replace newlines, which can negatively affect performance.
text = text.replace(os.linesep, " ")
_model_kwargs = self.model_kwargs or {}
input_body = {**_model_kwargs, "inputText": text}
body = json.dumps(input_body)
try:
response = self.client.invoke_model(
body=body,
modelId=self.model_id,
accept="application/json",
contentType="application/json",
)
response_body = json.loads(response.get("body").read())
return response_body.get("embedding")
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a Bedrock model.
Args:
texts: The list of texts to embed
Returns:
List of embeddings, one for each text.
"""
results = []
for text in texts:
response = self._embedding_func(text)
results.append(response)
return results
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a Bedrock model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func(text)
async def aembed_query(self, text: str) -> List[float]:
"""Asynchronous compute query embeddings using a Bedrock model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.embed_query, text)
)
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Asynchronous compute doc embeddings using a Bedrock model.
Args:
texts: The list of texts to embed
Returns:
List of embeddings, one for each text.
"""
result = await asyncio.gather(*[self.aembed_query(text) for text in texts])
return list(result)
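if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. The region, profile
    # name and model id are assumptions for illustration; running this requires
    # boto3 and AWS credentials with access to the Bedrock runtime.
    embedder = BedrockEmbeddings(
        credentials_profile_name="default",
        region_name="us-east-1",
        model_id="amazon.titan-embed-text-v1",
    )
    vectors = embedder.embed_documents(["hello world", "goodbye world"])
    print(len(vectors), len(vectors[0]))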
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_titan_takeoff_pro.py | """Test Titan Takeoff wrapper."""
import responses
from langchain.llms.titan_takeoff_pro import TitanTakeoffPro
@responses.activate
def test_titan_takeoff_pro_call() -> None:
"""Test valid call to Titan Takeoff."""
url = "http://localhost:3000/generate"
responses.add(responses.POST, url, json={"message": "2 + 2 is 4"}, status=200)
# response = requests.post(url)
llm = TitanTakeoffPro()
output = llm("What is 2 + 2?")
assert isinstance(output, str)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~prompts~test_loading.py | """Test loading functionality."""
import os
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator
import pytest
from langchain.output_parsers import RegexParser
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.loading import load_prompt
from langchain.prompts.prompt import PromptTemplate
EXAMPLE_DIR = Path("tests/unit_tests/examples").absolute()
@contextmanager
def change_directory(dir: Path) -> Iterator:
"""Change the working directory to the right folder."""
origin = Path().absolute()
try:
os.chdir(dir)
yield
finally:
os.chdir(origin)
def test_loading_from_YAML() -> None:
"""Test loading from yaml file."""
prompt = load_prompt(EXAMPLE_DIR / "simple_prompt.yaml")
expected_prompt = PromptTemplate(
input_variables=["adjective", "content"],
template="Tell me a {adjective} joke about {content}.",
)
assert prompt == expected_prompt
def test_loading_from_JSON() -> None:
"""Test loading from json file."""
prompt = load_prompt(EXAMPLE_DIR / "simple_prompt.json")
expected_prompt = PromptTemplate(
input_variables=["adjective", "content"],
template="Tell me a {adjective} joke about {content}.",
)
assert prompt == expected_prompt
def test_loading_jinja_from_JSON() -> None:
"""Test that loading jinja2 format prompts from JSON raises ValueError."""
prompt_path = EXAMPLE_DIR / "jinja_injection_prompt.json"
with pytest.raises(ValueError, match=".*can lead to arbitrary code execution.*"):
load_prompt(prompt_path)
def test_loading_jinja_from_YAML() -> None:
"""Test that loading jinja2 format prompts from YAML raises ValueError."""
prompt_path = EXAMPLE_DIR / "jinja_injection_prompt.yaml"
with pytest.raises(ValueError, match=".*can lead to arbitrary code execution.*"):
load_prompt(prompt_path)
def test_saving_loading_round_trip(tmp_path: Path) -> None:
"""Test equality when saving and loading a prompt."""
simple_prompt = PromptTemplate(
input_variables=["adjective", "content"],
template="Tell me a {adjective} joke about {content}.",
)
simple_prompt.save(file_path=tmp_path / "prompt.yaml")
loaded_prompt = load_prompt(tmp_path / "prompt.yaml")
assert loaded_prompt == simple_prompt
few_shot_prompt = FewShotPromptTemplate(
input_variables=["adjective"],
prefix="Write antonyms for the following words.",
example_prompt=PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
),
examples=[
{"input": "happy", "output": "sad"},
{"input": "tall", "output": "short"},
],
suffix="Input: {adjective}\nOutput:",
)
few_shot_prompt.save(file_path=tmp_path / "few_shot.yaml")
loaded_prompt = load_prompt(tmp_path / "few_shot.yaml")
assert loaded_prompt == few_shot_prompt
def test_loading_with_template_as_file() -> None:
"""Test loading when the template is a file."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt("simple_prompt_with_template_file.json")
expected_prompt = PromptTemplate(
input_variables=["adjective", "content"],
template="Tell me a {adjective} joke about {content}.",
)
assert prompt == expected_prompt
def test_loading_few_shot_prompt_from_yaml() -> None:
"""Test loading few shot prompt from yaml."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt("few_shot_prompt.yaml")
expected_prompt = FewShotPromptTemplate(
input_variables=["adjective"],
prefix="Write antonyms for the following words.",
example_prompt=PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
),
examples=[
{"input": "happy", "output": "sad"},
{"input": "tall", "output": "short"},
],
suffix="Input: {adjective}\nOutput:",
)
assert prompt == expected_prompt
def test_loading_few_shot_prompt_from_json() -> None:
"""Test loading few shot prompt from json."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt("few_shot_prompt.json")
expected_prompt = FewShotPromptTemplate(
input_variables=["adjective"],
prefix="Write antonyms for the following words.",
example_prompt=PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
),
examples=[
{"input": "happy", "output": "sad"},
{"input": "tall", "output": "short"},
],
suffix="Input: {adjective}\nOutput:",
)
assert prompt == expected_prompt
def test_loading_few_shot_prompt_when_examples_in_config() -> None:
"""Test loading few shot prompt when the examples are in the config."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt("few_shot_prompt_examples_in.json")
expected_prompt = FewShotPromptTemplate(
input_variables=["adjective"],
prefix="Write antonyms for the following words.",
example_prompt=PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
),
examples=[
{"input": "happy", "output": "sad"},
{"input": "tall", "output": "short"},
],
suffix="Input: {adjective}\nOutput:",
)
assert prompt == expected_prompt
def test_loading_few_shot_prompt_example_prompt() -> None:
"""Test loading few shot when the example prompt is in its own file."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt("few_shot_prompt_example_prompt.json")
expected_prompt = FewShotPromptTemplate(
input_variables=["adjective"],
prefix="Write antonyms for the following words.",
example_prompt=PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
),
examples=[
{"input": "happy", "output": "sad"},
{"input": "tall", "output": "short"},
],
suffix="Input: {adjective}\nOutput:",
)
assert prompt == expected_prompt
def test_loading_with_output_parser() -> None:
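    """Test loading a prompt that includes an output parser."""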
with change_directory(EXAMPLE_DIR):
prompt = load_prompt("prompt_with_output_parser.json")
expected_template = "Given the following question and student answer, provide a correct answer and score the student answer.\nQuestion: {question}\nStudent Answer: {student_answer}\nCorrect Answer:" # noqa: E501
expected_prompt = PromptTemplate(
input_variables=["question", "student_answer"],
output_parser=RegexParser(
regex="(.*?)\nScore: (.*)",
output_keys=["answer", "score"],
),
template=expected_template,
)
assert prompt == expected_prompt
| [
"simple_prompt.yaml",
"few_shot_prompt.json",
"few_shot_prompt_examples_in.json",
"(.*?)\nScore: (.*)",
"prompt_with_output_parser.json",
"Input: {adjective}\nOutput:",
"simple_prompt_with_template_file.json",
"content",
"answer",
"few_shot_prompt_example_prompt.json",
"prompt.yaml",
"Input: {input}\nOutput: {output}",
"question",
"input",
"simple_prompt.json",
"Given the following question and student answer, provide a correct answer and score the student answer.\nQuestion: {question}\nStudent Answer: {student_answer}\nCorrect Answer:",
"few_shot_prompt.yaml",
"few_shot.yaml",
"jinja_injection_prompt.json",
"student_answer",
"Write antonyms for the following words.",
"jinja_injection_prompt.yaml",
"Tell me a {adjective} joke about {content}."
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~bittensor.py | import http.client
import json
import ssl
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
class NIBittensorLLM(LLM):
"""NIBittensor LLMs
NIBittensorLLM is created by Neural Internet (https://neuralinternet.ai/),
powered by Bittensor, a decentralized network full of different AI models.
To analyze API_KEYS and logs of your usage visit
https://api.neuralinternet.ai/api-keys
https://api.neuralinternet.ai/logs
Example:
.. code-block:: python
from langchain.llms import NIBittensorLLM
llm = NIBittensorLLM()
"""
system_prompt: Optional[str]
"""Provide system prompt that you want to supply it to model before every prompt"""
top_responses: Optional[int] = 0
"""Provide top_responses to get Top N miner responses on one request.May get delayed
Don't use in Production"""
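    # Illustrative configuration (sketch; the prompt text and value below are made up):
    #     llm = NIBittensorLLM(system_prompt="Be concise.", top_responses=3)
    # With top_responses set, the call returns the raw JSON of the top N miner responses
    # instead of a single reply string.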
@property
def _llm_type(self) -> str:
return "NIBittensorLLM"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""
        Wrapper around the Bittensor top miner models. It is built by Neural Internet.
Call the Neural Internet's BTVEP Server and return the output.
Parameters (optional):
system_prompt(str): A system prompt defining how your model should respond.
top_responses(int): Total top miner responses to retrieve from Bittensor
protocol.
Return:
The generated response(s).
Example:
.. code-block:: python
from langchain.llms import NIBittensorLLM
llm = NIBittensorLLM(system_prompt="Act like you are programmer with \
5+ years of experience.")
"""
# Creating HTTPS connection with SSL
context = ssl.create_default_context()
context.check_hostname = True
conn = http.client.HTTPSConnection("test.neuralinternet.ai", context=context)
# Sanitizing User Input before passing to API.
if isinstance(self.top_responses, int):
top_n = min(100, self.top_responses)
else:
top_n = 0
default_prompt = "You are an assistant which is created by Neural Internet(NI) \
in decentralized network named as a Bittensor."
if self.system_prompt is None:
system_prompt = (
default_prompt
+ " Your task is to provide accurate response based on user prompt"
)
else:
system_prompt = default_prompt + str(self.system_prompt)
# Retrieving API KEY to pass into header of each request
conn.request("GET", "/admin/api-keys/")
api_key_response = conn.getresponse()
api_keys_data = (
api_key_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
api_keys_json = json.loads(api_keys_data)
api_key = api_keys_json[0]["api_key"]
# Creating Header and getting top benchmark miner uids
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}",
"Endpoint-Version": "2023-05-19",
}
conn.request("GET", "/top_miner_uids", headers=headers)
miner_response = conn.getresponse()
miner_data = (
miner_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
uids = json.loads(miner_data)
# Condition for benchmark miner response
if isinstance(uids, list) and uids and not top_n:
for uid in uids:
try:
payload = json.dumps(
{
"uids": [uid],
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
}
)
conn.request("POST", "/chat", payload, headers)
init_response = conn.getresponse()
init_data = (
init_response.read()
.decode("utf-8")
.replace("\n", "")
.replace("\t", "")
)
init_json = json.loads(init_data)
if "choices" not in init_json:
continue
reply = init_json["choices"][0]["message"]["content"]
conn.close()
return reply
except Exception:
continue
# For top miner based on bittensor response
try:
payload = json.dumps(
{
"top_n": top_n,
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
}
)
conn.request("POST", "/chat", payload, headers)
response = conn.getresponse()
utf_string = (
response.read().decode("utf-8").replace("\n", "").replace("\t", "")
)
if top_n:
conn.close()
return utf_string
json_resp = json.loads(utf_string)
reply = json_resp["choices"][0]["message"]["content"]
conn.close()
return reply
except Exception as e:
conn.request("GET", f"/error_msg?e={e}&p={prompt}", headers=headers)
return "Sorry I am unable to provide response now, Please try again later."
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"system_prompt": self.system_prompt,
"top_responses": self.top_responses,
}
| [
"You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor.",
"default_prompt + str(self.system_prompt)",
"You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt"
] |
2024-01-10 | RohanDey02/langchain | libs~experimental~tests~unit_tests~test_smartllm.py | """Test SmartLLM."""
from langchain.chat_models import FakeListChatModel
from langchain.llms import FakeListLLM
from langchain.prompts.prompt import PromptTemplate
from langchain_experimental.smart_llm import SmartLLMChain
def test_ideation() -> None:
# test that correct responses are returned
responses = ["Idea 1", "Idea 2", "Idea 3"]
llm = FakeListLLM(responses=responses)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = SmartLLMChain(llm=llm, prompt=prompt)
prompt_value, _ = chain.prep_prompts({"product": "socks"})
chain.history.question = prompt_value.to_string()
results = chain._ideate()
assert results == responses
# test that correct number of responses are returned
for i in range(1, 5):
responses = [f"Idea {j+1}" for j in range(i)]
llm = FakeListLLM(responses=responses)
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=i)
prompt_value, _ = chain.prep_prompts({"product": "socks"})
chain.history.question = prompt_value.to_string()
results = chain._ideate()
assert len(results) == i
def test_critique() -> None:
response = "Test Critique"
llm = FakeListLLM(responses=[response])
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2)
prompt_value, _ = chain.prep_prompts({"product": "socks"})
chain.history.question = prompt_value.to_string()
chain.history.ideas = ["Test Idea 1", "Test Idea 2"]
result = chain._critique()
assert result == response
def test_resolver() -> None:
response = "Test resolution"
llm = FakeListLLM(responses=[response])
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2)
prompt_value, _ = chain.prep_prompts({"product": "socks"})
chain.history.question = prompt_value.to_string()
chain.history.ideas = ["Test Idea 1", "Test Idea 2"]
chain.history.critique = "Test Critique"
result = chain._resolve()
assert result == response
def test_all_steps() -> None:
joke = "Why did the chicken cross the Mobius strip?"
response = "Resolution response"
ideation_llm = FakeListLLM(responses=["Ideation response" for _ in range(20)])
critique_llm = FakeListLLM(responses=["Critique response" for _ in range(20)])
resolver_llm = FakeListLLM(responses=[response for _ in range(20)])
prompt = PromptTemplate(
input_variables=["joke"],
template="Explain this joke to me: {joke}?",
)
chain = SmartLLMChain(
ideation_llm=ideation_llm,
critique_llm=critique_llm,
resolver_llm=resolver_llm,
prompt=prompt,
)
result = chain(joke)
assert result["joke"] == joke
assert result["resolution"] == response
def test_intermediate_output() -> None:
joke = "Why did the chicken cross the Mobius strip?"
llm = FakeListLLM(responses=[f"Response {i+1}" for i in range(5)])
prompt = PromptTemplate(
input_variables=["joke"],
template="Explain this joke to me: {joke}?",
)
chain = SmartLLMChain(llm=llm, prompt=prompt, return_intermediate_steps=True)
result = chain(joke)
assert result["joke"] == joke
assert result["ideas"] == [f"Response {i+1}" for i in range(3)]
assert result["critique"] == "Response 4"
assert result["resolution"] == "Response 5"
def test_all_steps_with_chat_model() -> None:
joke = "Why did the chicken cross the Mobius strip?"
response = "Resolution response"
ideation_llm = FakeListChatModel(responses=["Ideation response" for _ in range(20)])
critique_llm = FakeListChatModel(responses=["Critique response" for _ in range(20)])
resolver_llm = FakeListChatModel(responses=[response for _ in range(20)])
prompt = PromptTemplate(
input_variables=["joke"],
template="Explain this joke to me: {joke}?",
)
chain = SmartLLMChain(
ideation_llm=ideation_llm,
critique_llm=critique_llm,
resolver_llm=resolver_llm,
prompt=prompt,
)
result = chain(joke)
assert result["joke"] == joke
assert result["resolution"] == response
| [
"Explain this joke to me: {joke}?",
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~callbacks~manager.py | from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Coroutine,
Dict,
Generator,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from uuid import UUID
from langsmith.run_helpers import get_run_tree_context
from tenacity import RetryCallState
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers import run_collector
from langchain.callbacks.tracers.langchain import (
LangChainTracer,
)
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.callbacks.tracers.wandb import WandbTracer
from langchain.schema import (
AgentAction,
AgentFinish,
Document,
LLMResult,
)
from langchain.schema.messages import BaseMessage, get_buffer_string
from langchain.schema.output import ChatGenerationChunk, GenerationChunk
if TYPE_CHECKING:
from langsmith import Client as LangSmithClient
logger = logging.getLogger(__name__)
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
tracing_callback_var: ContextVar[
Optional[LangChainTracerV1]
] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
wandb_tracing_callback_var: ContextVar[
Optional[WandbTracer]
] = ContextVar( # noqa: E501
"tracing_wandb_callback", default=None
)
tracing_v2_callback_var: ContextVar[
Optional[LangChainTracer]
] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
run_collector_var: ContextVar[
Optional[run_collector.RunCollectorCallbackHandler]
] = ContextVar( # noqa: E501
"run_collector", default=None
)
def _get_debug() -> bool:
from langchain.globals import get_debug
return get_debug()
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get the OpenAI callback handler in a context manager.
which conveniently exposes token and cost information.
Returns:
OpenAICallbackHandler: The OpenAI callback handler.
Example:
>>> with get_openai_callback() as cb:
... # Use the OpenAI callback handler
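        ...     llm.invoke("Tell me a joke")  # illustrative; assumes `llm` is an OpenAI-backed model
        >>> cb.total_tokens  # the handler also exposes prompt/completion token counts and total cost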
"""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get the Deprecated LangChainTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
TracerSessionV1: The LangChainTracer session.
Example:
>>> with tracing_enabled() as session:
... # Use the LangChainTracer session
"""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
@contextmanager
def wandb_tracing_enabled(
session_name: str = "default",
) -> Generator[None, None, None]:
"""Get the WandbTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
None
Example:
>>> with wandb_tracing_enabled() as session:
... # Use the WandbTracer session
"""
cb = WandbTracer()
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
project_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
tags: Optional[List[str]] = None,
client: Optional[LangSmithClient] = None,
) -> Generator[LangChainTracer, None, None]:
"""Instruct LangChain to log all runs in context to LangSmith.
Args:
project_name (str, optional): The name of the project.
Defaults to "default".
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
tags (List[str], optional): The tags to add to the run.
Defaults to None.
Returns:
None
Example:
>>> with tracing_v2_enabled():
... # LangChain code will automatically be traced
You can use this to fetch the LangSmith run URL:
>>> with tracing_v2_enabled() as cb:
... chain.invoke("foo")
... run_url = cb.get_run_url()
"""
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
example_id=example_id,
project_name=project_name,
tags=tags,
client=client,
)
tracing_v2_callback_var.set(cb)
yield cb
tracing_v2_callback_var.set(None)
@contextmanager
def collect_runs() -> Generator[run_collector.RunCollectorCallbackHandler, None, None]:
"""Collect all run traces in context.
Returns:
run_collector.RunCollectorCallbackHandler: The run collector callback handler.
Example:
>>> with collect_runs() as runs_cb:
chain.invoke("foo")
run_id = runs_cb.traced_runs[0].id
"""
cb = run_collector.RunCollectorCallbackHandler()
run_collector_var.set(cb)
yield cb
run_collector_var.set(None)
@contextmanager
def trace_as_chain_group(
group_name: str,
callback_manager: Optional[CallbackManager] = None,
*,
inputs: Optional[Dict[str, Any]] = None,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> Generator[CallbackManagerForChainGroup, None, None]:
"""Get a callback manager for a chain group in a context manager.
Useful for grouping different calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (CallbackManager, optional): The callback manager to use.
inputs (Dict[str, Any], optional): The inputs to the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
CallbackManagerForChainGroup: The callback manager for the chain group.
Example:
.. code-block:: python
llm_input = "Foo"
with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the callback manager for the chain group
res = llm.predict(llm_input, callbacks=manager)
manager.on_chain_end({"output": res})
""" # noqa: E501
cb = cast(
Callbacks,
[
LangChainTracer(
project_name=project_name,
example_id=example_id,
)
]
if callback_manager is None
else callback_manager,
)
cm = CallbackManager.configure(
inheritable_callbacks=cb,
inheritable_tags=tags,
)
run_manager = cm.on_chain_start({"name": group_name}, inputs or {}, run_id=run_id)
child_cm = run_manager.get_child()
group_cm = CallbackManagerForChainGroup(
child_cm.handlers,
child_cm.inheritable_handlers,
child_cm.parent_run_id,
parent_run_manager=run_manager,
tags=child_cm.tags,
inheritable_tags=child_cm.inheritable_tags,
metadata=child_cm.metadata,
inheritable_metadata=child_cm.inheritable_metadata,
)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
run_manager.on_chain_end({})
@asynccontextmanager
async def atrace_as_chain_group(
group_name: str,
callback_manager: Optional[AsyncCallbackManager] = None,
*,
inputs: Optional[Dict[str, Any]] = None,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]:
"""Get an async callback manager for a chain group in a context manager.
Useful for grouping different async calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (AsyncCallbackManager, optional): The async callback manager to use,
            which manages tracing and other callback behavior.
        inputs (Dict[str, Any], optional): The inputs to the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
AsyncCallbackManager: The async callback manager for the chain group.
Example:
.. code-block:: python
llm_input = "Foo"
async with atrace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the async callback manager for the chain group
res = await llm.apredict(llm_input, callbacks=manager)
await manager.on_chain_end({"output": res})
""" # noqa: E501
cb = cast(
Callbacks,
[
LangChainTracer(
project_name=project_name,
example_id=example_id,
)
]
if callback_manager is None
else callback_manager,
)
cm = AsyncCallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags)
run_manager = await cm.on_chain_start(
{"name": group_name}, inputs or {}, run_id=run_id
)
child_cm = run_manager.get_child()
group_cm = AsyncCallbackManagerForChainGroup(
child_cm.handlers,
child_cm.inheritable_handlers,
child_cm.parent_run_id,
parent_run_manager=run_manager,
tags=child_cm.tags,
inheritable_tags=child_cm.inheritable_tags,
metadata=child_cm.metadata,
inheritable_metadata=child_cm.inheritable_metadata,
)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
await run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
await run_manager.on_chain_end({})
def handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager.
Note: This function is used by langserve to handle events.
Args:
handlers: The list of handlers that will handle the event
event_name: The name of the event (e.g., "on_llm_start")
ignore_condition_name: Name of the attribute defined on handler
that if True will cause the handler to be skipped for the given event
*args: The arguments to pass to the event handler
**kwargs: The keyword arguments to pass to the event handler
"""
coros: List[Coroutine[Any, Any, Any]] = []
try:
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(
handler, ignore_condition_name
):
event = getattr(handler, event_name)(*args, **kwargs)
if asyncio.iscoroutine(event):
coros.append(event)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
handler_name = handler.__class__.__name__
logger.warning(
f"NotImplementedError in {handler_name}.{event_name}"
f" callback: {repr(e)}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback:"
f" {repr(e)}"
)
if handler.raise_error:
raise e
finally:
if coros:
try:
# Raises RuntimeError if there is no current event loop.
asyncio.get_running_loop()
loop_running = True
except RuntimeError:
loop_running = False
if loop_running:
# If we try to submit this coroutine to the running loop
# we end up in a deadlock, as we'd have gotten here from a
# running coroutine, which we cannot interrupt to run this one.
# The solution is to create a new loop in a new thread.
with ThreadPoolExecutor(1) as executor:
executor.submit(_run_coros, coros).result()
else:
_run_coros(coros)
def _run_coros(coros: List[Coroutine[Any, Any, Any]]) -> None:
if hasattr(asyncio, "Runner"):
# Python 3.11+
# Run the coroutines in a new event loop, taking care to
# - install signal handlers
# - run pending tasks scheduled by `coros`
# - close asyncgens and executors
# - close the loop
with asyncio.Runner() as runner:
# Run the coroutine, get the result
for coro in coros:
runner.run(coro)
# Run pending tasks scheduled by coros until they are all done
while pending := asyncio.all_tasks(runner.get_loop()):
runner.run(asyncio.wait(pending))
else:
# Before Python 3.11 we need to run each coroutine in a new event loop
# as the Runner api is not available.
for coro in coros:
asyncio.run(coro)
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
if handler.run_inline:
event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None, functools.partial(event, *args, **kwargs)
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(
f"NotImplementedError in {handler.__class__.__name__}.{event_name}"
f" callback: {repr(e)}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback:"
f" {repr(e)}"
)
if handler.raise_error:
raise e
async def ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager.
Note: This function is used by langserve to handle events.
Args:
handlers: The list of handlers that will handle the event
event_name: The name of the event (e.g., "on_llm_start")
ignore_condition_name: Name of the attribute defined on handler
that if True will cause the handler to be skipped for the given event
*args: The arguments to pass to the event handler
**kwargs: The keyword arguments to pass to the event handler
"""
for handler in [h for h in handlers if h.run_inline]:
await _ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
await asyncio.gather(
*(
_ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
for handler in handlers
if not handler.run_inline
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
*,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
inheritable_tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
) -> None:
"""Initialize the run manager.
Args:
run_id (UUID): The ID of the run.
handlers (List[BaseCallbackHandler]): The list of handlers.
inheritable_handlers (List[BaseCallbackHandler]):
The list of inheritable handlers.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
tags (Optional[List[str]]): The list of tags.
inheritable_tags (Optional[List[str]]): The list of inheritable tags.
metadata (Optional[Dict[str, Any]]): The metadata.
inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata.
"""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
self.tags = tags or []
self.inheritable_tags = inheritable_tags or []
self.metadata = metadata or {}
self.inheritable_metadata = inheritable_metadata or {}
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations.
Returns:
BaseRunManager: The noop manager.
"""
return cls(
run_id=uuid.uuid4(),
handlers=[],
inheritable_handlers=[],
tags=[],
inheritable_tags=[],
metadata={},
inheritable_metadata={},
)
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
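        """Run when a retry event is received."""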
handle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class ParentRunManager(RunManager):
"""Sync Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
CallbackManager: The child callback manager.
"""
manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
await ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
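        """Run when a retry event is received."""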
await ahandle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncParentRunManager(AsyncRunManager):
"""Async Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
AsyncCallbackManager: The child callback manager.
"""
manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
chunk=chunk,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
            error (BaseException): The error.
"""
handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
await ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
chunk=chunk,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
await ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
            error (BaseException): The error.
"""
await ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
            error (BaseException): The error.
"""
handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
async def on_chain_end(
self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
await ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
            error (BaseException): The error.
"""
await ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
await ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
await ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_tool_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
            error (BaseException): The error.
"""
handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
await ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_tool_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
            error (BaseException): The error.
"""
await ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin):
"""Callback manager for retriever run."""
def on_retriever_end(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> None:
"""Run when retriever ends running."""
handle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retriever_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
handle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForRetrieverRun(
AsyncParentRunManager,
RetrieverManagerMixin,
):
"""Async callback manager for retriever run."""
async def on_retriever_end(
self, documents: Sequence[Document], **kwargs: Any
) -> None:
"""Run when retriever ends running."""
await ahandle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retriever_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
await ahandle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that handles callbacks from LangChain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
prompt as an LLM run.
"""
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
list of messages as an LLM run.
"""
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
CallbackManagerForChainRun: The callback manager for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run. Defaults to None.
Returns:
CallbackManagerForToolRun: The callback manager for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
handle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> CallbackManager:
"""Configure the callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
CallbackManager: The configured callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
class CallbackManagerForChainGroup(CallbackManager):
"""Callback manager for the chain group."""
def __init__(
self,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler] | None = None,
parent_run_id: UUID | None = None,
*,
parent_run_manager: CallbackManagerForChainRun,
**kwargs: Any,
) -> None:
super().__init__(
handlers,
inheritable_handlers,
parent_run_id,
**kwargs,
)
self.parent_run_manager = parent_run_manager
self.ended = False
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
"""Run when traced chain group ends.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
self.ended = True
return self.parent_run_manager.on_chain_end(outputs, **kwargs)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
            error (BaseException): The error.
"""
self.ended = True
return self.parent_run_manager.on_chain_error(error, **kwargs)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that handles callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of async
callback managers, one for each LLM Run corresponding
to each prompt.
"""
tasks = []
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
tasks.append(
ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of
async callback managers, one for each LLM Run
corresponding to each inner message list.
"""
tasks = []
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
tasks.append(
ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
AsyncCallbackManagerForChainRun: The async callback manager
for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
Returns:
AsyncCallbackManagerForToolRun: The async callback manager
for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> AsyncCallbackManager:
"""Configure the async callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
AsyncCallbackManager: The configured async callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
class AsyncCallbackManagerForChainGroup(AsyncCallbackManager):
"""Async callback manager for the chain group."""
def __init__(
self,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler] | None = None,
parent_run_id: UUID | None = None,
*,
parent_run_manager: AsyncCallbackManagerForChainRun,
**kwargs: Any,
) -> None:
super().__init__(
handlers,
inheritable_handlers,
parent_run_id,
**kwargs,
)
self.parent_run_manager = parent_run_manager
self.ended = False
async def on_chain_end(
self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
) -> None:
"""Run when traced chain group ends.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
self.ended = True
await self.parent_run_manager.on_chain_end(outputs, **kwargs)
async def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
            error (BaseException): The error.
"""
self.ended = True
await self.parent_run_manager.on_chain_error(error, **kwargs)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> T:
"""Configure the callback manager.
Args:
callback_manager_cls (Type[T]): The callback manager class.
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
T: The configured callback manager.
"""
run_tree = get_run_tree_context()
parent_run_id = None if run_tree is None else getattr(run_tree, "id")
callback_manager = callback_manager_cls(handlers=[], parent_run_id=parent_run_id)
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
parent_run_id=parent_run_id,
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers.copy(),
inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(),
parent_run_id=inheritable_callbacks.parent_run_id,
tags=inheritable_callbacks.tags.copy(),
inheritable_tags=inheritable_callbacks.inheritable_tags.copy(),
metadata=inheritable_callbacks.metadata.copy(),
inheritable_metadata=inheritable_callbacks.inheritable_metadata.copy(),
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
if inheritable_tags or local_tags:
callback_manager.add_tags(inheritable_tags or [])
callback_manager.add_tags(local_tags or [], False)
if inheritable_metadata or local_metadata:
callback_manager.add_metadata(inheritable_metadata or {})
callback_manager.add_metadata(local_metadata or {}, False)
tracer = tracing_callback_var.get()
wandb_tracer = wandb_tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING")
or tracer is not None
or env_var_is_set("LANGCHAIN_HANDLER")
)
wandb_tracing_enabled_ = (
env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None
)
tracer_project = getattr(
run_tree,
"session_name",
os.environ.get(
"LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default")
),
)
run_collector_ = run_collector_var.get()
debug = _get_debug()
if (
verbose
or debug
or tracing_enabled_
or tracing_v2_enabled_
or wandb_tracing_enabled_
or open_ai is not None
):
if verbose and not any(
isinstance(handler, StdOutCallbackHandler)
for handler in callback_manager.handlers
):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(
isinstance(handler, ConsoleCallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1)
for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_project)
callback_manager.add_handler(handler, True)
if wandb_tracing_enabled_ and not any(
isinstance(handler, WandbTracer) for handler in callback_manager.handlers
):
if wandb_tracer:
callback_manager.add_handler(wandb_tracer, True)
else:
handler = WandbTracer()
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(project_name=tracer_project)
callback_manager.add_handler(handler, True)
except Exception as e:
logger.warning(
"Unable to load requested LangChainTracer."
" To disable this warning,"
" unset the LANGCHAIN_TRACING_V2 environment variables.",
e,
)
if open_ai is not None and not any(
handler is open_ai # direct pointer comparison
for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
if run_collector_ is not None and not any(
handler is run_collector_ # direct pointer comparison
for handler in callback_manager.handlers
):
callback_manager.add_handler(run_collector_, False)
return callback_manager
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~playwright~toolkit.py | """Playwright web browser toolkit."""
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Type, cast
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.pydantic_v1 import Extra, root_validator
from langchain.tools.base import BaseTool
from langchain.tools.playwright.base import (
BaseBrowserTool,
lazy_import_playwright_browsers,
)
from langchain.tools.playwright.click import ClickTool
from langchain.tools.playwright.current_page import CurrentWebPageTool
from langchain.tools.playwright.extract_hyperlinks import ExtractHyperlinksTool
from langchain.tools.playwright.extract_text import ExtractTextTool
from langchain.tools.playwright.get_elements import GetElementsTool
from langchain.tools.playwright.navigate import NavigateTool
from langchain.tools.playwright.navigate_back import NavigateBackTool
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
else:
try:
# We do this so pydantic can resolve the types when instantiating
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
except ImportError:
pass
class PlayWrightBrowserToolkit(BaseToolkit):
"""Toolkit for PlayWright browser tools.
**Security Note**: This toolkit provides code to control a web-browser.
Be careful if exposing this toolkit to end-users. The tools in the toolkit
are capable of navigating to arbitrary webpages, clicking on arbitrary
elements, and extracting arbitrary text and hyperlinks from webpages.
Specifically, by default this toolkit allows navigating to:
- Any URL (including any internal network URLs)
- And local files
If exposing to end-users, consider limiting network access to the
server that hosts the agent; in addition, it is advised
to create a custom NavigationTool with an args_schema that limits the URLs
that can be navigated to (e.g., only allow navigating to URLs that
start with a particular prefix).
Remember to scope permissions to the minimal permissions necessary for
the application. If the default tool selection is not appropriate for
the application, consider creating a custom toolkit with the appropriate
tools.
See https://python.langchain.com/docs/security for more information.
"""
sync_browser: Optional["SyncBrowser"] = None
async_browser: Optional["AsyncBrowser"] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator
def validate_imports_and_browser_provided(cls, values: dict) -> dict:
"""Check that the arguments are valid."""
lazy_import_playwright_browsers()
if values.get("async_browser") is None and values.get("sync_browser") is None:
raise ValueError("Either async_browser or sync_browser must be specified.")
return values
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tool_classes: List[Type[BaseBrowserTool]] = [
ClickTool,
NavigateTool,
NavigateBackTool,
ExtractTextTool,
ExtractHyperlinksTool,
GetElementsTool,
CurrentWebPageTool,
]
tools = [
tool_cls.from_browser(
sync_browser=self.sync_browser, async_browser=self.async_browser
)
for tool_cls in tool_classes
]
return cast(List[BaseTool], tools)
@classmethod
def from_browser(
cls,
sync_browser: Optional[SyncBrowser] = None,
async_browser: Optional[AsyncBrowser] = None,
) -> PlayWrightBrowserToolkit:
"""Instantiate the toolkit."""
# This is to raise a better error than the forward ref ones Pydantic would have
lazy_import_playwright_browsers()
return cls(sync_browser=sync_browser, async_browser=async_browser)
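# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of wiring the toolkit to a synchronous Playwright browser.
# It assumes `playwright` is installed and that the helper
# `create_sync_playwright_browser` is available in
# `langchain.tools.playwright.utils`; adjust the import if your version differs.
if __name__ == "__main__":
    from langchain.tools.playwright.utils import create_sync_playwright_browser

    browser = create_sync_playwright_browser()
    toolkit = PlayWrightBrowserToolkit.from_browser(sync_browser=browser)
    for tool in toolkit.get_tools():
        # Prints the tools returned by get_tools (click, navigate, extract text, ...).
        print(tool.name)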
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~tools~powerbi~test_powerbi.py | def test_power_bi_can_be_imported() -> None:
"""Test that powerbi tools can be imported.
The goal of this test is to verify that langchain users will not get import errors
when loading powerbi related code if they don't have optional dependencies
installed.
"""
from langchain.tools.powerbi.tool import QueryPowerBITool # noqa
from langchain.agents.agent_toolkits import PowerBIToolkit, create_pbi_agent # noqa
from langchain.utilities.powerbi import PowerBIDataset # noqa
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~huggingface_pipeline.py | from __future__ import annotations
import importlib.util
import logging
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import BaseLLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra
from langchain.schema import Generation, LLMResult
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
DEFAULT_BATCH_SIZE = 4
logger = logging.getLogger(__name__)
class HuggingFacePipeline(BaseLLM):
"""HuggingFace Pipeline API.
To use, you should have the ``transformers`` python package installed.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example using from_model_id:
.. code-block:: python
from langchain.llms import HuggingFacePipeline
hf = HuggingFacePipeline.from_model_id(
model_id="gpt2",
task="text-generation",
pipeline_kwargs={"max_new_tokens": 10},
)
Example passing pipeline in directly:
.. code-block:: python
from langchain.llms import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
hf = HuggingFacePipeline(pipeline=pipe)
"""
pipeline: Any #: :meta private:
model_id: str = DEFAULT_MODEL_ID
"""Model name to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments passed to the model."""
pipeline_kwargs: Optional[dict] = None
"""Keyword arguments passed to the pipeline."""
batch_size: int = DEFAULT_BATCH_SIZE
"""Batch size to use when passing multiple documents to generate."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@classmethod
def from_model_id(
cls,
model_id: str,
task: str,
device: Optional[int] = -1,
model_kwargs: Optional[dict] = None,
pipeline_kwargs: Optional[dict] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
**kwargs: Any,
) -> HuggingFacePipeline:
"""Construct the pipeline object from model_id and task."""
try:
from transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers import pipeline as hf_pipeline
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if (
getattr(model, "is_loaded_in_4bit", False)
or getattr(model, "is_loaded_in_8bit", False)
) and device is not None:
logger.warning(
f"Setting the `device` argument to None from {device} to avoid "
"the error caused by attempting to move the model that was already "
"loaded on the GPU using the Accelerate module to the same or "
"another device."
)
device = None
if device is not None and importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 (default) for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
_pipeline_kwargs = pipeline_kwargs or {}
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
batch_size=batch_size,
model_kwargs=_model_kwargs,
**_pipeline_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return cls(
pipeline=pipeline,
model_id=model_id,
model_kwargs=_model_kwargs,
pipeline_kwargs=_pipeline_kwargs,
batch_size=batch_size,
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"model_kwargs": self.model_kwargs,
"pipeline_kwargs": self.pipeline_kwargs,
}
@property
def _llm_type(self) -> str:
return "huggingface_pipeline"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
# List to hold all results
text_generations: List[str] = []
for i in range(0, len(prompts), self.batch_size):
batch_prompts = prompts[i : i + self.batch_size]
# Process batch of prompts
responses = self.pipeline(batch_prompts)
# Process each response in the batch
for j, response in enumerate(responses):
if isinstance(response, list):
# if model returns multiple generations, pick the top one
response = response[0]
if self.pipeline.task == "text-generation":
try:
from transformers.pipelines.text_generation import ReturnType
remove_prompt = (
self.pipeline._postprocess_params.get("return_type")
!= ReturnType.NEW_TEXT
)
except Exception as e:
logger.warning(
f"Unable to extract pipeline return_type. "
f"Received error:\n\n{e}"
)
remove_prompt = True
if remove_prompt:
text = response["generated_text"][len(batch_prompts[j]) :]
else:
text = response["generated_text"]
elif self.pipeline.task == "text2text-generation":
text = response["generated_text"]
elif self.pipeline.task == "summarization":
text = response["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop:
# Enforce stop tokens
text = enforce_stop_tokens(text, stop)
# Append the processed text to results
text_generations.append(text)
return LLMResult(
generations=[[Generation(text=text)] for text in text_generations]
)
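# --- Illustrative usage sketch (not part of the original module) ---
# Shows the batching behaviour of _generate: prompts are fed to the underlying
# pipeline `batch_size` at a time. Assumes `transformers` is installed; the
# model id and keyword arguments below are examples only.
if __name__ == "__main__":
    hf = HuggingFacePipeline.from_model_id(
        model_id="gpt2",
        task="text-generation",
        pipeline_kwargs={"max_new_tokens": 10},
        batch_size=2,  # two prompts per pipeline call
    )
    result = hf.generate(["Say foo:", "Say bar:", "Say baz:"])
    for generation in result.generations:
        print(generation[0].text)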
| [
"True",
"return_type"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~adapters~test_openai.py | from typing import Any
import openai
import pytest
from langchain.adapters import openai as lcopenai
def _test_no_stream(**kwargs: Any) -> None:
result = openai.ChatCompletion.create(**kwargs)
lc_result = lcopenai.ChatCompletion.create(**kwargs)
if isinstance(lc_result, dict):
if isinstance(result, dict):
result_dict = result["choices"][0]["message"].to_dict_recursive()
lc_result_dict = lc_result["choices"][0]["message"]
assert result_dict == lc_result_dict
return
def _test_stream(**kwargs: Any) -> None:
result = []
for c in openai.ChatCompletion.create(**kwargs):
result.append(c["choices"][0]["delta"].to_dict_recursive())
lc_result = []
for c in lcopenai.ChatCompletion.create(**kwargs):
lc_result.append(c["choices"][0]["delta"])
assert result == lc_result
async def _test_async(**kwargs: Any) -> None:
result = await openai.ChatCompletion.acreate(**kwargs)
lc_result = await lcopenai.ChatCompletion.acreate(**kwargs)
if isinstance(lc_result, dict):
if isinstance(result, dict):
result_dict = result["choices"][0]["message"].to_dict_recursive()
lc_result_dict = lc_result["choices"][0]["message"]
assert result_dict == lc_result_dict
return
async def _test_astream(**kwargs: Any) -> None:
result = []
async for c in await openai.ChatCompletion.acreate(**kwargs):
result.append(c["choices"][0]["delta"].to_dict_recursive())
lc_result = []
async for c in await lcopenai.ChatCompletion.acreate(**kwargs):
lc_result.append(c["choices"][0]["delta"])
assert result == lc_result
FUNCTIONS = [
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
}
]
async def _test_module(**kwargs: Any) -> None:
_test_no_stream(**kwargs)
await _test_async(**kwargs)
_test_stream(stream=True, **kwargs)
await _test_astream(stream=True, **kwargs)
@pytest.mark.asyncio
async def test_normal_call() -> None:
await _test_module(
messages=[{"role": "user", "content": "hi"}],
model="gpt-3.5-turbo",
temperature=0,
)
@pytest.mark.asyncio
async def test_function_calling() -> None:
await _test_module(
messages=[{"role": "user", "content": "whats the weather in boston"}],
model="gpt-3.5-turbo",
functions=FUNCTIONS,
temperature=0,
)
@pytest.mark.asyncio
async def test_answer_with_function_calling() -> None:
await _test_module(
messages=[
{"role": "user", "content": "say hi, then whats the weather in boston"}
],
model="gpt-3.5-turbo",
functions=FUNCTIONS,
temperature=0,
)
| [
"say hi, then whats the weather in boston",
"hi",
"whats the weather in boston"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~vectorstores~usearch.py | from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
def dependable_usearch_import() -> Any:
"""
Import usearch if available, otherwise raise error.
"""
try:
import usearch.index
except ImportError:
raise ImportError(
"Could not import usearch python package. "
"Please install it with `pip install usearch` "
)
return usearch.index
class USearch(VectorStore):
"""`USearch` vector store.
To use, you should have the ``usearch`` python package installed.
"""
def __init__(
self,
embedding: Embeddings,
index: Any,
docstore: Docstore,
ids: List[str],
):
"""Initialize with necessary components."""
self.embedding = embedding
self.index = index
self.docstore = docstore
self.ids = ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict]] = None,
ids: Optional[np.ndarray] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if not isinstance(self.docstore, AddableMixin):
raise ValueError(
"If trying to add texts, the underlying docstore should support "
f"adding items, which {self.docstore} does not"
)
embeddings = self.embedding.embed_documents(list(texts))
documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
last_id = int(self.ids[-1]) + 1
if ids is None:
ids = np.array([str(last_id + id) for id, _ in enumerate(texts)])
self.index.add(np.array(ids), np.array(embeddings))
self.docstore.add(dict(zip(ids, documents)))
self.ids.extend(ids)
return ids.tolist()
def similarity_search_with_score(
self,
query: str,
k: int = 4,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of documents most similar to the query with distance.
"""
query_embedding = self.embedding.embed_query(query)
matches = self.index.search(np.array(query_embedding), k)
docs_with_scores: List[Tuple[Document, float]] = []
for id, score in zip(matches.keys, matches.distances):
doc = self.docstore.search(str(id))
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {id}, got {doc}")
docs_with_scores.append((doc, score))
return docs_with_scores
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
query_embedding = self.embedding.embed_query(query)
matches = self.index.search(np.array(query_embedding), k)
docs: List[Document] = []
for id in matches.keys:
doc = self.docstore.search(str(id))
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {id}, got {doc}")
docs.append(doc)
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict]] = None,
ids: Optional[np.ndarray] = None,
metric: str = "cos",
**kwargs: Any,
) -> USearch:
"""Construct USearch wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the USearch database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import USearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
usearch = USearch.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
documents: List[Document] = []
if ids is None:
ids = np.array([str(id) for id, _ in enumerate(texts)])
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
docstore = InMemoryDocstore(dict(zip(ids, documents)))
usearch = dependable_usearch_import()
index = usearch.Index(ndim=len(embeddings[0]), metric=metric)
index.add(np.array(ids), np.array(embeddings))
return cls(embedding, index, docstore, ids.tolist())
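# --- Illustrative usage sketch (not part of the original module) ---
# A dependency-light round trip using the fake embeddings helper shipped with
# langchain (assumed importable as langchain.embeddings.FakeEmbeddings) and the
# `usearch` package.
if __name__ == "__main__":
    from langchain.embeddings import FakeEmbeddings

    store = USearch.from_texts(
        ["foo", "bar", "baz"],
        FakeEmbeddings(size=16),
        metadatas=[{"page": i} for i in range(3)],
    )
    for doc, score in store.similarity_search_with_score("foo", k=2):
        print(doc.page_content, score)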
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~multion~toolkit.py | """MultiOn agent."""
from __future__ import annotations
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.multion.create_session import MultionCreateSession
from langchain.tools.multion.update_session import MultionUpdateSession
class MultionToolkit(BaseToolkit):
"""Toolkit for interacting with the Browser Agent.
**Security Note**: This toolkit contains tools that interact with the
user's browser via the multion API, which grants an agent
access to the user's browser.
Please review the documentation for the multion API to understand
the security implications of using this toolkit.
See https://python.langchain.com/docs/security for more information.
"""
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [MultionCreateSession(), MultionUpdateSession()]
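# --- Illustrative usage sketch (not part of the original module) ---
# Lists the tools the toolkit exposes. Actually running the tools requires the
# `multion` package to be installed and an authenticated MultiOn session.
if __name__ == "__main__":
    toolkit = MultionToolkit()
    for tool in toolkit.get_tools():
        print(tool.name, "-", tool.description)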
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~word_document.py | """Loads word documents."""
import os
import tempfile
from abc import ABC
from typing import List
from urllib.parse import urlparse
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class Docx2txtLoader(BaseLoader, ABC):
"""Load `DOCX` file using `docx2txt` and chunks at character level.
Defaults to checking for a local file, but if the file is a web path, it will download it
to a temporary file, use that, and then clean up the temporary file after completion.
"""
def __init__(self, file_path: str):
"""Initialize with file path."""
self.file_path = file_path
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
def load(self) -> List[Document]:
"""Load given path as single page."""
import docx2txt
return [
Document(
page_content=docx2txt.process(self.file_path),
metadata={"source": self.file_path},
)
]
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
"""Load `Microsoft Word` file using `Unstructured`.
Works with both .docx and .doc files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(
"example.docx", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-docx
"""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
[int(x) for x in __unstructured_version__.split(".")]
)
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_doc = detect_filetype(self.file_path) == FileType.DOC
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_doc = extension == ".doc"
if is_doc and unstructured_version < (0, 4, 11):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning .doc files is only supported in unstructured>=0.4.11. "
"Please upgrade the unstructured package and try again."
)
if is_doc:
from unstructured.partition.doc import partition_doc
return partition_doc(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.docx import partition_docx
return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
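# --- Illustrative usage sketch (not part of the original module) ---
# Loading a local .docx file with Docx2txtLoader; "example.docx" is a
# placeholder path and the `docx2txt` package must be installed.
if __name__ == "__main__":
    loader = Docx2txtLoader("example.docx")
    for doc in loader.load():
        print(doc.metadata["source"], len(doc.page_content))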
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~smith~evaluation~config.py | """Configuration for run evaluators."""
from typing import Any, Dict, List, Optional, Union
from langsmith import RunEvaluator
from langchain.evaluation.criteria.eval_chain import CRITERIA_TYPE
from langchain.evaluation.embedding_distance.base import (
EmbeddingDistance as EmbeddingDistanceEnum,
)
from langchain.evaluation.schema import EvaluatorType, StringEvaluator
from langchain.evaluation.string_distance.base import (
StringDistance as StringDistanceEnum,
)
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema.embeddings import Embeddings
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.prompt_template import BasePromptTemplate
class EvalConfig(BaseModel):
"""Configuration for a given run evaluator.
Parameters
----------
evaluator_type : EvaluatorType
The type of evaluator to use.
Methods
-------
get_kwargs()
Get the keyword arguments for the evaluator configuration.
"""
evaluator_type: EvaluatorType
def get_kwargs(self) -> Dict[str, Any]:
"""Get the keyword arguments for the load_evaluator call.
Returns
-------
Dict[str, Any]
The keyword arguments for the load_evaluator call.
"""
kwargs = {}
for field, val in self:
if field == "evaluator_type":
continue
elif val is None:
continue
kwargs[field] = val
return kwargs
class RunEvalConfig(BaseModel):
"""Configuration for a run evaluation.
Parameters
----------
evaluators : List[Union[EvaluatorType, EvalConfig]]
Configurations for which evaluators to apply to the dataset run.
Each can be the string of an :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>`, such
as EvaluatorType.QA, the evaluator type string ("qa"), or a configuration for a
given evaluator (e.g., :class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`).
custom_evaluators : Optional[List[Union[RunEvaluator, StringEvaluator]]]
Custom evaluators to apply to the dataset run.
reference_key : Optional[str]
The key in the dataset run to use as the reference string.
If not provided, it will be inferred automatically.
prediction_key : Optional[str]
The key from the traced run's outputs dictionary to use to
represent the prediction. If not provided, it will be inferred
automatically.
input_key : Optional[str]
The key from the traced run's inputs dictionary to use to represent the
input. If not provided, it will be inferred automatically.
eval_llm : Optional[BaseLanguageModel]
The language model to pass to any evaluators that use a language model.
""" # noqa: E501
evaluators: List[Union[EvaluatorType, str, EvalConfig]] = Field(
default_factory=list
)
"""Configurations for which evaluators to apply to the dataset run.
Each can be the string of an
:class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>`, such
as `EvaluatorType.QA`, the evaluator type string ("qa"), or a configuration for a
given evaluator
(e.g.,
:class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`).""" # noqa: E501
custom_evaluators: Optional[List[Union[RunEvaluator, StringEvaluator]]] = None
"""Custom evaluators to apply to the dataset run."""
reference_key: Optional[str] = None
"""The key in the dataset run to use as the reference string.
If not provided, we will attempt to infer automatically."""
prediction_key: Optional[str] = None
"""The key from the traced run's outputs dictionary to use to
represent the prediction. If not provided, it will be inferred
automatically."""
input_key: Optional[str] = None
"""The key from the traced run's inputs dictionary to use to represent the
input. If not provided, it will be inferred automatically."""
eval_llm: Optional[BaseLanguageModel] = None
"""The language model to pass to any evaluators that require one."""
class Config:
arbitrary_types_allowed = True
class Criteria(EvalConfig):
"""Configuration for a reference-free criteria evaluator.
Parameters
----------
criteria : Optional[CRITERIA_TYPE]
The criteria to evaluate.
llm : Optional[BaseLanguageModel]
The language model to use for the evaluation chain.
"""
criteria: Optional[CRITERIA_TYPE] = None
llm: Optional[BaseLanguageModel] = None
evaluator_type: EvaluatorType = EvaluatorType.CRITERIA
def __init__(
self, criteria: Optional[CRITERIA_TYPE] = None, **kwargs: Any
) -> None:
super().__init__(criteria=criteria, **kwargs)
class LabeledCriteria(EvalConfig):
"""Configuration for a labeled (with references) criteria evaluator.
Parameters
----------
criteria : Optional[CRITERIA_TYPE]
The criteria to evaluate.
llm : Optional[BaseLanguageModel]
The language model to use for the evaluation chain.
"""
criteria: Optional[CRITERIA_TYPE] = None
llm: Optional[BaseLanguageModel] = None
evaluator_type: EvaluatorType = EvaluatorType.LABELED_CRITERIA
def __init__(
self, criteria: Optional[CRITERIA_TYPE] = None, **kwargs: Any
) -> None:
super().__init__(criteria=criteria, **kwargs)
class EmbeddingDistance(EvalConfig):
"""Configuration for an embedding distance evaluator.
Parameters
----------
embeddings : Optional[Embeddings]
The embeddings to use for computing the distance.
distance_metric : Optional[EmbeddingDistanceEnum]
The distance metric to use for computing the distance.
"""
evaluator_type: EvaluatorType = EvaluatorType.EMBEDDING_DISTANCE
embeddings: Optional[Embeddings] = None
distance_metric: Optional[EmbeddingDistanceEnum] = None
class Config:
arbitrary_types_allowed = True
class StringDistance(EvalConfig):
"""Configuration for a string distance evaluator.
Parameters
----------
distance : Optional[StringDistanceEnum]
The string distance metric to use.
"""
evaluator_type: EvaluatorType = EvaluatorType.STRING_DISTANCE
distance: Optional[StringDistanceEnum] = None
"""The string distance metric to use.
damerau_levenshtein: The Damerau-Levenshtein distance.
levenshtein: The Levenshtein distance.
jaro: The Jaro distance.
jaro_winkler: The Jaro-Winkler distance.
"""
normalize_score: bool = True
"""Whether to normalize the distance to between 0 and 1.
Applies only to the Levenshtein and Damerau-Levenshtein distances."""
class QA(EvalConfig):
"""Configuration for a QA evaluator.
Parameters
----------
prompt : Optional[BasePromptTemplate]
The prompt template to use for generating the question.
llm : Optional[BaseLanguageModel]
The language model to use for the evaluation chain.
"""
evaluator_type: EvaluatorType = EvaluatorType.QA
llm: Optional[BaseLanguageModel] = None
prompt: Optional[BasePromptTemplate] = None
class ContextQA(EvalConfig):
"""Configuration for a context-based QA evaluator.
Parameters
----------
prompt : Optional[BasePromptTemplate]
The prompt template to use for generating the question.
llm : Optional[BaseLanguageModel]
The language model to use for the evaluation chain.
"""
evaluator_type: EvaluatorType = EvaluatorType.CONTEXT_QA
llm: Optional[BaseLanguageModel] = None
prompt: Optional[BasePromptTemplate] = None
class CoTQA(EvalConfig):
"""Configuration for a context-based QA evaluator.
Parameters
----------
prompt : Optional[BasePromptTemplate]
The prompt template to use for generating the question.
llm : Optional[BaseLanguageModel]
The language model to use for the evaluation chain.
"""
evaluator_type: EvaluatorType = EvaluatorType.COT_QA
llm: Optional[BaseLanguageModel] = None
prompt: Optional[BasePromptTemplate] = None
class JsonValidity(EvalConfig):
"""Configuration for a json validity evaluator.
Parameters
----------
"""
evaluator_type: EvaluatorType = EvaluatorType.JSON_VALIDITY
class JsonEqualityEvaluator(EvalConfig):
"""Configuration for a json equality evaluator.
Parameters
----------
"""
evaluator_type: EvaluatorType = EvaluatorType.JSON_EQUALITY
class ExactMatch(EvalConfig):
"""Configuration for an exact match string evaluator.
Parameters
----------
ignore_case : bool
Whether to ignore case when comparing strings.
ignore_punctuation : bool
Whether to ignore punctuation when comparing strings.
ignore_numbers : bool
Whether to ignore numbers when comparing strings.
"""
evaluator_type: EvaluatorType = EvaluatorType.EXACT_MATCH
ignore_case: bool = False
ignore_punctuation: bool = False
ignore_numbers: bool = False
class RegexMatch(EvalConfig):
"""Configuration for a regex match string evaluator.
Parameters
----------
flags : int
The flags to pass to the regex. Example: re.IGNORECASE.
"""
evaluator_type: EvaluatorType = EvaluatorType.REGEX_MATCH
flags: int = 0
class ScoreString(EvalConfig):
"""Configuration for a score string evaluator.
This is like the criteria evaluator but it is configured by
default to return a score on the scale from 1-10.
It is recommended to normalize these scores
by setting `normalize_by` to 10.
Parameters
----------
criteria : Optional[CRITERIA_TYPE]
The criteria to evaluate.
llm : Optional[BaseLanguageModel]
The language model to use for the evaluation chain.
normalize_by: Optional[int] = None
If you want to normalize the score, the denominator to use.
If not provided, the score will be between 1 and 10 (by default).
prompt : Optional[BasePromptTemplate]
"""
evaluator_type: EvaluatorType = EvaluatorType.SCORE_STRING
criteria: Optional[CRITERIA_TYPE] = None
llm: Optional[BaseLanguageModel] = None
normalize_by: Optional[float] = None
prompt: Optional[BasePromptTemplate] = None
def __init__(
self,
criteria: Optional[CRITERIA_TYPE] = None,
normalize_by: Optional[float] = None,
**kwargs: Any
) -> None:
super().__init__(criteria=criteria, normalize_by=normalize_by, **kwargs)
class LabeledScoreString(ScoreString):
evaluator_type: EvaluatorType = EvaluatorType.LABELED_SCORE_STRING
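# --- Illustrative usage sketch (not part of the original module) ---
# Mixing the accepted evaluator forms: an EvaluatorType member, a plain type
# string, and a configured EvalConfig. The "conciseness" criterion and the
# "answer" reference key are example values only.
if __name__ == "__main__":
    config = RunEvalConfig(
        evaluators=[
            EvaluatorType.QA,
            "embedding_distance",
            RunEvalConfig.Criteria("conciseness"),
        ],
        reference_key="answer",
    )
    print(config.evaluators)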
| [
"None"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~retrievers~test_pubmed.py | """Integration test for PubMed API Wrapper."""
from typing import List
import pytest
from langchain.retrievers import PubMedRetriever
from langchain.schema import Document
@pytest.fixture
def retriever() -> PubMedRetriever:
return PubMedRetriever()
def assert_docs(docs: List[Document]) -> None:
for doc in docs:
assert doc.metadata
assert set(doc.metadata) == {
"Copyright Information",
"uid",
"Title",
"Published",
}
def test_load_success(retriever: PubMedRetriever) -> None:
docs = retriever.get_relevant_documents(query="chatgpt")
assert len(docs) == 3
assert_docs(docs)
def test_load_success_top_k_results(retriever: PubMedRetriever) -> None:
retriever.top_k_results = 2
docs = retriever.get_relevant_documents(query="chatgpt")
assert len(docs) == 2
assert_docs(docs)
def test_load_no_result(retriever: PubMedRetriever) -> None:
docs = retriever.get_relevant_documents("1605.08386WWW")
assert not docs
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~document_loaders~parsers~test_html_parsers.py | """Tests for the HTML parsers."""
from pathlib import Path
import pytest
from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.html import BS4HTMLParser
HERE = Path(__file__).parent
EXAMPLES = HERE.parent.parent.parent / "integration_tests" / "examples"
@pytest.mark.requires("bs4", "lxml")
def test_bs_html_loader() -> None:
"""Test unstructured loader."""
file_path = EXAMPLES / "example.html"
blob = Blob.from_path(file_path)
parser = BS4HTMLParser(get_text_separator="|")
docs = list(parser.lazy_parse(blob))
assert isinstance(docs, list)
assert len(docs) == 1
metadata = docs[0].metadata
content = docs[0].page_content
assert metadata["title"] == "Chew dad's slippers"
assert metadata["source"] == str(file_path)
assert content[:2] == "\n|"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~utilities~google_scholar.py | """Util that calls Google Scholar Search."""
from typing import Dict, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class GoogleScholarAPIWrapper(BaseModel):
"""Wrapper for Google Scholar API
You can create serpapi key by signing up at: https://serpapi.com/users/sign_up.
The wrapper uses the serpapi python package:
https://serpapi.com/integrations/python#search-google-scholar
To use, you should have the environment variable ``SERP_API_KEY``
set with your API key, or pass `serp_api_key` as a named parameter
to the constructor.
Attributes:
top_k_results: number of results to return from google-scholar query search.
By default it returns top 10 results.
hl: attribute defines the language to use for the Google Scholar search.
It's a two-letter language code.
(e.g., en for English, es for Spanish, or fr for French). Head to the
Google languages page for a full list of supported Google languages:
https://serpapi.com/google-languages
lr: attribute defines one or multiple languages to limit the search to.
It uses lang_{two-letter language code} to specify languages
and | as a delimiter. (e.g., lang_fr|lang_de will only search French
and German pages). Head to the Google lr languages for a full
list of supported languages: https://serpapi.com/google-lr-languages
Example:
.. code-block:: python
from langchain.utilities import GoogleScholarAPIWrapper
google_scholar = GoogleScholarAPIWrapper()
google_scholar.run('langchain')
"""
top_k_results: int = 10
hl: str = "en"
lr: str = "lang_en"
serp_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
serp_api_key = get_from_dict_or_env(values, "serp_api_key", "SERP_API_KEY")
values["SERP_API_KEY"] = serp_api_key
try:
from serpapi import GoogleScholarSearch
except ImportError:
raise ImportError(
"google-search-results is not installed. "
"Please install it with `pip install google-search-results"
">=2.4.2`"
)
GoogleScholarSearch.SERP_API_KEY = serp_api_key
values["google_scholar_engine"] = GoogleScholarSearch
return values
def run(self, query: str) -> str:
"""Run query through GoogleSearchScholar and parse result"""
total_results = []
page = 0
while page < max((self.top_k_results - 20), 1):
# We are getting 20 results from every page
# which is the max in order to reduce the number of API CALLS.
# 0 is the first page of results, 20 is the 2nd page of results,
# 40 is the 3rd page of results, etc.
results = (
self.google_scholar_engine( # type: ignore
{
"q": query,
"start": page,
"hl": self.hl,
"num": min(
self.top_k_results, 20
), # if top_k_result is less than 20.
"lr": self.lr,
}
)
.get_dict()
.get("organic_results", [])
)
total_results.extend(results)
if not results: # No need to search for more pages if current page
# has returned no results
break
page += 20
if (
self.top_k_results % 20 != 0 and page > 20 and total_results
): # From the last page we would only need top_k_results%20 results
# if k is not divisible by 20.
results = (
self.google_scholar_engine( # type: ignore
{
"q": query,
"start": page,
"num": self.top_k_results % 20,
"hl": self.hl,
"lr": self.lr,
}
)
.get_dict()
.get("organic_results", [])
)
total_results.extend(results)
if not total_results:
return "No good Google Scholar Result was found"
docs = [
f"Title: {result.get('title','')}\n"
f"Authors: {','.join([author.get('name') for author in result.get('publication_info',{}).get('authors',[])])}\n" # noqa: E501
f"Summary: {result.get('publication_info',{}).get('summary','')}\n"
f"Total-Citations: {result.get('inline_links',{}).get('cited_by',{}).get('total','')}" # noqa: E501
for result in total_results
]
return "\n\n".join(docs)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~test_long_context_reorder.py | """Integration test for doc reordering."""
from langchain.document_transformers.long_context_reorder import LongContextReorder
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
def test_long_context_reorder() -> None:
"""Test Lost in the middle reordering get_relevant_docs."""
texts = [
"Basquetball is a great sport.",
"Fly me to the moon is one of my favourite songs.",
"The Celtics are my favourite team.",
"This is a document about the Boston Celtics",
"I simply love going to the movies",
"The Boston Celtics won the game by 20 points",
"This is just a random text.",
"Elden Ring is one of the best games in the last 15 years.",
"L. Kornet is one of the best Celtics players.",
"Larry Bird was an iconic NBA player.",
]
embeddings = OpenAIEmbeddings()
retriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever(
search_kwargs={"k": 10}
)
reordering = LongContextReorder()
docs = retriever.get_relevant_documents("Tell me about the Celtics")
actual = reordering.transform_documents(docs)
# First 2 and Last 2 elements must contain the most relevant
first_and_last = list(actual[:2]) + list(actual[-2:])
assert len(actual) == 10
assert texts[2] in [d.page_content for d in first_and_last]
assert texts[3] in [d.page_content for d in first_and_last]
assert texts[5] in [d.page_content for d in first_and_last]
assert texts[8] in [d.page_content for d in first_and_last]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~agents~test_sql.py | from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.utilities.sql_database import SQLDatabase
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_create_sql_agent() -> None:
db = SQLDatabase.from_uri("sqlite:///:memory:")
queries = {"foo": "Final Answer: baz"}
llm = FakeLLM(queries=queries, sequential_responses=True)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent_executor = create_sql_agent(
llm=llm,
toolkit=toolkit,
)
assert agent_executor.run("hello") == "baz"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~utilities~test_powerbi_api.py | """Integration test for POWERBI API Wrapper."""
import pytest
from langchain.utilities.powerbi import PowerBIDataset
from langchain.utils import get_from_env
def azure_installed() -> bool:
try:
from azure.core.credentials import TokenCredential # noqa: F401
from azure.identity import DefaultAzureCredential # noqa: F401
return True
except Exception as e:
print(f"azure not installed, skipping test {e}")
return False
@pytest.mark.skipif(not azure_installed(), reason="requires azure package")
def test_daxquery() -> None:
from azure.identity import DefaultAzureCredential
DATASET_ID = get_from_env("", "POWERBI_DATASET_ID")
TABLE_NAME = get_from_env("", "POWERBI_TABLE_NAME")
NUM_ROWS = get_from_env("", "POWERBI_NUMROWS")
powerbi = PowerBIDataset(
dataset_id=DATASET_ID,
table_names=[TABLE_NAME],
credential=DefaultAzureCredential(),
)
output = powerbi.run(f'EVALUATE ROW("RowCount", COUNTROWS({TABLE_NAME}))')
numrows = str(output["results"][0]["tables"][0]["rows"][0]["[RowCount]"])
assert NUM_ROWS == numrows
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~tests~unit_tests~test_llm_symbolic_math.py | """Test LLM Math functionality."""
import pytest
from langchain_experimental.llm_symbolic_math.base import (
LLMSymbolicMathChain,
)
from langchain_experimental.llm_symbolic_math.prompt import (
_PROMPT_TEMPLATE,
)
from tests.unit_tests.fake_llm import FakeLLM
try:
import sympy
except ImportError:
pytest.skip("sympy not installed", allow_module_level=True)
@pytest.fixture
def fake_llm_symbolic_math_chain() -> LLMSymbolicMathChain:
"""Fake LLM Math chain for testing."""
queries = {
_PROMPT_TEMPLATE.format(question="What is 1 plus 1?"): "Answer: 2",
_PROMPT_TEMPLATE.format(
question="What is the square root of 2?"
): "```text\nsqrt(2)\n```",
_PROMPT_TEMPLATE.format(
question="What is the limit of sin(x) / x as x goes to 0?"
): "```text\nlimit(sin(x)/x,x,0)\n```",
_PROMPT_TEMPLATE.format(
question="What is the integral of e^-x from 0 to infinity?"
): "```text\nintegrate(exp(-x), (x, 0, oo))\n```",
_PROMPT_TEMPLATE.format(
question="What are the solutions to this equation x**2 - x?"
): "```text\nsolveset(x**2 - x, x)\n```",
_PROMPT_TEMPLATE.format(question="foo"): "foo",
}
fake_llm = FakeLLM(queries=queries)
return LLMSymbolicMathChain.from_llm(fake_llm, input_key="q", output_key="a")
def test_simple_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None:
"""Test simple question that should not need python."""
question = "What is 1 plus 1?"
output = fake_llm_symbolic_math_chain.run(question)
assert output == "Answer: 2"
def test_root_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None:
"""Test irrational number that should need sympy."""
question = "What is the square root of 2?"
output = fake_llm_symbolic_math_chain.run(question)
assert output == f"Answer: {sympy.sqrt(2)}"
def test_limit_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None:
"""Test question about limits that needs sympy"""
question = "What is the limit of sin(x) / x as x goes to 0?"
output = fake_llm_symbolic_math_chain.run(question)
assert output == "Answer: 1"
def test_integration_question(
fake_llm_symbolic_math_chain: LLMSymbolicMathChain,
) -> None:
"""Test question about integration that needs sympy"""
question = "What is the integral of e^-x from 0 to infinity?"
output = fake_llm_symbolic_math_chain.run(question)
assert output == "Answer: 1"
def test_solver_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None:
"""Test question about solving algebraic equations that needs sympy"""
question = "What are the solutions to this equation x**2 - x?"
output = fake_llm_symbolic_math_chain.run(question)
assert output == "Answer: {0, 1}"
def test_error(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None:
"""Test question that raises error."""
with pytest.raises(ValueError):
fake_llm_symbolic_math_chain.run("foo")
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~milvus.py | """Milvus Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.schema.embeddings import Embeddings
from langchain.vectorstores.milvus import Milvus
# TODO: Update to MilvusClient + Hybrid Search when available
class MilvusRetriever(BaseRetriever):
"""`Milvus API` retriever."""
embedding_function: Embeddings
collection_name: str = "LangChainCollection"
connection_args: Optional[Dict[str, Any]] = None
consistency_level: str = "Session"
search_params: Optional[dict] = None
store: Milvus
retriever: BaseRetriever
@root_validator(pre=True)
def create_retriever(cls, values: Dict) -> Dict:
"""Create the Milvus store and retriever."""
values["store"] = Milvus(
values["embedding_function"],
values["collection_name"],
values["connection_args"],
values["consistency_level"],
)
values["retriever"] = values["store"].as_retriever(
search_kwargs={"param": values["search_params"]}
)
return values
def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Milvus store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
def MilvusRetreiver(*args: Any, **kwargs: Any) -> MilvusRetriever:
"""Deprecated MilvusRetreiver. Please use MilvusRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
MilvusRetriever
"""
warnings.warn(
"MilvusRetreiver will be deprecated in the future. "
"Please use MilvusRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return MilvusRetriever(*args, **kwargs)
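# --- Illustrative usage sketch (not part of the original module) ---
# Wiring the retriever to a running Milvus instance. The connection arguments
# and the use of OpenAIEmbeddings are placeholders; any Embeddings
# implementation works. Because the pre=True root validator reads every field
# from the raw input, all fields are passed explicitly here.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings

    retriever = MilvusRetriever(
        embedding_function=OpenAIEmbeddings(),
        collection_name="LangChainCollection",
        connection_args={"host": "localhost", "port": "19530"},
        consistency_level="Session",
        search_params=None,
    )
    retriever.add_texts(["Milvus is a vector database."])
    print(retriever.get_relevant_documents("What is Milvus?"))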
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_vertexai.py | """Test Vertex AI API wrapper.
In order to run this test, you need to install the VertexAI SDK (which is in private
preview) and be whitelisted to list the models themselves:
pip install google-cloud-aiplatform>=1.25.0
Your end-user credentials would be used to make the calls (make sure you've run
`gcloud auth login` first).
"""
import os
import pytest
from langchain.llms import VertexAI, VertexAIModelGarden
from langchain.schema import LLMResult
def test_vertex_initialization() -> None:
llm = VertexAI()
assert llm._llm_type == "vertexai"
assert llm.model_name == llm.client._model_id
def test_vertex_call() -> None:
llm = VertexAI(temperature=0)
output = llm("Say foo:")
assert isinstance(output, str)
@pytest.mark.scheduled
def test_vertex_generate() -> None:
llm = VertexAI(temperature=0.3, n=2, model_name="text-bison@001")
output = llm.generate(["Say foo:"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == 2
@pytest.mark.scheduled
def test_vertex_generate_code() -> None:
llm = VertexAI(temperature=0.3, n=2, model_name="code-bison@001")
output = llm.generate(["generate a python method that says foo:"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == 2
@pytest.mark.scheduled
@pytest.mark.asyncio
async def test_vertex_agenerate() -> None:
llm = VertexAI(temperature=0)
output = await llm.agenerate(["Please say foo:"])
assert isinstance(output, LLMResult)
@pytest.mark.scheduled
def test_vertex_stream() -> None:
llm = VertexAI(temperature=0)
outputs = list(llm.stream("Please say foo:"))
assert isinstance(outputs[0], str)
@pytest.mark.asyncio
async def test_vertex_consistency() -> None:
llm = VertexAI(temperature=0)
output = llm.generate(["Please say foo:"])
streaming_output = llm.generate(["Please say foo:"], stream=True)
async_output = await llm.agenerate(["Please say foo:"])
assert output.generations[0][0].text == streaming_output.generations[0][0].text
assert output.generations[0][0].text == async_output.generations[0][0].text
def test_model_garden() -> None:
"""In order to run this test, you should provide an endpoint name.
Example:
export ENDPOINT_ID=...
export PROJECT=...
"""
endpoint_id = os.environ["ENDPOINT_ID"]
project = os.environ["PROJECT"]
llm = VertexAIModelGarden(endpoint_id=endpoint_id, project=project)
output = llm("What is the meaning of life?")
assert isinstance(output, str)
assert llm._llm_type == "vertexai_model_garden"
def test_model_garden_generate() -> None:
"""In order to run this test, you should provide an endpoint name.
Example:
export ENDPOINT_ID=...
export PROJECT=...
"""
endpoint_id = os.environ["ENDPOINT_ID"]
project = os.environ["PROJECT"]
llm = VertexAIModelGarden(endpoint_id=endpoint_id, project=project)
output = llm.generate(["What is the meaning of life?", "How much is 2+2"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 2
@pytest.mark.asyncio
async def test_model_garden_agenerate() -> None:
endpoint_id = os.environ["ENDPOINT_ID"]
project = os.environ["PROJECT"]
llm = VertexAIModelGarden(endpoint_id=endpoint_id, project=project)
output = await llm.agenerate(["What is the meaning of life?", "How much is 2+2"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 2
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_timescalevector.py | """Test TimescaleVector functionality."""
import os
from datetime import datetime, timedelta
from typing import List
import pytest
from langchain.docstore.document import Document
from langchain.vectorstores.timescalevector import TimescaleVector
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
SERVICE_URL = TimescaleVector.service_url_from_db_params(
host=os.environ.get("TEST_TIMESCALE_HOST", "localhost"),
port=int(os.environ.get("TEST_TIMESCALE_PORT", "5432")),
database=os.environ.get("TEST_TIMESCALE_DATABASE", "postgres"),
user=os.environ.get("TEST_TIMESCALE_USER", "postgres"),
password=os.environ.get("TEST_TIMESCALE_PASSWORD", "postgres"),
)
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return simple embeddings."""
return [
[float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)] for i in range(len(texts))
]
def embed_query(self, text: str) -> List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
def test_timescalevector() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_timescalevector_from_documents() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = TimescaleVector.from_documents(
documents=docs,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"a": "b"})]
@pytest.mark.asyncio
async def test_timescalevector_afrom_documents() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = await TimescaleVector.afrom_documents(
documents=docs,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = await docsearch.asimilarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"a": "b"})]
def test_timescalevector_embeddings() -> None:
"""Test end to end construction with embeddings and search."""
texts = ["foo", "bar", "baz"]
text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = TimescaleVector.from_embeddings(
text_embeddings=text_embedding_pairs,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.asyncio
async def test_timescalevector_aembeddings() -> None:
"""Test end to end construction with embeddings and search."""
texts = ["foo", "bar", "baz"]
text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = await TimescaleVector.afrom_embeddings(
text_embeddings=text_embedding_pairs,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = await docsearch.asimilarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_timescalevector_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_timescalevector_with_metadatas_with_scores() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
@pytest.mark.asyncio
async def test_timescalevector_awith_metadatas_with_scores() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = await TimescaleVector.afrom_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = await docsearch.asimilarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_timescalevector_with_filter_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"})
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_timescalevector_with_filter_distant_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
assert output == [
(Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406)
]
def test_timescalevector_with_filter_no_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"})
assert output == []
def test_timescalevector_with_filter_in_set() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score(
"foo", k=2, filter=[{"page": "0"}, {"page": "2"}]
)
assert output == [
(Document(page_content="foo", metadata={"page": "0"}), 0.0),
(Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406),
]
def test_timescalevector_relevance_score() -> None:
"""Test to make sure the relevance score is scaled to 0-1."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
assert output == [
(Document(page_content="foo", metadata={"page": "0"}), 1.0),
(Document(page_content="bar", metadata={"page": "1"}), 0.9996744261675065),
(Document(page_content="baz", metadata={"page": "2"}), 0.9986996093328621),
]
@pytest.mark.asyncio
async def test_timescalevector_relevance_score_async() -> None:
"""Test to make sure the relevance score is scaled to 0-1."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = await TimescaleVector.afrom_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
output = await docsearch.asimilarity_search_with_relevance_scores("foo", k=3)
assert output == [
(Document(page_content="foo", metadata={"page": "0"}), 1.0),
(Document(page_content="bar", metadata={"page": "1"}), 0.9996744261675065),
(Document(page_content="baz", metadata={"page": "2"}), 0.9986996093328621),
]
def test_timescalevector_retriever_search_threshold() -> None:
"""Test using retriever for searching with threshold."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
)
retriever = docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"k": 3, "score_threshold": 0.999},
)
output = retriever.get_relevant_documents("summer")
assert output == [
Document(page_content="foo", metadata={"page": "0"}),
Document(page_content="bar", metadata={"page": "1"}),
]
def test_timescalevector_retriever_search_threshold_custom_normalization_fn() -> None:
"""Test searching with threshold and custom normalization function"""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
service_url=SERVICE_URL,
pre_delete_collection=True,
relevance_score_fn=lambda d: d * 0,
)
retriever = docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"k": 3, "score_threshold": 0.5},
)
output = retriever.get_relevant_documents("foo")
assert output == []
def test_timescalevector_delete() -> None:
"""Test deleting functionality."""
texts = ["bar", "baz"]
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = TimescaleVector.from_documents(
documents=docs,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
service_url=SERVICE_URL,
pre_delete_collection=True,
)
texts = ["foo"]
meta = [{"b": "c"}]
ids = docsearch.add_texts(texts, meta)
output = docsearch.similarity_search("bar", k=10)
assert len(output) == 3
docsearch.delete(ids)
output = docsearch.similarity_search("bar", k=10)
assert len(output) == 2
docsearch.delete_by_metadata({"a": "b"})
output = docsearch.similarity_search("bar", k=10)
assert len(output) == 0
def test_timescalevector_with_index() -> None:
"""Test deleting functionality."""
texts = ["bar", "baz"]
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = TimescaleVector.from_documents(
documents=docs,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
service_url=SERVICE_URL,
pre_delete_collection=True,
)
texts = ["foo"]
meta = [{"b": "c"}]
docsearch.add_texts(texts, meta)
docsearch.create_index()
output = docsearch.similarity_search("bar", k=10)
assert len(output) == 3
docsearch.drop_index()
docsearch.create_index(
index_type=TimescaleVector.IndexType.TIMESCALE_VECTOR,
max_alpha=1.0,
num_neighbors=50,
)
docsearch.drop_index()
docsearch.create_index("tsv", max_alpha=1.0, num_neighbors=50)
docsearch.drop_index()
docsearch.create_index("ivfflat", num_lists=20, num_records=1000)
docsearch.drop_index()
docsearch.create_index("hnsw", m=16, ef_construction=64)
def test_timescalevector_time_partitioning() -> None:
"""Test deleting functionality."""
from timescale_vector import client
texts = ["bar", "baz"]
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = TimescaleVector.from_documents(
documents=docs,
collection_name="test_collection_time_partitioning",
embedding=FakeEmbeddingsWithAdaDimension(),
service_url=SERVICE_URL,
pre_delete_collection=True,
time_partition_interval=timedelta(hours=1),
)
texts = ["foo"]
meta = [{"b": "c"}]
ids = [client.uuid_from_time(datetime.now() - timedelta(hours=3))]
docsearch.add_texts(texts, meta, ids)
output = docsearch.similarity_search("bar", k=10)
assert len(output) == 3
output = docsearch.similarity_search(
"bar", k=10, start_date=datetime.now() - timedelta(hours=1)
)
assert len(output) == 2
output = docsearch.similarity_search(
"bar", k=10, end_date=datetime.now() - timedelta(hours=1)
)
assert len(output) == 1
output = docsearch.similarity_search(
"bar", k=10, start_date=datetime.now() - timedelta(minutes=200)
)
assert len(output) == 3
output = docsearch.similarity_search(
"bar",
k=10,
start_date=datetime.now() - timedelta(minutes=200),
time_delta=timedelta(hours=1),
)
assert len(output) == 1
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~tests~unit_tests~rl_chain~test_rl_chain_base_embedder.py | from typing import List, Union
import pytest
from test_utils import MockEncoder
import langchain_experimental.rl_chain.base as base
encoded_keyword = "[encoded]"
@pytest.mark.requires("vowpal_wabbit_next")
def test_simple_context_str_no_emb() -> None:
expected = [{"a_namespace": "test"}]
assert base.embed("test", MockEncoder(), "a_namespace") == expected
@pytest.mark.requires("vowpal_wabbit_next")
def test_simple_context_str_w_emb() -> None:
str1 = "test"
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
expected = [{"a_namespace": encoded_str1}]
assert base.embed(base.Embed(str1), MockEncoder(), "a_namespace") == expected
expected_embed_and_keep = [{"a_namespace": str1 + " " + encoded_str1}]
assert (
base.embed(base.EmbedAndKeep(str1), MockEncoder(), "a_namespace")
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_simple_context_str_w_nested_emb() -> None:
# nested embeddings, innermost wins
str1 = "test"
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
expected = [{"a_namespace": encoded_str1}]
assert (
base.embed(base.EmbedAndKeep(base.Embed(str1)), MockEncoder(), "a_namespace")
== expected
)
expected2 = [{"a_namespace": str1 + " " + encoded_str1}]
assert (
base.embed(base.Embed(base.EmbedAndKeep(str1)), MockEncoder(), "a_namespace")
== expected2
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_context_w_namespace_no_emb() -> None:
expected = [{"test_namespace": "test"}]
assert base.embed({"test_namespace": "test"}, MockEncoder()) == expected
@pytest.mark.requires("vowpal_wabbit_next")
def test_context_w_namespace_w_emb() -> None:
str1 = "test"
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
expected = [{"test_namespace": encoded_str1}]
assert base.embed({"test_namespace": base.Embed(str1)}, MockEncoder()) == expected
expected_embed_and_keep = [{"test_namespace": str1 + " " + encoded_str1}]
assert (
base.embed({"test_namespace": base.EmbedAndKeep(str1)}, MockEncoder())
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_context_w_namespace_w_emb2() -> None:
str1 = "test"
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
expected = [{"test_namespace": encoded_str1}]
assert base.embed(base.Embed({"test_namespace": str1}), MockEncoder()) == expected
expected_embed_and_keep = [{"test_namespace": str1 + " " + encoded_str1}]
assert (
base.embed(base.EmbedAndKeep({"test_namespace": str1}), MockEncoder())
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_context_w_namespace_w_some_emb() -> None:
str1 = "test1"
str2 = "test2"
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
expected = [{"test_namespace": str1, "test_namespace2": encoded_str2}]
assert (
base.embed(
{"test_namespace": str1, "test_namespace2": base.Embed(str2)}, MockEncoder()
)
== expected
)
expected_embed_and_keep = [
{
"test_namespace": str1,
"test_namespace2": str2 + " " + encoded_str2,
}
]
assert (
base.embed(
{"test_namespace": str1, "test_namespace2": base.EmbedAndKeep(str2)},
MockEncoder(),
)
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_simple_action_strlist_no_emb() -> None:
str1 = "test1"
str2 = "test2"
str3 = "test3"
expected = [{"a_namespace": str1}, {"a_namespace": str2}, {"a_namespace": str3}]
to_embed: List[Union[str, base._Embed]] = [str1, str2, str3]
assert base.embed(to_embed, MockEncoder(), "a_namespace") == expected
@pytest.mark.requires("vowpal_wabbit_next")
def test_simple_action_strlist_w_emb() -> None:
str1 = "test1"
str2 = "test2"
str3 = "test3"
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3))
expected = [
{"a_namespace": encoded_str1},
{"a_namespace": encoded_str2},
{"a_namespace": encoded_str3},
]
assert (
base.embed(base.Embed([str1, str2, str3]), MockEncoder(), "a_namespace")
== expected
)
expected_embed_and_keep = [
{"a_namespace": str1 + " " + encoded_str1},
{"a_namespace": str2 + " " + encoded_str2},
{"a_namespace": str3 + " " + encoded_str3},
]
assert (
base.embed(base.EmbedAndKeep([str1, str2, str3]), MockEncoder(), "a_namespace")
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_simple_action_strlist_w_some_emb() -> None:
str1 = "test1"
str2 = "test2"
str3 = "test3"
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3))
expected = [
{"a_namespace": str1},
{"a_namespace": encoded_str2},
{"a_namespace": encoded_str3},
]
assert (
base.embed(
[str1, base.Embed(str2), base.Embed(str3)], MockEncoder(), "a_namespace"
)
== expected
)
expected_embed_and_keep = [
{"a_namespace": str1},
{"a_namespace": str2 + " " + encoded_str2},
{"a_namespace": str3 + " " + encoded_str3},
]
assert (
base.embed(
[str1, base.EmbedAndKeep(str2), base.EmbedAndKeep(str3)],
MockEncoder(),
"a_namespace",
)
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_action_w_namespace_no_emb() -> None:
str1 = "test1"
str2 = "test2"
str3 = "test3"
expected = [
{"test_namespace": str1},
{"test_namespace": str2},
{"test_namespace": str3},
]
assert (
base.embed(
[
{"test_namespace": str1},
{"test_namespace": str2},
{"test_namespace": str3},
],
MockEncoder(),
)
== expected
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_action_w_namespace_w_emb() -> None:
str1 = "test1"
str2 = "test2"
str3 = "test3"
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3))
expected = [
{"test_namespace": encoded_str1},
{"test_namespace": encoded_str2},
{"test_namespace": encoded_str3},
]
assert (
base.embed(
[
{"test_namespace": base.Embed(str1)},
{"test_namespace": base.Embed(str2)},
{"test_namespace": base.Embed(str3)},
],
MockEncoder(),
)
== expected
)
expected_embed_and_keep = [
{"test_namespace": str1 + " " + encoded_str1},
{"test_namespace": str2 + " " + encoded_str2},
{"test_namespace": str3 + " " + encoded_str3},
]
assert (
base.embed(
[
{"test_namespace": base.EmbedAndKeep(str1)},
{"test_namespace": base.EmbedAndKeep(str2)},
{"test_namespace": base.EmbedAndKeep(str3)},
],
MockEncoder(),
)
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_action_w_namespace_w_emb2() -> None:
str1 = "test1"
str2 = "test2"
str3 = "test3"
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3))
expected = [
{"test_namespace1": encoded_str1},
{"test_namespace2": encoded_str2},
{"test_namespace3": encoded_str3},
]
assert (
base.embed(
base.Embed(
[
{"test_namespace1": str1},
{"test_namespace2": str2},
{"test_namespace3": str3},
]
),
MockEncoder(),
)
== expected
)
expected_embed_and_keep = [
{"test_namespace1": str1 + " " + encoded_str1},
{"test_namespace2": str2 + " " + encoded_str2},
{"test_namespace3": str3 + " " + encoded_str3},
]
assert (
base.embed(
base.EmbedAndKeep(
[
{"test_namespace1": str1},
{"test_namespace2": str2},
{"test_namespace3": str3},
]
),
MockEncoder(),
)
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_action_w_namespace_w_some_emb() -> None:
str1 = "test1"
str2 = "test2"
str3 = "test3"
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3))
expected = [
{"test_namespace": str1},
{"test_namespace": encoded_str2},
{"test_namespace": encoded_str3},
]
assert (
base.embed(
[
{"test_namespace": str1},
{"test_namespace": base.Embed(str2)},
{"test_namespace": base.Embed(str3)},
],
MockEncoder(),
)
== expected
)
expected_embed_and_keep = [
{"test_namespace": str1},
{"test_namespace": str2 + " " + encoded_str2},
{"test_namespace": str3 + " " + encoded_str3},
]
assert (
base.embed(
[
{"test_namespace": str1},
{"test_namespace": base.EmbedAndKeep(str2)},
{"test_namespace": base.EmbedAndKeep(str3)},
],
MockEncoder(),
)
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict() -> None:
str1 = "test1"
str2 = "test2"
str3 = "test3"
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3))
expected = [
{"test_namespace": encoded_str1, "test_namespace2": str1},
{"test_namespace": encoded_str2, "test_namespace2": str2},
{"test_namespace": encoded_str3, "test_namespace2": str3},
]
assert (
base.embed(
[
{"test_namespace": base.Embed(str1), "test_namespace2": str1},
{"test_namespace": base.Embed(str2), "test_namespace2": str2},
{"test_namespace": base.Embed(str3), "test_namespace2": str3},
],
MockEncoder(),
)
== expected
)
expected_embed_and_keep = [
{
"test_namespace": str1 + " " + encoded_str1,
"test_namespace2": str1,
},
{
"test_namespace": str2 + " " + encoded_str2,
"test_namespace2": str2,
},
{
"test_namespace": str3 + " " + encoded_str3,
"test_namespace2": str3,
},
]
assert (
base.embed(
[
{"test_namespace": base.EmbedAndKeep(str1), "test_namespace2": str1},
{"test_namespace": base.EmbedAndKeep(str2), "test_namespace2": str2},
{"test_namespace": base.EmbedAndKeep(str3), "test_namespace2": str3},
],
MockEncoder(),
)
== expected_embed_and_keep
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_one_namespace_w_list_of_features_no_emb() -> None:
str1 = "test1"
str2 = "test2"
expected = [{"test_namespace": [str1, str2]}]
assert base.embed({"test_namespace": [str1, str2]}, MockEncoder()) == expected
@pytest.mark.requires("vowpal_wabbit_next")
def test_one_namespace_w_list_of_features_w_some_emb() -> None:
str1 = "test1"
str2 = "test2"
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
expected = [{"test_namespace": [str1, encoded_str2]}]
assert (
base.embed({"test_namespace": [str1, base.Embed(str2)]}, MockEncoder())
== expected
)
@pytest.mark.requires("vowpal_wabbit_next")
def test_nested_list_features_throws() -> None:
with pytest.raises(ValueError):
base.embed({"test_namespace": [[1, 2], [3, 4]]}, MockEncoder())
@pytest.mark.requires("vowpal_wabbit_next")
def test_dict_in_list_throws() -> None:
with pytest.raises(ValueError):
base.embed({"test_namespace": [{"a": 1}, {"b": 2}]}, MockEncoder())
@pytest.mark.requires("vowpal_wabbit_next")
def test_nested_dict_throws() -> None:
with pytest.raises(ValueError):
base.embed({"test_namespace": {"a": {"b": 1}}}, MockEncoder())
@pytest.mark.requires("vowpal_wabbit_next")
def test_list_of_tuples_throws() -> None:
with pytest.raises(ValueError):
base.embed({"test_namespace": [("a", 1), ("b", 2)]}, MockEncoder())
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~document_loaders~test_confluence.py | import unittest
from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.confluence import ConfluenceLoader, ContentFormat
@pytest.fixture
def mock_confluence(): # type: ignore
with patch("atlassian.Confluence") as mock_confluence:
yield mock_confluence
@pytest.mark.requires("atlassian", "bs4", "lxml")
class TestConfluenceLoader:
CONFLUENCE_URL = "https://example.atlassian.com/wiki"
MOCK_USERNAME = "[email protected]"
MOCK_API_TOKEN = "api_token"
MOCK_SPACE_KEY = "spaceId123"
def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
mock_confluence.assert_called_once_with(
url=self.CONFLUENCE_URL,
username="[email protected]",
password="api_token",
cloud=True,
)
def test_confluence_loader_initialization_invalid(self) -> None:
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
token="foo",
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
oauth2={
"access_token": "bar",
"access_token_secret": "bar",
"consumer_key": "bar",
"key_cert": "bar",
},
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
session=requests.Session(),
)
def test_confluence_loader_initialization_from_env(
self, mock_confluence: MagicMock
) -> None:
with unittest.mock.patch.dict(
"os.environ",
{
"CONFLUENCE_USERNAME": self.MOCK_USERNAME,
"CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN,
},
):
ConfluenceLoader(url=self.CONFLUENCE_URL)
mock_confluence.assert_called_with(
url=self.CONFLUENCE_URL, username=None, password=None, cloud=True
)
def test_confluence_loader_load_data_invalid_args(self) -> None:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
with pytest.raises(
ValueError,
match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501
):
confluence_loader.load()
def test_confluence_loader_load_data_by_page_ids(
self, mock_confluence: MagicMock
) -> None:
mock_confluence.get_page_by_id.side_effect = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ["123", "456"]
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_load_data_by_space_id(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123", ContentFormat.VIEW),
self._get_mock_page("456", ContentFormat.VIEW),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(
space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW,
keep_markdown_format=True,
max_pages=2,
)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123\n\n"
assert documents[1].page_content == "Content 456\n\n"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def _get_mock_confluence_loader(
self, mock_confluence: MagicMock
) -> ConfluenceLoader:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
confluence_loader.confluence = mock_confluence
return confluence_loader
def _get_mock_page(
self, page_id: str, content_format: ContentFormat = ContentFormat.STORAGE
) -> Dict:
return {
"id": f"{page_id}",
"title": f"Page {page_id}",
"body": {
f"{content_format.name.lower()}": {"value": f"<p>Content {page_id}</p>"}
},
"status": "current",
"type": "page",
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}",
"tinyui": "/x/tiny_ui_link",
"editui": f"/pages/resumedraft.action?draftId={page_id}",
"webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview",
},
}
def _get_mock_page_restrictions(self, page_id: str) -> Dict:
return {
"read": {
"operation": "read",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501
},
},
"update": {
"operation": "update",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501
},
},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501
"base": self.CONFLUENCE_URL,
"context": "/wiki",
},
}
| [
"/rest/api/content/PLACEHOLDER",
"{'value': '<p>Content PLACEHOLDER</p>'}"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_huggingface_pipeline.py | """Test HuggingFace Pipeline wrapper."""
from pathlib import Path
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_huggingface_pipeline_text_generation() -> None:
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2", task="text-generation", model_kwargs={"max_new_tokens": 10}
)
output = llm("Say foo:")
assert isinstance(output, str)
def test_huggingface_pipeline_text2text_generation() -> None:
"""Test valid call to HuggingFace text2text generation model."""
llm = HuggingFacePipeline.from_model_id(
model_id="google/flan-t5-small", task="text2text-generation"
)
output = llm("Say foo:")
assert isinstance(output, str)
def test_huggingface_pipeline_summarization() -> None:
"""Test valid call to HuggingFace summarization model."""
llm = HuggingFacePipeline.from_model_id(
model_id="facebook/bart-large-cnn", task="summarization"
)
output = llm("Say foo:")
assert isinstance(output, str)
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an HuggingFaceHub LLM."""
llm = HuggingFacePipeline.from_model_id(
model_id="gpt2", task="text-generation", model_kwargs={"max_new_tokens": 10}
)
llm.save(file_path=tmp_path / "hf.yaml")
loaded_llm = load_llm(tmp_path / "hf.yaml")
assert_llm_equality(llm, loaded_llm)
def test_init_with_pipeline() -> None:
"""Test initialization with a HF pipeline."""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
llm = HuggingFacePipeline(pipeline=pipe)
output = llm("Say foo:")
assert isinstance(output, str)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~utilities~wolfram_alpha.py | """Util that calls WolframAlpha."""
from typing import Any, Dict, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class WolframAlphaAPIWrapper(BaseModel):
"""Wrapper for Wolfram Alpha.
Docs for using:
    1. Go to Wolfram Alpha and sign up for a developer account
2. Create an app and get your APP ID
3. Save your APP ID into WOLFRAM_ALPHA_APPID env variable
4. pip install wolframalpha
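    Example (a minimal sketch; assumes the ``WOLFRAM_ALPHA_APPID`` environment
    variable is set to a valid app ID):
        .. code-block:: python
            from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
            wolfram = WolframAlphaAPIWrapper()
            wolfram.run("What is 2x+5 = -3x + 7?")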
"""
wolfram_client: Any #: :meta private:
wolfram_alpha_appid: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
wolfram_alpha_appid = get_from_dict_or_env(
values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID"
)
values["wolfram_alpha_appid"] = wolfram_alpha_appid
try:
import wolframalpha
except ImportError:
raise ImportError(
"wolframalpha is not installed. "
"Please install it with `pip install wolframalpha`"
)
client = wolframalpha.Client(wolfram_alpha_appid)
values["wolfram_client"] = client
return values
def run(self, query: str) -> str:
"""Run query through WolframAlpha and parse result."""
res = self.wolfram_client.query(query)
try:
assumption = next(res.pods).text
answer = next(res.results).text
except StopIteration:
return "Wolfram Alpha wasn't able to answer it"
if answer is None or answer == "":
# We don't want to return the assumption alone if answer is empty
return "No good Wolfram Alpha Result was found"
else:
return f"Assumption: {assumption} \nAnswer: {answer}"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~evaluation~scoring~eval_chain.py | """Base classes for scoring the output of a model on a scale of 1-10."""
from __future__ import annotations
import logging
import re
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.manager import Callbacks
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.llm import LLMChain
from langchain.chat_models.azure_openai import AzureChatOpenAI
from langchain.chat_models.openai import ChatOpenAI
from langchain.evaluation.criteria.eval_chain import (
CRITERIA_TYPE,
Criteria,
)
from langchain.evaluation.schema import LLMEvalChain, StringEvaluator
from langchain.evaluation.scoring.prompt import (
CRITERIA_INSTRUCTIONS,
DEFAULT_CRITERIA,
SCORING_TEMPLATE,
SCORING_TEMPLATE_WITH_REFERENCE,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.pydantic_v1 import Extra, Field
from langchain.schema import RUN_KEY, BaseOutputParser
from langchain.schema.language_model import BaseLanguageModel
logger = logging.getLogger(__name__)
_FIND_DOUBLE_BRACKETS = re.compile(r"\[\[(.*?)\]\]")
_SUPPORTED_CRITERIA = {
Criteria.CONCISENESS: "Is the submission concise and to the point?",
Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?",
Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?",
Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?",
Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?",
Criteria.MALICIOUSNESS: "Is the submission malicious in any way?",
Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?",
Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?",
Criteria.MISOGYNY: "Is the submission misogynistic or sexist?",
Criteria.CRIMINALITY: "Is the submission criminal in any way?",
Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?",
Criteria.DEPTH: "Does the submission demonstrate depth of thought?",
Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?",
Criteria.DETAIL: "Does the submission demonstrate attention to detail?",
}
def resolve_criteria(
criteria: Optional[Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]]]
) -> dict:
"""Resolve the criteria for the pairwise evaluator.
Args:
criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use.
Returns:
dict: The resolved criteria.
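    Example (a minimal illustration grounded in the mapping above; the enum member's
    value becomes the key and its description the value):
        .. code-block:: python
            from langchain.evaluation.criteria.eval_chain import Criteria
            resolve_criteria(Criteria.CONCISENESS)
            # -> {"conciseness": "Is the submission concise and to the point?"}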
"""
if criteria is None:
_default_criteria = [
Criteria.HELPFULNESS,
Criteria.RELEVANCE,
Criteria.CORRECTNESS,
Criteria.DEPTH,
]
return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria}
elif isinstance(criteria, Criteria):
criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
elif isinstance(criteria, str):
if criteria in _SUPPORTED_CRITERIA:
criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
else:
criteria_ = {criteria: ""}
elif isinstance(criteria, ConstitutionalPrinciple):
criteria_ = {criteria.name: criteria.critique_request}
elif isinstance(criteria, (list, tuple)):
criteria_ = {
k: v
for criterion in criteria
for k, v in resolve_criteria(criterion).items()
}
else:
if not criteria:
raise ValueError(
"Criteria cannot be empty. "
"Please provide a criterion name or a mapping of the criterion name"
" to its description."
)
criteria_ = dict(criteria)
return criteria_
class ScoreStringResultOutputParser(BaseOutputParser[dict]):
"""A parser for the output of the ScoreStringEvalChain.
Attributes:
_type (str): The type of the output parser.
"""
@property
def _type(self) -> str:
"""Return the type of the output parser.
Returns:
str: The type of the output parser.
"""
return "pairwise_string_result"
def parse(self, text: str) -> Dict[str, Any]:
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
Raises:
ValueError: If the verdict is invalid.
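        Example (illustrative only; the full text is echoed back as the reasoning
        and the double-bracketed number becomes the score):
            .. code-block:: python
                parser = ScoreStringResultOutputParser()
                parser.parse("The response is accurate. Rating: [[9]]")
                # -> {"reasoning": "The response is accurate. Rating: [[9]]", "score": 9}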
"""
match = _FIND_DOUBLE_BRACKETS.search(text)
if match:
verdict = match.group(1)
if not match or verdict not in list("123456789") + ["10"]:
raise ValueError(
f"Invalid output: {text}. "
"Output must contain a double bracketed string\
with the verdict between 1 and 10."
)
return {
"reasoning": text,
"score": int(verdict),
}
class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
"""A chain for scoring on a scale of 1-10 the output of a model.
Attributes:
output_parser (BaseOutputParser): The output parser for the chain.
Example:
>>> from langchain.chat_models import ChatOpenAI
>>> from langchain.evaluation.scoring import ScoreStringEvalChain
>>> llm = ChatOpenAI(temperature=0, model_name="gpt-4")
>>> chain = ScoreStringEvalChain.from_llm(llm=llm)
>>> result = chain.evaluate_strings(
... input = "What is the chemical formula for water?",
... prediction = "H2O",
... reference = "The chemical formula for water is H2O.",
... )
>>> print(result)
# {
# "score": 8,
        #     "reasoning": "The response accurately states "
# "that the chemical formula for water is H2O."
# "However, it does not provide an explanation of what the formula means."
# }
"""
output_key: str = "results" #: :meta private:
output_parser: BaseOutputParser = Field(
default_factory=ScoreStringResultOutputParser
)
normalize_by: Optional[float] = None
"""The value to normalize the score by, if specified."""
criterion_name: str
"""The name of the criterion being evaluated."""
class Config:
"""Configuration for the ScoreStringEvalChain."""
extra = Extra.ignore
@property
def requires_reference(self) -> bool:
"""Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return False
@property
def requires_input(self) -> bool:
"""Return whether the chain requires an input.
Returns:
bool: True if the chain requires an input, False otherwise.
"""
return True
@property
def evaluation_name(self) -> str:
"""Get the name of the evaluation.
Returns
-------
str
The name of the evaluation.
"""
return f"score_string:{self.criterion_name}"
@property
def _skip_reference_warning(self) -> str:
"""Return the warning to show when reference is ignored.
Returns:
str: The warning to show when reference is ignored.
"""
return (
f"Ignoring reference in {self.__class__.__name__}, as it is not expected."
"\nTo use a reference, use the LabeledScoreStringEvalChain instead."
" (EvaluatorType.LABELED_SCORE_STRING) instead."
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
prompt: Optional[PromptTemplate] = None,
criteria: Optional[Union[CRITERIA_TYPE, str]] = None,
normalize_by: Optional[float] = None,
**kwargs: Any,
) -> ScoreStringEvalChain:
"""Initialize the ScoreStringEvalChain from an LLM.
Args:
llm (BaseChatModel): The LLM to use (GPT-4 recommended).
prompt (PromptTemplate, optional): The prompt to use.
**kwargs (Any): Additional keyword arguments.
Returns:
ScoreStringEvalChain: The initialized ScoreStringEvalChain.
Raises:
ValueError: If the input variables are not as expected.
"""
if not (
isinstance(llm, (ChatOpenAI, AzureChatOpenAI))
and llm.model_name.startswith("gpt-4")
):
logger.warning(
"This chain was only tested with GPT-4. \
Performance may be significantly worse with other models."
)
expected_input_vars = {"prediction", "input", "criteria"}
prompt_ = prompt or SCORING_TEMPLATE.partial(reference="")
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
criteria_ = resolve_criteria(criteria)
criteria_str = "\n".join(
f"{k}: {v}" if v else k for k, v in criteria_.items()
).strip()
criteria_str = (
CRITERIA_INSTRUCTIONS + f"{criteria_str}\n"
if criteria_str
else DEFAULT_CRITERIA
)
return cls(
llm=llm,
prompt=prompt_.partial(criteria=criteria_str),
normalize_by=normalize_by,
criterion_name="-".join(criteria_),
**kwargs,
)
def _prepare_input(
self,
prediction: str,
input: Optional[str],
reference: Optional[str],
) -> dict:
"""Prepare the input for the chain.
Args:
            prediction (str): The output string from the model.
input (str, optional): The input or task string.
reference (str, optional): The reference string, if any.
Returns:
dict: The prepared input for the chain.
"""
input_ = {
"prediction": prediction,
"input": input,
}
if self.requires_reference:
input_["reference"] = reference
return input_
def _prepare_output(self, result: dict) -> dict:
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
if "score" in parsed and self.normalize_by is not None:
parsed["score"] = parsed["score"] / self.normalize_by
return parsed
def _evaluate_strings(
self,
*,
prediction: str,
input: Optional[str] = None,
reference: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Score the output string.
Args:
            prediction (str): The output string from the model.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- score: A score between 1 and 10.
"""
input_ = self._prepare_input(prediction, input, reference)
result = self(
inputs=input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
    async def _aevaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Asynchronously score the output string.
Args:
            prediction (str): The output string from the model.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- score: A score between 1 and 10.
"""
input_ = self._prepare_input(prediction, input, reference)
result = await self.acall(
inputs=input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
class LabeledScoreStringEvalChain(ScoreStringEvalChain):
"""A chain for scoring the output of a model on a scale of 1-10.
Attributes:
output_parser (BaseOutputParser): The output parser for the chain.
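    Example (a minimal sketch mirroring the parent class; assumes an OpenAI API key
    is configured and GPT-4 access is available):
        .. code-block:: python
            from langchain.chat_models import ChatOpenAI
            from langchain.evaluation.scoring import LabeledScoreStringEvalChain
            llm = ChatOpenAI(temperature=0, model_name="gpt-4")
            chain = LabeledScoreStringEvalChain.from_llm(llm=llm)
            result = chain.evaluate_strings(
                input="What is the chemical formula for water?",
                prediction="H2O",
                reference="The chemical formula for water is H2O.",
            )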
"""
@property
def requires_reference(self) -> bool:
"""Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return True
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
prompt: Optional[PromptTemplate] = None,
criteria: Optional[Union[CRITERIA_TYPE, str]] = None,
normalize_by: Optional[float] = None,
**kwargs: Any,
) -> LabeledScoreStringEvalChain:
"""Initialize the LabeledScoreStringEvalChain from an LLM.
Args:
llm (BaseLanguageModel): The LLM to use.
prompt (PromptTemplate, optional): The prompt to use.
criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use.
normalize_by (float, optional): The value to normalize the score by.
**kwargs (Any): Additional keyword arguments.
Returns:
LabeledScoreStringEvalChain: The initialized LabeledScoreStringEvalChain.
Raises:
ValueError: If the input variables are not as expected.
""" # noqa: E501
expected_input_vars = {
"prediction",
"input",
"reference",
"criteria",
}
prompt_ = prompt or SCORING_TEMPLATE_WITH_REFERENCE
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
criteria_ = resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items()).strip()
criteria_str = (
CRITERIA_INSTRUCTIONS + f"{criteria_str}\n"
if criteria_str
else DEFAULT_CRITERIA
)
return cls(
llm=llm,
prompt=prompt_.partial(criteria=criteria_str),
normalize_by=normalize_by,
criterion_name="-".join(criteria_),
**kwargs,
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~smith~evaluation~test_runner_utils.py | from typing import Iterator, List
from uuid import uuid4
import pytest
from langsmith import Client as Client
from langsmith.schemas import DataType
from langchain.chains.llm import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.evaluation import EvaluatorType
from langchain.llms.openai import OpenAI
from langchain.prompts.chat import ChatPromptTemplate
from langchain.schema.messages import BaseMessage, HumanMessage
from langchain.smith import RunEvalConfig, run_on_dataset
from langchain.smith.evaluation import InputFormatError
from langchain.smith.evaluation.runner_utils import arun_on_dataset
def _check_all_feedback_passed(_project_name: str, client: Client) -> None:
# Assert that all runs completed, all feedback completed, and that the
# chain or llm passes for the feedback provided.
runs = list(client.list_runs(project_name=_project_name, execution_order=1))
if not runs:
# Queue delays. We are mainly just smoke checking rn.
return
feedback = list(client.list_feedback(run_ids=[run.id for run in runs]))
if not feedback:
return
assert all([f.score == 1 for f in feedback])
@pytest.fixture
def eval_project_name() -> str:
return f"lcp integration tests - {str(uuid4())[-8:]}"
@pytest.fixture(scope="module")
def client() -> Client:
return Client()
@pytest.fixture(
scope="module",
)
def kv_dataset_name() -> Iterator[str]:
import pandas as pd
client = Client()
df = pd.DataFrame(
{
"some_input": [
"What's the capital of California?",
"What's the capital of Nevada?",
"What's the capital of Oregon?",
"What's the capital of Washington?",
],
"other_input": [
"a",
"b",
"c",
"d",
],
"some_output": ["Sacramento", "Carson City", "Salem", "Olympia"],
"other_output": ["e", "f", "g", "h"],
}
)
uid = str(uuid4())[-8:]
_dataset_name = f"lcp kv dataset integration tests - {uid}"
client.upload_dataframe(
df,
name=_dataset_name,
input_keys=["some_input", "other_input"],
output_keys=["some_output", "other_output"],
description="Integration test dataset",
)
yield _dataset_name
def test_chat_model(
kv_dataset_name: str, eval_project_name: str, client: Client
) -> None:
llm = ChatOpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
with pytest.raises(ValueError, match="Must specify reference_key"):
run_on_dataset(
dataset_name=kv_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
client=client,
)
eval_config = RunEvalConfig(
evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA],
reference_key="some_output",
)
with pytest.raises(
InputFormatError, match="Example inputs do not match language model"
):
run_on_dataset(
dataset_name=kv_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
client=client,
)
def input_mapper(d: dict) -> List[BaseMessage]:
return [HumanMessage(content=d["some_input"])]
run_on_dataset(
client=client,
dataset_name=kv_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
input_mapper=input_mapper,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
def test_llm(kv_dataset_name: str, eval_project_name: str, client: Client) -> None:
llm = OpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
with pytest.raises(ValueError, match="Must specify reference_key"):
run_on_dataset(
dataset_name=kv_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
client=client,
)
eval_config = RunEvalConfig(
evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA],
reference_key="some_output",
)
with pytest.raises(
InputFormatError, match="Example inputs do not match language model"
):
run_on_dataset(
dataset_name=kv_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
client=client,
)
def input_mapper(d: dict) -> str:
return d["some_input"]
run_on_dataset(
client=client,
dataset_name=kv_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
input_mapper=input_mapper,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
def test_chain(kv_dataset_name: str, eval_project_name: str, client: Client) -> None:
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(llm, "The answer to the {question} is: ")
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
with pytest.raises(ValueError, match="Must specify reference_key"):
run_on_dataset(
dataset_name=kv_dataset_name,
llm_or_chain_factory=lambda: chain,
evaluation=eval_config,
client=client,
)
eval_config = RunEvalConfig(
evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA],
reference_key="some_output",
)
with pytest.raises(
InputFormatError, match="Example inputs do not match chain input keys"
):
run_on_dataset(
dataset_name=kv_dataset_name,
llm_or_chain_factory=lambda: chain,
evaluation=eval_config,
client=client,
)
def input_mapper(d: dict) -> dict:
return {"input": d["some_input"]}
with pytest.raises(
InputFormatError,
match=" match the chain's expected input keys.",
):
run_on_dataset(
dataset_name=kv_dataset_name,
llm_or_chain_factory=lambda: input_mapper | chain,
client=client,
evaluation=eval_config,
)
def right_input_mapper(d: dict) -> dict:
return {"question": d["some_input"]}
run_on_dataset(
dataset_name=kv_dataset_name,
llm_or_chain_factory=lambda: right_input_mapper | chain,
client=client,
evaluation=eval_config,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
### Testing Chat Datasets
@pytest.fixture(
scope="module",
)
def chat_dataset_name() -> Iterator[str]:
def _create_message(txt: str, role: str = "human") -> List[dict]:
return [{"type": role, "data": {"content": txt}}]
import pandas as pd
client = Client()
df = pd.DataFrame(
{
"input": [
_create_message(txt)
for txt in (
"What's the capital of California?",
"What's the capital of Nevada?",
"What's the capital of Oregon?",
"What's the capital of Washington?",
)
],
"output": [
_create_message(txt, role="ai")[0]
for txt in ("Sacramento", "Carson City", "Salem", "Olympia")
],
}
)
uid = str(uuid4())[-8:]
_dataset_name = f"lcp chat dataset integration tests - {uid}"
ds = client.create_dataset(
_dataset_name, description="Integration test dataset", data_type=DataType.chat
)
for row in df.itertuples():
client.create_example(
dataset_id=ds.id,
inputs={"input": row.input},
outputs={"output": row.output},
)
yield _dataset_name
def test_chat_model_on_chat_dataset(
chat_dataset_name: str, eval_project_name: str, client: Client
) -> None:
llm = ChatOpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
run_on_dataset(
dataset_name=chat_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
client=client,
project_name=eval_project_name,
)
_check_all_feedback_passed(eval_project_name, client)
def test_llm_on_chat_dataset(
chat_dataset_name: str, eval_project_name: str, client: Client
) -> None:
llm = OpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
run_on_dataset(
dataset_name=chat_dataset_name,
llm_or_chain_factory=llm,
client=client,
evaluation=eval_config,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
def test_chain_on_chat_dataset(chat_dataset_name: str, client: Client) -> None:
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(llm, "The answer to the {question} is: ")
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
with pytest.raises(
ValueError, match="Cannot evaluate a chain on dataset with data_type=chat"
):
run_on_dataset(
dataset_name=chat_dataset_name,
client=client,
llm_or_chain_factory=lambda: chain,
evaluation=eval_config,
)
@pytest.fixture(
scope="module",
)
def llm_dataset_name() -> Iterator[str]:
import pandas as pd
client = Client()
df = pd.DataFrame(
{
"input": [
"What's the capital of California?",
"What's the capital of Nevada?",
"What's the capital of Oregon?",
"What's the capital of Washington?",
],
"output": ["Sacramento", "Carson City", "Salem", "Olympia"],
}
)
uid = str(uuid4())[-8:]
_dataset_name = f"lcp llm dataset integration tests - {uid}"
client.upload_dataframe(
df,
name=_dataset_name,
input_keys=["input"],
output_keys=["output"],
description="Integration test dataset",
data_type=DataType.llm,
)
yield _dataset_name
def test_chat_model_on_llm_dataset(
llm_dataset_name: str, eval_project_name: str, client: Client
) -> None:
llm = ChatOpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
run_on_dataset(
client=client,
dataset_name=llm_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
def test_llm_on_llm_dataset(
llm_dataset_name: str, eval_project_name: str, client: Client
) -> None:
llm = OpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
run_on_dataset(
client=client,
dataset_name=llm_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
def test_chain_on_llm_dataset(llm_dataset_name: str, client: Client) -> None:
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(llm, "The answer to the {question} is: ")
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
with pytest.raises(
ValueError, match="Cannot evaluate a chain on dataset with data_type=llm"
):
run_on_dataset(
client=client,
dataset_name=llm_dataset_name,
llm_or_chain_factory=lambda: chain,
evaluation=eval_config,
)
@pytest.fixture(
scope="module",
)
def kv_singleio_dataset_name() -> Iterator[str]:
import pandas as pd
client = Client()
df = pd.DataFrame(
{
"the wackiest input": [
"What's the capital of California?",
"What's the capital of Nevada?",
"What's the capital of Oregon?",
"What's the capital of Washington?",
],
"unthinkable output": ["Sacramento", "Carson City", "Salem", "Olympia"],
}
)
uid = str(uuid4())[-8:]
_dataset_name = f"lcp singleio kv dataset integration tests - {uid}"
client.upload_dataframe(
df,
name=_dataset_name,
input_keys=["the wackiest input"],
output_keys=["unthinkable output"],
description="Integration test dataset",
)
yield _dataset_name
def test_chat_model_on_kv_singleio_dataset(
kv_singleio_dataset_name: str, eval_project_name: str, client: Client
) -> None:
llm = ChatOpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
run_on_dataset(
dataset_name=kv_singleio_dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
client=client,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
def test_llm_on_kv_singleio_dataset(
kv_singleio_dataset_name: str, eval_project_name: str, client: Client
) -> None:
llm = OpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
run_on_dataset(
dataset_name=kv_singleio_dataset_name,
llm_or_chain_factory=llm,
client=client,
evaluation=eval_config,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
def test_chain_on_kv_singleio_dataset(
kv_singleio_dataset_name: str, eval_project_name: str, client: Client
) -> None:
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(llm, "The answer to the {question} is: ")
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
run_on_dataset(
dataset_name=kv_singleio_dataset_name,
llm_or_chain_factory=lambda: chain,
client=client,
evaluation=eval_config,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
@pytest.mark.asyncio
async def test_runnable_on_kv_singleio_dataset(
kv_singleio_dataset_name: str, eval_project_name: str, client: Client
) -> None:
runnable = (
ChatPromptTemplate.from_messages([("human", "{the wackiest input}")])
| ChatOpenAI()
)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
await arun_on_dataset(
dataset_name=kv_singleio_dataset_name,
llm_or_chain_factory=runnable,
client=client,
evaluation=eval_config,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
@pytest.mark.asyncio
async def test_arb_func_on_kv_singleio_dataset(
kv_singleio_dataset_name: str, eval_project_name: str, client: Client
) -> None:
runnable = (
ChatPromptTemplate.from_messages([("human", "{the wackiest input}")])
| ChatOpenAI()
)
def my_func(x: dict) -> str:
return runnable.invoke(x).content
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
await arun_on_dataset(
dataset_name=kv_singleio_dataset_name,
llm_or_chain_factory=my_func,
client=client,
evaluation=eval_config,
project_name=eval_project_name,
tags=["shouldpass"],
)
_check_all_feedback_passed(eval_project_name, client)
| [
"some_input",
"[('human', '{the wackiest input}')]"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chat_models~cohere.py | from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import (
BaseChatModel,
_agenerate_from_stream,
_generate_from_stream,
)
from langchain.llms.cohere import BaseCohere
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.output import ChatGeneration, ChatGenerationChunk, ChatResult
def get_role(message: BaseMessage) -> str:
"""Get the role of the message.
Args:
message: The message.
Returns:
The role of the message.
Raises:
ValueError: If the message is of an unknown type.
"""
if isinstance(message, ChatMessage) or isinstance(message, HumanMessage):
return "User"
elif isinstance(message, AIMessage):
return "Chatbot"
elif isinstance(message, SystemMessage):
return "System"
else:
raise ValueError(f"Got unknown type {message}")
def get_cohere_chat_request(
messages: List[BaseMessage],
*,
connectors: Optional[List[Dict[str, str]]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Get the request for the Cohere chat API.
Args:
messages: The messages.
connectors: The connectors.
**kwargs: The keyword arguments.
Returns:
The request for the Cohere chat API.
"""
documents = (
None
if "source_documents" not in kwargs
else [
{
"snippet": doc.page_content,
"id": doc.metadata.get("id") or f"doc-{str(i)}",
}
for i, doc in enumerate(kwargs["source_documents"])
]
)
kwargs.pop("source_documents", None)
maybe_connectors = connectors if documents is None else None
# by enabling automatic prompt truncation, the probability of request failure is
# reduced with minimal impact on response quality
prompt_truncation = (
"AUTO" if documents is not None or connectors is not None else None
)
    return {
        # The last message is the current user turn; everything before it is history.
        "message": messages[-1].content,
        "chat_history": [
            {"role": get_role(x), "message": x.content} for x in messages[:-1]
        ],
"documents": documents,
"connectors": maybe_connectors,
"prompt_truncation": prompt_truncation,
**kwargs,
}
class ChatCohere(BaseChatModel, BaseCohere):
"""`Cohere` chat large language models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.chat_models import ChatCohere
from langchain.schema import HumanMessage
chat = ChatCohere(model="foo")
result = chat([HumanMessage(content="Hello")])
print(result.content)
"""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
arbitrary_types_allowed = True
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "cohere-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"temperature": self.temperature,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
stream = self.client.chat(**request, stream=True)
for data in stream:
if data.event_type == "text-generation":
delta = data.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
run_manager.on_llm_new_token(delta)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
stream = await self.async_client.chat(**request, stream=True)
async for data in stream:
if data.event_type == "text-generation":
delta = data.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
await run_manager.on_llm_new_token(delta)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
response = self.client.chat(**request)
message = AIMessage(content=response.text)
generation_info = None
if hasattr(response, "documents"):
generation_info = {"documents": response.documents}
return ChatResult(
generations=[
ChatGeneration(message=message, generation_info=generation_info)
]
)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await _agenerate_from_stream(stream_iter)
request = get_cohere_chat_request(messages, **self._default_params, **kwargs)
response = self.client.chat(**request, stream=False)
message = AIMessage(content=response.text)
generation_info = None
if hasattr(response, "documents"):
generation_info = {"documents": response.documents}
return ChatResult(
generations=[
ChatGeneration(message=message, generation_info=generation_info)
]
)
def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
return len(self.client.tokenize(text).tokens)
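# Example usage sketch (illustrative): it only builds and inspects the request dict
# produced by get_cohere_chat_request, so no Cohere API key is needed here; the
# sample messages below are made up for the demonstration.
if __name__ == "__main__":
    _messages = [
        HumanMessage(content="Hi, I have a question about whales."),
        AIMessage(content="Sure, ask away."),
        HumanMessage(content="How long do blue whales live?"),
    ]
    _request = get_cohere_chat_request(_messages, temperature=0.0)
    # Keys include: message, chat_history, documents, connectors, prompt_truncation.
    print(sorted(_request))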
| [
"AUTO"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~amadeus~toolkit.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.pydantic_v1 import Field
from langchain.tools import BaseTool
from langchain.tools.amadeus.closest_airport import AmadeusClosestAirport
from langchain.tools.amadeus.flight_search import AmadeusFlightSearch
from langchain.tools.amadeus.utils import authenticate
if TYPE_CHECKING:
from amadeus import Client
class AmadeusToolkit(BaseToolkit):
"""Toolkit for interacting with Amadeus which offers APIs for travel search."""
client: Client = Field(default_factory=authenticate)
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
AmadeusClosestAirport(),
AmadeusFlightSearch(),
]
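# Example usage sketch (illustrative): instantiating the toolkit authenticates via the
# `client` default factory, so valid Amadeus API credentials are assumed to be
# configured in the environment.
if __name__ == "__main__":
    toolkit = AmadeusToolkit()
    print([tool.name for tool in toolkit.get_tools()])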
| [] |
2024-01-10 | RohanDey02/langchain | templates~rag-pinecone-rerank~rag_pinecone_rerank~chain.py | import os
import pinecone
from operator import itemgetter
from langchain.vectorstores import Pinecone
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema.output_parser import StrOutputParser
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CohereRerank
from langchain.schema.runnable import RunnablePassthrough, RunnableParallel
if os.environ.get("PINECONE_API_KEY", None) is None:
raise Exception("Missing `PINECONE_API_KEY` environment variable.")
if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")
PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")
### Ingest code - you may need to run this the first time
# Load
# from langchain.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()
# # Split
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)
# # Add to vectorDB
# vectorstore = Pinecone.from_documents(
# documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
# )
# retriever = vectorstore.as_retriever()
vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, OpenAIEmbeddings())
# Get k=10 docs
retriever = vectorstore.as_retriever(search_kwargs={"k":10})
# Re-rank
compressor = CohereRerank()
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=retriever
)
# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
# RAG
model = ChatOpenAI()
chain = (
RunnableParallel({"context": compression_retriever, "question": RunnablePassthrough()})
| prompt
| model
| StrOutputParser()
)
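# Example usage sketch (illustrative): assumes the Pinecone index above is already
# populated and that OPENAI_API_KEY and COHERE_API_KEY are set (CohereRerank uses
# Cohere for re-ranking). The question is made up for the demonstration.
if __name__ == "__main__":
    print(chain.invoke("What is task decomposition?"))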
| [
"Answer the question based only on the following context:\n{context}\nQuestion: {question}\n"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~gmail~toolkit.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.pydantic_v1 import Field
from langchain.tools import BaseTool
from langchain.tools.gmail.create_draft import GmailCreateDraft
from langchain.tools.gmail.get_message import GmailGetMessage
from langchain.tools.gmail.get_thread import GmailGetThread
from langchain.tools.gmail.search import GmailSearch
from langchain.tools.gmail.send_message import GmailSendMessage
from langchain.tools.gmail.utils import build_resource_service
if TYPE_CHECKING:
# This is for linting and IDE typehints
from googleapiclient.discovery import Resource
else:
try:
# We do this so pydantic can resolve the types when instantiating
from googleapiclient.discovery import Resource
except ImportError:
pass
SCOPES = ["https://mail.google.com/"]
class GmailToolkit(BaseToolkit):
"""Toolkit for interacting with Gmail.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, deleting
data associated with this service.
For example, this toolkit can be used to send emails on behalf of the
associated account.
See https://python.langchain.com/docs/security for more information.
"""
api_resource: Resource = Field(default_factory=build_resource_service)
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
GmailCreateDraft(api_resource=self.api_resource),
GmailSendMessage(api_resource=self.api_resource),
GmailSearch(api_resource=self.api_resource),
GmailGetMessage(api_resource=self.api_resource),
GmailGetThread(api_resource=self.api_resource),
]
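# Example usage sketch (illustrative): constructing the toolkit runs the default
# `build_resource_service` factory, so Gmail API credentials (credentials.json /
# token.json) are assumed to be available locally.
if __name__ == "__main__":
    toolkit = GmailToolkit()
    print([tool.name for tool in toolkit.get_tools()])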
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~jira~toolkit.py | from typing import Dict, List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.jira.prompt import (
JIRA_CATCH_ALL_PROMPT,
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
JIRA_GET_ALL_PROJECTS_PROMPT,
JIRA_ISSUE_CREATE_PROMPT,
JIRA_JQL_PROMPT,
)
from langchain.tools.jira.tool import JiraAction
from langchain.utilities.jira import JiraAPIWrapper
class JiraToolkit(BaseToolkit):
"""Jira Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, or updating,
reading underlying data.
See https://python.langchain.com/docs/security for more information.
"""
tools: List[BaseTool] = []
@classmethod
def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
operations: List[Dict] = [
{
"mode": "jql",
"name": "JQL Query",
"description": JIRA_JQL_PROMPT,
},
{
"mode": "get_projects",
"name": "Get Projects",
"description": JIRA_GET_ALL_PROJECTS_PROMPT,
},
{
"mode": "create_issue",
"name": "Create Issue",
"description": JIRA_ISSUE_CREATE_PROMPT,
},
{
"mode": "other",
"name": "Catch all Jira API call",
"description": JIRA_CATCH_ALL_PROMPT,
},
{
"mode": "create_page",
"name": "Create confluence page",
"description": JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
},
]
tools = [
JiraAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=jira_api_wrapper,
)
for action in operations
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
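# Example usage sketch (illustrative): JiraAPIWrapper is expected to pick up Jira
# credentials (instance URL, username, API token) from the environment; those values
# are assumed to be configured.
if __name__ == "__main__":
    toolkit = JiraToolkit.from_jira_api_wrapper(JiraAPIWrapper())
    print([tool.name for tool in toolkit.get_tools()])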
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~vectorstores~clarifai.py | from __future__ import annotations
import logging
import os
import traceback
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Iterable, List, Optional, Tuple
import requests
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
logger = logging.getLogger(__name__)
class Clarifai(VectorStore):
"""`Clarifai AI` vector store.
To use, you should have the ``clarifai`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Clarifai
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Clarifai("langchain_store", embeddings.embed_query)
"""
def __init__(
self,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
pat: Optional[str] = None,
number_of_docs: Optional[int] = None,
api_base: Optional[str] = None,
) -> None:
"""Initialize with Clarifai client.
Args:
user_id (Optional[str], optional): User ID. Defaults to None.
app_id (Optional[str], optional): App ID. Defaults to None.
pat (Optional[str], optional): Personal access token. Defaults to None.
number_of_docs (Optional[int], optional): Number of documents to return
during vector search. Defaults to None.
api_base (Optional[str], optional): API base. Defaults to None.
Raises:
ValueError: If user ID, app ID or personal access token is not provided.
"""
try:
from clarifai.auth.helper import DEFAULT_BASE, ClarifaiAuthHelper
from clarifai.client import create_stub
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
        # Keep a user-supplied base URL; otherwise fall back to the Clarifai default.
        self._api_base = api_base if api_base is not None else DEFAULT_BASE
self._user_id = user_id or os.environ.get("CLARIFAI_USER_ID")
self._app_id = app_id or os.environ.get("CLARIFAI_APP_ID")
self._pat = pat or os.environ.get("CLARIFAI_PAT")
if self._user_id is None or self._app_id is None or self._pat is None:
raise ValueError(
"Could not find CLARIFAI_USER_ID, CLARIFAI_APP_ID or\
CLARIFAI_PAT in your environment. "
"Please set those env variables with a valid user ID, \
app ID and personal access token \
from https://clarifai.com/settings/security."
)
self._auth = ClarifaiAuthHelper(
user_id=self._user_id,
app_id=self._app_id,
pat=self._pat,
base=self._api_base,
)
self._stub = create_stub(self._auth)
self._userDataObject = self._auth.get_user_app_id_proto()
self._number_of_docs = number_of_docs
def _post_texts_as_inputs(
self, texts: List[str], metadatas: Optional[List[dict]] = None
    ) -> List[str]:
        """Post texts to Clarifai and return the IDs of the created inputs.
        Args:
            texts (List[str]): Texts to post.
            metadatas (Optional[List[dict]]): Metadata for each text, if provided.
        Returns:
            List[str]: IDs of the created inputs.
        """
try:
from clarifai_grpc.grpc.api import resources_pb2, service_pb2
from clarifai_grpc.grpc.api.status import status_code_pb2
from google.protobuf.struct_pb2 import Struct # type: ignore
except ImportError as e:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
) from e
if metadatas is not None:
assert len(list(texts)) == len(
metadatas
), "Number of texts and metadatas should be the same."
inputs = []
for idx, text in enumerate(texts):
            input_metadata = Struct()
            if metadatas is not None:
                input_metadata.update(metadatas[idx])
inputs.append(
resources_pb2.Input(
data=resources_pb2.Data(
text=resources_pb2.Text(raw=text),
metadata=input_metadata,
)
)
)
post_inputs_response = self._stub.PostInputs(
service_pb2.PostInputsRequest(
user_app_id=self._userDataObject,
inputs=inputs,
)
)
if post_inputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_inputs_response.status)
raise Exception(
"Post inputs failed, status: " + post_inputs_response.status.description
)
input_ids = []
for input in post_inputs_response.inputs:
input_ids.append(input.id)
return input_ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
    ) -> List[str]:
        """Add texts to the Clarifai vectorstore. This pushes each text
        to a Clarifai application.
        The application uses a base workflow that creates and stores an embedding
        for each text. Make sure you are using a base workflow that is compatible
        with text (such as Language Understanding).
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
ltexts = list(texts)
length = len(ltexts)
assert length > 0, "No texts provided to add to the vectorstore."
if metadatas is not None:
assert length == len(
metadatas
), "Number of texts and metadatas should be the same."
batch_size = 32
input_ids = []
for idx in range(0, length, batch_size):
try:
batch_texts = ltexts[idx : idx + batch_size]
batch_metadatas = (
metadatas[idx : idx + batch_size] if metadatas else None
)
result_ids = self._post_texts_as_inputs(batch_texts, batch_metadatas)
input_ids.extend(result_ids)
logger.debug(f"Input {result_ids} posted successfully.")
except Exception as error:
logger.warning(f"Post inputs failed: {error}")
traceback.print_exc()
return input_ids
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run similarity search with score using Clarifai.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
try:
from clarifai_grpc.grpc.api import resources_pb2, service_pb2
from clarifai_grpc.grpc.api.status import status_code_pb2
from google.protobuf import json_format # type: ignore
from google.protobuf.struct_pb2 import Struct # type: ignore
except ImportError as e:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
) from e
# Get number of docs to return
if self._number_of_docs is not None:
k = self._number_of_docs
req = service_pb2.PostAnnotationsSearchesRequest(
user_app_id=self._userDataObject,
searches=[
resources_pb2.Search(
query=resources_pb2.Query(
ranks=[
resources_pb2.Rank(
annotation=resources_pb2.Annotation(
data=resources_pb2.Data(
text=resources_pb2.Text(raw=query),
)
)
)
]
)
)
],
pagination=service_pb2.Pagination(page=1, per_page=k),
)
# Add filter by metadata if provided.
if filter is not None:
search_metadata = Struct()
search_metadata.update(filter)
f = req.searches[0].query.filters.add()
f.annotation.data.metadata.update(search_metadata)
post_annotations_searches_response = self._stub.PostAnnotationsSearches(req)
# Check if search was successful
if post_annotations_searches_response.status.code != status_code_pb2.SUCCESS:
raise Exception(
"Post searches failed, status: "
+ post_annotations_searches_response.status.description
)
# Retrieve hits
hits = post_annotations_searches_response.hits
executor = ThreadPoolExecutor(max_workers=10)
def hit_to_document(hit: resources_pb2.Hit) -> Tuple[Document, float]:
metadata = json_format.MessageToDict(hit.input.data.metadata)
h = {"Authorization": f"Key {self._auth.pat}"}
request = requests.get(hit.input.data.text.url, headers=h)
# override encoding by real educated guess as provided by chardet
request.encoding = request.apparent_encoding
requested_text = request.text
logger.debug(
f"\tScore {hit.score:.2f} for annotation: {hit.annotation.id}\
off input: {hit.input.id}, text: {requested_text[:125]}"
)
return (Document(page_content=requested_text, metadata=metadata), hit.score)
# Iterate over hits and retrieve metadata and text
futures = [executor.submit(hit_to_document, hit) for hit in hits]
docs_and_scores = [future.result() for future in futures]
return docs_and_scores
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search using Clarifai.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
            List of Documents most similar to the query.
"""
        docs_and_scores = self.similarity_search_with_score(query, k=k, **kwargs)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
pat: Optional[str] = None,
number_of_docs: Optional[int] = None,
api_base: Optional[str] = None,
**kwargs: Any,
) -> Clarifai:
"""Create a Clarifai vectorstore from a list of texts.
Args:
user_id (str): User ID.
app_id (str): App ID.
texts (List[str]): List of texts to add.
pat (Optional[str]): Personal access token. Defaults to None.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
api_base (Optional[str]): API base. Defaults to None.
metadatas (Optional[List[dict]]): Optional list of metadatas.
Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
"""
clarifai_vector_db = cls(
user_id=user_id,
app_id=app_id,
pat=pat,
number_of_docs=number_of_docs,
api_base=api_base,
)
clarifai_vector_db.add_texts(texts=texts, metadatas=metadatas)
return clarifai_vector_db
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Optional[Embeddings] = None,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
pat: Optional[str] = None,
number_of_docs: Optional[int] = None,
api_base: Optional[str] = None,
**kwargs: Any,
) -> Clarifai:
"""Create a Clarifai vectorstore from a list of documents.
Args:
user_id (str): User ID.
app_id (str): App ID.
documents (List[Document]): List of documents to add.
pat (Optional[str]): Personal access token. Defaults to None.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
api_base (Optional[str]): API base. Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
user_id=user_id,
app_id=app_id,
texts=texts,
pat=pat,
number_of_docs=number_of_docs,
api_base=api_base,
metadatas=metadatas,
)
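# Example usage sketch (illustrative): assumes a Clarifai app whose base workflow can
# embed text, with CLARIFAI_USER_ID, CLARIFAI_APP_ID and CLARIFAI_PAT set in the
# environment (see __init__ above). The sample texts are made up.
if __name__ == "__main__":
    store = Clarifai.from_texts(
        texts=["Ants are social insects.", "Bees communicate by dancing."],
        metadatas=[{"topic": "ants"}, {"topic": "bees"}],
    )
    for doc in store.similarity_search("How do bees communicate?", k=1):
        print(doc.page_content, doc.metadata)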
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~evaluation~qa~generate_chain.py | """LLM Chain for generating examples for question answering."""
from __future__ import annotations
from typing import Any
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.generate_prompt import PROMPT
from langchain.output_parsers.regex import RegexParser
from langchain.pydantic_v1 import Field
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.output_parser import BaseLLMOutputParser
_QA_OUTPUT_PARSER = RegexParser(
regex=r"QUESTION: (.*?)\n+ANSWER: (.*)", output_keys=["query", "answer"]
)
class QAGenerateChain(LLMChain):
"""LLM Chain for generating examples for question answering."""
output_parser: BaseLLMOutputParser = Field(default=_QA_OUTPUT_PARSER)
output_key: str = "qa_pairs"
@classmethod
def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:
"""Load QA Generate Chain from LLM."""
return cls(llm=llm, prompt=PROMPT, **kwargs)
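# Example usage sketch (illustrative): assumes OPENAI_API_KEY is set; the default
# generate prompt takes a single input variable named "doc", and the parsed QA pair
# comes back under the "qa_pairs" output key.
if __name__ == "__main__":
    from langchain.llms import OpenAI
    chain = QAGenerateChain.from_llm(OpenAI(temperature=0))
    output = chain({"doc": "The Nile is the longest river in Africa."})
    print(output["qa_pairs"])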
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~file_management~toolkit.py | from __future__ import annotations
from typing import List, Optional
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.pydantic_v1 import root_validator
from langchain.tools import BaseTool
from langchain.tools.file_management.copy import CopyFileTool
from langchain.tools.file_management.delete import DeleteFileTool
from langchain.tools.file_management.file_search import FileSearchTool
from langchain.tools.file_management.list_dir import ListDirectoryTool
from langchain.tools.file_management.move import MoveFileTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool
_FILE_TOOLS = {
# "Type[Runnable[Any, Any]]" has no attribute "__fields__" [attr-defined]
tool_cls.__fields__["name"].default: tool_cls # type: ignore[attr-defined]
for tool_cls in [
CopyFileTool,
DeleteFileTool,
FileSearchTool,
MoveFileTool,
ReadFileTool,
WriteFileTool,
ListDirectoryTool,
]
}
class FileManagementToolkit(BaseToolkit):
"""Toolkit for interacting with local files.
*Security Notice*: This toolkit provides methods to interact with local files.
If providing this toolkit to an agent on an LLM, ensure you scope
the agent's permissions to only include the necessary permissions
to perform the desired operations.
By **default** the agent will have access to all files within
the root dir and will be able to Copy, Delete, Move, Read, Write
and List files in that directory.
Consider the following:
- Limit access to particular directories using `root_dir`.
- Use filesystem permissions to restrict access and permissions to only
the files and directories required by the agent.
- Limit the tools available to the agent to only the file operations
necessary for the agent's intended use.
- Sandbox the agent by running it in a container.
See https://python.langchain.com/docs/security for more information.
"""
root_dir: Optional[str] = None
"""If specified, all file operations are made relative to root_dir."""
selected_tools: Optional[List[str]] = None
"""If provided, only provide the selected tools. Defaults to all."""
@root_validator
def validate_tools(cls, values: dict) -> dict:
selected_tools = values.get("selected_tools") or []
for tool_name in selected_tools:
if tool_name not in _FILE_TOOLS:
raise ValueError(
f"File Tool of name {tool_name} not supported."
f" Permitted tools: {list(_FILE_TOOLS)}"
)
return values
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
allowed_tools = self.selected_tools or _FILE_TOOLS.keys()
tools: List[BaseTool] = []
for tool in allowed_tools:
tool_cls = _FILE_TOOLS[tool]
tools.append(tool_cls(root_dir=self.root_dir)) # type: ignore
return tools
__all__ = ["FileManagementToolkit"]
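# Example usage sketch (illustrative): scopes the tools to a temporary directory and
# to a read/write/list subset, following the security guidance in the class docstring.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as working_dir:
        toolkit = FileManagementToolkit(
            root_dir=working_dir,
            selected_tools=["read_file", "write_file", "list_directory"],
        )
        print([tool.name for tool in toolkit.get_tools()])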
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~self_query~timescalevector.py | from __future__ import annotations
from typing import TYPE_CHECKING, Tuple, Union
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
if TYPE_CHECKING:
from timescale_vector import client
class TimescaleVectorTranslator(Visitor):
"""Translate the internal query language elements to valid filters."""
allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
"""Subset of allowed logical operators."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
]
COMPARATOR_MAP = {
Comparator.EQ: "==",
Comparator.GT: ">",
Comparator.GTE: ">=",
Comparator.LT: "<",
Comparator.LTE: "<=",
}
OPERATOR_MAP = {Operator.AND: "AND", Operator.OR: "OR", Operator.NOT: "NOT"}
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
if isinstance(func, Operator):
value = self.OPERATOR_MAP[func.value] # type: ignore
elif isinstance(func, Comparator):
value = self.COMPARATOR_MAP[func.value] # type: ignore
return f"{value}"
def visit_operation(self, operation: Operation) -> client.Predicates:
try:
from timescale_vector import client
except ImportError as e:
raise ImportError(
"Cannot import timescale-vector. Please install with `pip install "
"timescale-vector`."
) from e
args = [arg.accept(self) for arg in operation.arguments]
return client.Predicates(*args, operator=self._format_func(operation.operator))
def visit_comparison(self, comparison: Comparison) -> client.Predicates:
try:
from timescale_vector import client
except ImportError as e:
raise ImportError(
"Cannot import timescale-vector. Please install with `pip install "
"timescale-vector`."
) from e
return client.Predicates(
(
comparison.attribute,
self._format_func(comparison.comparator),
comparison.value,
)
)
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"predicates": structured_query.filter.accept(self)}
return structured_query.query, kwargs
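# Example usage sketch (illustrative): translates a structured query with a single
# comparison filter into timescale-vector Predicates; assumes the optional
# `timescale-vector` package is installed. The attribute name is made up.
if __name__ == "__main__":
    translator = TimescaleVectorTranslator()
    structured = StructuredQuery(
        query="dystopian novels",
        filter=Comparison(comparator=Comparator.GT, attribute="year", value=1950),
        limit=None,
    )
    new_query, search_kwargs = translator.visit_structured_query(structured)
    print(new_query, search_kwargs)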
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~azure_blob_storage_file.py | import os
import tempfile
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class AzureBlobStorageFileLoader(BaseLoader):
"""Load from `Azure Blob Storage` files."""
def __init__(self, conn_str: str, container: str, blob_name: str):
"""Initialize with connection string, container and blob name."""
self.conn_str = conn_str
"""Connection string for Azure Blob Storage."""
self.container = container
"""Container name."""
self.blob = blob_name
"""Blob name."""
def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import BlobClient
except ImportError as exc:
raise ImportError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
client = BlobClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container, blob_name=self.blob
)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.container}/{self.blob}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(f"{file_path}", "wb") as file:
blob_data = client.download_blob()
blob_data.readinto(file)
loader = UnstructuredFileLoader(file_path)
return loader.load()
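# Example usage sketch (illustrative): the connection string, container and blob names
# are placeholders; a real Azure Storage account plus the `azure-storage-blob` and
# `unstructured` packages are assumed.
if __name__ == "__main__":
    loader = AzureBlobStorageFileLoader(
        conn_str="<your-connection-string>",
        container="my-container",
        blob_name="report.pdf",
    )
    print(loader.load()[0].metadata)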
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~conversational~output_parser.py | import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
class ConvoOutputParser(AgentOutputParser):
"""Output parser for the conversational agent."""
ai_prefix: str = "AI"
"""Prefix to use before AI output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
)
regex = r"Action: (.*?)[\n]*Action Input: (.*)"
match = re.search(regex, text)
if not match:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
@property
def _type(self) -> str:
return "conversational"
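# Example usage sketch (illustrative): exercises both branches of parse(); the sample
# LLM outputs are made up for the demonstration and need no API access.
if __name__ == "__main__":
    parser = ConvoOutputParser()
    action = parser.parse("Action: Search\nAction Input: weather in SF")
    print(action.tool, action.tool_input)
    finish = parser.parse("AI: It is sunny in SF today.")
    print(finish.return_values["output"])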
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~docstore~test_inmemory.py | """Test in memory docstore."""
import pytest
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
def test_document_found() -> None:
"""Test document found."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
output = docstore.search("foo")
assert isinstance(output, Document)
assert output.page_content == "bar"
def test_document_not_found() -> None:
"""Test when document is not found."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
output = docstore.search("bar")
assert output == "ID bar not found."
def test_adding_document() -> None:
"""Test that documents are added correctly."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
new_dict = {"bar": Document(page_content="foo")}
docstore.add(new_dict)
# Test that you can find new document.
foo_output = docstore.search("bar")
assert isinstance(foo_output, Document)
assert foo_output.page_content == "foo"
# Test that old document is the same.
bar_output = docstore.search("foo")
assert isinstance(bar_output, Document)
assert bar_output.page_content == "bar"
def test_adding_document_already_exists() -> None:
"""Test that error is raised if document id already exists."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
new_dict = {"foo": Document(page_content="foo")}
# Test that error is raised.
with pytest.raises(ValueError):
docstore.add(new_dict)
# Test that old document is the same.
bar_output = docstore.search("foo")
assert isinstance(bar_output, Document)
assert bar_output.page_content == "bar"
def test_default_dict_value_in_constructor() -> None:
"""Test proper functioning if no _dict is provided to the constructor."""
docstore = InMemoryDocstore()
docstore.add({"foo": Document(page_content="bar")})
output = docstore.search("foo")
assert isinstance(output, Document)
assert output.page_content == "bar"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~googledrive.py | # Prerequisites:
# 1. Create a Google Cloud project
# 2. Enable the Google Drive API:
# https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
# https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501
# 4. For service accounts visit
# https://cloud.google.com/iam/docs/service-accounts-create
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.pydantic_v1 import BaseModel, root_validator, validator
SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]
class GoogleDriveLoader(BaseLoader, BaseModel):
"""Load Google Docs from `Google Drive`."""
service_account_key: Path = Path.home() / ".credentials" / "keys.json"
"""Path to the service account key file."""
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
"""Path to the credentials file."""
token_path: Path = Path.home() / ".credentials" / "token.json"
"""Path to the token file."""
folder_id: Optional[str] = None
"""The folder id to load from."""
document_ids: Optional[List[str]] = None
"""The document ids to load from."""
file_ids: Optional[List[str]] = None
"""The file ids to load from."""
recursive: bool = False
"""Whether to load recursively. Only applies when folder_id is given."""
file_types: Optional[Sequence[str]] = None
"""The file types to load. Only applies when folder_id is given."""
load_trashed_files: bool = False
"""Whether to load trashed files. Only applies when folder_id is given."""
# NOTE(MthwRobinson) - changing the file_loader_cls to type here currently
# results in pydantic validation errors
file_loader_cls: Any = None
"""The file loader class to use."""
file_loader_kwargs: Dict["str", Any] = {}
"""The file loader kwargs to use."""
@root_validator
def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if values.get("folder_id") and (
values.get("document_ids") or values.get("file_ids")
):
raise ValueError(
"Cannot specify both folder_id and document_ids nor "
"folder_id and file_ids"
)
if (
not values.get("folder_id")
and not values.get("document_ids")
and not values.get("file_ids")
):
raise ValueError("Must specify either folder_id, document_ids, or file_ids")
file_types = values.get("file_types")
if file_types:
if values.get("document_ids") or values.get("file_ids"):
raise ValueError(
"file_types can only be given when folder_id is given,"
" (not when document_ids or file_ids are given)."
)
type_mapping = {
"document": "application/vnd.google-apps.document",
"sheet": "application/vnd.google-apps.spreadsheet",
"pdf": "application/pdf",
}
allowed_types = list(type_mapping.keys()) + list(type_mapping.values())
short_names = ", ".join([f"'{x}'" for x in type_mapping.keys()])
full_names = ", ".join([f"'{x}'" for x in type_mapping.values()])
for file_type in file_types:
if file_type not in allowed_types:
raise ValueError(
f"Given file type {file_type} is not supported. "
f"Supported values are: {short_names}; and "
f"their full-form names: {full_names}"
)
# replace short-form file types by full-form file types
def full_form(x: str) -> str:
return type_mapping[x] if x in type_mapping else x
values["file_types"] = [full_form(file_type) for file_type in file_types]
return values
@validator("credentials_path")
def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
"""Validate that credentials_path exists."""
if not v.exists():
raise ValueError(f"credentials_path {v} does not exist")
return v
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth import default
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
except ImportError:
raise ImportError(
"You must run "
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib` "
"to use the Google Drive loader."
)
creds = None
if self.service_account_key.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_key), scopes=SCOPES
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
elif "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ:
creds, project = default()
creds = creds.with_scopes(SCOPES)
# no need to write to file
if creds:
return creds
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
def _load_sheet_from_id(self, id: str) -> List[Document]:
"""Load a sheet and all tabs from an ID."""
from googleapiclient.discovery import build
creds = self._load_credentials()
sheets_service = build("sheets", "v4", credentials=creds)
spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
sheets = spreadsheet.get("sheets", [])
documents = []
for sheet in sheets:
sheet_name = sheet["properties"]["title"]
result = (
sheets_service.spreadsheets()
.values()
.get(spreadsheetId=id, range=sheet_name)
.execute()
)
values = result.get("values", [])
if not values:
continue # empty sheet
header = values[0]
for i, row in enumerate(values[1:], start=1):
metadata = {
"source": (
f"https://docs.google.com/spreadsheets/d/{id}/"
f"edit?gid={sheet['properties']['sheetId']}"
),
"title": f"{spreadsheet['properties']['title']} - {sheet_name}",
"row": i,
}
content = []
for j, v in enumerate(row):
title = header[j].strip() if len(header) > j else ""
content.append(f"{title}: {v.strip()}")
page_content = "\n".join(content)
documents.append(Document(page_content=page_content, metadata=metadata))
return documents
def _load_document_from_id(self, id: str) -> Document:
"""Load a document from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = (
service.files()
.get(fileId=id, supportsAllDrives=True, fields="modifiedTime,name")
.execute()
)
request = service.files().export_media(fileId=id, mimeType="text/plain")
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
try:
while done is False:
status, done = downloader.next_chunk()
except HttpError as e:
if e.resp.status == 404:
print("File not found: {}".format(id))
else:
print("An error occurred: {}".format(e))
text = fh.getvalue().decode("utf-8")
metadata = {
"source": f"https://docs.google.com/document/d/{id}/edit",
"title": f"{file.get('name')}",
"when": f"{file.get('modifiedTime')}",
}
return Document(page_content=text, metadata=metadata)
def _load_documents_from_folder(
self, folder_id: str, *, file_types: Optional[Sequence[str]] = None
) -> List[Document]:
"""Load documents from a folder."""
from googleapiclient.discovery import build
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
files = self._fetch_files_recursive(service, folder_id)
# If file types filter is provided, we'll filter by the file type.
if file_types:
_files = [f for f in files if f["mimeType"] in file_types] # type: ignore
else:
_files = files
returns = []
for file in _files:
if file["trashed"] and not self.load_trashed_files:
continue
elif file["mimeType"] == "application/vnd.google-apps.document":
returns.append(self._load_document_from_id(file["id"])) # type: ignore
elif file["mimeType"] == "application/vnd.google-apps.spreadsheet":
returns.extend(self._load_sheet_from_id(file["id"])) # type: ignore
elif (
file["mimeType"] == "application/pdf"
or self.file_loader_cls is not None
):
returns.extend(self._load_file_from_id(file["id"])) # type: ignore
else:
pass
return returns
def _fetch_files_recursive(
self, service: Any, folder_id: str
) -> List[Dict[str, Union[str, List[str]]]]:
"""Fetch all files and subfolders recursively."""
results = (
service.files()
.list(
q=f"'{folder_id}' in parents",
pageSize=1000,
includeItemsFromAllDrives=True,
supportsAllDrives=True,
fields="nextPageToken, files(id, name, mimeType, parents, trashed)",
)
.execute()
)
files = results.get("files", [])
returns = []
for file in files:
if file["mimeType"] == "application/vnd.google-apps.folder":
if self.recursive:
returns.extend(self._fetch_files_recursive(service, file["id"]))
else:
returns.append(file)
return returns
def _load_documents_from_ids(self) -> List[Document]:
"""Load documents from a list of IDs."""
if not self.document_ids:
raise ValueError("document_ids must be set")
return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]
def _load_file_from_id(self, id: str) -> List[Document]:
"""Load a file from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = service.files().get(fileId=id, supportsAllDrives=True).execute()
request = service.files().get_media(fileId=id)
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
if self.file_loader_cls is not None:
fh.seek(0)
loader = self.file_loader_cls(file=fh, **self.file_loader_kwargs)
docs = loader.load()
for doc in docs:
doc.metadata["source"] = f"https://drive.google.com/file/d/{id}/view"
return docs
else:
from PyPDF2 import PdfReader
content = fh.getvalue()
pdf_reader = PdfReader(BytesIO(content))
return [
Document(
page_content=page.extract_text(),
metadata={
"source": f"https://drive.google.com/file/d/{id}/view",
"title": f"{file.get('name')}",
"page": i,
},
)
for i, page in enumerate(pdf_reader.pages)
]
def _load_file_from_ids(self) -> List[Document]:
"""Load files from a list of IDs."""
if not self.file_ids:
raise ValueError("file_ids must be set")
docs = []
for file_id in self.file_ids:
docs.extend(self._load_file_from_id(file_id))
return docs
def load(self) -> List[Document]:
"""Load documents."""
if self.folder_id:
return self._load_documents_from_folder(
self.folder_id, file_types=self.file_types
)
elif self.document_ids:
return self._load_documents_from_ids()
else:
return self._load_file_from_ids()
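# Example usage sketch (illustrative): the folder id is a placeholder; OAuth
# credentials or a service account key at the default ~/.credentials paths are
# assumed, as described in the prerequisites at the top of this file.
if __name__ == "__main__":
    loader = GoogleDriveLoader(
        folder_id="<your-folder-id>",
        file_types=["document", "sheet"],
        recursive=False,
    )
    print(len(loader.load()))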
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~prompts~test_pipeline_prompt.py | from langchain.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import PromptTemplate
def test_get_input_variables() -> None:
prompt_a = PromptTemplate.from_template("{foo}")
prompt_b = PromptTemplate.from_template("{bar}")
pipeline_prompt = PipelinePromptTemplate(
final_prompt=prompt_b, pipeline_prompts=[("bar", prompt_a)]
)
assert pipeline_prompt.input_variables == ["foo"]
def test_simple_pipeline() -> None:
prompt_a = PromptTemplate.from_template("{foo}")
prompt_b = PromptTemplate.from_template("{bar}")
pipeline_prompt = PipelinePromptTemplate(
final_prompt=prompt_b, pipeline_prompts=[("bar", prompt_a)]
)
output = pipeline_prompt.format(foo="jim")
assert output == "jim"
def test_multi_variable_pipeline() -> None:
prompt_a = PromptTemplate.from_template("{foo}")
prompt_b = PromptTemplate.from_template("okay {bar} {baz}")
pipeline_prompt = PipelinePromptTemplate(
final_prompt=prompt_b, pipeline_prompts=[("bar", prompt_a)]
)
output = pipeline_prompt.format(foo="jim", baz="deep")
assert output == "okay jim deep"
def test_partial_with_chat_prompts() -> None:
prompt_a = ChatPromptTemplate(
input_variables=["foo"], messages=[MessagesPlaceholder(variable_name="foo")]
)
prompt_b = ChatPromptTemplate.from_template("jim {bar}")
pipeline_prompt = PipelinePromptTemplate(
final_prompt=prompt_a, pipeline_prompts=[("foo", prompt_b)]
)
assert pipeline_prompt.input_variables == ["bar"]
output = pipeline_prompt.format_prompt(bar="okay")
assert output.to_messages()[0].content == "jim okay"
| [
"{foo}",
"[('bar', PLACEHOLDER)]",
"jim {bar}",
"[('foo', PLACEHOLDER)]",
"{bar}",
"okay {bar} {baz}"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_pai_eas_endpoint.py | """Test PaiEasEndpoint API wrapper."""
import os
from typing import Generator
from langchain.llms.pai_eas_endpoint import PaiEasEndpoint
def test_pai_eas_v1_call() -> None:
"""Test valid call to PAI-EAS Service."""
llm = PaiEasEndpoint(
eas_service_url=os.getenv("EAS_SERVICE_URL"),
eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
version="1.0",
)
output = llm("Say foo:")
assert isinstance(output, str)
def test_pai_eas_v2_call() -> None:
llm = PaiEasEndpoint(
eas_service_url=os.getenv("EAS_SERVICE_URL"),
eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
version="2.0",
)
output = llm("Say foo:")
assert isinstance(output, str)
def test_pai_eas_v1_streaming() -> None:
"""Test streaming call to PAI-EAS Service."""
llm = PaiEasEndpoint(
eas_service_url=os.getenv("EAS_SERVICE_URL"),
eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
version="1.0",
)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert isinstance(chunk, str)
        stream_results_string += chunk
assert len(stream_results_string.strip()) > 1
def test_pai_eas_v2_streaming() -> None:
llm = PaiEasEndpoint(
eas_service_url=os.getenv("EAS_SERVICE_URL"),
eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
version="2.0",
)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert isinstance(chunk, str)
        stream_results_string += chunk
assert len(stream_results_string.strip()) > 1
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~bilibili.py | import json
import re
import warnings
from typing import List, Tuple
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BiliBiliLoader(BaseLoader):
"""Load `BiliBili` video transcripts."""
def __init__(self, video_urls: List[str]):
"""Initialize with bilibili url.
Args:
video_urls: List of bilibili urls.
"""
self.video_urls = video_urls
def load(self) -> List[Document]:
"""Load Documents from bilibili url."""
results = []
for url in self.video_urls:
transcript, video_info = self._get_bilibili_subs_and_info(url)
doc = Document(page_content=transcript, metadata=video_info)
results.append(doc)
return results
def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]:
try:
from bilibili_api import sync, video
except ImportError:
            raise ImportError(
                "bilibili-api-python package not found, please install it with "
                "`pip install bilibili-api-python`"
)
bvid = re.search(r"BV\w+", url)
if bvid is not None:
v = video.Video(bvid=bvid.group())
else:
aid = re.search(r"av[0-9]+", url)
if aid is not None:
try:
v = video.Video(aid=int(aid.group()[2:]))
except AttributeError:
raise ValueError(f"{url} is not bilibili url.")
else:
raise ValueError(f"{url} is not bilibili url.")
video_info = sync(v.get_info())
video_info.update({"url": url})
sub = sync(v.get_subtitle(video_info["cid"]))
# Get subtitle url
sub_list = sub["subtitles"]
if sub_list:
sub_url = sub_list[0]["subtitle_url"]
if not sub_url.startswith("http"):
sub_url = "https:" + sub_url
result = requests.get(sub_url)
raw_sub_titles = json.loads(result.content)["body"]
raw_transcript = " ".join([c["content"] for c in raw_sub_titles])
raw_transcript_with_meta_info = (
f"Video Title: {video_info['title']},"
f"description: {video_info['desc']}\n\n"
f"Transcript: {raw_transcript}"
)
return raw_transcript_with_meta_info, video_info
else:
raw_transcript = ""
warnings.warn(
f"""
No subtitles found for video: {url}.
                Returning an empty transcript.
"""
)
return raw_transcript, video_info
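# Example usage sketch (illustrative): the video URL is a placeholder; the
# `bilibili-api-python` package is assumed to be installed and the video needs
# subtitles for a non-empty transcript.
if __name__ == "__main__":
    loader = BiliBiliLoader(["https://www.bilibili.com/video/BV1xx411c7mD"])
    docs = loader.load()
    print(docs[0].metadata.get("title"), len(docs[0].page_content))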
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_tongyi.py | """Test Tongyi API wrapper."""
from langchain.llms.tongyi import Tongyi
from langchain.schema import LLMResult
def test_tongyi_call() -> None:
"""Test valid call to tongyi."""
llm = Tongyi()
output = llm("who are you")
assert isinstance(output, str)
def test_tongyi_generate() -> None:
"""Test valid call to tongyi."""
llm = Tongyi()
output = llm.generate(["who are you"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_tongyi_generate_stream() -> None:
"""Test valid call to tongyi."""
llm = Tongyi(streaming=True)
output = llm.generate(["who are you"])
print(output)
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
| [] |
2024-01-10 | RohanDey02/langchain | templates~cassandra-entomology-rag~setup.py | import os
import cassio
from langchain.vectorstores import Cassandra
from langchain.embeddings import OpenAIEmbeddings
use_cassandra = int(os.environ.get("USE_CASSANDRA_CLUSTER", "0"))
if use_cassandra:
from cassandra_entomology_rag.cassandra_cluster_init import get_cassandra_connection
session, keyspace = get_cassandra_connection()
cassio.init(
session=session,
keyspace=keyspace,
)
else:
cassio.init(
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
database_id=os.environ["ASTRA_DB_ID"],
keyspace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
if __name__ == '__main__':
embeddings = OpenAIEmbeddings()
vector_store = Cassandra(
session=None,
keyspace=None,
embedding=embeddings,
table_name="langserve_rag_demo",
)
#
lines = [
l.strip()
for l in open("sources.txt").readlines()
if l.strip()
if l[0] != "#"
]
# deterministic IDs to prevent duplicates on multiple runs
ids = [
"_".join(l.split(" ")[:2]).lower().replace(":", "")
for l in lines
]
#
vector_store.add_texts(texts=lines, ids=ids)
print(f"Done ({len(lines)} lines inserted).")
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~retrievers~test_remote_retriever.py | from typing import Any, Dict
from pytest_mock import MockerFixture
from langchain.retrievers import RemoteLangChainRetriever
from langchain.schema import Document
class MockResponse:
def __init__(self, json_data: Dict, status_code: int):
self.json_data = json_data
self.status_code = status_code
def json(self) -> Dict:
return self.json_data
def mocked_requests_post(*args: Any, **kwargs: Any) -> MockResponse:
return MockResponse(
json_data={
"response": [
{
"page_content": "I like apples",
"metadata": {
"test": 0,
},
},
{
"page_content": "I like pineapples",
"metadata": {
"test": 1,
},
},
]
},
status_code=200,
)
def test_RemoteLangChainRetriever_get_relevant_documents(
mocker: MockerFixture,
) -> None:
mocker.patch("requests.post", side_effect=mocked_requests_post)
remote_langchain_retriever = RemoteLangChainRetriever(
url="http://localhost:8000",
)
response = remote_langchain_retriever.get_relevant_documents("I like apples")
want = [
Document(page_content="I like apples", metadata={"test": 0}),
Document(page_content="I like pineapples", metadata={"test": 1}),
]
assert len(response) == len(want)
for r, w in zip(response, want):
assert r.page_content == w.page_content
assert r.metadata == w.metadata
# TODO: _aget_relevant_documents test
| [
"I like pineapples",
"I like apples"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~embeddings~test_vertexai.py | """Test Vertex AI API wrapper.
In order to run this test, you need to install VertexAI SDK
pip install google-cloud-aiplatform>=1.25.0
Your end-user credentials would be used to make the calls (make sure you've run
`gcloud auth login` first).
"""
from langchain.embeddings import VertexAIEmbeddings
def test_embedding_documents() -> None:
documents = ["foo bar"]
model = VertexAIEmbeddings()
output = model.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
assert model.model_name == model.client._model_id
def test_embedding_query() -> None:
document = "foo bar"
model = VertexAIEmbeddings()
output = model.embed_query(document)
assert len(output) == 768
def test_paginated_texts() -> None:
documents = [
"foo bar",
"foo baz",
"bar foo",
"baz foo",
"bar bar",
"foo foo",
"baz baz",
"baz bar",
]
model = VertexAIEmbeddings()
output = model.embed_documents(documents)
assert len(output) == 8
assert len(output[0]) == 768
assert model.model_name == model.client._model_id
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~vectorstores~redis~test_redis_schema.py | import pytest
from langchain.vectorstores.redis.schema import (
FlatVectorField,
HNSWVectorField,
NumericFieldSchema,
RedisModel,
RedisVectorField,
TagFieldSchema,
TextFieldSchema,
read_schema,
)
def test_text_field_schema_creation() -> None:
"""Test creating a text field with default parameters."""
field = TextFieldSchema(name="example")
assert field.name == "example"
assert field.weight == 1 # default value
assert field.no_stem is False # default value
def test_tag_field_schema_creation() -> None:
"""Test creating a tag field with custom parameters."""
field = TagFieldSchema(name="tag", separator="|")
assert field.name == "tag"
assert field.separator == "|"
def test_numeric_field_schema_creation() -> None:
"""Test creating a numeric field with default parameters."""
field = NumericFieldSchema(name="numeric")
assert field.name == "numeric"
assert field.no_index is False # default value
def test_redis_vector_field_validation() -> None:
"""Test validation for RedisVectorField's datatype."""
from langchain.pydantic_v1 import ValidationError
with pytest.raises(ValidationError):
RedisVectorField(
name="vector", dims=128, algorithm="INVALID_ALGO", datatype="INVALID_TYPE"
)
# Test creating a valid RedisVectorField
vector_field = RedisVectorField(
name="vector", dims=128, algorithm="SOME_ALGO", datatype="FLOAT32"
)
assert vector_field.datatype == "FLOAT32"
def test_flat_vector_field_defaults() -> None:
"""Test defaults for FlatVectorField."""
flat_vector_field_data = {
"name": "example",
"dims": 100,
"algorithm": "FLAT",
}
flat_vector = FlatVectorField(**flat_vector_field_data)
assert flat_vector.datatype == "FLOAT32"
assert flat_vector.distance_metric == "COSINE"
assert flat_vector.initial_cap is None
assert flat_vector.block_size is None
def test_flat_vector_field_optional_values() -> None:
"""Test optional values for FlatVectorField."""
flat_vector_field_data = {
"name": "example",
"dims": 100,
"algorithm": "FLAT",
"initial_cap": 1000,
"block_size": 10,
}
flat_vector = FlatVectorField(**flat_vector_field_data)
assert flat_vector.initial_cap == 1000
assert flat_vector.block_size == 10
def test_hnsw_vector_field_defaults() -> None:
"""Test defaults for HNSWVectorField."""
hnsw_vector_field_data = {
"name": "example",
"dims": 100,
"algorithm": "HNSW",
}
hnsw_vector = HNSWVectorField(**hnsw_vector_field_data)
assert hnsw_vector.datatype == "FLOAT32"
assert hnsw_vector.distance_metric == "COSINE"
assert hnsw_vector.initial_cap is None
assert hnsw_vector.m == 16
assert hnsw_vector.ef_construction == 200
assert hnsw_vector.ef_runtime == 10
assert hnsw_vector.epsilon == 0.01
def test_hnsw_vector_field_optional_values() -> None:
"""Test optional values for HNSWVectorField."""
hnsw_vector_field_data = {
"name": "example",
"dims": 100,
"algorithm": "HNSW",
"initial_cap": 2000,
"m": 10,
"ef_construction": 250,
"ef_runtime": 15,
"epsilon": 0.05,
}
hnsw_vector = HNSWVectorField(**hnsw_vector_field_data)
assert hnsw_vector.initial_cap == 2000
assert hnsw_vector.m == 10
assert hnsw_vector.ef_construction == 250
assert hnsw_vector.ef_runtime == 15
assert hnsw_vector.epsilon == 0.05
def test_read_schema_dict_input() -> None:
"""Test read_schema with dict input."""
index_schema = {
"text": [{"name": "content"}],
"tag": [{"name": "tag"}],
"vector": [{"name": "content_vector", "dims": 100, "algorithm": "FLAT"}],
}
output = read_schema(index_schema=index_schema) # type: ignore
assert output == index_schema
def test_redis_model_creation() -> None:
# Test creating a RedisModel with a mixture of fields
redis_model = RedisModel(
text=[TextFieldSchema(name="content")],
tag=[TagFieldSchema(name="tag")],
numeric=[NumericFieldSchema(name="numeric")],
vector=[FlatVectorField(name="flat_vector", dims=128, algorithm="FLAT")],
)
assert redis_model.text[0].name == "content"
assert redis_model.tag[0].name == "tag" # type: ignore
assert redis_model.numeric[0].name == "numeric" # type: ignore
assert redis_model.vector[0].name == "flat_vector" # type: ignore
# Test the content_vector property
with pytest.raises(ValueError):
_ = (
redis_model.content_vector
) # this should fail because there's no field with name 'content_vector_key'
def test_read_schema() -> None:
# Test the read_schema function with invalid input
with pytest.raises(TypeError):
read_schema(index_schema=None) # non-dict and non-str/pathlike input
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~graphs~test_falkordb.py | import unittest
from typing import Any
from unittest.mock import MagicMock, patch
from langchain.graphs import FalkorDBGraph
class TestFalkorDB(unittest.TestCase):
def setUp(self) -> None:
self.host = "localhost"
self.graph = "test_falkordb"
self.port = 6379
@patch("redis.Redis")
def test_init(self, mock_client: Any) -> None:
mock_client.return_value = MagicMock()
FalkorDBGraph(database=self.graph, host=self.host, port=self.port)
@patch("redis.Redis")
def test_execute(self, mock_client: Any) -> None:
mock_client.return_value = MagicMock()
graph = FalkorDBGraph(database=self.graph, host=self.host, port=self.port)
query = "RETURN 1"
result = graph.query(query)
self.assertIsInstance(result, MagicMock)
@patch("redis.Redis")
def test_refresh_schema(self, mock_client: Any) -> None:
mock_client.return_value = MagicMock()
graph = FalkorDBGraph(database=self.graph, host=self.host, port=self.port)
graph.refresh_schema()
self.assertNotEqual(graph.get_schema, "")
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~utilities~test_searchapi.py | """Integration tests for SearchApi"""
import pytest
from langchain.utilities.searchapi import SearchApiAPIWrapper
def test_call() -> None:
"""Test that call gives correct answer."""
search = SearchApiAPIWrapper()
output = search.run("What is the capital of Lithuania?")
assert "Vilnius" in output
def test_results() -> None:
"""Test that call gives correct answer."""
search = SearchApiAPIWrapper()
output = search.results("What is the capital of Lithuania?")
assert "Vilnius" in output["answer_box"]["answer"]
assert "Vilnius" in output["answer_box"]["snippet"]
assert "Vilnius" in output["knowledge_graph"]["description"]
assert "Vilnius" in output["organic_results"][0]["snippet"]
def test_results_with_custom_params() -> None:
"""Test that call gives correct answer with custom params."""
search = SearchApiAPIWrapper()
output = search.results(
"cafeteria",
hl="es",
gl="es",
google_domain="google.es",
location="Madrid, Spain",
)
assert "Madrid" in output["search_information"]["detected_location"]
def test_scholar_call() -> None:
"""Test that call gives correct answer for scholar search."""
search = SearchApiAPIWrapper(engine="google_scholar")
output = search.run("large language models")
assert "state of large language models and their applications" in output
def test_jobs_call() -> None:
"""Test that call gives correct answer for jobs search."""
search = SearchApiAPIWrapper(engine="google_jobs")
output = search.run("AI")
assert "years of experience" in output
@pytest.mark.asyncio
async def test_async_call() -> None:
"""Test that call gives the correct answer."""
search = SearchApiAPIWrapper()
output = await search.arun("What is Obama's full name?")
assert "Barack Hussein Obama II" in output
@pytest.mark.asyncio
async def test_async_results() -> None:
"""Test that call gives the correct answer."""
search = SearchApiAPIWrapper()
output = await search.aresults("What is Obama's full name?")
assert "Barack Hussein Obama II" in output["knowledge_graph"]["description"]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~cohere_rag_retriever.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.pydantic_v1 import Field
from langchain.schema import BaseRetriever, Document, HumanMessage
if TYPE_CHECKING:
from langchain.schema.messages import BaseMessage
def _get_docs(response: Any) -> List[Document]:
return [
Document(page_content=doc["snippet"], metadata=doc)
for doc in response.generation_info["documents"]
]
class CohereRagRetriever(BaseRetriever):
    """Cohere Chat API with RAG retriever."""
top_k: int = 3
"""Number of documents to return."""
connectors: List[Dict] = Field(default_factory=lambda: [{"id": "web-search"}])
"""
When specified, the model's reply will be enriched with information found by
querying each of the connectors (RAG). These will be returned as langchain
documents.
Currently only accepts {"id": "web-search"}.
"""
llm: BaseChatModel
"""Cohere ChatModel to use."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
"""Allow arbitrary types."""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
messages: List[List[BaseMessage]] = [[HumanMessage(content=query)]]
res = self.llm.generate(
messages,
connectors=self.connectors,
callbacks=run_manager.get_child(),
**kwargs,
).generations[0][0]
return _get_docs(res)[: self.top_k]
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
messages: List[List[BaseMessage]] = [[HumanMessage(content=query)]]
res = (
await self.llm.agenerate(
messages,
connectors=self.connectors,
callbacks=run_manager.get_child(),
**kwargs,
)
).generations[0][0]
return _get_docs(res)[: self.top_k]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chat_models~bedrock.py | from typing import Any, Dict, Iterator, List, Optional
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
)
from langchain.chat_models.anthropic import convert_messages_to_prompt_anthropic
from langchain.chat_models.base import BaseChatModel
from langchain.llms.bedrock import BedrockBase
from langchain.pydantic_v1 import Extra
from langchain.schema.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain.schema.output import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain.utilities.anthropic import (
get_num_tokens_anthropic,
get_token_ids_anthropic,
)
class ChatPromptAdapter:
    """Adapter class to prepare the inputs from LangChain into the prompt
    format that the chat model expects.
    """
@classmethod
def convert_messages_to_prompt(
cls, provider: str, messages: List[BaseMessage]
) -> str:
if provider == "anthropic":
prompt = convert_messages_to_prompt_anthropic(messages=messages)
else:
raise NotImplementedError(
f"Provider {provider} model does not support chat."
)
return prompt
class BedrockChat(BaseChatModel, BedrockBase):
"""A chat model that uses the Bedrock API."""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "amazon_bedrock_chat"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
provider = self._get_provider()
prompt = ChatPromptAdapter.convert_messages_to_prompt(
provider=provider, messages=messages
)
for chunk in self._prepare_input_and_invoke_stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
delta = chunk.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
completion = ""
if self.streaming:
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
else:
provider = self._get_provider()
prompt = ChatPromptAdapter.convert_messages_to_prompt(
provider=provider, messages=messages
)
params: Dict[str, Any] = {**kwargs}
if stop:
params["stop_sequences"] = stop
completion = self._prepare_input_and_invoke(
prompt=prompt, stop=stop, run_manager=run_manager, **params
)
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
def get_num_tokens(self, text: str) -> int:
if self._model_is_anthropic:
return get_num_tokens_anthropic(text)
else:
return super().get_num_tokens(text)
def get_token_ids(self, text: str) -> List[int]:
if self._model_is_anthropic:
return get_token_ids_anthropic(text)
else:
return super().get_token_ids(text)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~wikipedia.py | from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaLoader(BaseLoader):
"""Load from `Wikipedia`.
The hard limit on the number of downloaded Documents is 300 for now.
Each wiki page represents one Document.
"""
def __init__(
self,
query: str,
lang: str = "en",
load_max_docs: Optional[int] = 100,
load_all_available_meta: Optional[bool] = False,
doc_content_chars_max: Optional[int] = 4000,
):
"""
Initializes a new instance of the WikipediaLoader class.
Args:
query (str): The query string to search on Wikipedia.
lang (str, optional): The language code for the Wikipedia language edition.
Defaults to "en".
load_max_docs (int, optional): The maximum number of documents to load.
Defaults to 100.
load_all_available_meta (bool, optional): Indicates whether to load all
available metadata for each document. Defaults to False.
doc_content_chars_max (int, optional): The maximum number of characters
for the document content. Defaults to 4000.
"""
self.query = query
self.lang = lang
self.load_max_docs = load_max_docs
self.load_all_available_meta = load_all_available_meta
self.doc_content_chars_max = doc_content_chars_max
def load(self) -> List[Document]:
"""
Loads the query result from Wikipedia into a list of Documents.
Returns:
List[Document]: A list of Document objects representing the loaded
Wikipedia pages.
"""
client = WikipediaAPIWrapper(
lang=self.lang,
top_k_results=self.load_max_docs,
load_all_available_meta=self.load_all_available_meta,
doc_content_chars_max=self.doc_content_chars_max,
)
docs = client.load(self.query)
return docs
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~evaluation~comparison~eval_chain.py | """Base classes for comparing the output of two models."""
from __future__ import annotations
import logging
import re
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.manager import Callbacks
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.llm import LLMChain
from langchain.chat_models.azure_openai import AzureChatOpenAI
from langchain.chat_models.openai import ChatOpenAI
from langchain.evaluation.comparison.prompt import (
COMPARISON_TEMPLATE,
COMPARISON_TEMPLATE_WITH_REFERENCE,
CRITERIA_INSTRUCTIONS,
)
from langchain.evaluation.criteria.eval_chain import (
CRITERIA_TYPE,
Criteria,
)
from langchain.evaluation.schema import LLMEvalChain, PairwiseStringEvaluator
from langchain.prompts.prompt import PromptTemplate
from langchain.pydantic_v1 import Extra, Field
from langchain.schema import RUN_KEY, BaseOutputParser
from langchain.schema.language_model import BaseLanguageModel
logger = logging.getLogger(__name__)
_FIND_DOUBLE_BRACKETS = re.compile(r"\[\[(.*?)\]\]")
_SUPPORTED_CRITERIA = {
Criteria.CONCISENESS: "Is the submission concise and to the point?",
Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?",
Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?",
Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?",
Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?",
Criteria.MALICIOUSNESS: "Is the submission malicious in any way?",
Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?",
Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?",
Criteria.MISOGYNY: "Is the submission misogynistic or sexist?",
Criteria.CRIMINALITY: "Is the submission criminal in any way?",
Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?",
Criteria.DEPTH: "Does the submission demonstrate depth of thought?",
Criteria.CREATIVITY: "Does the submission demonstrate novelty or unique ideas?",
Criteria.DETAIL: "Does the submission demonstrate attention to detail?",
}
def resolve_pairwise_criteria(
criteria: Optional[Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]]]
) -> dict:
"""Resolve the criteria for the pairwise evaluator.
Args:
criteria (Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]], optional):
The criteria to use.
Returns:
dict: The resolved criteria.
"""
if criteria is None:
_default_criteria = [
Criteria.HELPFULNESS,
Criteria.RELEVANCE,
Criteria.CORRECTNESS,
Criteria.DEPTH,
]
return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria}
elif isinstance(criteria, Criteria):
criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
elif isinstance(criteria, str):
if criteria in _SUPPORTED_CRITERIA:
criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
else:
criteria_ = {criteria: ""}
elif isinstance(criteria, ConstitutionalPrinciple):
criteria_ = {criteria.name: criteria.critique_request}
elif isinstance(criteria, (list, tuple)):
criteria_ = {
k: v
for criterion in criteria
for k, v in resolve_pairwise_criteria(criterion).items()
}
else:
if not criteria:
raise ValueError(
"Criteria cannot be empty. "
"Please provide a criterion name or a mapping of the criterion name"
" to its description."
)
criteria_ = dict(criteria)
return criteria_
class PairwiseStringResultOutputParser(BaseOutputParser[dict]):
"""A parser for the output of the PairwiseStringEvalChain.
Attributes:
_type (str): The type of the output parser.
"""
@property
def _type(self) -> str:
"""Return the type of the output parser.
Returns:
str: The type of the output parser.
"""
return "pairwise_string_result"
def parse(self, text: str) -> Dict[str, Any]:
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
Raises:
ValueError: If the verdict is invalid.
"""
match = _FIND_DOUBLE_BRACKETS.search(text)
if match:
verdict = match.group(1)
if not match or verdict not in {"A", "B", "C"}:
raise ValueError(
f"Invalid output: {text}. "
"Output must contain a double bracketed string\
with the verdict 'A', 'B', or 'C'."
)
# C means the models are tied. Return 'None' meaning no preference
verdict_ = None if verdict == "C" else verdict
score = {
"A": 1,
"B": 0,
"C": 0.5,
}[verdict]
return {
"reasoning": text,
"value": verdict_,
"score": score,
}
class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
"""A chain for comparing two outputs, such as the outputs
of two models, prompts, or outputs of a single model on similar inputs.
Attributes:
output_parser (BaseOutputParser): The output parser for the chain.
Example:
>>> from langchain.chat_models import ChatOpenAI
>>> from langchain.evaluation.comparison import PairwiseStringEvalChain
>>> llm = ChatOpenAI(temperature=0, model_name="gpt-4")
>>> chain = PairwiseStringEvalChain.from_llm(llm=llm)
>>> result = chain.evaluate_string_pairs(
... input = "What is the chemical formula for water?",
... prediction = "H2O",
... prediction_b = (
... "The chemical formula for water is H2O, which means"
        ...         " there are two hydrogen atoms and one oxygen atom."
        ...     ),
        ...     reference = "The chemical formula for water is H2O.",
... )
>>> print(result)
# {
# "value": "B",
        #    "reasoning": "Both responses accurately state"
# " that the chemical formula for water is H2O."
# " However, Response B provides additional information"
        #       " by explaining what the formula means.\\n[[B]]"
# }
"""
output_key: str = "results" #: :meta private:
output_parser: BaseOutputParser = Field(
default_factory=PairwiseStringResultOutputParser
)
class Config:
"""Configuration for the PairwiseStringEvalChain."""
extra = Extra.ignore
@property
def requires_reference(self) -> bool:
"""Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return False
@property
def requires_input(self) -> bool:
"""Return whether the chain requires an input.
Returns:
bool: True if the chain requires an input, False otherwise.
"""
return True
@property
def _skip_reference_warning(self) -> str:
"""Return the warning to show when reference is ignored.
Returns:
str: The warning to show when reference is ignored.
"""
return (
f"Ignoring reference in {self.__class__.__name__}, as it is not expected."
"\nTo use a reference, use the LabeledPairwiseStringEvalChain"
" (EvaluatorType.LABELED_PAIRWISE_STRING) instead."
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
prompt: Optional[PromptTemplate] = None,
criteria: Optional[Union[CRITERIA_TYPE, str]] = None,
**kwargs: Any,
) -> PairwiseStringEvalChain:
"""Initialize the PairwiseStringEvalChain from an LLM.
Args:
llm (BaseChatModel): The LLM to use (GPT-4 recommended).
prompt (PromptTemplate, optional): The prompt to use.
**kwargs (Any): Additional keyword arguments.
Returns:
PairwiseStringEvalChain: The initialized PairwiseStringEvalChain.
Raises:
ValueError: If the input variables are not as expected.
"""
if not (
isinstance(llm, (ChatOpenAI, AzureChatOpenAI))
and llm.model_name.startswith("gpt-4")
):
logger.warning(
"This chain was only tested with GPT-4. \
Performance may be significantly worse with other models."
)
expected_input_vars = {"prediction", "prediction_b", "input", "criteria"}
prompt_ = prompt or COMPARISON_TEMPLATE.partial(reference="")
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
criteria_ = resolve_pairwise_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" if v else k for k, v in criteria_.items())
criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else ""
return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs)
def _prepare_input(
self,
prediction: str,
prediction_b: str,
input: Optional[str],
reference: Optional[str],
) -> dict:
"""Prepare the input for the chain.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
reference (str, optional): The reference string, if any.
Returns:
dict: The prepared input for the chain.
"""
input_ = {
"prediction": prediction,
"prediction_b": prediction_b,
"input": input,
}
if self.requires_reference:
input_["reference"] = reference
return input_
def _prepare_output(self, result: dict) -> dict:
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed
def _evaluate_string_pairs(
self,
*,
prediction: str,
prediction_b: str,
input: Optional[str] = None,
reference: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Evaluate whether output A is preferred to output B.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- value: The preference value, which is either 'A', 'B', or None
for no preference.
- score: The preference score, which is 1 for 'A', 0 for 'B',
and 0.5 for None.
"""
input_ = self._prepare_input(prediction, prediction_b, input, reference)
result = self(
inputs=input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
async def _aevaluate_string_pairs(
self,
*,
prediction: str,
prediction_b: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate whether output A is preferred to output B.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- value: The preference value, which is either 'A', 'B', or None
for no preference.
- score: The preference score, which is 1 for 'A', 0 for 'B',
and 0.5 for None.
"""
input_ = self._prepare_input(prediction, prediction_b, input, reference)
result = await self.acall(
inputs=input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain):
"""A chain for comparing two outputs, such as the outputs
of two models, prompts, or outputs of a single model on similar inputs,
with labeled preferences.
Attributes:
output_parser (BaseOutputParser): The output parser for the chain.
"""
@property
def requires_reference(self) -> bool:
"""Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return True
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
prompt: Optional[PromptTemplate] = None,
criteria: Optional[Union[CRITERIA_TYPE, str]] = None,
**kwargs: Any,
) -> PairwiseStringEvalChain:
"""Initialize the LabeledPairwiseStringEvalChain from an LLM.
Args:
llm (BaseLanguageModel): The LLM to use.
prompt (PromptTemplate, optional): The prompt to use.
criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use.
**kwargs (Any): Additional keyword arguments.
Returns:
LabeledPairwiseStringEvalChain: The initialized LabeledPairwiseStringEvalChain.
Raises:
ValueError: If the input variables are not as expected.
""" # noqa: E501
expected_input_vars = {
"prediction",
"prediction_b",
"input",
"reference",
"criteria",
}
prompt_ = prompt or COMPARISON_TEMPLATE_WITH_REFERENCE
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
criteria_ = resolve_pairwise_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items())
criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else ""
return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_facebook_chat.py | from pathlib import Path
from langchain.document_loaders import FacebookChatLoader
def test_facebook_chat_loader() -> None:
"""Test FacebookChatLoader."""
file_path = Path(__file__).parent.parent / "examples/facebook_chat.json"
loader = FacebookChatLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["source"] == str(file_path)
assert docs[0].page_content == (
"User 2 on 2023-02-05 13:46:11: Bye!\n\n"
"User 1 on 2023-02-05 13:43:55: Oh no worries! Bye\n\n"
"User 2 on 2023-02-05 13:24:37: No Im sorry it was my mistake, "
"the blue one is not for sale\n\n"
"User 1 on 2023-02-05 13:05:40: I thought you were selling the blue one!\n\n"
"User 1 on 2023-02-05 13:05:09: Im not interested in this bag. "
"Im interested in the blue one!\n\n"
"User 2 on 2023-02-05 13:04:28: Here is $129\n\n"
"User 2 on 2023-02-05 13:04:05: Online is at least $100\n\n"
"User 1 on 2023-02-05 12:59:59: How much do you want?\n\n"
"User 2 on 2023-02-05 08:17:56: Goodmorning! $50 is too low.\n\n"
"User 1 on 2023-02-05 00:17:02: Hi! Im interested in your bag. "
"Im offering $50. Let me know if you are interested. Thanks!\n\n"
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~retrievers~document_compressors~test_cohere_reranker.py | """Test the cohere reranker."""
from langchain.retrievers.document_compressors.cohere_rerank import CohereRerank
def test_cohere_reranker_init() -> None:
"""Test the cohere reranker initializes correctly."""
CohereRerank()
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~schema~runnable~configurable.py | from __future__ import annotations
import enum
import threading
from abc import abstractmethod
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Type,
Union,
cast,
)
from weakref import WeakValueDictionary
from langchain.pydantic_v1 import BaseModel
from langchain.schema.runnable.base import Runnable, RunnableSerializable
from langchain.schema.runnable.config import (
RunnableConfig,
get_config_list,
get_executor_for_config,
)
from langchain.schema.runnable.utils import (
AnyConfigurableField,
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
ConfigurableFieldSpec,
Input,
Output,
gather_with_concurrency,
)
class DynamicRunnable(RunnableSerializable[Input, Output]):
"""A Serializable Runnable that can be dynamically configured."""
default: RunnableSerializable[Input, Output]
class Config:
arbitrary_types_allowed = True
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
@property
def InputType(self) -> Type[Input]:
return self.default.InputType
@property
def OutputType(self) -> Type[Output]:
return self.default.OutputType
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
return self._prepare(config).get_input_schema(config)
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
return self._prepare(config).get_output_schema(config)
@abstractmethod
def _prepare(
self, config: Optional[RunnableConfig] = None
) -> Runnable[Input, Output]:
...
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
return self._prepare(config).invoke(input, config, **kwargs)
async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
return await self._prepare(config).ainvoke(input, config, **kwargs)
def batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
configs = get_config_list(config, len(inputs))
prepared = [self._prepare(c) for c in configs]
if all(p is self.default for p in prepared):
return self.default.batch(
inputs, config, return_exceptions=return_exceptions, **kwargs
)
if not inputs:
return []
configs = get_config_list(config, len(inputs))
def invoke(
bound: Runnable[Input, Output],
input: Input,
config: RunnableConfig,
) -> Union[Output, Exception]:
if return_exceptions:
try:
return bound.invoke(input, config, **kwargs)
except Exception as e:
return e
else:
return bound.invoke(input, config, **kwargs)
# If there's only one input, don't bother with the executor
if len(inputs) == 1:
return cast(List[Output], [invoke(prepared[0], inputs[0], configs[0])])
with get_executor_for_config(configs[0]) as executor:
return cast(
List[Output], list(executor.map(invoke, prepared, inputs, configs))
)
async def abatch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
configs = get_config_list(config, len(inputs))
prepared = [self._prepare(c) for c in configs]
if all(p is self.default for p in prepared):
return await self.default.abatch(
inputs, config, return_exceptions=return_exceptions, **kwargs
)
if not inputs:
return []
configs = get_config_list(config, len(inputs))
async def ainvoke(
bound: Runnable[Input, Output],
input: Input,
config: RunnableConfig,
) -> Union[Output, Exception]:
if return_exceptions:
try:
return await bound.ainvoke(input, config, **kwargs)
except Exception as e:
return e
else:
return await bound.ainvoke(input, config, **kwargs)
coros = map(ainvoke, prepared, inputs, configs)
return await gather_with_concurrency(configs[0].get("max_concurrency"), *coros)
def stream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
return self._prepare(config).stream(input, config, **kwargs)
async def astream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
async for chunk in self._prepare(config).astream(input, config, **kwargs):
yield chunk
def transform(
self,
input: Iterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
return self._prepare(config).transform(input, config, **kwargs)
async def atransform(
self,
input: AsyncIterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
async for chunk in self._prepare(config).atransform(input, config, **kwargs):
yield chunk
class RunnableConfigurableFields(DynamicRunnable[Input, Output]):
    """A Runnable whose constituent fields can be configured at runtime."""
fields: Dict[str, AnyConfigurableField]
@property
def config_specs(self) -> Sequence[ConfigurableFieldSpec]:
return [
ConfigurableFieldSpec(
id=spec.id,
name=spec.name,
description=spec.description
or self.default.__fields__[field_name].field_info.description,
annotation=spec.annotation
or self.default.__fields__[field_name].annotation,
default=getattr(self.default, field_name),
)
if isinstance(spec, ConfigurableField)
else make_options_spec(
spec, self.default.__fields__[field_name].field_info.description
)
for field_name, spec in self.fields.items()
]
def configurable_fields(
self, **kwargs: AnyConfigurableField
) -> RunnableSerializable[Input, Output]:
return self.default.configurable_fields(**{**self.fields, **kwargs})
def _prepare(
self, config: Optional[RunnableConfig] = None
) -> Runnable[Input, Output]:
config = config or {}
specs_by_id = {spec.id: (key, spec) for key, spec in self.fields.items()}
configurable_fields = {
specs_by_id[k][0]: v
for k, v in config.get("configurable", {}).items()
if k in specs_by_id and isinstance(specs_by_id[k][1], ConfigurableField)
}
configurable_single_options = {
k: v.options[(config.get("configurable", {}).get(v.id) or v.default)]
for k, v in self.fields.items()
if isinstance(v, ConfigurableFieldSingleOption)
}
configurable_multi_options = {
k: [
v.options[o]
for o in config.get("configurable", {}).get(v.id, v.default)
]
for k, v in self.fields.items()
if isinstance(v, ConfigurableFieldMultiOption)
}
configurable = {
**configurable_fields,
**configurable_single_options,
**configurable_multi_options,
}
if configurable:
return self.default.__class__(**{**self.default.__dict__, **configurable})
else:
return self.default
# Before Python 3.11 native StrEnum is not available
class StrEnum(str, enum.Enum):
"""A string enum."""
pass
_enums_for_spec: WeakValueDictionary[
Union[
ConfigurableFieldSingleOption, ConfigurableFieldMultiOption, ConfigurableField
],
Type[StrEnum],
] = WeakValueDictionary()
_enums_for_spec_lock = threading.Lock()
class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
    """A Runnable that selects between configured alternatives at runtime."""
which: ConfigurableField
alternatives: Dict[
str,
Union[Runnable[Input, Output], Callable[[], Runnable[Input, Output]]],
]
default_key: str = "default"
@property
def config_specs(self) -> Sequence[ConfigurableFieldSpec]:
with _enums_for_spec_lock:
if which_enum := _enums_for_spec.get(self.which):
pass
else:
which_enum = StrEnum( # type: ignore[call-overload]
self.which.name or self.which.id,
(
(v, v)
for v in list(self.alternatives.keys()) + [self.default_key]
),
)
_enums_for_spec[self.which] = cast(Type[StrEnum], which_enum)
return [
ConfigurableFieldSpec(
id=self.which.id,
name=self.which.name,
description=self.which.description,
annotation=which_enum,
default=self.default_key,
),
*self.default.config_specs,
] + [
s
for alt in self.alternatives.values()
if isinstance(alt, RunnableSerializable)
for s in alt.config_specs
]
def configurable_fields(
self, **kwargs: AnyConfigurableField
) -> RunnableSerializable[Input, Output]:
return self.__class__(
which=self.which,
default=self.default.configurable_fields(**kwargs),
alternatives=self.alternatives,
)
def _prepare(
self, config: Optional[RunnableConfig] = None
) -> Runnable[Input, Output]:
config = config or {}
which = config.get("configurable", {}).get(self.which.id, self.default_key)
if which == self.default_key:
return self.default
elif which in self.alternatives:
alt = self.alternatives[which]
if isinstance(alt, Runnable):
return alt
else:
return alt()
else:
raise ValueError(f"Unknown alternative: {which}")
def make_options_spec(
spec: Union[ConfigurableFieldSingleOption, ConfigurableFieldMultiOption],
description: Optional[str],
) -> ConfigurableFieldSpec:
"""Make a ConfigurableFieldSpec for a ConfigurableFieldSingleOption or
ConfigurableFieldMultiOption."""
with _enums_for_spec_lock:
if enum := _enums_for_spec.get(spec):
pass
else:
enum = StrEnum( # type: ignore[call-overload]
spec.name or spec.id,
((v, v) for v in list(spec.options.keys())),
)
_enums_for_spec[spec] = cast(Type[StrEnum], enum)
if isinstance(spec, ConfigurableFieldSingleOption):
return ConfigurableFieldSpec(
id=spec.id,
name=spec.name,
description=spec.description or description,
annotation=enum,
default=spec.default,
)
else:
return ConfigurableFieldSpec(
id=spec.id,
name=spec.name,
description=spec.description or description,
annotation=Sequence[enum], # type: ignore[valid-type]
default=spec.default,
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~document_compressors~chain_extract.py | """DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
import asyncio
from typing import Any, Callable, Dict, Optional, Sequence
from langchain.callbacks.manager import Callbacks
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
from langchain.schema import BaseOutputParser, Document
from langchain.schema.language_model import BaseLanguageModel
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
    """Parse outputs, treating a designated 'no output' sentinel as empty."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""Document compressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: LLMChain
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output = self.llm_chain.predict_and_parse(**_input, callbacks=callbacks)
if len(output) == 0:
continue
compressed_docs.append(Document(page_content=output, metadata=doc.metadata))
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
outputs = await asyncio.gather(
*[
self.llm_chain.apredict_and_parse(
**self.get_input(query, doc), callbacks=callbacks
)
for doc in documents
]
)
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata)
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {}))
return cls(llm_chain=llm_chain, get_input=_get_input)
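# Usage sketch: an illustrative compression pass over two in-memory documents.
# The LLM choice and document texts are placeholders.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI

    compressor = LLMChainExtractor.from_llm(ChatOpenAI(temperature=0))
    docs = [
        Document(page_content="LangChain is a framework for building LLM apps."),
        Document(page_content="Bananas are rich in potassium."),
    ]
    # Only content relevant to the query is kept; documents whose extraction
    # comes back as the NO_OUTPUT sentinel are dropped entirely.
    for doc in compressor.compress_documents(docs, query="What is LangChain?"):
        print(doc.page_content)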
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~document_loaders~test_rspace_loader.py | import unittest
from langchain.document_loaders.rspace import RSpaceLoader
class TestRSpaceLoader(unittest.TestCase):
url = "https://community.researchspace.com"
api_key = "myapikey"
global_id = "SD12345"
def test_valid_arguments(self) -> None:
loader = RSpaceLoader(
url=TestRSpaceLoader.url,
api_key=TestRSpaceLoader.api_key,
global_id=TestRSpaceLoader.global_id,
)
        self.assertEqual(TestRSpaceLoader.url, loader.url)
        self.assertEqual(TestRSpaceLoader.api_key, loader.api_key)
        self.assertEqual(TestRSpaceLoader.global_id, loader.global_id)
def test_missing_apikey_raises_validation_error(self) -> None:
with self.assertRaises(ValueError) as cm:
RSpaceLoader(url=TestRSpaceLoader.url, global_id=TestRSpaceLoader.global_id)
e = cm.exception
self.assertRegex(str(e), r"Did not find api_key")
def test_missing_url_raises_validation_error(self) -> None:
with self.assertRaises(ValueError) as cm:
RSpaceLoader(
api_key=TestRSpaceLoader.api_key, global_id=TestRSpaceLoader.global_id
)
e = cm.exception
self.assertRegex(str(e), r"Did not find url")
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~runnables~test_openai_functions.py | from typing import Any, List, Optional
from pytest_mock import MockerFixture
from syrupy import SnapshotAssertion
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel
from langchain.runnables.openai_functions import OpenAIFunctionsRouter
from langchain.schema import ChatResult
from langchain.schema.messages import AIMessage, BaseMessage
from langchain.schema.output import ChatGeneration
class FakeChatOpenAI(BaseChatModel):
@property
def _llm_type(self) -> str:
return "fake-openai-chat-model"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "accept",
"arguments": '{\n "draft": "turtles"\n}',
}
},
)
)
]
)
def test_openai_functions_router(
snapshot: SnapshotAssertion, mocker: MockerFixture
) -> None:
revise = mocker.Mock(
side_effect=lambda kw: f'Revised draft: no more {kw["notes"]}!'
)
accept = mocker.Mock(side_effect=lambda kw: f'Accepted draft: {kw["draft"]}!')
router = OpenAIFunctionsRouter(
{
"revise": revise,
"accept": accept,
},
functions=[
{
"name": "revise",
"description": "Sends the draft for revision.",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft.",
"parameters": {
"type": "object",
"properties": {
"draft": {
"type": "string",
"description": "The draft to accept.",
},
},
},
},
],
)
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke("Something about turtles?") == "Accepted draft: turtles!"
revise.assert_not_called()
accept.assert_called_once_with({"draft": "turtles"})
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_scann.py | """Test ScaNN functionality."""
import datetime
import tempfile
import numpy as np
import pytest
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.vectorstores.scann import ScaNN, dependable_scann_import, normalize
from langchain.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
def test_scann() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_scann_vector_mips_l2() -> None:
"""Test vector similarity with MIPS and L2."""
texts = ["foo", "bar", "baz"]
euclidean_search = ScaNN.from_texts(texts, FakeEmbeddings())
output = euclidean_search.similarity_search_with_score("foo", k=1)
expected_euclidean = [(Document(page_content="foo", metadata={}), 0.0)]
assert output == expected_euclidean
mips_search = ScaNN.from_texts(
texts,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT,
normalize_L2=True,
)
output = mips_search.similarity_search_with_score("foo", k=1)
expected_mips = [(Document(page_content="foo", metadata={}), 1.0)]
assert output == expected_mips
def test_scann_with_config() -> None:
"""Test ScaNN with approximate search config."""
texts = [str(i) for i in range(10000)]
# Create a config with dimension = 10, k = 10.
# Tree: search 10 leaves in a search tree of 100 leaves.
# Quantization: uses 16-centroid quantizer every 2 dimension.
# Reordering: reorder top 100 results.
scann_config = (
dependable_scann_import()
.scann_ops_pybind.builder(np.zeros(shape=(0, 10)), 10, "squared_l2")
.tree(num_leaves=100, num_leaves_to_search=10)
.score_ah(2)
.reorder(100)
.create_config()
)
mips_search = ScaNN.from_texts(
texts,
ConsistentFakeEmbeddings(),
scann_config=scann_config,
distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT,
normalize_L2=True,
)
output = mips_search.similarity_search_with_score("42", k=1)
expected = [(Document(page_content="42", metadata={}), 0.0)]
assert output == expected
def test_scann_vector_sim() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content="foo")]
def test_scann_vector_sim_with_score_threshold() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2)
assert output == [Document(page_content="foo")]
def test_similarity_search_with_score_by_vector() -> None:
"""Test vector similarity with score by vector."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
assert len(output) == 1
assert output[0][0] == Document(page_content="foo")
def test_similarity_search_with_score_by_vector_with_score_threshold() -> None:
"""Test vector similarity with score by vector."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_with_score_by_vector(
query_vec,
k=2,
score_threshold=0.2,
)
assert len(output) == 1
assert output[0][0] == Document(page_content="foo")
assert output[0][1] < 0.2
def test_scann_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_scann_with_metadatas_and_filter() -> None:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1, filter={"page": 1})
assert output == [Document(page_content="bar", metadata={"page": 1})]
def test_scann_with_metadatas_and_list_filter() -> None:
texts = ["foo", "bar", "baz", "foo", "qux"]
metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
docsearch.index_to_docstore_id[3]: Document(
page_content="foo", metadata={"page": 3}
),
docsearch.index_to_docstore_id[4]: Document(
page_content="qux", metadata={"page": 3}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = docsearch.similarity_search("foo", k=1, filter={"page": [0, 1, 2]})
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_scann_search_not_found() -> None:
"""Test what happens when document is not found."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
# Get rid of the docstore to purposefully induce errors.
docsearch.docstore = InMemoryDocstore({})
with pytest.raises(ValueError):
docsearch.similarity_search("foo")
def test_scann_local_save_load() -> None:
"""Test end to end serialization."""
texts = ["foo", "bar", "baz"]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder:
docsearch.save_local(temp_folder)
new_docsearch = ScaNN.load_local(temp_folder, FakeEmbeddings())
assert new_docsearch.index is not None
def test_scann_normalize_l2() -> None:
"""Test normalize L2."""
texts = ["foo", "bar", "baz"]
emb = np.array(FakeEmbeddings().embed_documents(texts))
# Test norm is 1.
np.testing.assert_allclose(1, np.linalg.norm(normalize(emb), axis=-1))
# Test that there is no NaN after normalization.
np.testing.assert_array_equal(False, np.isnan(normalize(np.zeros(10))))
| [] |