date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~vectorstores~test_vectara.py | import tempfile
import urllib.request
from langchain.docstore.document import Document
from langchain.vectorstores.vectara import Vectara
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
# For this test to run properly, please set up as follows:
# 1. Create a corpus in Vectara, with a filter attribute called "test_num".
# 2. Create an API_KEY for this corpus with permissions for query and indexing.
# 3. Set up the environment variables:
# VECTARA_API_KEY, VECTARA_CORPUS_ID and VECTARA_CUSTOMER_ID
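# Illustrative (hedged) setup sketch; the placeholder values below are
# assumptions and not part of the original test file:
#   import os
#   os.environ["VECTARA_API_KEY"] = "<api-key>"
#   os.environ["VECTARA_CORPUS_ID"] = "<corpus-id>"
#   os.environ["VECTARA_CUSTOMER_ID"] = "<customer-id>"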
def get_abbr(s: str) -> str:
words = s.split(" ") # Split the string into words
first_letters = [word[0] for word in words] # Extract the first letter of each word
return "".join(first_letters) # Join the first letters into a single string
def test_vectara_add_documents() -> None:
"""Test end to end construction and search."""
# start with some initial texts
texts = ["grounded generation", "retrieval augmented generation", "data privacy"]
docsearch: Vectara = Vectara.from_texts(
texts,
embedding=FakeEmbeddings(),
metadatas=[
{"abbr": "gg", "test_num": "1"},
{"abbr": "rag", "test_num": "1"},
{"abbr": "dp", "test_num": "1"},
],
doc_metadata={"test_num": "1"},
)
# then add some additional documents
new_texts = ["large language model", "information retrieval", "question answering"]
docsearch.add_documents(
[Document(page_content=t, metadata={"abbr": get_abbr(t)}) for t in new_texts],
doc_metadata={"test_num": "1"},
)
# finally do a similarity search to see if all works okay
output = docsearch.similarity_search(
"large language model",
k=2,
n_sentence_context=0,
filter="doc.test_num = 1",
)
assert output[0].page_content == "large language model"
assert output[0].metadata == {"abbr": "llm"}
assert output[1].page_content == "information retrieval"
assert output[1].metadata == {"abbr": "ir"}
def test_vectara_from_files() -> None:
"""Test end to end construction and search."""
# download documents to local storage and then upload as files
# attention paper and deep learning book
urls = [
("https://arxiv.org/pdf/1706.03762.pdf"),
(
"https://www.microsoft.com/en-us/research/wp-content/uploads/"
"2016/02/Final-DengYu-NOW-Book-DeepLearn2013-ForLecturesJuly2.docx"
),
]
files_list = []
for url in urls:
name = tempfile.NamedTemporaryFile().name
urllib.request.urlretrieve(url, name)
files_list.append(name)
docsearch: Vectara = Vectara.from_files(
files=files_list,
embedding=FakeEmbeddings(),
metadatas=[{"url": url, "test_num": "2"} for url in urls],
)
# finally do a similarity search to see if all works okay
output = docsearch.similarity_search(
"By the commonly adopted machine learning tradition",
k=1,
n_sentence_context=0,
filter="doc.test_num = 2",
)
print(output)
assert output[0].page_content == (
"By the commonly adopted machine learning tradition "
"(e.g., Chapter 28 in Murphy, 2012; Deng and Li, 2013), it may be natural "
"to just classify deep learning techniques into deep discriminative models "
"(e.g., DNNs) and deep probabilistic generative models (e.g., DBN, Deep "
"Boltzmann Machine (DBM))."
)
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~utilities~wikipedia.py | """Util that calls Wikipedia."""
import logging
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
WIKIPEDIA_MAX_QUERY_LENGTH = 300
class WikipediaAPIWrapper(BaseModel):
"""Wrapper around WikipediaAPI.
To use, you should have the ``wikipedia`` python package installed.
This wrapper will use the Wikipedia API to conduct searches and
fetch page summaries. By default, it will return the page summaries
of the top-k results.
It limits the Document content to doc_content_chars_max characters.
"""
wiki_client: Any #: :meta private:
top_k_results: int = 3
lang: str = "en"
load_all_available_meta: bool = False
doc_content_chars_max: int = 4000
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import wikipedia
wikipedia.set_lang(values["lang"])
values["wiki_client"] = wikipedia
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
return values
def run(self, query: str) -> str:
"""Run Wikipedia search and get page summaries."""
page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
summaries = []
for page_title in page_titles[: self.top_k_results]:
if wiki_page := self._fetch_page(page_title):
if summary := self._formatted_page_summary(page_title, wiki_page):
summaries.append(summary)
if not summaries:
return "No good Wikipedia Search Result was found"
return "\n\n".join(summaries)[: self.doc_content_chars_max]
@staticmethod
def _formatted_page_summary(page_title: str, wiki_page: Any) -> Optional[str]:
return f"Page: {page_title}\nSummary: {wiki_page.summary}"
def _page_to_document(self, page_title: str, wiki_page: Any) -> Document:
main_meta = {
"title": page_title,
"summary": wiki_page.summary,
"source": wiki_page.url,
}
add_meta = (
{
"categories": wiki_page.categories,
"page_url": wiki_page.url,
"image_urls": wiki_page.images,
"related_titles": wiki_page.links,
"parent_id": wiki_page.parent_id,
"references": wiki_page.references,
"revision_id": wiki_page.revision_id,
"sections": wiki_page.sections,
}
if self.load_all_available_meta
else {}
)
doc = Document(
page_content=wiki_page.content[: self.doc_content_chars_max],
metadata={
**main_meta,
**add_meta,
},
)
return doc
def _fetch_page(self, page: str) -> Optional[str]:
try:
return self.wiki_client.page(title=page, auto_suggest=False)
except (
self.wiki_client.exceptions.PageError,
self.wiki_client.exceptions.DisambiguationError,
):
return None
def load(self, query: str) -> List[Document]:
"""
Run Wikipedia search and get the article text plus the meta information.
Returns: a list of documents.
"""
page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
docs = []
for page_title in page_titles[: self.top_k_results]:
if wiki_page := self._fetch_page(page_title):
if doc := self._page_to_document(page_title, wiki_page):
docs.append(doc)
return docs
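# Editor's hedged usage sketch (not part of the original module). Assuming the
# `wikipedia` package is installed, the wrapper might be used as follows:
#   wiki = WikipediaAPIWrapper(top_k_results=2, doc_content_chars_max=1000)
#   print(wiki.run("Alan Turing"))   # newline-joined page summaries
#   docs = wiki.load("Alan Turing")  # List[Document] with content and metadata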
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~callbacks~manager.py | from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.callbacks.tracers.wandb import WandbTracer
from langchain.schema import (
AgentAction,
AgentFinish,
Document,
LLMResult,
)
from langchain.schema.messages import BaseMessage, get_buffer_string
logger = logging.getLogger(__name__)
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
tracing_callback_var: ContextVar[
Optional[LangChainTracerV1]
] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
wandb_tracing_callback_var: ContextVar[
Optional[WandbTracer]
] = ContextVar( # noqa: E501
"tracing_wandb_callback", default=None
)
tracing_v2_callback_var: ContextVar[
Optional[LangChainTracer]
] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
def _get_debug() -> bool:
return langchain.debug
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get the OpenAI callback handler in a context manager.
which conveniently exposes token and cost information.
Returns:
OpenAICallbackHandler: The OpenAI callback handler.
Example:
>>> with get_openai_callback() as cb:
... # Use the OpenAI callback handler
"""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get the Deprecated LangChainTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
TracerSessionV1: The LangChainTracer session.
Example:
>>> with tracing_enabled() as session:
... # Use the LangChainTracer session
"""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
@contextmanager
def wandb_tracing_enabled(
session_name: str = "default",
) -> Generator[None, None, None]:
"""Get the WandbTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
None
Example:
>>> with wandb_tracing_enabled() as session:
... # Use the WandbTracer session
"""
cb = WandbTracer()
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
project_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
) -> Generator[None, None, None]:
"""Instruct LangChain to log all runs in context to LangSmith.
Args:
project_name (str, optional): The name of the project.
Defaults to "default".
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
Returns:
None
Example:
>>> with tracing_v2_enabled():
... # LangChain code will automatically be traced
"""
# Issue a warning that this is experimental
warnings.warn(
"The tracing v2 API is in development. "
"This is not yet stable and may change in the future."
)
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
example_id=example_id,
project_name=project_name,
)
tracing_v2_callback_var.set(cb)
yield
tracing_v2_callback_var.set(None)
@contextmanager
def trace_as_chain_group(
group_name: str,
*,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
tags: Optional[List[str]] = None,
) -> Generator[CallbackManager, None, None]:
"""Get a callback manager for a chain group in a context manager.
Useful for grouping different calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
CallbackManager: The callback manager for the chain group.
Example:
>>> with trace_as_chain_group("group_name") as manager:
... # Use the callback manager for the chain group
... llm.predict("Foo", callbacks=manager)
"""
cb = LangChainTracer(
project_name=project_name,
example_id=example_id,
)
cm = CallbackManager.configure(
inheritable_callbacks=[cb],
inheritable_tags=tags,
)
run_manager = cm.on_chain_start({"name": group_name}, {})
yield run_manager.get_child()
run_manager.on_chain_end({})
@asynccontextmanager
async def atrace_as_chain_group(
group_name: str,
*,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
tags: Optional[List[str]] = None,
) -> AsyncGenerator[AsyncCallbackManager, None]:
"""Get an async callback manager for a chain group in a context manager.
Useful for grouping different async calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
AsyncCallbackManager: The async callback manager for the chain group.
Example:
>>> async with atrace_as_chain_group("group_name") as manager:
... # Use the async callback manager for the chain group
... await llm.apredict("Foo", callbacks=manager)
"""
cb = LangChainTracer(
project_name=project_name,
example_id=example_id,
)
cm = AsyncCallbackManager.configure(
inheritable_callbacks=[cb], inheritable_tags=tags
)
run_manager = await cm.on_chain_start({"name": group_name}, {})
try:
yield run_manager.get_child()
finally:
await run_manager.on_chain_end({})
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(
handler, ignore_condition_name
):
getattr(handler, event_name)(*args, **kwargs)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
)
if handler.raise_error:
raise e
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
if handler.run_inline:
event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None, functools.partial(event, *args, **kwargs)
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
)
if handler.raise_error:
raise e
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
for handler in [h for h in handlers if h.run_inline]:
await _ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
await asyncio.gather(
*(
_ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
for handler in handlers
if not handler.run_inline
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
*,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
inheritable_tags: Optional[List[str]] = None,
) -> None:
"""Initialize the run manager.
Args:
run_id (UUID): The ID of the run.
handlers (List[BaseCallbackHandler]): The list of handlers.
inheritable_handlers (List[BaseCallbackHandler]):
The list of inheritable handlers.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
tags (Optional[List[str]]): The list of tags.
inheritable_tags (Optional[List[str]]): The list of inheritable tags.
"""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
self.tags = tags or []
self.inheritable_tags = inheritable_tags or []
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations.
Returns:
BaseRunManager: The noop manager.
"""
return cls(
run_id=uuid4(),
handlers=[],
inheritable_handlers=[],
tags=[],
inheritable_tags=[],
)
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
_handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
await _ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
_handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
_handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
_handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
await _ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
await _ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await _ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
CallbackManager: The child callback manager.
"""
manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
if tag is not None:
manager.add_tags([tag], False)
return manager
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
"""
_handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
_handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
_handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
_handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
AsyncCallbackManager: The child callback manager.
"""
manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
if tag is not None:
manager.add_tags([tag], False)
return manager
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
"""
await _ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await _ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
await _ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
await _ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
CallbackManager: The child callback manager.
"""
manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
if tag is not None:
manager.add_tags([tag], False)
return manager
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
_handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
_handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag to add to the child
callback manager. Defaults to None.
Returns:
AsyncCallbackManager: The child callback manager.
"""
manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
if tag is not None:
manager.add_tags([tag], False)
return manager
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
await _ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await _ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForRetrieverRun(RunManager, RetrieverManagerMixin):
"""Callback manager for retriever run."""
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
if tag is not None:
manager.add_tags([tag], False)
return manager
def on_retriever_end(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> None:
"""Run when retriever ends running."""
_handle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retriever_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
_handle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForRetrieverRun(
AsyncRunManager,
RetrieverManagerMixin,
):
"""Async callback manager for retriever run."""
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
if tag is not None:
manager.add_tags([tag], False)
return manager
async def on_retriever_end(
self, documents: Sequence[Document], **kwargs: Any
) -> None:
"""Run when retriever ends running."""
await _ahandle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retriever_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
await _ahandle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that can be used to handle callbacks from langchain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
prompt as an LLM run.
"""
managers = []
for prompt in prompts:
run_id_ = uuid4()
_handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
)
return managers
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
list of messages as an LLM run.
"""
managers = []
for message_list in messages:
run_id_ = uuid4()
_handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
)
return managers
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
CallbackManagerForChainRun: The callback manager for the chain run.
"""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
return CallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run. Defaults to None.
Returns:
CallbackManagerForToolRun: The callback manager for the tool run.
"""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
return CallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
def on_retriever_start(
self,
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
return CallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
) -> CallbackManager:
"""Configure the callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
Returns:
CallbackManager: The configured callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that can be used to handle callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of async
callback managers, one for each LLM Run corresponding
to each prompt.
"""
tasks = []
managers = []
for prompt in prompts:
run_id_ = uuid4()
tasks.append(
_ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> Any:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of
async callback managers, one for each LLM Run
corresponding to each inner message list.
"""
tasks = []
managers = []
for message_list in messages:
run_id_ = uuid4()
tasks.append(
_ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
AsyncCallbackManagerForChainRun: The async callback manager
for the chain run.
"""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
Returns:
AsyncCallbackManagerForToolRun: The async callback manager
for the tool run.
"""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
async def on_retriever_start(
self,
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
return AsyncCallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
) -> AsyncCallbackManager:
"""Configure the async callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
Returns:
AsyncCallbackManager: The configured async callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
) -> T:
"""Configure the callback manager.
Args:
callback_manager_cls (Type[T]): The callback manager class.
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
Returns:
T: The configured callback manager.
"""
callback_manager = callback_manager_cls(handlers=[])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers,
inheritable_handlers=inheritable_callbacks.inheritable_handlers,
parent_run_id=inheritable_callbacks.parent_run_id,
tags=inheritable_callbacks.tags,
inheritable_tags=inheritable_callbacks.inheritable_tags,
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
if inheritable_tags or local_tags:
callback_manager.add_tags(inheritable_tags or [])
callback_manager.add_tags(local_tags or [], False)
tracer = tracing_callback_var.get()
wandb_tracer = wandb_tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING")
or tracer is not None
or env_var_is_set("LANGCHAIN_HANDLER")
)
wandb_tracing_enabled_ = (
env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None
)
tracer_project = os.environ.get(
"LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default")
)
debug = _get_debug()
if (
verbose
or debug
or tracing_enabled_
or tracing_v2_enabled_
or wandb_tracing_enabled_
or open_ai is not None
):
if verbose and not any(
isinstance(handler, StdOutCallbackHandler)
for handler in callback_manager.handlers
):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(
isinstance(handler, ConsoleCallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1)
for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_project)
callback_manager.add_handler(handler, True)
if wandb_tracing_enabled_ and not any(
isinstance(handler, WandbTracer) for handler in callback_manager.handlers
):
if wandb_tracer:
callback_manager.add_handler(wandb_tracer, True)
else:
handler = WandbTracer()
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(project_name=tracer_project)
callback_manager.add_handler(handler, True)
except Exception as e:
logger.warning(
"Unable to load requested LangChainTracer."
" To disable this warning,"
" unset the LANGCHAIN_TRACING_V2 environment variables.",
e,
)
if open_ai is not None and not any(
isinstance(handler, OpenAICallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
return callback_manager
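# Editor's hedged usage sketch (not part of the original module). `MyHandler`
# is an assumed BaseCallbackHandler subclass defined elsewhere:
#   cm = CallbackManager.configure(inheritable_callbacks=[MyHandler()], verbose=True)
#   (run_manager,) = cm.on_llm_start({"name": "fake_llm"}, ["Hello world"])
#   run_manager.on_llm_new_token("Hi")
#   run_manager.on_llm_end(LLMResult(generations=[[]]))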
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~chains~test_transform.py | """Test transform chain."""
from typing import Dict
import pytest
from langchain.chains.transform import TransformChain
def dummy_transform(inputs: Dict[str, str]) -> Dict[str, str]:
"""Transform a dummy input for tests."""
outputs = inputs
outputs["greeting"] = f"{inputs['first_name']} {inputs['last_name']} says hello"
del outputs["first_name"]
del outputs["last_name"]
return outputs
def test_transform_chain() -> None:
"""Test basic transform chain."""
transform_chain = TransformChain(
input_variables=["first_name", "last_name"],
output_variables=["greeting"],
transform=dummy_transform,
)
input_dict = {"first_name": "Leroy", "last_name": "Jenkins"}
response = transform_chain(input_dict)
expected_response = {"greeting": "Leroy Jenkins says hello"}
assert response == expected_response
def test_transform_chain_bad_inputs() -> None:
"""Test basic transform chain."""
transform_chain = TransformChain(
input_variables=["first_name", "last_name"],
output_variables=["greeting"],
transform=dummy_transform,
)
input_dict = {"name": "Leroy", "last_name": "Jenkins"}
with pytest.raises(ValueError):
_ = transform_chain(input_dict)
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~vectorstores~singlestoredb.py | """Wrapper around SingleStore DB."""
from __future__ import annotations
import enum
import json
from typing import Any, ClassVar, Collection, Iterable, List, Optional, Tuple, Type
from sqlalchemy.pool import QueuePool
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore, VectorStoreRetriever
class DistanceStrategy(str, enum.Enum):
"""Enumerator of the Distance strategies for SingleStoreDB."""
EUCLIDEAN_DISTANCE = "EUCLIDEAN_DISTANCE"
DOT_PRODUCT = "DOT_PRODUCT"
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.DOT_PRODUCT
ORDERING_DIRECTIVE: dict = {
DistanceStrategy.EUCLIDEAN_DISTANCE: "",
DistanceStrategy.DOT_PRODUCT: "DESC",
}
class SingleStoreDB(VectorStore):
"""
This class serves as a Pythonic interface to the SingleStore DB database.
The prerequisite for using this class is the installation of the ``singlestoredb``
Python package.
The SingleStoreDB vectorstore can be created by providing an embedding function and
the relevant parameters for the database connection, connection pool, and
optionally, the names of the table and the fields to use.
"""
def _get_connection(self: SingleStoreDB) -> Any:
try:
import singlestoredb as s2
except ImportError:
raise ImportError(
"Could not import singlestoredb python package. "
"Please install it with `pip install singlestoredb`."
)
return s2.connect(**self.connection_kwargs)
def __init__(
self,
embedding: Embeddings,
*,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
table_name: str = "embeddings",
content_field: str = "content",
metadata_field: str = "metadata",
vector_field: str = "vector",
pool_size: int = 5,
max_overflow: int = 10,
timeout: float = 30,
**kwargs: Any,
):
"""Initialize with necessary components.
Args:
embedding (Embeddings): A text embedding model.
distance_strategy (DistanceStrategy, optional):
Determines the strategy employed for calculating
the distance between vectors in the embedding space.
Defaults to DOT_PRODUCT.
Available options are:
- DOT_PRODUCT: Computes the scalar product of two vectors.
This is the default behavior
- EUCLIDEAN_DISTANCE: Computes the Euclidean distance between
two vectors. This metric considers the geometric distance in
the vector space, and might be more suitable for embeddings
that rely on spatial relationships.
table_name (str, optional): Specifies the name of the table in use.
Defaults to "embeddings".
content_field (str, optional): Specifies the field to store the content.
Defaults to "content".
metadata_field (str, optional): Specifies the field to store metadata.
Defaults to "metadata".
vector_field (str, optional): Specifies the field to store the vector.
Defaults to "vector".
Following arguments pertain to the connection pool:
pool_size (int, optional): Determines the number of active connections in
the pool. Defaults to 5.
max_overflow (int, optional): Determines the maximum number of connections
allowed beyond the pool_size. Defaults to 10.
timeout (float, optional): Specifies the maximum wait time in seconds for
establishing a connection. Defaults to 30.
Following arguments pertain to the database connection:
host (str, optional): Specifies the hostname, IP address, or URL for the
database connection. The default scheme is "mysql".
user (str, optional): Database username.
password (str, optional): Database password.
port (int, optional): Database port. Defaults to 3306 for non-HTTP
connections, 80 for HTTP connections, and 443 for HTTPS connections.
database (str, optional): Database name.
Additional optional arguments provide further customization over the
database connection:
pure_python (bool, optional): Toggles the connector mode. If True,
operates in pure Python mode.
local_infile (bool, optional): Allows local file uploads.
charset (str, optional): Specifies the character set for string values.
ssl_key (str, optional): Specifies the path of the file containing the SSL
key.
ssl_cert (str, optional): Specifies the path of the file containing the SSL
certificate.
ssl_ca (str, optional): Specifies the path of the file containing the SSL
certificate authority.
ssl_cipher (str, optional): Sets the SSL cipher list.
ssl_disabled (bool, optional): Disables SSL usage.
ssl_verify_cert (bool, optional): Verifies the server's certificate.
Automatically enabled if ``ssl_ca`` is specified.
ssl_verify_identity (bool, optional): Verifies the server's identity.
conv (dict[int, Callable], optional): A dictionary of data conversion
functions.
credential_type (str, optional): Specifies the type of authentication to
use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO.
autocommit (bool, optional): Enables autocommits.
results_type (str, optional): Determines the structure of the query results:
tuples, namedtuples, dicts.
results_format (str, optional): Deprecated. This option has been renamed to
results_type.
Examples:
Basic Usage:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
host="https://user:[email protected]:3306/database"
)
Advanced Usage:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
host="127.0.0.1",
port=3306,
user="user",
password="password",
database="db",
table_name="my_custom_table",
pool_size=10,
timeout=60,
)
Using environment variables:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import SingleStoreDB
os.environ['SINGLESTOREDB_URL'] = 'me:[email protected]/my_db'
vectorstore = SingleStoreDB(OpenAIEmbeddings())
"""
self.embedding = embedding
self.distance_strategy = distance_strategy
self.table_name = table_name
self.content_field = content_field
self.metadata_field = metadata_field
self.vector_field = vector_field
"""Pass the rest of the kwargs to the connection."""
self.connection_kwargs = kwargs
"""Add program name and version to connection attributes."""
if "conn_attrs" not in self.connection_kwargs:
self.connection_kwargs["conn_attrs"] = dict()
if "program_name" not in self.connection_kwargs["conn_attrs"]:
self.connection_kwargs["conn_attrs"][
"program_name"
] = "langchain python sdk"
self.connection_kwargs["conn_attrs"][
"program_version"
] = "0.0.205" # the version of SingleStoreDB VectorStore implementation
"""Create connection pool."""
self.connection_pool = QueuePool(
self._get_connection,
max_overflow=max_overflow,
pool_size=pool_size,
timeout=timeout,
)
self._create_table()
def _create_table(self: SingleStoreDB) -> None:
"""Create table if it doesn't exist."""
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
cur.execute(
"""CREATE TABLE IF NOT EXISTS {}
({} TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci,
{} BLOB, {} JSON);""".format(
self.table_name,
self.content_field,
self.vector_field,
self.metadata_field,
),
)
finally:
cur.close()
finally:
conn.close()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
embeddings: Optional[List[List[float]]] = None,
**kwargs: Any,
) -> List[str]:
"""Add more texts to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
Defaults to None.
embeddings (Optional[List[List[float]]], optional): Optional pre-generated
embeddings. Defaults to None.
Returns:
List[str]: empty list
"""
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
# Write data to singlestore db
for i, text in enumerate(texts):
# Use provided values by default or fallback
metadata = metadatas[i] if metadatas else {}
embedding = (
embeddings[i]
if embeddings
else self.embedding.embed_documents([text])[0]
)
cur.execute(
"INSERT INTO {} VALUES (%s, JSON_ARRAY_PACK(%s), %s)".format(
self.table_name
),
(
text,
"[{}]".format(",".join(map(str, embedding))),
json.dumps(metadata),
),
)
finally:
cur.close()
finally:
conn.close()
return []
def similarity_search(
self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
) -> List[Document]:
"""Returns the most similar indexed documents to the query text.
Uses cosine similarity.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
filter (dict): A dictionary of metadata fields and values to filter by.
Returns:
List[Document]: A list of documents that are most similar to the query text.
Examples:
.. code-block:: python
from langchain.vectorstores import SingleStoreDB
from langchain.embeddings import OpenAIEmbeddings
s2 = SingleStoreDB.from_documents(
docs,
OpenAIEmbeddings(),
host="username:password@localhost:3306/database"
)
s2.similarity_search("query text", 1,
{"metadata_field": "metadata_value"})
"""
docs_and_scores = self.similarity_search_with_score(
query=query, k=k, filter=filter
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self, query: str, k: int = 4, filter: Optional[dict] = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query. Uses cosine similarity.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: A dictionary of metadata fields and values to filter by.
Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
# Creates embedding vector from user query
embedding = self.embedding.embed_query(query)
conn = self.connection_pool.connect()
result = []
where_clause: str = ""
where_clause_values: List[Any] = []
if filter:
where_clause = "WHERE "
arguments = []
def build_where_clause(
where_clause_values: List[Any],
sub_filter: dict,
prefix_args: List[str] = [],
) -> None:
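# Recursively flatten the (possibly nested) filter dict into
# JSON_EXTRACT_JSON(<metadata_field>, <key path>) = %s predicates,
# appending each key path and JSON-encoded value to where_clause_values.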
for key in sub_filter.keys():
if isinstance(sub_filter[key], dict):
build_where_clause(
where_clause_values, sub_filter[key], prefix_args + [key]
)
else:
arguments.append(
"JSON_EXTRACT_JSON({}, {}) = %s".format(
self.metadata_field,
", ".join(["%s"] * (len(prefix_args) + 1)),
)
)
where_clause_values += prefix_args + [key]
where_clause_values.append(json.dumps(sub_filter[key]))
build_where_clause(where_clause_values, filter)
where_clause += " AND ".join(arguments)
try:
cur = conn.cursor()
try:
cur.execute(
"""SELECT {}, {}, {}({}, JSON_ARRAY_PACK(%s)) as __score
FROM {} {} ORDER BY __score {} LIMIT %s""".format(
self.content_field,
self.metadata_field,
self.distance_strategy,
self.vector_field,
self.table_name,
where_clause,
ORDERING_DIRECTIVE[self.distance_strategy],
),
("[{}]".format(",".join(map(str, embedding))),)
+ tuple(where_clause_values)
+ (k,),
)
for row in cur.fetchall():
doc = Document(page_content=row[0], metadata=row[1])
result.append((doc, float(row[2])))
finally:
cur.close()
finally:
conn.close()
return result
@classmethod
def from_texts(
cls: Type[SingleStoreDB],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
table_name: str = "embeddings",
content_field: str = "content",
metadata_field: str = "metadata",
vector_field: str = "vector",
pool_size: int = 5,
max_overflow: int = 10,
timeout: float = 30,
**kwargs: Any,
) -> SingleStoreDB:
"""Create a SingleStoreDB vectorstore from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new table for the embeddings in SingleStoreDB.
3. Adds the documents to the newly created table.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import SingleStoreDB
from langchain.embeddings import OpenAIEmbeddings
s2 = SingleStoreDB.from_texts(
texts,
OpenAIEmbeddings(),
host="username:password@localhost:3306/database"
)
"""
instance = cls(
embedding,
distance_strategy=distance_strategy,
table_name=table_name,
content_field=content_field,
metadata_field=metadata_field,
vector_field=vector_field,
pool_size=pool_size,
max_overflow=max_overflow,
timeout=timeout,
**kwargs,
)
instance.add_texts(texts, metadatas, embedding.embed_documents(texts), **kwargs)
return instance
def as_retriever(self, **kwargs: Any) -> SingleStoreDBRetriever:
return SingleStoreDBRetriever(vectorstore=self, **kwargs)
class SingleStoreDBRetriever(VectorStoreRetriever):
"""Retriever for SingleStoreDB vector stores."""
vectorstore: SingleStoreDB
k: int = 4
allowed_search_types: ClassVar[Collection[str]] = ("similarity",)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(query, k=self.k)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
raise NotImplementedError(
"SingleStoreDBVectorStoreRetriever does not support async"
)
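if __name__ == "__main__":
    # Hedged usage sketch (not part of the library): assumes a reachable
    # SingleStoreDB instance at the given host and an OpenAI API key for
    # OpenAIEmbeddings. Nested filter dicts are translated into JSON path
    # lookups on the metadata column by build_where_clause above.
    from langchain.embeddings import OpenAIEmbeddings
    s2 = SingleStoreDB.from_texts(
        ["foo", "bar"],
        OpenAIEmbeddings(),
        metadatas=[{"source": {"kind": "test"}}, {"source": {"kind": "prod"}}],
        host="username:password@localhost:3306/database",
    )
    # Retrieve through the retriever interface (k controls how many docs return).
    retriever = s2.as_retriever(k=1)
    print(retriever.get_relevant_documents("foo"))
    # Filter on a nested metadata field.
    print(s2.similarity_search("foo", k=1, filter={"source": {"kind": "prod"}}))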
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~retrievers~docarray~test_backends.py | from typing import Any
import pytest
from vcr.request import Request
from langchain.retrievers import DocArrayRetriever
from tests.integration_tests.retrievers.docarray.fixtures import ( # noqa: F401
init_elastic,
init_hnsw,
init_in_memory,
init_qdrant,
init_weaviate,
)
@pytest.mark.parametrize(
"backend",
["init_hnsw", "init_in_memory", "init_qdrant", "init_elastic", "init_weaviate"],
)
def test_backends(request: Request, backend: Any) -> None:
index, filter_query, embeddings = request.getfixturevalue(backend)
# create a retriever
retriever = DocArrayRetriever(
index=index,
embeddings=embeddings,
search_field="title_embedding",
content_field="title",
)
docs = retriever.get_relevant_documents("my docs")
assert len(docs) == 1
assert "My document" in docs[0].page_content
assert "id" in docs[0].metadata and "year" in docs[0].metadata
assert "other_emb" not in docs[0].metadata
# create a retriever with filters
retriever = DocArrayRetriever(
index=index,
embeddings=embeddings,
search_field="title_embedding",
content_field="title",
filters=filter_query,
)
docs = retriever.get_relevant_documents("my docs")
assert len(docs) == 1
assert "My document" in docs[0].page_content
assert "id" in docs[0].metadata and "year" in docs[0].metadata
assert "other_emb" not in docs[0].metadata
assert docs[0].metadata["year"] <= 90
# create a retriever with MMR search
retriever = DocArrayRetriever(
index=index,
embeddings=embeddings,
search_field="title_embedding",
search_type="mmr",
content_field="title",
filters=filter_query,
)
docs = retriever.get_relevant_documents("my docs")
assert len(docs) == 1
assert "My document" in docs[0].page_content
assert "id" in docs[0].metadata and "year" in docs[0].metadata
assert "other_emb" not in docs[0].metadata
assert docs[0].metadata["year"] <= 90
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~test_cache.py | """Test caching for LLMs and ChatModels."""
from typing import Dict, Generator, List, Union
import pytest
from _pytest.fixtures import FixtureRequest
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
import langchain
from langchain.cache import (
InMemoryCache,
SQLAlchemyCache,
)
from langchain.chat_models import FakeListChatModel
from langchain.chat_models.base import BaseChatModel, dumps
from langchain.llms import FakeListLLM
from langchain.llms.base import BaseLLM
from langchain.schema import (
ChatGeneration,
Generation,
)
from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage
def get_sqlite_cache() -> SQLAlchemyCache:
return SQLAlchemyCache(engine=create_engine("sqlite://"))
CACHE_OPTIONS = [
InMemoryCache,
get_sqlite_cache,
]
@pytest.fixture(autouse=True, params=CACHE_OPTIONS)
def set_cache_and_teardown(request: FixtureRequest) -> Generator[None, None, None]:
# Will be run before each test
cache_instance = request.param
langchain.llm_cache = cache_instance()
if langchain.llm_cache:
langchain.llm_cache.clear()
else:
raise ValueError("Cache not set. This should never happen.")
yield
# Will be run after each test
if langchain.llm_cache:
langchain.llm_cache.clear()
else:
raise ValueError("Cache not set. This should never happen.")
def test_llm_caching() -> None:
prompt = "How are you?"
response = "Test response"
cached_response = "Cached test response"
llm = FakeListLLM(responses=[response])
if langchain.llm_cache:
langchain.llm_cache.update(
prompt=prompt,
llm_string=create_llm_string(llm),
return_val=[Generation(text=cached_response)],
)
assert llm(prompt) == cached_response
else:
raise ValueError(
"The cache not set. This should never happen, as the pytest fixture "
"`set_cache_and_teardown` always sets the cache."
)
def test_old_sqlite_llm_caching() -> None:
if isinstance(langchain.llm_cache, SQLAlchemyCache):
prompt = "How are you?"
response = "Test response"
cached_response = "Cached test response"
llm = FakeListLLM(responses=[response])
items = [
langchain.llm_cache.cache_schema(
prompt=prompt,
llm=create_llm_string(llm),
response=cached_response,
idx=0,
)
]
with Session(langchain.llm_cache.engine) as session, session.begin():
for item in items:
session.merge(item)
assert llm(prompt) == cached_response
def test_chat_model_caching() -> None:
prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
response = "Test response"
cached_response = "Cached test response"
cached_message = AIMessage(content=cached_response)
llm = FakeListChatModel(responses=[response])
if langchain.llm_cache:
langchain.llm_cache.update(
prompt=dumps(prompt),
llm_string=llm._get_llm_string(),
return_val=[ChatGeneration(message=cached_message)],
)
result = llm(prompt)
assert isinstance(result, AIMessage)
assert result.content == cached_response
else:
raise ValueError(
"The cache not set. This should never happen, as the pytest fixture "
"`set_cache_and_teardown` always sets the cache."
)
def test_chat_model_caching_params() -> None:
prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
response = "Test response"
cached_response = "Cached test response"
cached_message = AIMessage(content=cached_response)
llm = FakeListChatModel(responses=[response])
if langchain.llm_cache:
langchain.llm_cache.update(
prompt=dumps(prompt),
llm_string=llm._get_llm_string(functions=[]),
return_val=[ChatGeneration(message=cached_message)],
)
result = llm(prompt, functions=[])
assert isinstance(result, AIMessage)
assert result.content == cached_response
result_no_params = llm(prompt)
assert isinstance(result_no_params, AIMessage)
assert result_no_params.content == response
else:
raise ValueError(
"The cache not set. This should never happen, as the pytest fixture "
"`set_cache_and_teardown` always sets the cache."
)
def create_llm_string(llm: Union[BaseLLM, BaseChatModel]) -> str:
_dict: Dict = llm.dict()
_dict["stop"] = None
return str(sorted([(k, v) for k, v in _dict.items()]))
| [
"How are you?"
] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~cache~test_redis_cache.py | """Test Redis cache functionality."""
import redis
import langchain
from langchain.cache import RedisCache, RedisSemanticCache
from langchain.schema import Generation, LLMResult
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_llm import FakeLLM
REDIS_TEST_URL = "redis://localhost:6379"
def test_redis_cache() -> None:
langchain.llm_cache = RedisCache(redis_=redis.Redis.from_url(REDIS_TEST_URL))
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
langchain.llm_cache.update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo"])
print(output)
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
print(expected_output)
assert output == expected_output
langchain.llm_cache.redis.flushall()
def test_redis_semantic_cache() -> None:
langchain.llm_cache = RedisSemanticCache(
embedding=FakeEmbeddings(), redis_url=REDIS_TEST_URL, score_threshold=0.1
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
langchain.llm_cache.update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(
["bar"]
) # foo and bar will have the same embedding produced by FakeEmbeddings
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
langchain.llm_cache.clear(llm_string=llm_string)
output = llm.generate(
["bar"]
) # foo and bar will have the same embedding produced by FakeEmbeddings
# expect different output now without cached result
assert output != expected_output
langchain.llm_cache.clear(llm_string=llm_string)
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~chains~test_sequential.py | """Test pipeline functionality."""
from typing import Dict, List, Optional
import pytest
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.sequential import SequentialChain, SimpleSequentialChain
from langchain.memory.simple import SimpleMemory
class FakeChain(Chain):
"""Fake Chain for testing purposes."""
input_variables: List[str]
output_variables: List[str]
@property
def input_keys(self) -> List[str]:
"""Input keys this chain returns."""
return self.input_variables
@property
def output_keys(self) -> List[str]:
"""Input keys this chain returns."""
return self.output_variables
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
outputs = {}
for var in self.output_variables:
variables = [inputs[k] for k in self.input_variables]
outputs[var] = f"{' '.join(variables)}foo"
return outputs
def test_sequential_usage_single_inputs() -> None:
"""Test sequential on single input chains."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
output = chain({"foo": "123"})
expected_output = {"baz": "123foofoo", "foo": "123"}
assert output == expected_output
def test_sequential_usage_multiple_inputs() -> None:
"""Test sequential on multiple input chains."""
chain_1 = FakeChain(input_variables=["foo", "test"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"])
output = chain({"foo": "123", "test": "456"})
expected_output = {
"baz": "123 456foo 123foo",
"foo": "123",
"test": "456",
}
assert output == expected_output
def test_sequential_usage_memory() -> None:
"""Test sequential usage with memory."""
memory = SimpleMemory(memories={"zab": "rab"})
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(
memory=memory, chains=[chain_1, chain_2], input_variables=["foo"]
)
output = chain({"foo": "123"})
expected_output = {"baz": "123foofoo", "foo": "123", "zab": "rab"}
assert output == expected_output
memory = SimpleMemory(memories={"zab": "rab", "foo": "rab"})
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
SequentialChain(
memory=memory, chains=[chain_1, chain_2], input_variables=["foo"]
)
def test_sequential_usage_multiple_outputs() -> None:
"""Test sequential usage on multiple output chains."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
output = chain({"foo": "123"})
expected_output = {
"baz": "123foo 123foo",
"foo": "123",
}
assert output == expected_output
def test_sequential_missing_inputs() -> None:
"""Test error is raised when input variables are missing."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "test"], output_variables=["baz"])
with pytest.raises(ValueError):
# Also needs "test" as an input
SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
def test_sequential_bad_outputs() -> None:
"""Test error is raised when bad outputs are specified."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
# "test" is not present as an output variable.
SequentialChain(
chains=[chain_1, chain_2],
input_variables=["foo"],
output_variables=["test"],
)
def test_sequential_valid_outputs() -> None:
"""Test chain runs when valid outputs are specified."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(
chains=[chain_1, chain_2],
input_variables=["foo"],
output_variables=["bar", "baz"],
)
output = chain({"foo": "123"}, return_only_outputs=True)
expected_output = {"baz": "123foofoo", "bar": "123foo"}
assert output == expected_output
def test_sequential_overlapping_inputs() -> None:
"""Test error is raised when input variables are overlapping."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
# "test" is specified as an input, but also is an output of one step
SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"])
def test_simple_sequential_functionality() -> None:
"""Test simple sequential functionality."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SimpleSequentialChain(chains=[chain_1, chain_2])
output = chain({"input": "123"})
expected_output = {"output": "123foofoo", "input": "123"}
assert output == expected_output
def test_multi_input_errors() -> None:
"""Test simple sequential errors if multiple input variables are expected."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2])
def test_multi_output_errors() -> None:
"""Test simple sequential errors if multiple output variables are expected."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "grok"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2])
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~chains~router~embedding_router.py | from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type
from pydantic import Extra
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.router.base import RouterChain
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
class EmbeddingRouterChain(RouterChain):
"""Class that uses embeddings to route between options."""
vectorstore: VectorStore
routing_keys: List[str] = ["query"]
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the LLM chain prompt expects.
:meta private:
"""
return self.routing_keys
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_input = ", ".join([inputs[k] for k in self.routing_keys])
results = self.vectorstore.similarity_search(_input, k=1)
return {"next_inputs": inputs, "destination": results[0].metadata["name"]}
@classmethod
def from_names_and_descriptions(
cls,
names_and_descriptions: Sequence[Tuple[str, Sequence[str]]],
vectorstore_cls: Type[VectorStore],
embeddings: Embeddings,
**kwargs: Any,
) -> EmbeddingRouterChain:
"""Convenience constructor."""
documents = []
for name, descriptions in names_and_descriptions:
for description in descriptions:
documents.append(
Document(page_content=description, metadata={"name": name})
)
vectorstore = vectorstore_cls.from_documents(documents, embeddings)
return cls(vectorstore=vectorstore, **kwargs)
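if __name__ == "__main__":
    # Hedged usage sketch (not part of the library): assumes the chromadb
    # package and an OpenAI API key are available; the destination names and
    # descriptions below are made up for illustration.
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Chroma
    names_and_descriptions = [
        ("physics", ["questions about physical laws and phenomena"]),
        ("math", ["questions about arithmetic, algebra, and proofs"]),
    ]
    router = EmbeddingRouterChain.from_names_and_descriptions(
        names_and_descriptions, Chroma, OpenAIEmbeddings(), routing_keys=["input"]
    )
    # The routing input is embedded and matched against the description docs;
    # the output contains the chosen "destination" and the passthrough "next_inputs".
    print(router({"input": "What is the speed of light?"}))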
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~agents~agent_toolkits~file_management~toolkit.py | """Toolkit for interacting with the local filesystem."""
from __future__ import annotations
from typing import List, Optional
from pydantic import root_validator
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.file_management.copy import CopyFileTool
from langchain.tools.file_management.delete import DeleteFileTool
from langchain.tools.file_management.file_search import FileSearchTool
from langchain.tools.file_management.list_dir import ListDirectoryTool
from langchain.tools.file_management.move import MoveFileTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool
_FILE_TOOLS = {
tool_cls.__fields__["name"].default: tool_cls
for tool_cls in [
CopyFileTool,
DeleteFileTool,
FileSearchTool,
MoveFileTool,
ReadFileTool,
WriteFileTool,
ListDirectoryTool,
]
}
class FileManagementToolkit(BaseToolkit):
"""Toolkit for interacting with a Local Files."""
root_dir: Optional[str] = None
"""If specified, all file operations are made relative to root_dir."""
selected_tools: Optional[List[str]] = None
"""If provided, only provide the selected tools. Defaults to all."""
@root_validator
def validate_tools(cls, values: dict) -> dict:
selected_tools = values.get("selected_tools") or []
for tool_name in selected_tools:
if tool_name not in _FILE_TOOLS:
raise ValueError(
f"File Tool of name {tool_name} not supported."
f" Permitted tools: {list(_FILE_TOOLS)}"
)
return values
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
allowed_tools = self.selected_tools or _FILE_TOOLS.keys()
tools: List[BaseTool] = []
for tool in allowed_tools:
tool_cls = _FILE_TOOLS[tool]
tools.append(tool_cls(root_dir=self.root_dir)) # type: ignore
return tools
__all__ = ["FileManagementToolkit"]
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~retrievers~test_time_weighted_retriever.py | """Tests for the time-weighted retriever class."""
from datetime import datetime
from typing import Any, Iterable, List, Optional, Tuple, Type
import pytest
from langchain.embeddings.base import Embeddings
from langchain.retrievers.time_weighted_retriever import (
TimeWeightedVectorStoreRetriever,
_get_hours_passed,
)
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
def _get_example_memories(k: int = 4) -> List[Document]:
return [
Document(
page_content="foo",
metadata={
"buffer_idx": i,
"last_accessed_at": datetime(2023, 4, 14, 12, 0),
},
)
for i in range(k)
]
class MockVectorStore(VectorStore):
"""Mock invalid vector store."""
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
return list(texts)
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore."""
raise NotImplementedError
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query."""
return []
@classmethod
def from_documents(
cls: Type["MockVectorStore"],
documents: List[Document],
embedding: Embeddings,
**kwargs: Any,
) -> "MockVectorStore":
"""Return VectorStore initialized from documents and embeddings."""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs)
@classmethod
def from_texts(
cls: Type["MockVectorStore"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> "MockVectorStore":
"""Return VectorStore initialized from texts and embeddings."""
return cls()
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and similarity scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
"""
return [(doc, 0.5) for doc in _get_example_memories()]
@pytest.fixture
def time_weighted_retriever() -> TimeWeightedVectorStoreRetriever:
vectorstore = MockVectorStore()
return TimeWeightedVectorStoreRetriever(
vectorstore=vectorstore, memory_stream=_get_example_memories()
)
def test__get_hours_passed() -> None:
time1 = datetime(2023, 4, 14, 14, 30)
time2 = datetime(2023, 4, 14, 12, 0)
expected_hours_passed = 2.5
hours_passed = _get_hours_passed(time1, time2)
assert hours_passed == expected_hours_passed
def test_get_combined_score(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
document = Document(
page_content="Test document",
metadata={"last_accessed_at": datetime(2023, 4, 14, 12, 0)},
)
vector_salience = 0.7
expected_hours_passed = 2.5
current_time = datetime(2023, 4, 14, 14, 30)
combined_score = time_weighted_retriever._get_combined_score(
document, vector_salience, current_time
)
expected_score = (
1.0 - time_weighted_retriever.decay_rate
) ** expected_hours_passed + vector_salience
assert combined_score == pytest.approx(expected_score)
def test_get_salient_docs(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
query = "Test query"
docs_and_scores = time_weighted_retriever.get_salient_docs(query)
assert isinstance(docs_and_scores, dict)
def test_get_relevant_documents(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
query = "Test query"
relevant_documents = time_weighted_retriever.get_relevant_documents(query)
assert isinstance(relevant_documents, list)
def test_add_documents(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
documents = [Document(page_content="test_add_documents document")]
added_documents = time_weighted_retriever.add_documents(documents)
assert isinstance(added_documents, list)
assert len(added_documents) == 1
assert (
time_weighted_retriever.memory_stream[-1].page_content
== documents[0].page_content
)
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~chains~router~multi_retrieval_qa.py | """Use a single chain to route an input to one of multiple retrieval qa chains."""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.base_language import BaseLanguageModel
from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_retrieval_prompt import (
MULTI_RETRIEVAL_ROUTER_TEMPLATE,
)
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import BaseRetriever
class MultiRetrievalQAChain(MultiRouteChain):
"""A multi-route chain that uses an LLM router chain to choose amongst retrieval
qa chains."""
router_chain: LLMRouterChain
"""Chain for deciding a destination chain and the input to it."""
destination_chains: Mapping[str, BaseRetrievalQA]
"""Map of name to candidate chains that inputs can be routed to."""
default_chain: Chain
"""Default chain to use when router doesn't map input to one of the destinations."""
@property
def output_keys(self) -> List[str]:
return ["result"]
@classmethod
def from_retrievers(
cls,
llm: BaseLanguageModel,
retriever_infos: List[Dict[str, Any]],
default_retriever: Optional[BaseRetriever] = None,
default_prompt: Optional[PromptTemplate] = None,
default_chain: Optional[Chain] = None,
**kwargs: Any,
) -> MultiRetrievalQAChain:
if default_prompt and not default_retriever:
raise ValueError(
"`default_retriever` must be specified if `default_prompt` is "
"provided. Received only `default_prompt`."
)
destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos]
destinations_str = "\n".join(destinations)
router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(
destinations=destinations_str
)
router_prompt = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(next_inputs_inner_key="query"),
)
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
destination_chains = {}
for r_info in retriever_infos:
prompt = r_info.get("prompt")
retriever = r_info["retriever"]
chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
name = r_info["name"]
destination_chains[name] = chain
if default_chain:
_default_chain = default_chain
elif default_retriever:
_default_chain = RetrievalQA.from_llm(
llm, prompt=default_prompt, retriever=default_retriever
)
else:
prompt_template = DEFAULT_TEMPLATE.replace("input", "query")
prompt = PromptTemplate(
template=prompt_template, input_variables=["history", "query"]
)
_default_chain = ConversationChain(
llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result"
)
return cls(
router_chain=router_chain,
destination_chains=destination_chains,
default_chain=_default_chain,
**kwargs,
)
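if __name__ == "__main__":
    # Hedged usage sketch (not part of the library): builds two tiny FAISS
    # indexes so the router has distinct destinations; assumes the faiss
    # package and an OpenAI API key are available. The texts and names are
    # made up for illustration.
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS
    embeddings = OpenAIEmbeddings()
    docs_index = FAISS.from_texts(
        ["To reset a password, open Settings and choose 'Reset password'."],
        embeddings,
    )
    faq_index = FAISS.from_texts(
        ["Refunds are processed within five business days."], embeddings
    )
    retriever_infos = [
        {
            "name": "product docs",
            "description": "Good for questions about how to use the product",
            "retriever": docs_index.as_retriever(),
        },
        {
            "name": "faq",
            "description": "Good for billing and refund questions",
            "retriever": faq_index.as_retriever(),
        },
    ]
    chain = MultiRetrievalQAChain.from_retrievers(ChatOpenAI(), retriever_infos)
    # The router picks a destination by name; unmatched inputs fall back to
    # the default conversation chain.
    print(chain.run("How do I reset my password?"))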
| [
"input"
] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~vectorstores~test_singlestoredb.py | """Test SingleStoreDB functionality."""
from typing import List
import numpy as np
import pytest
from langchain.docstore.document import Document
from langchain.vectorstores.singlestoredb import DistanceStrategy, SingleStoreDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
TEST_SINGLESTOREDB_URL = "root:pass@localhost:3306/db"
TEST_SINGLE_RESULT = [Document(page_content="foo")]
TEST_SINGLE_WITH_METADATA_RESULT = [Document(page_content="foo", metadata={"a": "b"})]
TEST_RESULT = [Document(page_content="foo"), Document(page_content="foo")]
try:
import singlestoredb as s2
singlestoredb_installed = True
except ImportError:
singlestoredb_installed = False
def drop(table_name: str) -> None:
with s2.connect(TEST_SINGLESTOREDB_URL) as conn:
conn.autocommit(True)
with conn.cursor() as cursor:
cursor.execute(f"DROP TABLE IF EXISTS {table_name};")
class NormalizedFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings with normalization. For testing purposes."""
def normalize(self, vector: List[float]) -> List[float]:
"""Normalize vector."""
return [float(v / np.linalg.norm(vector)) for v in vector]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self.normalize(v) for v in super().embed_documents(texts)]
def embed_query(self, text: str) -> List[float]:
return self.normalize(super().embed_query(text))
@pytest.fixture
def texts() -> List[str]:
return ["foo", "bar", "baz"]
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb(texts: List[str]) -> None:
"""Test end to end construction and search."""
table_name = "test_singlestoredb"
drop(table_name)
docsearch = SingleStoreDB.from_texts(
texts,
        NormalizedFakeEmbeddings(),
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch.similarity_search("foo", k=1)
assert output == TEST_SINGLE_RESULT
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_new_vector(texts: List[str]) -> None:
"""Test adding a new document"""
table_name = "test_singlestoredb_new_vector"
drop(table_name)
docsearch = SingleStoreDB.from_texts(
texts,
        NormalizedFakeEmbeddings(),
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
docsearch.add_texts(["foo"])
output = docsearch.similarity_search("foo", k=2)
assert output == TEST_RESULT
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_euclidean_distance(texts: List[str]) -> None:
"""Test adding a new document"""
table_name = "test_singlestoredb_euclidean_distance"
drop(table_name)
docsearch = SingleStoreDB.from_texts(
texts,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
docsearch.add_texts(["foo"])
output = docsearch.similarity_search("foo", k=2)
assert output == TEST_RESULT
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_from_existing(texts: List[str]) -> None:
"""Test adding a new document"""
table_name = "test_singlestoredb_from_existing"
drop(table_name)
SingleStoreDB.from_texts(
texts,
        NormalizedFakeEmbeddings(),
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
    # Test creating from an existing table
docsearch2 = SingleStoreDB(
        NormalizedFakeEmbeddings(),
table_name="test_singlestoredb_from_existing",
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch2.similarity_search("foo", k=1)
assert output == TEST_SINGLE_RESULT
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_from_documents(texts: List[str]) -> None:
"""Test from_documents constructor."""
table_name = "test_singlestoredb_from_documents"
drop(table_name)
docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
docsearch = SingleStoreDB.from_documents(
docs,
        NormalizedFakeEmbeddings(),
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch.similarity_search("foo", k=1)
assert output == TEST_SINGLE_WITH_METADATA_RESULT
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_add_texts_to_existing(texts: List[str]) -> None:
"""Test adding a new document"""
table_name = "test_singlestoredb_add_texts_to_existing"
drop(table_name)
    # Test creating from an existing table
SingleStoreDB.from_texts(
texts,
        NormalizedFakeEmbeddings(),
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
docsearch = SingleStoreDB(
        NormalizedFakeEmbeddings(),
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
docsearch.add_texts(["foo"])
output = docsearch.similarity_search("foo", k=2)
assert output == TEST_RESULT
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata(texts: List[str]) -> None:
"""Test filtering by metadata"""
table_name = "test_singlestoredb_filter_metadata"
drop(table_name)
docs = [
Document(page_content=t, metadata={"index": i}) for i, t in enumerate(texts)
]
docsearch = SingleStoreDB.from_documents(
docs,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch.similarity_search("foo", k=1, filter={"index": 2})
assert output == [Document(page_content="baz", metadata={"index": 2})]
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_2(texts: List[str]) -> None:
"""Test filtering by metadata field that is similar for each document"""
table_name = "test_singlestoredb_filter_metadata_2"
drop(table_name)
docs = [
Document(page_content=t, metadata={"index": i, "category": "budget"})
for i, t in enumerate(texts)
]
docsearch = SingleStoreDB.from_documents(
docs,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch.similarity_search("foo", k=1, filter={"category": "budget"})
assert output == [
Document(page_content="foo", metadata={"index": 0, "category": "budget"})
]
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_3(texts: List[str]) -> None:
"""Test filtering by two metadata fields"""
table_name = "test_singlestoredb_filter_metadata_3"
drop(table_name)
docs = [
Document(page_content=t, metadata={"index": i, "category": "budget"})
for i, t in enumerate(texts)
]
docsearch = SingleStoreDB.from_documents(
docs,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch.similarity_search(
"foo", k=1, filter={"category": "budget", "index": 1}
)
assert output == [
Document(page_content="bar", metadata={"index": 1, "category": "budget"})
]
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_4(texts: List[str]) -> None:
"""Test no matches"""
table_name = "test_singlestoredb_filter_metadata_4"
drop(table_name)
docs = [
Document(page_content=t, metadata={"index": i, "category": "budget"})
for i, t in enumerate(texts)
]
docsearch = SingleStoreDB.from_documents(
docs,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch.similarity_search("foo", k=1, filter={"category": "vacation"})
assert output == []
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_5(texts: List[str]) -> None:
"""Test complex metadata path"""
table_name = "test_singlestoredb_filter_metadata_5"
drop(table_name)
docs = [
Document(
page_content=t,
metadata={
"index": i,
"category": "budget",
"subfield": {"subfield": {"idx": i, "other_idx": i + 1}},
},
)
for i, t in enumerate(texts)
]
docsearch = SingleStoreDB.from_documents(
docs,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch.similarity_search(
"foo", k=1, filter={"category": "budget", "subfield": {"subfield": {"idx": 2}}}
)
assert output == [
Document(
page_content="baz",
metadata={
"index": 2,
"category": "budget",
"subfield": {"subfield": {"idx": 2, "other_idx": 3}},
},
)
]
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_6(texts: List[str]) -> None:
"""Test filtering by other bool"""
table_name = "test_singlestoredb_filter_metadata_6"
drop(table_name)
docs = [
Document(
page_content=t,
metadata={"index": i, "category": "budget", "is_good": i == 1},
)
for i, t in enumerate(texts)
]
docsearch = SingleStoreDB.from_documents(
docs,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch.similarity_search(
"foo", k=1, filter={"category": "budget", "is_good": True}
)
assert output == [
Document(
page_content="bar",
metadata={"index": 1, "category": "budget", "is_good": True},
)
]
drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_7(texts: List[str]) -> None:
"""Test filtering by float"""
table_name = "test_singlestoredb_filter_metadata_7"
drop(table_name)
docs = [
Document(
page_content=t,
metadata={"index": i, "category": "budget", "score": i + 0.5},
)
for i, t in enumerate(texts)
]
docsearch = SingleStoreDB.from_documents(
docs,
FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
table_name=table_name,
host=TEST_SINGLESTOREDB_URL,
)
output = docsearch.similarity_search(
"bar", k=1, filter={"category": "budget", "score": 2.5}
)
assert output == [
Document(
page_content="baz",
metadata={"index": 2, "category": "budget", "score": 2.5},
)
]
drop(table_name)
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~agents~agent_toolkits~gmail~toolkit.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
from pydantic import Field
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.gmail.create_draft import GmailCreateDraft
from langchain.tools.gmail.get_message import GmailGetMessage
from langchain.tools.gmail.get_thread import GmailGetThread
from langchain.tools.gmail.search import GmailSearch
from langchain.tools.gmail.send_message import GmailSendMessage
from langchain.tools.gmail.utils import build_resource_service
if TYPE_CHECKING:
# This is for linting and IDE typehints
from googleapiclient.discovery import Resource
else:
try:
# We do this so pydantic can resolve the types when instantiating
from googleapiclient.discovery import Resource
except ImportError:
pass
SCOPES = ["https://mail.google.com/"]
class GmailToolkit(BaseToolkit):
"""Toolkit for interacting with Gmail."""
api_resource: Resource = Field(default_factory=build_resource_service)
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
GmailCreateDraft(api_resource=self.api_resource),
GmailSendMessage(api_resource=self.api_resource),
GmailSearch(api_resource=self.api_resource),
GmailGetMessage(api_resource=self.api_resource),
GmailGetThread(api_resource=self.api_resource),
]
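if __name__ == "__main__":
    # Hedged usage sketch (not part of the library): build_resource_service
    # needs Gmail OAuth credentials (google-api-python-client plus a local
    # token/credentials setup), so this only runs in a configured environment.
    toolkit = GmailToolkit()
    for tool in toolkit.get_tools():
        print(tool.name)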
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~agents~mrkl~output_parser.py | import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FINAL_ANSWER_ACTION = "Final Answer:"
class MRKLOutputParser(AgentOutputParser):
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
raise OutputParserException(
"Parsing LLM output produced both a final answer "
f"and a parse-able action: {text}"
)
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
            # Ensure we don't strip trailing quote chars from a well-formed SQL query
if tool_input.startswith("SELECT ") is False:
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation="Invalid Format: Missing 'Action:' after 'Thought:'",
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation="Invalid Format:"
" Missing 'Action Input:' after 'Action:'",
llm_output=text,
send_to_llm=True,
)
else:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
@property
def _type(self) -> str:
return "mrkl"
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~chains~test_hyde.py | """Test HyDE."""
from typing import Any, List, Optional
import numpy as np
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.embeddings.base import Embeddings
from langchain.llms.base import BaseLLM
from langchain.schema import Generation, LLMResult
class FakeEmbeddings(Embeddings):
"""Fake embedding class for tests."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return random floats."""
return [list(np.random.uniform(0, 1, 10)) for _ in range(10)]
def embed_query(self, text: str) -> List[float]:
"""Return random floats."""
return list(np.random.uniform(0, 1, 10))
class FakeLLM(BaseLLM):
"""Fake LLM wrapper for testing purposes."""
n: int = 1
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]])
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def test_hyde_from_llm() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(), FakeEmbeddings(), key
)
embedding.embed_query("foo")
def test_hyde_from_llm_with_multiple_n() -> None:
"""Test loading HyDE from all prompts."""
for key in PROMPT_MAP:
embedding = HypotheticalDocumentEmbedder.from_llm(
FakeLLM(n=8), FakeEmbeddings(), key
)
embedding.embed_query("foo")
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~agents~agent_toolkits~jira~toolkit.py | """Jira Toolkit."""
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.jira.tool import JiraAction
from langchain.utilities.jira import JiraAPIWrapper
class JiraToolkit(BaseToolkit):
"""Jira Toolkit."""
tools: List[BaseTool] = []
@classmethod
def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
actions = jira_api_wrapper.list()
tools = [
JiraAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=jira_api_wrapper,
)
for action in actions
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
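if __name__ == "__main__":
    # Hedged usage sketch (not part of the library): JiraAPIWrapper expects
    # the Jira instance URL, username and API token to be configured (normally
    # via environment variables), so this only runs against a real instance.
    jira = JiraAPIWrapper()
    toolkit = JiraToolkit.from_jira_api_wrapper(jira)
    for tool in toolkit.get_tools():
        print(tool.name, "-", tool.mode)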
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~test_bash.py | """Test the bash utility."""
import re
import subprocess
import sys
from pathlib import Path
import pytest
from langchain.utilities.bash import BashProcess
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_pwd_command() -> None:
"""Test correct functionality."""
session = BashProcess()
commands = ["pwd"]
output = session.run(commands)
assert output == subprocess.check_output("pwd", shell=True).decode()
@pytest.mark.skip(reason="flaky on GHA, TODO to fix")
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_pwd_command_persistent() -> None:
"""Test correct functionality when the bash process is persistent."""
session = BashProcess(persistent=True, strip_newlines=True)
commands = ["pwd"]
output = session.run(commands)
assert subprocess.check_output("pwd", shell=True).decode().strip() in output
session.run(["cd .."])
new_output = session.run(["pwd"])
# Assert that the new_output is a parent of the old output
assert Path(output).parent == Path(new_output)
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_incorrect_command() -> None:
"""Test handling of incorrect command."""
session = BashProcess()
output = session.run(["invalid_command"])
assert output == "Command 'invalid_command' returned non-zero exit status 127."
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_incorrect_command_return_err_output() -> None:
"""Test optional returning of shell output on incorrect command."""
session = BashProcess(return_err_output=True)
output = session.run(["invalid_command"])
assert re.match(r"^/bin/sh:.*invalid_command.*not found.*$", output)
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_create_directory_and_files(tmp_path: Path) -> None:
"""Test creation of a directory and files in a temporary directory."""
session = BashProcess(strip_newlines=True)
# create a subdirectory in the temporary directory
temp_dir = tmp_path / "test_dir"
temp_dir.mkdir()
# run the commands in the temporary directory
commands = [
f"touch {temp_dir}/file1.txt",
f"touch {temp_dir}/file2.txt",
f"echo 'hello world' > {temp_dir}/file2.txt",
f"cat {temp_dir}/file2.txt",
]
output = session.run(commands)
assert output == "hello world"
# check that the files were created in the temporary directory
output = session.run([f"ls {temp_dir}"])
assert output == "file1.txt\nfile2.txt"
@pytest.mark.skip(reason="flaky on GHA, TODO to fix")
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_create_bash_persistent() -> None:
"""Test the pexpect persistent bash terminal"""
session = BashProcess(persistent=True)
response = session.run("echo hello")
response += session.run("echo world")
assert "hello" in response
assert "world" in response
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~document_loaders~word_document.py | """Loader that loads word documents."""
import os
import tempfile
from abc import ABC
from typing import List
from urllib.parse import urlparse
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class Docx2txtLoader(BaseLoader, ABC):
"""Loads a DOCX with docx2txt and chunks at character level.
Defaults to check for local file, but if the file is a web path, it will download it
to a temporary file, and use that, then clean up the temporary file after completion
"""
def __init__(self, file_path: str):
"""Initialize with file path."""
self.file_path = file_path
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
def load(self) -> List[Document]:
"""Load given path as single page."""
import docx2txt
return [
Document(
page_content=docx2txt.process(self.file_path),
metadata={"source": self.file_path},
)
]
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load word documents."""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
[int(x) for x in __unstructured_version__.split(".")]
)
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_doc = detect_filetype(self.file_path) == FileType.DOC
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_doc = extension == ".doc"
if is_doc and unstructured_version < (0, 4, 11):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning .doc files is only supported in unstructured>=0.4.11. "
"Please upgrade the unstructured package and try again."
)
if is_doc:
from unstructured.partition.doc import partition_doc
return partition_doc(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.docx import partition_docx
return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
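if __name__ == "__main__":
    # Hedged usage sketch (not part of the library): "example.docx" is a
    # placeholder path; docx2txt must be installed for Docx2txtLoader and the
    # unstructured package for UnstructuredWordDocumentLoader.
    loader = Docx2txtLoader("example.docx")
    docs = loader.load()
    print(docs[0].metadata["source"], len(docs[0].page_content))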
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~evaluation~run_evaluators~test_implementations.py | """Test run evaluator implementations basic functionality."""
from uuid import UUID
import pytest
from langchainplus_sdk.schemas import Example, Run
from langchain.evaluation.run_evaluators import get_criteria_evaluator, get_qa_evaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.fixture
def run() -> Run:
return Run(
id=UUID("f77cd087-48f7-4c62-9e0e-297842202107"),
name="My Run",
inputs={"input": "What is the answer to life, the universe, and everything?"},
outputs={"output": "The answer is 42."},
start_time="2021-07-20T15:00:00.000000+00:00",
end_time="2021-07-20T15:00:00.000000+00:00",
run_type="chain",
execution_order=1,
)
@pytest.fixture
def example() -> Example:
return Example(
id=UUID("f77cd087-48f7-4c62-9e0e-297842202106"),
dataset_id=UUID("f77cd087-48f7-4c62-9e0e-297842202105"),
inputs={"input": "What is the answer to life, the universe, and everything?"},
outputs={"output": "The answer is 42."},
created_at="2021-07-20T15:00:00.000000+00:00",
)
def test_get_qa_evaluator(run: Run, example: Example) -> None:
"""Test get_qa_evaluator."""
eval_llm = FakeLLM(
queries={"a": "This checks out.\nCORRECT"}, sequential_responses=True
)
qa_evaluator = get_qa_evaluator(eval_llm)
res = qa_evaluator.evaluate_run(run, example)
assert res.value == "CORRECT"
assert res.score == 1
def test_get_criteria_evaluator(run: Run, example: Example) -> None:
"""Get a criteria evaluator."""
eval_llm = FakeLLM(queries={"a": "This checks out.\nY"}, sequential_responses=True)
criteria_evaluator = get_criteria_evaluator(eval_llm, criteria="conciseness")
res = criteria_evaluator.evaluate_run(run, example)
assert res.value == "Y"
assert res.score == 1
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~retrievers~self_query~test_myscale.py | from typing import Any, Tuple
import pytest
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
)
from langchain.retrievers.self_query.myscale import MyScaleTranslator
DEFAULT_TRANSLATOR = MyScaleTranslator()
@pytest.mark.parametrize(
"triplet",
[
(Comparator.LT, 2, "metadata.foo < 2"),
(Comparator.LTE, 2, "metadata.foo <= 2"),
(Comparator.GT, 2, "metadata.foo > 2"),
(Comparator.GTE, 2, "metadata.foo >= 2"),
(Comparator.CONTAIN, 2, "has(metadata.foo,2)"),
(Comparator.LIKE, "bar", "metadata.foo ILIKE '%bar%'"),
],
)
def test_visit_comparison(triplet: Tuple[Comparator, Any, str]) -> None:
comparator, value, expected = triplet
comp = Comparison(comparator=comparator, attribute="foo", value=value)
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
def test_visit_operation() -> None:
op = Operation(
operator=Operator.AND,
arguments=[
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
],
)
expected = "metadata.foo < 2 AND metadata.bar = 'baz'"
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~llms~test_anthropic.py | """Test Anthropic API wrapper."""
from typing import Generator
import pytest
from langchain.callbacks.manager import CallbackManager
from langchain.llms.anthropic import Anthropic
from langchain.schema import LLMResult
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
llm = Anthropic(model="test")
output = llm("Say foo:")
assert isinstance(output, str)
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
llm = Anthropic(model="test")
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token["completion"], str)
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
llm("Write me a sentence with 100 words.")
assert callback_handler.llm_streams > 1
@pytest.mark.asyncio
async def test_anthropic_async_generate() -> None:
"""Test async generate."""
llm = Anthropic()
output = await llm.agenerate(["How many toes do dogs have?"])
assert isinstance(output, LLMResult)
@pytest.mark.asyncio
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["How many toes do dogs have?"])
assert callback_handler.llm_streams > 1
assert isinstance(result, LLMResult)
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~chat_models~test_openai.py | """Test ChatOpenAI wrapper."""
import pytest
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import (
ChatGeneration,
ChatResult,
LLMResult,
)
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_chat_openai() -> None:
"""Test ChatOpenAI wrapper."""
chat = ChatOpenAI(max_tokens=10)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_openai_model() -> None:
"""Test ChatOpenAI wrapper handles model_name."""
chat = ChatOpenAI(model="foo")
assert chat.model_name == "foo"
chat = ChatOpenAI(model_name="bar")
assert chat.model_name == "bar"
def test_chat_openai_system_message() -> None:
"""Test ChatOpenAI wrapper with system message."""
chat = ChatOpenAI(max_tokens=10)
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_openai_generate() -> None:
"""Test ChatOpenAI wrapper with generate."""
chat = ChatOpenAI(max_tokens=10, n=2)
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
def test_chat_openai_multiple_completions() -> None:
"""Test ChatOpenAI wrapper with multiple completions."""
chat = ChatOpenAI(max_tokens=10, n=5)
message = HumanMessage(content="Hello")
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
def test_chat_openai_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatOpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Hello")
response = chat([message])
assert callback_handler.llm_streams > 0
assert isinstance(response, BaseMessage)
def test_chat_openai_llm_output_contains_model_name() -> None:
"""Test llm_output contains model_name."""
chat = ChatOpenAI(max_tokens=10)
message = HumanMessage(content="Hello")
llm_result = chat.generate([[message]])
assert llm_result.llm_output is not None
assert llm_result.llm_output["model_name"] == chat.model_name
def test_chat_openai_streaming_llm_output_contains_model_name() -> None:
"""Test llm_output contains model_name."""
chat = ChatOpenAI(max_tokens=10, streaming=True)
message = HumanMessage(content="Hello")
llm_result = chat.generate([[message]])
assert llm_result.llm_output is not None
assert llm_result.llm_output["model_name"] == chat.model_name
def test_chat_openai_invalid_streaming_params() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
with pytest.raises(ValueError):
ChatOpenAI(
max_tokens=10,
streaming=True,
temperature=0,
n=5,
)
@pytest.mark.asyncio
async def test_async_chat_openai() -> None:
"""Test async generation."""
chat = ChatOpenAI(max_tokens=10, n=2)
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
@pytest.mark.asyncio
async def test_async_chat_openai_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatOpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert callback_handler.llm_streams > 0
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
def test_chat_openai_extra_kwargs() -> None:
"""Test extra kwargs to chat openai."""
# Check that foo is saved in extra_kwargs.
llm = ChatOpenAI(foo=3, max_tokens=10)
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
    # Test that explicit model_kwargs are merged with any extra kwargs.
llm = ChatOpenAI(foo=3, model_kwargs={"bar": 2})
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
ChatOpenAI(foo=3, model_kwargs={"foo": 2})
# Test that if explicit param is specified in kwargs it errors
with pytest.raises(ValueError):
ChatOpenAI(model_kwargs={"temperature": 0.2})
# Test that "model" cannot be specified in kwargs
with pytest.raises(ValueError):
ChatOpenAI(model_kwargs={"model": "text-davinci-003"})
| [
"Hello",
"You are to chat with the user."
] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~utilities~test_pupmed.py | """Integration test for PubMed API Wrapper."""
from typing import Any, List
import pytest
from langchain.agents.load_tools import load_tools
from langchain.schema import Document
from langchain.tools.base import BaseTool
from langchain.utilities import PubMedAPIWrapper
@pytest.fixture
def api_client() -> PubMedAPIWrapper:
return PubMedAPIWrapper()
def test_run_success(api_client: PubMedAPIWrapper) -> None:
"""Test that returns the correct answer"""
output = api_client.run("1605.08386")
assert "Heat-bath random walks with Markov bases" in output
def test_run_returns_several_docs(api_client: PubMedAPIWrapper) -> None:
"""Test that returns several docs"""
output = api_client.run("Caprice Stanley")
assert "On Mixing Behavior of a Family of Random Walks" in output
def test_run_returns_no_result(api_client: PubMedAPIWrapper) -> None:
"""Test that gives no result."""
output = api_client.run("1605.08386WWW")
assert "No good PubMed Result was found" == output
def assert_docs(docs: List[Document]) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata) == {"Published", "Title", "Authors", "Summary"}
def test_load_success(api_client: PubMedAPIWrapper) -> None:
"""Test that returns one document"""
docs = api_client.load_docs("1605.08386")
assert len(docs) == 1
assert_docs(docs)
def test_load_returns_no_result(api_client: PubMedAPIWrapper) -> None:
"""Test that returns no docs"""
docs = api_client.load("1605.08386WWW")
assert len(docs) == 0
def test_load_returns_limited_docs() -> None:
"""Test that returns several docs"""
expected_docs = 2
api_client = PubMedAPIWrapper(load_max_docs=expected_docs)
docs = api_client.load_docs("ChatGPT")
assert len(docs) == expected_docs
assert_docs(docs)
def test_load_returns_full_set_of_metadata() -> None:
"""Test that returns several docs"""
api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True)
docs = api_client.load_docs("ChatGPT")
assert len(docs) == 1
for doc in docs:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata).issuperset(
{"Published", "Title", "Authors", "Summary"}
)
print(doc.metadata)
assert len(set(doc.metadata)) > 4
def _load_pubmed_from_universal_entry(**kwargs: Any) -> BaseTool:
tools = load_tools(["pupmed"], **kwargs)
assert len(tools) == 1, "loaded more than 1 tool"
return tools[0]
def test_load_pupmed_from_universal_entry() -> None:
pupmed_tool = _load_pubmed_from_universal_entry()
output = pupmed_tool("Caprice Stanley")
assert (
"On Mixing Behavior of a Family of Random Walks" in output
), "failed to fetch a valid result"
def test_load_pupmed_from_universal_entry_with_params() -> None:
params = {
"top_k_results": 1,
"load_max_docs": 10,
"load_all_available_meta": True,
}
pupmed_tool = _load_pubmed_from_universal_entry(**params)
    wp = pupmed_tool.api_wrapper
    assert isinstance(wp, PubMedAPIWrapper)
assert wp.top_k_results == 1, "failed to assert top_k_results"
assert wp.load_max_docs == 10, "failed to assert load_max_docs"
assert (
wp.load_all_available_meta is True
), "failed to assert load_all_available_meta"
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~utilities~wolfram_alpha.py | """Util that calls WolframAlpha."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class WolframAlphaAPIWrapper(BaseModel):
"""Wrapper for Wolfram Alpha.
Docs for using:
    1. Go to Wolfram Alpha and sign up for a developer account
2. Create an app and get your APP ID
3. Save your APP ID into WOLFRAM_ALPHA_APPID env variable
4. pip install wolframalpha
"""
wolfram_client: Any #: :meta private:
wolfram_alpha_appid: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
wolfram_alpha_appid = get_from_dict_or_env(
values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID"
)
values["wolfram_alpha_appid"] = wolfram_alpha_appid
try:
import wolframalpha
except ImportError:
raise ImportError(
"wolframalpha is not installed. "
"Please install it with `pip install wolframalpha`"
)
client = wolframalpha.Client(wolfram_alpha_appid)
values["wolfram_client"] = client
return values
def run(self, query: str) -> str:
"""Run query through WolframAlpha and parse result."""
res = self.wolfram_client.query(query)
try:
assumption = next(res.pods).text
answer = next(res.results).text
except StopIteration:
return "Wolfram Alpha wasn't able to answer it"
if answer is None or answer == "":
# We don't want to return the assumption alone if answer is empty
return "No good Wolfram Alpha Result was found"
else:
return f"Assumption: {assumption} \nAnswer: {answer}"
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~retrievers~self_query~weaviate.py | """Logic for converting internal query language to a valid Weaviate query."""
from typing import Dict, Tuple, Union
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
class WeaviateTranslator(Visitor):
"""Logic for converting internal query language elements to valid filters."""
allowed_operators = [Operator.AND, Operator.OR]
"""Subset of allowed logical operators."""
allowed_comparators = [Comparator.EQ]
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
# https://weaviate.io/developers/weaviate/api/graphql/filters
map_dict = {Operator.AND: "And", Operator.OR: "Or", Comparator.EQ: "Equal"}
return map_dict[func]
def visit_operation(self, operation: Operation) -> Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {"operator": self._format_func(operation.operator), "operands": args}
def visit_comparison(self, comparison: Comparison) -> Dict:
return {
"path": [comparison.attribute],
"operator": self._format_func(comparison.comparator),
"valueText": comparison.value,
}
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"where_filter": structured_query.filter.accept(self)}
return structured_query.query, kwargs
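# Usage sketch: a pure-logic check of the translation above. The field names on
# Comparison and StructuredQuery (comparator/attribute/value, query/filter) are
# assumed from how they are used elsewhere; treat this as illustrative.
if __name__ == "__main__":
    translator = WeaviateTranslator()
    eq = Comparison(comparator=Comparator.EQ, attribute="genre", value="fiction")
    query, kwargs = translator.visit_structured_query(
        StructuredQuery(query="space opera", filter=eq)
    )
    # Expected: kwargs == {"where_filter": {"path": ["genre"],
    #                      "operator": "Equal", "valueText": "fiction"}}
    print(query, kwargs)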
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~load~test_load.py | """Test for Serializable base class"""
import pytest
from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.prompts.prompt import PromptTemplate
class NotSerializable:
pass
@pytest.mark.requires("openai")
def test_load_openai_llm() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
llm_string = dumps(llm)
llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert llm2 == llm
assert dumps(llm2) == llm_string
assert isinstance(llm2, OpenAI)
@pytest.mark.requires("openai")
def test_load_llmchain() -> None:
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_string = dumps(chain)
chain2 = loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert chain2 == chain
assert dumps(chain2) == chain_string
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
@pytest.mark.requires("openai")
def test_load_llmchain_env() -> None:
import os
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm = OpenAI(model="davinci", temperature=0.5)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_string = dumps(chain)
chain2 = loads(chain_string)
assert chain2 == chain
assert dumps(chain2) == chain_string
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.requires("openai")
def test_load_llmchain_with_non_serializable_arg() -> None:
llm = OpenAI(
model="davinci",
temperature=0.5,
openai_api_key="hello",
client=NotSerializable,
)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
chain_string = dumps(chain, pretty=True)
with pytest.raises(NotImplementedError):
loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"})
| [
"hello {name}!"
] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~docstore~test_inmemory.py | """Test in memory docstore."""
import pytest
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
def test_document_found() -> None:
"""Test document found."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
output = docstore.search("foo")
assert isinstance(output, Document)
assert output.page_content == "bar"
def test_document_not_found() -> None:
"""Test when document is not found."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
output = docstore.search("bar")
assert output == "ID bar not found."
def test_adding_document() -> None:
"""Test that documents are added correctly."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
new_dict = {"bar": Document(page_content="foo")}
docstore.add(new_dict)
# Test that you can find new document.
foo_output = docstore.search("bar")
assert isinstance(foo_output, Document)
assert foo_output.page_content == "foo"
# Test that old document is the same.
bar_output = docstore.search("foo")
assert isinstance(bar_output, Document)
assert bar_output.page_content == "bar"
def test_adding_document_already_exists() -> None:
"""Test that error is raised if document id already exists."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
new_dict = {"foo": Document(page_content="foo")}
# Test that error is raised.
with pytest.raises(ValueError):
docstore.add(new_dict)
# Test that old document is the same.
bar_output = docstore.search("foo")
assert isinstance(bar_output, Document)
assert bar_output.page_content == "bar"
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~chat_models~test_anthropic.py | """Test Anthropic API wrapper."""
from typing import List
import pytest
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.schema import (
ChatGeneration,
LLMResult,
)
from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
chat = ChatAnthropic(model="test")
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_anthropic_generate() -> None:
"""Test generate method of anthropic."""
chat = ChatAnthropic(model="test")
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="How many toes do dogs have?")]
]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
chat = ChatAnthropic(model="test", streaming=True)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic(
model="test",
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Write me a sentence with 10 words.")
chat([message])
assert callback_handler.llm_streams > 1
@pytest.mark.asyncio
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic(
model="test",
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
chat_messages: List[BaseMessage] = [
HumanMessage(content="How many toes do dogs have?")
]
result: LLMResult = await chat.agenerate([chat_messages])
assert callback_handler.llm_streams > 1
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
def test_formatting() -> None:
chat = ChatAnthropic()
chat_messages: List[BaseMessage] = [HumanMessage(content="Hello")]
result = chat._convert_messages_to_prompt(chat_messages)
assert result == "\n\nHuman: Hello\n\nAssistant:"
chat_messages = [HumanMessage(content="Hello"), AIMessage(content="Answer:")]
result = chat._convert_messages_to_prompt(chat_messages)
assert result == "\n\nHuman: Hello\n\nAssistant: Answer:"
| [
"How many toes do dogs have?",
"Write me a sentence with 10 words.",
"Answer:",
"Hello"
] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~vectorstores~test_pgvector.py | """Test PGVector functionality."""
import os
from typing import List
from sqlalchemy.orm import Session
from langchain.docstore.document import Document
from langchain.vectorstores.pgvector import PGVector
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
CONNECTION_STRING = PGVector.connection_string_from_db_params(
driver=os.environ.get("TEST_PGVECTOR_DRIVER", "psycopg2"),
host=os.environ.get("TEST_PGVECTOR_HOST", "localhost"),
port=int(os.environ.get("TEST_PGVECTOR_PORT", "5432")),
database=os.environ.get("TEST_PGVECTOR_DATABASE", "postgres"),
user=os.environ.get("TEST_PGVECTOR_USER", "postgres"),
password=os.environ.get("TEST_PGVECTOR_PASSWORD", "postgres"),
)
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return simple embeddings."""
return [
[float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)] for i in range(len(texts))
]
def embed_query(self, text: str) -> List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
def test_pgvector() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_pgvector_embeddings() -> None:
"""Test end to end construction with embeddings and search."""
texts = ["foo", "bar", "baz"]
text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = PGVector.from_embeddings(
text_embeddings=text_embedding_pairs,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_pgvector_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_pgvector_with_metadatas_with_scores() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_pgvector_with_filter_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"})
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_pgvector_with_filter_distant_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
assert output == [
(Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406)
]
def test_pgvector_with_filter_no_match() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"})
assert output == []
def test_pgvector_collection_with_metadata() -> None:
"""Test end to end collection construction"""
pgvector = PGVector(
collection_name="test_collection",
collection_metadata={"foo": "bar"},
embedding_function=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
session = Session(pgvector.connect())
collection = pgvector.get_collection(session)
if collection is None:
assert False, "Expected a CollectionStore object but received None"
else:
assert collection.name == "test_collection"
assert collection.cmetadata == {"foo": "bar"}
def test_pgvector_with_filter_in_set() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(
texts=texts,
collection_name="test_collection_filter",
embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas,
connection_string=CONNECTION_STRING,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score(
"foo", k=2, filter={"page": {"IN": ["0", "2"]}}
)
assert output == [
(Document(page_content="foo", metadata={"page": "0"}), 0.0),
(Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406),
]
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~retrievers~llama_index.py | from typing import Any, Dict, List, cast
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document
class LlamaIndexRetriever(BaseRetriever, BaseModel):
"""Question-answering with sources over an LlamaIndex data structure."""
index: Any
query_kwargs: Dict = Field(default_factory=dict)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
try:
from llama_index.indices.base import BaseGPTIndex
from llama_index.response.schema import Response
except ImportError:
raise ImportError(
"You need to install `pip install llama-index` to use this retriever."
)
index = cast(BaseGPTIndex, self.index)
response = index.query(query, response_mode="no_text", **self.query_kwargs)
response = cast(Response, response)
# parse source nodes
docs = []
for source_node in response.source_nodes:
metadata = source_node.extra_info or {}
docs.append(
Document(page_content=source_node.source_text, metadata=metadata)
)
return docs
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
raise NotImplementedError("LlamaIndexRetriever does not support async")
class LlamaIndexGraphRetriever(BaseRetriever, BaseModel):
"""Question-answering with sources over an LlamaIndex graph data structure."""
graph: Any
query_configs: List[Dict] = Field(default_factory=list)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
try:
from llama_index.composability.graph import (
QUERY_CONFIG_TYPE,
ComposableGraph,
)
from llama_index.response.schema import Response
except ImportError:
raise ImportError(
"You need to install `pip install llama-index` to use this retriever."
)
graph = cast(ComposableGraph, self.graph)
# for now, inject response_mode="no_text" into query configs
for query_config in self.query_configs:
query_config["response_mode"] = "no_text"
query_configs = cast(List[QUERY_CONFIG_TYPE], self.query_configs)
response = graph.query(query, query_configs=query_configs)
response = cast(Response, response)
# parse source nodes
docs = []
for source_node in response.source_nodes:
metadata = source_node.extra_info or {}
docs.append(
Document(page_content=source_node.source_text, metadata=metadata)
)
return docs
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
raise NotImplementedError("LlamaIndexGraphRetriever does not support async")
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~document_loaders~test_confluence.py | import unittest
from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
from langchain.docstore.document import Document
from langchain.document_loaders.confluence import ConfluenceLoader
@pytest.fixture
def mock_confluence(): # type: ignore
with patch("atlassian.Confluence") as mock_confluence:
yield mock_confluence
@pytest.mark.requires("atlassian", "bs4", "lxml")
class TestConfluenceLoader:
CONFLUENCE_URL = "https://example.atlassian.com/wiki"
MOCK_USERNAME = "[email protected]"
MOCK_API_TOKEN = "api_token"
MOCK_SPACE_KEY = "spaceId123"
def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
ConfluenceLoader(
url=self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
mock_confluence.assert_called_once_with(
url=self.CONFLUENCE_URL,
username="[email protected]",
password="api_token",
cloud=True,
)
def test_confluence_loader_initialization_from_env(
self, mock_confluence: MagicMock
) -> None:
with unittest.mock.patch.dict(
"os.environ",
{
"CONFLUENCE_USERNAME": self.MOCK_USERNAME,
"CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN,
},
):
ConfluenceLoader(url=self.CONFLUENCE_URL)
mock_confluence.assert_called_with(
url=self.CONFLUENCE_URL, username=None, password=None, cloud=True
)
def test_confluence_loader_load_data_invalid_args(self) -> None:
confluence_loader = ConfluenceLoader(
url=self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
with pytest.raises(
ValueError,
match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501
):
confluence_loader.load()
def test_confluence_loader_load_data_by_page_ids(
self, mock_confluence: MagicMock
) -> None:
mock_confluence.get_page_by_id.side_effect = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ["123", "456"]
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_load_data_by_space_id(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def _get_mock_confluence_loader(
self, mock_confluence: MagicMock
) -> ConfluenceLoader:
confluence_loader = ConfluenceLoader(
url=self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
confluence_loader.confluence = mock_confluence
return confluence_loader
def _get_mock_page(self, page_id: str) -> Dict:
return {
"id": f"{page_id}",
"title": f"Page {page_id}",
"body": {"storage": {"value": f"<p>Content {page_id}</p>"}},
"status": "current",
"type": "page",
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}",
"tinyui": "/x/tiny_ui_link",
"editui": f"/pages/resumedraft.action?draftId={page_id}",
"webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview",
},
}
def _get_mock_page_restrictions(self, page_id: str) -> Dict:
return {
"read": {
"operation": "read",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501
},
},
"update": {
"operation": "update",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501
},
},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501
"base": self.CONFLUENCE_URL,
"context": "/wiki",
},
}
| [
"/rest/api/content/PLACEHOLDER"
] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~vectorstores~test_faiss.py | """Test FAISS functionality."""
import datetime
import math
import tempfile
import pytest
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.docstore.wikipedia import Wikipedia
from langchain.vectorstores.faiss import FAISS
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_faiss() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_faiss_vector_sim() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content="foo")]
def test_faiss_mmr() -> None:
texts = ["foo", "foo", "fou", "foy"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
query_vec = FakeEmbeddings().embed_query(text="foo")
# make sure we can have k > docstore size
output = docsearch.max_marginal_relevance_search_with_score_by_vector(
query_vec, k=10, lambda_mult=0.1
)
assert len(output) == len(texts)
assert output[0][0] == Document(page_content="foo")
assert output[0][1] == 0.0
assert output[1][0] != Document(page_content="foo")
def test_faiss_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_faiss_with_metadatas_and_filter() -> None:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1, filter={"page": 1})
assert output == [Document(page_content="bar", metadata={"page": 1})]
def test_faiss_with_metadatas_and_list_filter() -> None:
texts = ["foo", "bar", "baz", "foo", "qux"]
metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))]
docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore(
{
docsearch.index_to_docstore_id[0]: Document(
page_content="foo", metadata={"page": 0}
),
docsearch.index_to_docstore_id[1]: Document(
page_content="bar", metadata={"page": 1}
),
docsearch.index_to_docstore_id[2]: Document(
page_content="baz", metadata={"page": 2}
),
docsearch.index_to_docstore_id[3]: Document(
page_content="foo", metadata={"page": 3}
),
docsearch.index_to_docstore_id[4]: Document(
page_content="qux", metadata={"page": 3}
),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foor", k=1, filter={"page": [0, 1, 2]})
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_faiss_search_not_found() -> None:
"""Test what happens when document is not found."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
# Get rid of the docstore to purposefully induce errors.
docsearch.docstore = InMemoryDocstore({})
with pytest.raises(ValueError):
docsearch.similarity_search("foo")
def test_faiss_add_texts() -> None:
"""Test end to end adding of texts."""
# Create initial doc store.
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
# Test adding a similar document as before.
docsearch.add_texts(["foo"])
output = docsearch.similarity_search("foo", k=2)
assert output == [Document(page_content="foo"), Document(page_content="foo")]
def test_faiss_add_texts_not_supported() -> None:
"""Test adding of texts to a docstore that doesn't support it."""
docsearch = FAISS(FakeEmbeddings().embed_query, None, Wikipedia(), {})
with pytest.raises(ValueError):
docsearch.add_texts(["foo"])
def test_faiss_local_save_load() -> None:
"""Test end to end serialization."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder:
docsearch.save_local(temp_folder)
new_docsearch = FAISS.load_local(temp_folder, FakeEmbeddings())
assert new_docsearch.index is not None
def test_faiss_similarity_search_with_relevance_scores() -> None:
"""Test the similarity search with normalized similarities."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(
texts,
FakeEmbeddings(),
relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2),
)
outputs = docsearch.similarity_search_with_relevance_scores("foo", k=1)
output, score = outputs[0]
assert output == Document(page_content="foo")
assert score == 1.0
def test_faiss_invalid_normalize_fn() -> None:
"""Test the similarity search with normalized similarities."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(
texts, FakeEmbeddings(), relevance_score_fn=lambda _: 2.0
)
with pytest.warns(Warning, match="scores must be between"):
docsearch.similarity_search_with_relevance_scores("foo", k=1)
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~memory~chat_message_histories~test_sql.py | from pathlib import Path
from typing import Tuple
import pytest
from langchain.memory.chat_message_histories import SQLChatMessageHistory
from langchain.schema.messages import AIMessage, HumanMessage
# @pytest.fixture(params=[("SQLite"), ("postgresql")])
@pytest.fixture(params=[("SQLite")])
def sql_histories(request, tmp_path: Path): # type: ignore
if request.param == "SQLite":
file_path = tmp_path / "db.sqlite3"
con_str = f"sqlite:///{file_path}"
elif request.param == "postgresql":
con_str = "postgresql://postgres:postgres@localhost/postgres"
message_history = SQLChatMessageHistory(
session_id="123", connection_string=con_str, table_name="test_table"
)
# Create history for other session
other_history = SQLChatMessageHistory(
session_id="456", connection_string=con_str, table_name="test_table"
)
yield (message_history, other_history)
message_history.clear()
other_history.clear()
def test_add_messages(
sql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory]
) -> None:
sql_history, other_history = sql_histories
sql_history.add_user_message("Hello!")
sql_history.add_ai_message("Hi there!")
messages = sql_history.messages
assert len(messages) == 2
assert isinstance(messages[0], HumanMessage)
assert isinstance(messages[1], AIMessage)
assert messages[0].content == "Hello!"
assert messages[1].content == "Hi there!"
def test_multiple_sessions(
sql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory]
) -> None:
sql_history, other_history = sql_histories
sql_history.add_user_message("Hello!")
sql_history.add_ai_message("Hi there!")
sql_history.add_user_message("Whats cracking?")
# Ensure the messages are added correctly in the first session
assert len(sql_history.messages) == 3, "waat"
assert sql_history.messages[0].content == "Hello!"
assert sql_history.messages[1].content == "Hi there!"
assert sql_history.messages[2].content == "Whats cracking?"
# second session
other_history.add_user_message("Hellox")
assert len(other_history.messages) == 1
assert len(sql_history.messages) == 3
assert other_history.messages[0].content == "Hellox"
assert sql_history.messages[0].content == "Hello!"
assert sql_history.messages[1].content == "Hi there!"
assert sql_history.messages[2].content == "Whats cracking?"
def test_clear_messages(
sql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory]
) -> None:
sql_history, other_history = sql_histories
sql_history.add_user_message("Hello!")
sql_history.add_ai_message("Hi there!")
assert len(sql_history.messages) == 2
# Now create another history with different session id
other_history.add_user_message("Hellox")
assert len(other_history.messages) == 1
assert len(sql_history.messages) == 2
# Now clear the first history
sql_history.clear()
assert len(sql_history.messages) == 0
assert len(other_history.messages) == 1
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~retrievers~self_query~qdrant.py | """Logic for converting internal query language to a valid Qdrant query."""
from __future__ import annotations
from typing import TYPE_CHECKING, Tuple
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
if TYPE_CHECKING:
from qdrant_client.http import models as rest
class QdrantTranslator(Visitor):
"""Logic for converting internal query language elements to valid filters."""
def __init__(self, metadata_key: str):
self.metadata_key = metadata_key
def visit_operation(self, operation: Operation) -> rest.Filter:
from qdrant_client.http import models as rest
args = [arg.accept(self) for arg in operation.arguments]
operator = {
Operator.AND: "must",
Operator.OR: "should",
Operator.NOT: "must_not",
}[operation.operator]
return rest.Filter(**{operator: args})
def visit_comparison(self, comparison: Comparison) -> rest.FieldCondition:
from qdrant_client.http import models as rest
self._validate_func(comparison.comparator)
attribute = self.metadata_key + "." + comparison.attribute
if comparison.comparator == Comparator.EQ:
return rest.FieldCondition(
key=attribute, match=rest.MatchValue(value=comparison.value)
)
kwargs = {comparison.comparator.value: comparison.value}
return rest.FieldCondition(key=attribute, range=rest.Range(**kwargs))
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
if structured_query.filter is None:
kwargs = {}
else:
filter = structured_query.filter.accept(self)
if isinstance(filter, rest.FieldCondition):
filter = rest.Filter(must=[filter])
kwargs = {"filter": filter}
return structured_query.query, kwargs
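# Usage sketch: with metadata_key="metadata", an equality comparison on "genre"
# becomes rest.FieldCondition(key="metadata.genre", match=rest.MatchValue(value=...)),
# and a lone FieldCondition is wrapped into rest.Filter(must=[...]) before being
# returned as the "filter" search kwarg. Requires `qdrant-client`; the
# Comparison/StructuredQuery field names are assumed from their use above.
#   translator = QdrantTranslator(metadata_key="metadata")
#   query, kwargs = translator.visit_structured_query(
#       StructuredQuery(query="smooth jazz",
#                       filter=Comparison(comparator=Comparator.EQ,
#                                         attribute="genre", value="jazz"))
#   )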
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~utilities~test_jira_api.py | """Integration test for JIRA API Wrapper."""
from langchain.utilities.jira import JiraAPIWrapper
def test_search() -> None:
"""Test for Searching issues on JIRA"""
jql = "project = TP"
jira = JiraAPIWrapper()
output = jira.run("jql", jql)
assert "issues" in output
def test_getprojects() -> None:
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper()
output = jira.run("get_projects", "")
assert "projects" in output
def test_create_ticket() -> None:
"""Test the Create Ticket Call that Creates a Issue/Ticket on JIRA."""
issue_string = (
'{"summary": "Test Summary", "description": "Test Description",'
' "issuetype": {"name": "Bug"}, "project": {"key": "TP"}}'
)
jira = JiraAPIWrapper()
output = jira.run("create_issue", issue_string)
assert "id" in output
assert "key" in output
def test_create_confluence_page() -> None:
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper()
create_page_dict = (
'{"space": "ROC", "title":"This is the title",'
'"body":"This is the body. You can use '
'<strong>HTML tags</strong>!"}'
)
output = jira.run("create_page", create_page_dict)
assert "type" in output
assert "page" in output
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~evaluation~qa~eval_chain.py | """LLM Chain specifically for evaluating question answering."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence
from langchain import PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.eval_prompt import CONTEXT_PROMPT, COT_PROMPT, PROMPT
def _parse_string_eval_output(text: str) -> dict:
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
        dict: The parsed output with "reasoning", "value", and "score" keys.
"""
splits = text.strip().rsplit("\n", maxsplit=1)
if len(splits) == 1:
verdict = splits[0]
reasoning = None
else:
reasoning, verdict = splits
reasoning = reasoning.strip()
score = (
1
if verdict.upper() == "CORRECT"
else (0 if verdict.upper() == "INCORRECT" else None)
)
return {
"reasoning": reasoning,
"value": verdict,
"score": score,
}
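# Example of the parsing above (reasoning on earlier lines, verdict on the last):
#   _parse_string_eval_output("The prediction matches the label.\nCORRECT")
#   -> {"reasoning": "The prediction matches the label.",
#       "value": "CORRECT", "score": 1}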
class QAEvalChain(LLMChain):
"""LLM Chain specifically for evaluating question answering."""
@classmethod
def from_llm(
cls, llm: BaseLanguageModel, prompt: PromptTemplate = PROMPT, **kwargs: Any
) -> QAEvalChain:
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
                'query', 'answer' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
QAEvalChain: the loaded QA eval chain.
"""
expected_input_vars = {"query", "answer", "result"}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt.input_variables}"
)
return cls(llm=llm, prompt=prompt, **kwargs)
def evaluate(
self,
examples: Sequence[dict],
predictions: Sequence[dict],
question_key: str = "query",
answer_key: str = "answer",
prediction_key: str = "result",
*,
callbacks: Callbacks = None,
) -> List[dict]:
"""Evaluate question answering examples and predictions."""
inputs = [
{
"query": example[question_key],
"answer": example[answer_key],
"result": predictions[i][prediction_key],
}
for i, example in enumerate(examples)
]
return self.apply(inputs, callbacks=callbacks)
def evaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> dict:
"""Evaluate Chain or LLM output, based on optional input and label.
Args:
prediction (str): the LLM or chain prediction to evaluate.
reference (Optional[str], optional): the reference label
to evaluate against.
input (Optional[str], optional): the input to consider during evaluation
callbacks (Callbacks, optional): the callbacks to use for tracing.
**kwargs: additional keyword arguments, including callbacks, tags, etc.
Returns:
dict: The evaluation results containing the score or value.
"""
result = self.evaluate(
examples=[{"query": input, "answer": reference}],
predictions=[{"result": prediction}],
callbacks=callbacks,
)[0]
return _parse_string_eval_output(result["text"])
async def aevaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> dict:
result = await self.acall(
inputs={"query": input, "answer": reference, "result": prediction},
callbacks=callbacks,
)
return _parse_string_eval_output(result["text"])
class ContextQAEvalChain(LLMChain):
"""LLM Chain specifically for evaluating QA w/o GT based on context"""
@classmethod
def _validate_input_vars(cls, prompt: PromptTemplate) -> None:
expected_input_vars = {"query", "context", "result"}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt.input_variables}"
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: PromptTemplate = CONTEXT_PROMPT,
**kwargs: Any,
) -> ContextQAEvalChain:
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'query', 'context' and 'result' that will be used as the prompt
for evaluation.
                Defaults to CONTEXT_PROMPT.
**kwargs: additional keyword arguments.
Returns:
ContextQAEvalChain: the loaded QA eval chain.
"""
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs)
def evaluate(
self,
examples: List[dict],
predictions: List[dict],
question_key: str = "query",
context_key: str = "context",
prediction_key: str = "result",
*,
callbacks: Callbacks = None,
) -> List[dict]:
"""Evaluate question answering examples and predictions."""
inputs = [
{
"query": example[question_key],
"context": example[context_key],
"result": predictions[i][prediction_key],
}
for i, example in enumerate(examples)
]
return self.apply(inputs, callbacks=callbacks)
def evaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
**kwargs: Any,
) -> dict:
result = self.evaluate(
examples=[{"query": input, "context": reference}],
predictions=[{"result": prediction}],
callbacks=kwargs.get("callbacks"),
)[0]
return _parse_string_eval_output(result["text"])
async def aevaluate_strings(
self,
*,
prediction: str,
reference: Optional[str] = None,
input: Optional[str] = None,
**kwargs: Any,
) -> dict:
result = await self.acall(
inputs={"query": input, "context": reference, "result": prediction},
callbacks=kwargs.get("callbacks"),
)
return _parse_string_eval_output(result["text"])
class CotQAEvalChain(ContextQAEvalChain):
"""LLM Chain specifically for evaluating QA using chain of thought reasoning."""
@classmethod
def from_llm(
cls, llm: BaseLanguageModel, prompt: PromptTemplate = COT_PROMPT, **kwargs: Any
) -> CotQAEvalChain:
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs)
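# Usage sketch for the chains above (assumes a configured LLM such as ChatOpenAI;
# keys follow the evaluate() signatures shown above):
#   eval_chain = QAEvalChain.from_llm(llm)
#   graded = eval_chain.evaluate(
#       examples=[{"query": "What is 2 + 2?", "answer": "4"}],
#       predictions=[{"result": "4"}],
#   )
#   # Each graded item holds the grader's verdict under the chain's output key
#   # ("text" here), e.g. "CORRECT" or "INCORRECT".
# ContextQAEvalChain and CotQAEvalChain work the same way, but take a "context"
# key in each example instead of an "answer".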
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~test_document_transformers.py | """Unit tests for document transformers."""
from langchain.document_transformers import _filter_similar_embeddings
from langchain.math_utils import cosine_similarity
def test__filter_similar_embeddings() -> None:
threshold = 0.79
embedded_docs = [[1.0, 2.0], [1.0, 2.0], [2.0, 1.0], [2.0, 0.5], [0.0, 0.0]]
expected = [1, 3, 4]
actual = _filter_similar_embeddings(embedded_docs, cosine_similarity, threshold)
assert expected == actual
def test__filter_similar_embeddings_empty() -> None:
assert len(_filter_similar_embeddings([], cosine_similarity, 0.0)) == 0
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~vectorstores~test_elasticsearch.py | """Test ElasticSearch functionality."""
import logging
import os
import uuid
from typing import Generator, List, Union
import pytest
from elasticsearch import Elasticsearch
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
logging.basicConfig(level=logging.DEBUG)
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f elasticsearch.yml up
"""
class TestElasticsearch:
@classmethod
def setup_class(cls) -> None:
if not os.getenv("OPENAI_API_KEY"):
raise ValueError("OPENAI_API_KEY environment variable is not set")
@pytest.fixture(scope="class", autouse=True)
def elasticsearch_url(self) -> Union[str, Generator[str, None, None]]:
"""Return the elasticsearch url."""
url = "http://localhost:9200"
yield url
es = Elasticsearch(hosts=url)
# Clear all indexes
index_names = es.indices.get(index="_all").keys()
for index_name in index_names:
# print(index_name)
es.indices.delete(index=index_name)
def test_similarity_search_without_metadata(self, elasticsearch_url: str) -> None:
"""Test end to end construction and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = ElasticVectorSearch.from_texts(
texts, FakeEmbeddings(), elasticsearch_url=elasticsearch_url
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_similarity_search_with_ssl_verify(self, elasticsearch_url: str) -> None:
"""Test end to end construction and search with ssl verify."""
ssl_verify = {
"verify_certs": True,
"basic_auth": ("ES_USER", "ES_PASSWORD"),
"ca_certs": "ES_CA_CERTS_PATH",
}
texts = ["foo", "bar", "baz"]
docsearch = ElasticVectorSearch.from_texts(
texts,
FakeEmbeddings(),
elasticsearch_url=elasticsearch_url,
ssl_verify=ssl_verify,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_similarity_search_with_metadata(self, elasticsearch_url: str) -> None:
"""Test end to end construction and search with metadata."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = ElasticVectorSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
elasticsearch_url=elasticsearch_url,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
@pytest.mark.vcr(ignore_localhost=True)
def test_default_index_from_documents(
self,
documents: List[Document],
embedding_openai: OpenAIEmbeddings,
elasticsearch_url: str,
) -> None:
"""This test checks the construction of a default
        Elasticsearch index using the 'from_documents' method."""
elastic_vector_search = ElasticVectorSearch.from_documents(
documents=documents,
embedding=embedding_openai,
elasticsearch_url=elasticsearch_url,
)
search_result = elastic_vector_search.similarity_search("sharks")
print(search_result)
assert len(search_result) != 0
@pytest.mark.vcr(ignore_localhost=True)
def test_custom_index_from_documents(
self,
documents: List[Document],
embedding_openai: OpenAIEmbeddings,
elasticsearch_url: str,
) -> None:
"""This test checks the construction of a custom
        Elasticsearch index using the 'from_documents' method."""
index_name = f"custom_index_{uuid.uuid4().hex}"
elastic_vector_search = ElasticVectorSearch.from_documents(
documents=documents,
embedding=embedding_openai,
elasticsearch_url=elasticsearch_url,
index_name=index_name,
)
es = Elasticsearch(hosts=elasticsearch_url)
index_names = es.indices.get(index="_all").keys()
assert index_name in index_names
search_result = elastic_vector_search.similarity_search("sharks")
print(search_result)
assert len(search_result) != 0
@pytest.mark.vcr(ignore_localhost=True)
def test_custom_index_add_documents(
self,
documents: List[Document],
embedding_openai: OpenAIEmbeddings,
elasticsearch_url: str,
) -> None:
"""This test checks the construction of a custom
        Elasticsearch index using the 'add_documents' method."""
index_name = f"custom_index_{uuid.uuid4().hex}"
elastic_vector_search = ElasticVectorSearch(
embedding=embedding_openai,
elasticsearch_url=elasticsearch_url,
index_name=index_name,
)
es = Elasticsearch(hosts=elasticsearch_url)
elastic_vector_search.add_documents(documents)
index_names = es.indices.get(index="_all").keys()
assert index_name in index_names
search_result = elastic_vector_search.similarity_search("sharks")
print(search_result)
assert len(search_result) != 0
def test_custom_index_add_documents_to_exists_store(self) -> None:
# TODO: implement it
pass
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~llms~test_baseten.py | """Test Baseten API wrapper."""
import os
import baseten
import pytest
from langchain.llms.baseten import Baseten
@pytest.mark.requires("baseten")
def test_baseten_call() -> None:
"""Test valid call to Baseten."""
baseten.login(os.environ["BASETEN_API_KEY"])
llm = Baseten(model=os.environ["BASETEN_MODEL_ID"])
output = llm("Say foo:")
assert isinstance(output, str)
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~embeddings~vertexai.py | """Wrapper around Google VertexAI embedding models."""
from typing import Dict, List
from pydantic import root_validator
from langchain.embeddings.base import Embeddings
from langchain.llms.vertexai import _VertexAICommon
from langchain.utilities.vertexai import raise_vertex_import_error
class VertexAIEmbeddings(_VertexAICommon, Embeddings):
model_name: str = "textembedding-gecko"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validates that the python package exists in environment."""
cls._try_init_vertexai(values)
try:
from vertexai.preview.language_models import TextEmbeddingModel
except ImportError:
raise_vertex_import_error()
values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"])
return values
def embed_documents(
self, texts: List[str], batch_size: int = 5
) -> List[List[float]]:
"""Embed a list of strings. Vertex AI currently
sets a max batch size of 5 strings.
Args:
texts: List[str] The list of strings to embed.
batch_size: [int] The batch size of embeddings to send to the model
Returns:
List of embeddings, one for each text.
"""
embeddings = []
for batch in range(0, len(texts), batch_size):
text_batch = texts[batch : batch + batch_size]
embeddings_batch = self.client.get_embeddings(text_batch)
embeddings.extend([el.values for el in embeddings_batch])
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embeddings = self.client.get_embeddings([text])
return embeddings[0].values
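# Minimal usage sketch (an illustration, not part of the library source; it
# assumes Vertex AI credentials and project are already configured in the
# environment, as handled by `_VertexAICommon`):
#
#   embeddings = VertexAIEmbeddings()
#   doc_vectors = embeddings.embed_documents(["first doc", "second doc"], batch_size=5)
#   query_vector = embeddings.embed_query("a user question")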
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~memory~test_mongodb.py | import json
import os
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import MongoDBChatMessageHistory
from langchain.schema.messages import _message_to_dict
# Replace these with your mongodb connection string
connection_string = os.environ["MONGODB_CONNECTION_STRING"]
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# setup MongoDB as a message store
message_history = MongoDBChatMessageHistory(
connection_string=connection_string, session_id="test-session"
)
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([_message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# remove the record from Azure Cosmos DB, so the next test run won't pick it up
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~test_dependencies.py | """A unit test meant to catch accidental introduction of non-optional dependencies."""
from pathlib import Path
from typing import Any, Dict, Mapping
import pytest
import toml
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture()
def poetry_conf() -> Dict[str, Any]:
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)["tool"]["poetry"]
def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
# Get the dependencies from the [tool.poetry.dependencies] section
dependencies = poetry_conf["dependencies"]
required_dependencies = [
package_name
for package_name, requirements in dependencies.items()
if isinstance(requirements, str) or not requirements.get("optional", False)
]
assert sorted(required_dependencies) == [
"PyYAML",
"SQLAlchemy",
"aiohttp",
"async-timeout",
"dataclasses-json",
"langchainplus-sdk",
"numexpr",
"numpy",
"openapi-schema-pydantic",
"pydantic",
"python",
"requests",
"tenacity",
]
def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
test_group_deps = sorted(poetry_conf["group"]["test"]["dependencies"])
assert test_group_deps == [
"duckdb-engine",
"freezegun",
"lark",
"pandas",
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-dotenv",
"pytest-mock",
"pytest-socket",
"pytest-watcher",
"responses",
"syrupy",
]
def test_imports() -> None:
"""Test that you can import all top level things okay."""
from langchain.agents import OpenAIFunctionsAgent # noqa: F401
from langchain.callbacks import OpenAICallbackHandler # noqa: F401
from langchain.chains import LLMChain # noqa: F401
from langchain.chat_models import ChatOpenAI # noqa: F401
from langchain.document_loaders import BSHTMLLoader # noqa: F401
from langchain.embeddings import OpenAIEmbeddings # noqa: F401
from langchain.llms import OpenAI # noqa: F401
from langchain.prompts import BasePromptTemplate # noqa: F401
from langchain.retrievers import VespaRetriever # noqa: F401
from langchain.tools import DuckDuckGoSearchResults # noqa: F401
from langchain.utilities import SerpAPIWrapper # noqa: F401
from langchain.vectorstores import FAISS # noqa: F401
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~vectorstores~redis.py | """Wrapper around Redis vector database."""
from __future__ import annotations
import json
import logging
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
)
import numpy as np
from pydantic import BaseModel, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore, VectorStoreRetriever
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from redis.client import Redis as RedisType
from redis.commands.search.query import Query
# required modules
REDIS_REQUIRED_MODULES = [
{"name": "search", "ver": 20400},
{"name": "searchlight", "ver": 20400},
]
# distance metrics
REDIS_DISTANCE_METRICS = Literal["COSINE", "IP", "L2"]
def _check_redis_module_exist(client: RedisType, required_modules: List[dict]) -> None:
"""Check if the correct Redis modules are installed."""
installed_modules = client.module_list()
installed_modules = {
module[b"name"].decode("utf-8"): module for module in installed_modules
}
for module in required_modules:
if module["name"] in installed_modules and int(
installed_modules[module["name"]][b"ver"]
) >= int(module["ver"]):
return
# otherwise raise error
error_message = (
"Redis cannot be used as a vector database without RediSearch >=2.4"
"Please head to https://redis.io/docs/stack/search/quick_start/"
"to know more about installing the RediSearch module within Redis Stack."
)
logging.error(error_message)
raise ValueError(error_message)
def _check_index_exists(client: RedisType, index_name: str) -> bool:
"""Check if Redis index exists."""
try:
client.ft(index_name).info()
except: # noqa: E722
logger.info("Index does not exist")
return False
logger.info("Index already exists")
return True
def _redis_key(prefix: str) -> str:
"""Redis key schema for a given prefix."""
return f"{prefix}:{uuid.uuid4().hex}"
def _redis_prefix(index_name: str) -> str:
"""Redis key prefix for a given index."""
return f"doc:{index_name}"
def _default_relevance_score(val: float) -> float:
return 1 - val
class Redis(VectorStore):
"""Wrapper around Redis vector database.
To use, you should have the ``redis`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Redis(
redis_url="redis://username:password@localhost:6379"
index_name="my-index",
embedding_function=embeddings.embed_query,
)
"""
def __init__(
self,
redis_url: str,
index_name: str,
embedding_function: Callable,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_relevance_score,
**kwargs: Any,
):
"""Initialize with necessary components."""
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis>=4.1.0`."
)
self.embedding_function = embedding_function
self.index_name = index_name
try:
# connect to redis from url
redis_client = redis.from_url(redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(redis_client, REDIS_REQUIRED_MODULES)
except ValueError as e:
raise ValueError(f"Redis failed to connect: {e}")
self.client = redis_client
self.content_key = content_key
self.metadata_key = metadata_key
self.vector_key = vector_key
self.relevance_score_fn = relevance_score_fn
def _create_index(
self, dim: int = 1536, distance_metric: REDIS_DISTANCE_METRICS = "COSINE"
) -> None:
try:
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
# Check if index exists
if not _check_index_exists(self.client, self.index_name):
# Define schema
schema = (
TextField(name=self.content_key),
TextField(name=self.metadata_key),
VectorField(
self.vector_key,
"FLAT",
{
"TYPE": "FLOAT32",
"DIM": dim,
"DISTANCE_METRIC": distance_metric,
},
),
)
prefix = _redis_prefix(self.index_name)
# Create Redis Index
self.client.ft(self.index_name).create_index(
fields=schema,
definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH),
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
embeddings: Optional[List[List[float]]] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Add more texts to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
Defaults to None.
embeddings (Optional[List[List[float]]], optional): Optional pre-generated
embeddings. Defaults to None.
keys (List[str]) or ids (List[str]): Identifiers of entries.
Defaults to None.
batch_size (int, optional): Batch size to use for writes. Defaults to 1000.
Returns:
List[str]: List of ids added to the vectorstore
"""
ids = []
prefix = _redis_prefix(self.index_name)
# Get keys or ids from kwargs
# Other vectorstores use ids
keys_or_ids = kwargs.get("keys", kwargs.get("ids"))
# Write data to redis
pipeline = self.client.pipeline(transaction=False)
for i, text in enumerate(texts):
# Use provided values by default or fallback
key = keys_or_ids[i] if keys_or_ids else _redis_key(prefix)
metadata = metadatas[i] if metadatas else {}
embedding = embeddings[i] if embeddings else self.embedding_function(text)
pipeline.hset(
key,
mapping={
self.content_key: text,
self.vector_key: np.array(embedding, dtype=np.float32).tobytes(),
self.metadata_key: json.dumps(metadata),
},
)
ids.append(key)
# Write batch
if i % batch_size == 0:
pipeline.execute()
# Cleanup final batch
pipeline.execute()
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores]
def similarity_search_limit_score(
self, query: str, k: int = 4, score_threshold: float = 0.2, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text within the
score_threshold range.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
score_threshold (float): The minimum matching score required for a document
to be considered a match. Defaults to 0.2.
Because the similarity calculation algorithm is based on cosine similarity,
the smaller the angle, the higher the similarity.
Returns:
List[Document]: A list of documents that are most similar to the query text,
including the match score for each document.
Note:
If there are no documents that satisfy the score_threshold value,
an empty list is returned.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, score in docs_and_scores if score < score_threshold]
def _prepare_query(self, k: int) -> Query:
try:
from redis.commands.search.query import Query
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
# Prepare the Query
hybrid_fields = "*"
base_query = (
f"{hybrid_fields}=>[KNN {k} @{self.vector_key} $vector AS vector_score]"
)
return_fields = [self.metadata_key, self.content_key, "vector_score"]
return (
Query(base_query)
.return_fields(*return_fields)
.sort_by("vector_score")
.paging(0, k)
.dialect(2)
)
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
# Creates embedding vector from user query
embedding = self.embedding_function(query)
# Creates Redis query
redis_query = self._prepare_query(k)
params_dict: Mapping[str, str] = {
"vector": np.array(embedding) # type: ignore
.astype(dtype=np.float32)
.tobytes()
}
# Perform vector search
results = self.client.ft(self.index_name).search(redis_query, params_dict)
# Prepare document results
docs = [
(
Document(
page_content=result.content, metadata=json.loads(result.metadata)
),
float(result.vector_score),
)
for result in results.docs
]
return docs
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
"""
if self.relevance_score_fn is None:
raise ValueError(
"relevance_score_fn must be provided to"
" Redis constructor to normalize scores"
)
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores]
@classmethod
def from_texts_return_keys(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
distance_metric: REDIS_DISTANCE_METRICS = "COSINE",
**kwargs: Any,
) -> Tuple[Redis, List[str]]:
"""Create a Redis vectorstore from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in Redis.
3. Adds the documents to the newly created Redis index.
4. Returns the keys of the newly created documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch, keys = RediSearch.from_texts_return_keys(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
if "redis_url" in kwargs:
kwargs.pop("redis_url")
# Name of the search index if not given
if not index_name:
index_name = uuid.uuid4().hex
# Create instance
instance = cls(
redis_url,
index_name,
embedding.embed_query,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
**kwargs,
)
# Create embeddings over documents
embeddings = embedding.embed_documents(texts)
# Create the search index
instance._create_index(dim=len(embeddings[0]), distance_metric=distance_metric)
# Add data to Redis
keys = instance.add_texts(texts, metadatas, embeddings)
return instance, keys
@classmethod
def from_texts(
cls: Type[Redis],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Create a Redis vectorstore from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in Redis.
3. Adds the documents to the newly created Redis index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = RediSearch.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
"""
instance, _ = cls.from_texts_return_keys(
texts,
embedding,
metadatas=metadatas,
index_name=index_name,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
**kwargs,
)
return instance
@staticmethod
def delete(
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> bool:
"""
Delete a Redis entry.
Args:
ids: List of ids (keys) to delete.
Returns:
bool: Whether or not the deletions were successful.
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
if ids is None:
raise ValueError("'ids' (keys)() were not provided.")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f"Your redis connected error: {e}")
# Check if index exists
try:
client.delete(*ids)
logger.info("Entries deleted")
return True
except: # noqa: E722
# ids does not exist
return False
@staticmethod
def drop_index(
index_name: str,
delete_documents: bool,
**kwargs: Any,
) -> bool:
"""
Drop a Redis search index.
Args:
index_name (str): Name of the index to drop.
delete_documents (bool): Whether to drop the associated documents.
Returns:
bool: Whether or not the drop was successful.
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f"Your redis connected error: {e}")
# Check if index exists
try:
client.ft(index_name).dropindex(delete_documents)
logger.info("Drop index")
return True
except: # noqa: E722
# Index not exist
return False
@classmethod
def from_existing_index(
cls,
embedding: Embeddings,
index_name: str,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Connect to an existing Redis index."""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(client, REDIS_REQUIRED_MODULES)
# ensure that the index already exists
assert _check_index_exists(
client, index_name
), f"Index {index_name} does not exist"
except Exception as e:
raise ValueError(f"Redis failed to connect: {e}")
return cls(
redis_url,
index_name,
embedding.embed_query,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
**kwargs,
)
def as_retriever(self, **kwargs: Any) -> RedisVectorStoreRetriever:
return RedisVectorStoreRetriever(vectorstore=self, **kwargs)
class RedisVectorStoreRetriever(VectorStoreRetriever, BaseModel):
vectorstore: Redis
search_type: str = "similarity"
k: int = 4
score_threshold: float = 0.4
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "similarity_limit"):
raise ValueError(f"search_type of {search_type} not allowed.")
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(query, k=self.k)
elif self.search_type == "similarity_limit":
docs = self.vectorstore.similarity_search_limit_score(
query, k=self.k, score_threshold=self.score_threshold
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
raise NotImplementedError("RedisVectorStoreRetriever does not support async")
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
return self.vectorstore.add_documents(documents, **kwargs)
async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
return await self.vectorstore.aadd_documents(documents, **kwargs)
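# Usage sketch for the retriever wrapper above (illustrative only; it assumes a
# Redis Stack instance at the given URL and an index that already holds data):
#
#   rds = Redis.from_existing_index(
#       embedding=OpenAIEmbeddings(),
#       index_name="my-index",
#       redis_url="redis://localhost:6379",
#   )
#   retriever = rds.as_retriever(search_type="similarity_limit", score_threshold=0.3)
#   docs = retriever.get_relevant_documents("my query")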
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~vectorstores~test_deeplake.py | """Test Deep Lake functionality."""
import deeplake
import pytest
from pytest import FixtureRequest
from langchain.docstore.document import Document
from langchain.vectorstores import DeepLake
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.fixture
def deeplake_datastore() -> DeepLake:
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = DeepLake.from_texts(
dataset_path="mem://test_path",
texts=texts,
metadatas=metadatas,
embedding=FakeEmbeddings(),
)
return docsearch
@pytest.fixture(params=["L1", "L2", "max", "cos"])
def distance_metric(request: FixtureRequest) -> str:
return request.param
def test_deeplake() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = DeepLake.from_texts(
dataset_path="mem://test_path", texts=texts, embedding=FakeEmbeddings()
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_deeplake_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = DeepLake.from_texts(
dataset_path="mem://test_path",
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_deeplake_with_persistence() -> None:
"""Test end to end construction and search, with persistence."""
dataset_path = "./tests/persist_dir"
if deeplake.exists(dataset_path):
deeplake.delete(dataset_path)
texts = ["foo", "bar", "baz"]
docsearch = DeepLake.from_texts(
dataset_path=dataset_path,
texts=texts,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
# Clean up
docsearch.delete_dataset()
# Persist doesn't need to be called again
# Data will be automatically persisted on object deletion
# Or on program exit
def test_deeplake_overwrite_flag() -> None:
"""Test overwrite behavior"""
dataset_path = "./tests/persist_dir"
if deeplake.exists(dataset_path):
deeplake.delete(dataset_path)
texts = ["foo", "bar", "baz"]
docsearch = DeepLake.from_texts(
dataset_path=dataset_path,
texts=texts,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory, with no overwrite (implicit)
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
# assert page still present
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory, with no overwrite (explicit)
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
overwrite=False,
)
output = docsearch.similarity_search("foo", k=1)
# assert page still present
assert output == [Document(page_content="foo")]
# Get a new VectorStore from the persisted directory, with overwrite
docsearch = DeepLake(
dataset_path=dataset_path,
embedding_function=FakeEmbeddings(),
overwrite=True,
)
with pytest.raises(ValueError):
output = docsearch.similarity_search("foo", k=1)
def test_similarity_search(deeplake_datastore: DeepLake, distance_metric: str) -> None:
"""Test similarity search."""
output = deeplake_datastore.similarity_search(
"foo", k=1, distance_metric=distance_metric
)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
deeplake_datastore.delete_dataset()
def test_similarity_search_by_vector(
deeplake_datastore: DeepLake, distance_metric: str
) -> None:
"""Test similarity search by vector."""
embeddings = FakeEmbeddings().embed_documents(["foo", "bar", "baz"])
output = deeplake_datastore.similarity_search_by_vector(
embeddings[1], k=1, distance_metric=distance_metric
)
assert output == [Document(page_content="bar", metadata={"page": "1"})]
deeplake_datastore.delete_dataset()
def test_similarity_search_with_score(
deeplake_datastore: DeepLake, distance_metric: str
) -> None:
"""Test similarity search with score."""
output, score = deeplake_datastore.similarity_search_with_score(
"foo", k=1, distance_metric=distance_metric
)[0]
assert output == Document(page_content="foo", metadata={"page": "0"})
if distance_metric == "cos":
assert score == 1.0
else:
assert score == 0.0
deeplake_datastore.delete_dataset()
def test_similarity_search_with_filter(
deeplake_datastore: DeepLake, distance_metric: str
) -> None:
"""Test similarity search."""
output = deeplake_datastore.similarity_search(
"foo",
k=1,
distance_metric=distance_metric,
filter={"metadata": {"page": "1"}},
)
assert output == [Document(page_content="bar", metadata={"page": "1"})]
deeplake_datastore.delete_dataset()
def test_max_marginal_relevance_search(deeplake_datastore: DeepLake) -> None:
"""Test max marginal relevance search by vector."""
output = deeplake_datastore.max_marginal_relevance_search("foo", k=1, fetch_k=2)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
embeddings = FakeEmbeddings().embed_documents(["foo", "bar", "baz"])
output = deeplake_datastore.max_marginal_relevance_search_by_vector(
embeddings[0], k=1, fetch_k=2
)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
deeplake_datastore.delete_dataset()
def test_delete_dataset_by_ids(deeplake_datastore: DeepLake) -> None:
"""Test delete dataset."""
id = deeplake_datastore.vectorstore.dataset.id.data()["value"][0]
deeplake_datastore.delete(ids=[id])
assert (
deeplake_datastore.similarity_search(
"foo", k=1, filter={"metadata": {"page": "0"}}
)
== []
)
assert len(deeplake_datastore.vectorstore) == 2
deeplake_datastore.delete_dataset()
def test_delete_dataset_by_filter(deeplake_datastore: DeepLake) -> None:
"""Test delete dataset."""
deeplake_datastore.delete(filter={"metadata": {"page": "1"}})
assert (
deeplake_datastore.similarity_search(
"bar", k=1, filter={"metadata": {"page": "1"}}
)
== []
)
assert len(deeplake_datastore.vectorstore.dataset) == 2
deeplake_datastore.delete_dataset()
def test_delete_by_path(deeplake_datastore: DeepLake) -> None:
"""Test delete dataset."""
path = deeplake_datastore.dataset_path
DeepLake.force_delete_by_path(path)
assert not deeplake.exists(path)
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~document_loaders~blob_loaders~youtube_audio.py | from typing import Iterable, List
from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader
class YoutubeAudioLoader(BlobLoader):
"""Load YouTube urls as audio file(s)."""
def __init__(self, urls: List[str], save_dir: str):
if not isinstance(urls, list):
raise TypeError("urls must be a list")
self.urls = urls
self.save_dir = save_dir
def yield_blobs(self) -> Iterable[Blob]:
"""Yield audio blobs for each url."""
try:
import yt_dlp
except ImportError:
raise ValueError(
"yt_dlp package not found, please install it with "
"`pip install yt_dlp`"
)
# Use yt_dlp to download audio given a YouTube url
ydl_opts = {
"format": "m4a/bestaudio/best",
"noplaylist": True,
"outtmpl": self.save_dir + "/%(title)s.%(ext)s",
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "m4a",
}
],
}
for url in self.urls:
# Download file
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
ydl.download(url)
# Yield the written blobs
loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a")
for blob in loader.yield_blobs():
yield blob
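# Usage sketch (illustrative; the URL and save directory are placeholders, and
# ffmpeg must be available for the m4a post-processing step):
#
#   loader = YoutubeAudioLoader(["https://www.youtube.com/watch?v=<video-id>"], "./audio")
#   for blob in loader.yield_blobs():
#       print(blob.path)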
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~llms~test_mosaicml.py | """Test MosaicML API wrapper."""
import pytest
from langchain.llms.mosaicml import PROMPT_FOR_GENERATION_FORMAT, MosaicML
def test_mosaicml_llm_call() -> None:
"""Test valid call to MosaicML."""
llm = MosaicML(model_kwargs={})
output = llm("Say foo:")
assert isinstance(output, str)
def test_mosaicml_endpoint_change() -> None:
"""Test valid call to MosaicML."""
new_url = "https://models.hosted-on.mosaicml.hosting/dolly-12b/v1/predict"
llm = MosaicML(endpoint_url=new_url)
assert llm.endpoint_url == new_url
output = llm("Say foo:")
assert isinstance(output, str)
def test_mosaicml_extra_kwargs() -> None:
llm = MosaicML(model_kwargs={"max_new_tokens": 1})
assert llm.model_kwargs == {"max_new_tokens": 1}
output = llm("Say foo:")
assert isinstance(output, str)
# should only generate one new token (which might be a new line or whitespace token)
assert len(output.split()) <= 1
def test_instruct_prompt() -> None:
"""Test instruct prompt."""
llm = MosaicML(inject_instruction_format=True, model_kwargs={"do_sample": False})
instruction = "Repeat the word foo"
prompt = llm._transform_prompt(instruction)
expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
assert prompt == expected_prompt
output = llm(prompt)
assert isinstance(output, str)
def test_retry_logic() -> None:
"""Tests that two queries (which would usually exceed the rate limit) works"""
llm = MosaicML(inject_instruction_format=True, model_kwargs={"do_sample": False})
instruction = "Repeat the word foo"
prompt = llm._transform_prompt(instruction)
expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
assert prompt == expected_prompt
output = llm(prompt)
assert isinstance(output, str)
output = llm(prompt)
assert isinstance(output, str)
def test_short_retry_does_not_loop() -> None:
"""Tests that two queries with a short retry sleep does not infinite loop"""
llm = MosaicML(
inject_instruction_format=True,
model_kwargs={"do_sample": False},
retry_sleep=0.1,
)
instruction = "Repeat the word foo"
prompt = llm._transform_prompt(instruction)
expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
assert prompt == expected_prompt
with pytest.raises(
ValueError,
match="Error raised by inference API: Rate limit exceeded: 1 per 1 second",
):
output = llm(prompt)
assert isinstance(output, str)
output = llm(prompt)
assert isinstance(output, str)
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~embeddings~elasticsearch.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain.utils import get_from_env
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
from langchain.embeddings.base import Embeddings
class ElasticsearchEmbeddings(Embeddings):
"""
Wrapper around Elasticsearch embedding models.
This class provides an interface to generate embeddings using a model deployed
in an Elasticsearch cluster. It requires an Elasticsearch connection object
and the model_id of the model deployed in the cluster.
In Elasticsearch you need to have an embedding model loaded and deployed.
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
- https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
""" # noqa: E501
def __init__(
self,
client: MlClient,
model_id: str,
*,
input_field: str = "text_field",
):
"""
Initialize the ElasticsearchEmbeddings instance.
Args:
client (MlClient): An Elasticsearch ML client object.
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
"""
self.client = client
self.model_id = model_id
self.input_field = input_field
@classmethod
def from_credentials(
cls,
model_id: str,
*,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""Instantiate embeddings from Elasticsearch credentials.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
es_user: (str, optional): Elasticsearch username.
es_password: (str, optional): Elasticsearch password.
Example:
.. code-block:: python
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
# pulled in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
                embeddings.embed_documents(documents)
"""
try:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
except ImportError:
raise ImportError(
"elasticsearch package not found, please install with 'pip install "
"elasticsearch'"
)
es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID")
es_user = es_user or get_from_env("es_user", "ES_USER")
es_password = es_password or get_from_env("es_password", "ES_PASSWORD")
# Connect to Elasticsearch
es_connection = Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
client = MlClient(es_connection)
return cls(client, model_id, input_field=input_field)
@classmethod
def from_es_connection(
cls,
model_id: str,
es_connection: Elasticsearch,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""
Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Args:
            model_id (str): The model_id of the model deployed in the
                Elasticsearch cluster.
            es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
                connection object.
            input_field (str, optional): The name of the key for the
                input text field in the document. Defaults to 'text_field'.
Returns:
ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class.
Example:
.. code-block:: python
from elasticsearch import Elasticsearch
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
                embeddings.embed_documents(documents)
"""
# Importing MlClient from elasticsearch.client within the method to
# avoid unnecessary import if the method is not used
from elasticsearch.client import MlClient
# Create an MlClient from the given Elasticsearch connection
client = MlClient(es_connection)
# Return a new instance of the ElasticsearchEmbeddings class with
# the MlClient, model_id, and input_field
return cls(client, model_id, input_field=input_field)
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for the given texts using the Elasticsearch model.
Args:
texts (List[str]): A list of text strings to generate embeddings for.
Returns:
List[List[float]]: A list of embeddings, one for each text in the input
list.
"""
response = self.client.infer_trained_model(
model_id=self.model_id, docs=[{self.input_field: text} for text in texts]
)
embeddings = [doc["predicted_value"] for doc in response["inference_results"]]
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for a list of documents.
Args:
texts (List[str]): A list of document text strings to generate embeddings
for.
Returns:
List[List[float]]: A list of embeddings, one for each document in the input
list.
"""
return self._embedding_func(texts)
def embed_query(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
return self._embedding_func([text])[0]
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~model_laboratory.py | """Experiment with different models."""
from __future__ import annotations
from typing import List, Optional, Sequence
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.input import get_color_mapping, print_text
from langchain.llms.base import BaseLLM
from langchain.prompts.prompt import PromptTemplate
class ModelLaboratory:
"""Experiment with different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[List[str]] = None):
"""Initialize with chains to experiment with.
Args:
chains: list of chains to experiment with.
"""
for chain in chains:
if not isinstance(chain, Chain):
raise ValueError(
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
if len(chain.input_keys) != 1:
raise ValueError(
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
if len(chain.output_keys) != 1:
raise ValueError(
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
)
if names is not None:
if len(names) != len(chains):
raise ValueError("Length of chains does not match length of names.")
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: List[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize with LLMs to experiment with and optional prompt.
Args:
llms: list of LLMs to experiment with
prompt: Optional prompt to use to prompt the LLMs. Defaults to None.
If a prompt was provided, it should only have one input variable.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
If a prompt was provided with starting the laboratory, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n")
for i, chain in enumerate(self.chains):
if self.names is not None:
name = self.names[i]
else:
name = str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
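# Usage sketch (illustrative; the LLMs and temperatures are arbitrary choices):
#
#   from langchain.llms import OpenAI
#
#   lab = ModelLaboratory.from_llms(
#       [OpenAI(temperature=0), OpenAI(temperature=0.9)]
#   )
#   lab.compare("What color is a ripe banana?")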
| [
"_input",
"{_input}"
] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~vectorstores~test_opensearch.py | """Test OpenSearch functionality."""
import pytest
from langchain.docstore.document import Document
from langchain.vectorstores.opensearch_vector_search import (
PAINLESS_SCRIPTING_SEARCH,
SCRIPT_SCORING_SEARCH,
OpenSearchVectorSearch,
)
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
DEFAULT_OPENSEARCH_URL = "http://localhost:9200"
texts = ["foo", "bar", "baz"]
def test_opensearch() -> None:
"""Test end to end indexing and search using Approximate Search."""
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_similarity_search_with_score() -> None:
"""Test similarity search with score using Approximate Search."""
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
opensearch_url=DEFAULT_OPENSEARCH_URL,
)
output = docsearch.similarity_search_with_score("foo", k=2)
assert output == [
(Document(page_content="foo", metadata={"page": 0}), 1.0),
(Document(page_content="bar", metadata={"page": 1}), 0.5),
]
def test_opensearch_with_custom_field_name() -> None:
"""Test indexing and search using custom vector field and text field name."""
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
vector_field="my_vector",
text_field="custom_text",
)
output = docsearch.similarity_search(
"foo", k=1, vector_field="my_vector", text_field="custom_text"
)
assert output == [Document(page_content="foo")]
text_input = ["test", "add", "text", "method"]
OpenSearchVectorSearch.add_texts(
docsearch, text_input, vector_field="my_vector", text_field="custom_text"
)
output = docsearch.similarity_search(
"add", k=1, vector_field="my_vector", text_field="custom_text"
)
assert output == [Document(page_content="foo")]
def test_opensearch_with_metadatas() -> None:
"""Test end to end indexing and search with metadata."""
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
opensearch_url=DEFAULT_OPENSEARCH_URL,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_add_text() -> None:
"""Test adding additional text elements to existing index."""
text_input = ["test", "add", "text", "method"]
metadatas = [{"page": i} for i in range(len(text_input))]
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
docids = OpenSearchVectorSearch.add_texts(docsearch, text_input, metadatas)
assert len(docids) == len(text_input)
def test_opensearch_script_scoring() -> None:
"""Test end to end indexing and search using Script Scoring Search."""
pre_filter_val = {"bool": {"filter": {"term": {"text": "bar"}}}}
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
output = docsearch.similarity_search(
"foo", k=1, search_type=SCRIPT_SCORING_SEARCH, pre_filter=pre_filter_val
)
assert output == [Document(page_content="bar")]
def test_add_text_script_scoring() -> None:
"""Test adding additional text elements and validating using Script Scoring."""
text_input = ["test", "add", "text", "method"]
metadatas = [{"page": i} for i in range(len(text_input))]
docsearch = OpenSearchVectorSearch.from_texts(
text_input,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
OpenSearchVectorSearch.add_texts(docsearch, texts, metadatas)
output = docsearch.similarity_search(
"add", k=1, search_type=SCRIPT_SCORING_SEARCH, space_type="innerproduct"
)
assert output == [Document(page_content="test")]
def test_opensearch_painless_scripting() -> None:
"""Test end to end indexing and search using Painless Scripting Search."""
pre_filter_val = {"bool": {"filter": {"term": {"text": "baz"}}}}
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
output = docsearch.similarity_search(
"foo", k=1, search_type=PAINLESS_SCRIPTING_SEARCH, pre_filter=pre_filter_val
)
assert output == [Document(page_content="baz")]
def test_add_text_painless_scripting() -> None:
"""Test adding additional text elements and validating using Painless Scripting."""
text_input = ["test", "add", "text", "method"]
metadatas = [{"page": i} for i in range(len(text_input))]
docsearch = OpenSearchVectorSearch.from_texts(
text_input,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
OpenSearchVectorSearch.add_texts(docsearch, texts, metadatas)
output = docsearch.similarity_search(
"add", k=1, search_type=PAINLESS_SCRIPTING_SEARCH, space_type="cosineSimilarity"
)
assert output == [Document(page_content="test")]
def test_opensearch_invalid_search_type() -> None:
"""Test to validate similarity_search by providing invalid search_type."""
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
with pytest.raises(ValueError):
docsearch.similarity_search("foo", k=1, search_type="invalid_search_type")
def test_opensearch_embedding_size_zero() -> None:
"""Test to validate indexing when embedding size is zero."""
with pytest.raises(RuntimeError):
OpenSearchVectorSearch.from_texts(
[], FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
def test_appx_search_with_boolean_filter() -> None:
"""Test Approximate Search with Boolean Filter."""
boolean_filter_val = {"bool": {"must": [{"term": {"text": "bar"}}]}}
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
)
output = docsearch.similarity_search(
"foo", k=3, boolean_filter=boolean_filter_val, subquery_clause="should"
)
assert output == [Document(page_content="bar")]
def test_appx_search_with_lucene_filter() -> None:
"""Test Approximate Search with Lucene Filter."""
lucene_filter_val = {"bool": {"must": [{"term": {"text": "bar"}}]}}
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine="lucene"
)
output = docsearch.similarity_search("foo", k=3, lucene_filter=lucene_filter_val)
assert output == [Document(page_content="bar")]
def test_opensearch_with_custom_field_name_appx_true() -> None:
"""Test Approximate Search with custom field name appx true."""
text_input = ["add", "test", "text", "method"]
docsearch = OpenSearchVectorSearch.from_texts(
text_input,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=True,
)
output = docsearch.similarity_search("add", k=1)
assert output == [Document(page_content="add")]
def test_opensearch_with_custom_field_name_appx_false() -> None:
"""Test Approximate Search with custom field name appx true."""
text_input = ["add", "test", "text", "method"]
docsearch = OpenSearchVectorSearch.from_texts(
text_input, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
output = docsearch.similarity_search("add", k=1)
assert output == [Document(page_content="add")]
| [] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~test_math_utils.py | """Test math utility functions."""
from typing import List
import numpy as np
import pytest
from langchain.math_utils import cosine_similarity, cosine_similarity_top_k
@pytest.fixture
def X() -> List[List[float]]:
return [[1.0, 2.0, 3.0], [0.0, 1.0, 0.0], [1.0, 2.0, 0.0]]
@pytest.fixture
def Y() -> List[List[float]]:
return [[0.5, 1.0, 1.5], [1.0, 0.0, 0.0], [2.0, 5.0, 2.0], [0.0, 0.0, 0.0]]
def test_cosine_similarity_zero() -> None:
X = np.zeros((3, 3))
Y = np.random.random((3, 3))
expected = np.zeros((3, 3))
actual = cosine_similarity(X, Y)
assert np.allclose(expected, actual)
def test_cosine_similarity_identity() -> None:
X = np.random.random((4, 4))
expected = np.ones(4)
actual = np.diag(cosine_similarity(X, X))
assert np.allclose(expected, actual)
def test_cosine_similarity_empty() -> None:
empty_list: List[List[float]] = []
assert len(cosine_similarity(empty_list, empty_list)) == 0
assert len(cosine_similarity(empty_list, np.random.random((3, 3)))) == 0
def test_cosine_similarity(X: List[List[float]], Y: List[List[float]]) -> None:
expected = [
[1.0, 0.26726124, 0.83743579, 0.0],
[0.53452248, 0.0, 0.87038828, 0.0],
[0.5976143, 0.4472136, 0.93419873, 0.0],
]
actual = cosine_similarity(X, Y)
assert np.allclose(expected, actual)
def test_cosine_similarity_top_k(X: List[List[float]], Y: List[List[float]]) -> None:
expected_idxs = [(0, 0), (2, 2), (1, 2), (0, 2), (2, 0)]
expected_scores = [1.0, 0.93419873, 0.87038828, 0.83743579, 0.5976143]
actual_idxs, actual_scores = cosine_similarity_top_k(X, Y)
assert actual_idxs == expected_idxs
assert np.allclose(expected_scores, actual_scores)
def test_cosine_similarity_score_threshold(
X: List[List[float]], Y: List[List[float]]
) -> None:
expected_idxs = [(0, 0), (2, 2)]
expected_scores = [1.0, 0.93419873]
actual_idxs, actual_scores = cosine_similarity_top_k(
X, Y, top_k=None, score_threshold=0.9
)
assert actual_idxs == expected_idxs
assert np.allclose(expected_scores, actual_scores)
def test_cosine_similarity_top_k_and_score_threshold(
X: List[List[float]], Y: List[List[float]]
) -> None:
expected_idxs = [(0, 0), (2, 2), (1, 2), (0, 2)]
expected_scores = [1.0, 0.93419873, 0.87038828, 0.83743579]
actual_idxs, actual_scores = cosine_similarity_top_k(X, Y, score_threshold=0.8)
assert actual_idxs == expected_idxs
assert np.allclose(expected_scores, actual_scores)
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~vectorstores~test_azuresearch.py | import os
import time
import openai
import pytest
from dotenv import load_dotenv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.azuresearch import AzureSearch
load_dotenv()
# Azure OpenAI settings
openai.api_type = "azure"
openai.api_base = os.getenv("OPENAI_API_BASE", "")
openai.api_version = "2023-05-15"
openai.api_key = os.getenv("OPENAI_API_KEY", "")
model: str = os.getenv("OPENAI_EMBEDDINGS_ENGINE_DOC", "text-embedding-ada-002")
# Vector store settings
vector_store_address: str = os.getenv("AZURE_SEARCH_ENDPOINT", "")
vector_store_password: str = os.getenv("AZURE_SEARCH_ADMIN_KEY", "")
index_name: str = "embeddings-vector-store-test"
@pytest.fixture
def similarity_search_test() -> None:
"""Test end to end construction and search."""
# Create Embeddings
embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1)
# Create Vector store
vector_store: AzureSearch = AzureSearch(
azure_search_endpoint=vector_store_address,
azure_search_key=vector_store_password,
index_name=index_name,
embedding_function=embeddings.embed_query,
)
# Add texts to vector store and perform a similarity search
vector_store.add_texts(
["Test 1", "Test 2", "Test 3"],
[
{"title": "Title 1", "any_metadata": "Metadata 1"},
{"title": "Title 2", "any_metadata": "Metadata 2"},
{"title": "Title 3", "any_metadata": "Metadata 3"},
],
)
time.sleep(1)
res = vector_store.similarity_search(query="Test 1", k=3)
assert len(res) == 3
def from_text_similarity_search_test() -> None:
"""Test end to end construction and search."""
# Create Embeddings
embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1)
# Create Vector store
vector_store: AzureSearch = AzureSearch.from_texts(
azure_search_endpoint=vector_store_address,
azure_search_key=vector_store_password,
index_name=index_name,
texts=["Test 1", "Test 2", "Test 3"],
embedding=embeddings,
)
time.sleep(1)
# Perform a similarity search
res = vector_store.similarity_search(query="Test 1", k=3)
assert len(res) == 3
def test_semantic_hybrid_search() -> None:
"""Test end to end construction and search."""
# Create Embeddings
embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1)
# Create Vector store
vector_store: AzureSearch = AzureSearch(
azure_search_endpoint=vector_store_address,
azure_search_key=vector_store_password,
index_name=index_name,
embedding_function=embeddings.embed_query,
semantic_configuration_name="default",
)
# Add texts to vector store and perform a semantic hybrid search
vector_store.add_texts(
["Test 1", "Test 2", "Test 3"],
[
{"title": "Title 1", "any_metadata": "Metadata 1"},
{"title": "Title 2", "any_metadata": "Metadata 2"},
{"title": "Title 3", "any_metadata": "Metadata 3"},
],
)
time.sleep(1)
res = vector_store.semantic_hybrid_search(query="What's Azure Search?", k=3)
assert len(res) == 3
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~embeddings~test_self_hosted.py | """Test self-hosted embeddings."""
from typing import Any
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.embeddings import (
SelfHostedEmbeddings,
SelfHostedHuggingFaceEmbeddings,
SelfHostedHuggingFaceInstructEmbeddings,
)
def get_remote_instance() -> Any:
"""Get remote instance for testing."""
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
gpu.install_packages(["pip:./"])
return gpu
def test_self_hosted_huggingface_embedding_documents() -> None:
"""Test self-hosted huggingface embeddings."""
documents = ["foo bar"]
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_self_hosted_huggingface_embedding_query() -> None:
"""Test self-hosted huggingface embeddings."""
document = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_query(document)
assert len(output) == 768
def test_self_hosted_huggingface_instructor_embedding_documents() -> None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ["foo bar"]
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_self_hosted_huggingface_instructor_embedding_query() -> None:
"""Test self-hosted huggingface instruct embeddings."""
query = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_query(query)
assert len(output) == 768
def get_pipeline() -> Any:
"""Get pipeline for testing."""
model_id = "facebook/bart-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
def inference_fn(pipeline: Any, prompt: str) -> Any:
"""Inference function for testing."""
# Return last hidden state of the model
if isinstance(prompt, list):
return [emb[0][-1] for emb in pipeline(prompt)]
return pipeline(prompt)[0][-1]
def test_self_hosted_embedding_documents() -> None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ["foo bar"] * 2
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings(
model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
)
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 50265
def test_self_hosted_embedding_query() -> None:
"""Test self-hosted custom embeddings."""
query = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings(
model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
)
output = embedding.embed_query(query)
assert len(output) == 50265
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~agents~agent_toolkits~azure_cognitive_services~toolkit.py | from __future__ import annotations
import sys
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools.azure_cognitive_services import (
AzureCogsFormRecognizerTool,
AzureCogsImageAnalysisTool,
AzureCogsSpeech2TextTool,
AzureCogsText2SpeechTool,
)
from langchain.tools.base import BaseTool
class AzureCognitiveServicesToolkit(BaseToolkit):
"""Toolkit for Azure Cognitive Services."""
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools = [
AzureCogsFormRecognizerTool(),
AzureCogsSpeech2TextTool(),
AzureCogsText2SpeechTool(),
]
# TODO: Remove check once azure-ai-vision supports MacOS.
if sys.platform.startswith("linux") or sys.platform.startswith("win"):
tools.append(AzureCogsImageAnalysisTool())
return tools
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~document_loaders~test_blockchain.py | import os
import time
import pytest
from langchain.document_loaders import BlockchainDocumentLoader
from langchain.document_loaders.blockchain import BlockchainType
if "ALCHEMY_API_KEY" in os.environ:
alchemyKeySet = True
apiKey = os.environ["ALCHEMY_API_KEY"]
else:
alchemyKeySet = False
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_nfts_valid_contract() -> None:
max_alchemy_tokens = 100
contract_address = (
"0x1a92f7381b9f03921564a437210bb9396471050c" # CoolCats contract address
)
result = BlockchainDocumentLoader(contract_address).load()
print("Tokens returend for valid contract: ", len(result))
assert len(result) == max_alchemy_tokens, (
f"Wrong number of NFTs returned. "
f"Expected {max_alchemy_tokens}, got {len(result)}"
)
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_nfts_with_pagination() -> None:
contract_address = (
"0x1a92f7381b9f03921564a437210bb9396471050c" # CoolCats contract address
)
startToken = "0x0000000000000000000000000000000000000000000000000000000000000077"
result = BlockchainDocumentLoader(
contract_address,
BlockchainType.ETH_MAINNET,
api_key=apiKey,
startToken=startToken,
).load()
print("Tokens returend for contract with offset: ", len(result))
assert len(result) > 0, "No NFTs returned"
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_nfts_polygon() -> None:
contract_address = (
"0x448676ffCd0aDf2D85C1f0565e8dde6924A9A7D9" # Polygon contract address
)
result = BlockchainDocumentLoader(
contract_address, BlockchainType.POLYGON_MAINNET
).load()
print("Tokens returend for contract on Polygon: ", len(result))
assert len(result) > 0, "No NFTs returned"
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_nfts_invalid_contract() -> None:
contract_address = (
"0x111D4e82EA7eCA7F62c3fdf6D39A541be95Bf111" # Invalid contract address
)
with pytest.raises(ValueError) as error_NoNfts:
BlockchainDocumentLoader(contract_address).load()
assert (
str(error_NoNfts.value)
== "No NFTs found for contract address " + contract_address
)
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_all() -> None:
start_time = time.time()
contract_address = (
"0x448676ffCd0aDf2D85C1f0565e8dde6924A9A7D9" # Polygon contract address
)
result = BlockchainDocumentLoader(
contract_address=contract_address,
blockchainType=BlockchainType.POLYGON_MAINNET,
api_key=os.environ["ALCHEMY_API_KEY"],
startToken="100",
get_all_tokens=True,
).load()
end_time = time.time()
print(
f"Tokens returned for {contract_address} "
f"contract: {len(result)} in {end_time - start_time} seconds"
)
assert len(result) > 0, "No NFTs returned"
@pytest.mark.skipif(not alchemyKeySet, reason="Alchemy API key not provided.")
def test_get_all_10sec_timeout() -> None:
start_time = time.time()
contract_address = (
"0x1a92f7381b9f03921564a437210bb9396471050c" # Cool Cats contract address
)
with pytest.raises(RuntimeError):
BlockchainDocumentLoader(
contract_address=contract_address,
blockchainType=BlockchainType.ETH_MAINNET,
api_key=os.environ["ALCHEMY_API_KEY"],
get_all_tokens=True,
max_execution_time=10,
).load()
end_time = time.time()
print("Execution took ", end_time - start_time, " seconds")
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~evaluation~qa~generate_chain.py | """LLM Chain specifically for generating examples for question answering."""
from __future__ import annotations
from typing import Any
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.generate_prompt import PROMPT
class QAGenerateChain(LLMChain):
"""LLM Chain specifically for generating examples for question answering."""
@classmethod
def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:
"""Load QA Generate Chain from LLM."""
return cls(llm=llm, prompt=PROMPT, **kwargs)
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~memory~test_cosmos_db.py | import json
import os
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import CosmosDBChatMessageHistory
from langchain.schema.messages import _message_to_dict
# Replace these with your Azure Cosmos DB endpoint and key
endpoint = os.environ["COSMOS_DB_ENDPOINT"]
credential = os.environ["COSMOS_DB_KEY"]
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# setup Azure Cosmos DB as a message store
message_history = CosmosDBChatMessageHistory(
cosmos_endpoint=endpoint,
cosmos_database="chat_history",
cosmos_container="messages",
credential=credential,
session_id="my-test-session",
user_id="my-test-user",
ttl=10,
)
message_history.prepare_cosmos()
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([_message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# remove the record from Azure Cosmos DB, so the next test run won't pick it up
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~chat_models~test_vertexai.py | """Test Vertex AI API wrapper.
In order to run this test, you need to install the VertexAI SDK (that is in private
preview) and be whitelisted to list the models themselves:
pip install google-cloud-aiplatform>=1.25.0
Your end-user credentials would be used to make the calls (make sure you've run
`gcloud auth login` first).
"""
from unittest.mock import Mock, patch
import pytest
from langchain.chat_models import ChatVertexAI
from langchain.chat_models.vertexai import _MessagePair, _parse_chat_history
from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
def test_vertexai_single_call() -> None:
model = ChatVertexAI()
message = HumanMessage(content="Hello")
response = model([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
assert model._llm_type == "vertexai"
assert model.model_name == model.client._model_id
def test_vertexai_single_call_with_context() -> None:
model = ChatVertexAI()
raw_context = (
"My name is Ned. You are my personal assistant. My favorite movies "
"are Lord of the Rings and Hobbit."
)
question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
context = SystemMessage(content=raw_context)
message = HumanMessage(content=question)
response = model([context, message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_parse_chat_history_correct() -> None:
text_context = (
"My name is Ned. You are my personal assistant. My "
"favorite movies are Lord of the Rings and Hobbit."
)
context = SystemMessage(content=text_context)
text_question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
question = HumanMessage(content=text_question)
text_answer = (
"Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring "
"(2001): This is the first movie in the Lord of the Rings trilogy."
)
answer = AIMessage(content=text_answer)
history = _parse_chat_history([context, question, answer, question, answer])
assert history.system_message == context
assert len(history.history) == 2
assert history.history[0] == _MessagePair(question=question, answer=answer)
def test_parse_chat_history_wrong_sequence() -> None:
text_question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
question = HumanMessage(content=text_question)
with pytest.raises(ValueError) as exc_info:
_ = _parse_chat_history([question, question])
assert (
str(exc_info.value)
== "A human message should follow a bot one, got human, human."
)
def test_vertexai_single_call_fails_no_message() -> None:
chat = ChatVertexAI()
with pytest.raises(ValueError) as exc_info:
_ = chat([])
assert (
str(exc_info.value)
== "You should provide at least one message to start the chat!"
)
def test_vertexai_args_passed() -> None:
response_text = "Goodbye"
user_prompt = "Hello"
prompt_params = {
"max_output_tokens": 1,
"temperature": 10000.0,
"top_k": 10,
"top_p": 0.5,
}
# Mock the library to ensure the args are passed correctly
with patch(
"vertexai.language_models._language_models.ChatSession.send_message"
) as send_message:
mock_response = Mock(text=response_text)
send_message.return_value = mock_response
model = ChatVertexAI(**prompt_params)
message = HumanMessage(content=user_prompt)
response = model([message])
assert response.content == response_text
send_message.assert_called_once_with(
user_prompt,
**prompt_params,
)
| [
"{'max_output_tokens': 1, 'temperature': 10000.0, 'top_k': 10, 'top_p': 0.5}",
"Hello"
] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~vectorstores~test_mongodb_atlas.py | """Test MongoDB Atlas Vector Search functionality."""
from __future__ import annotations
import os
from time import sleep
from typing import TYPE_CHECKING
import pytest
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch
if TYPE_CHECKING:
from pymongo import MongoClient
INDEX_NAME = "langchain-test-index"
NAMESPACE = "langchain_test_db.langchain_test_collection"
CONNECTION_STRING = os.environ.get("MONGODB_ATLAS_URI")
DB_NAME, COLLECTION_NAME = NAMESPACE.split(".")
# Instantiate as constant instead of pytest fixture to prevent needing to make multiple
# connections.
TEST_CLIENT: MongoClient = MongoClient(CONNECTION_STRING)
collection = TEST_CLIENT[DB_NAME][COLLECTION_NAME]
class TestMongoDBAtlasVectorSearch:
@classmethod
def setup_class(cls) -> None:
# ensure the test collection is empty
assert collection.count_documents({}) == 0 # type: ignore[index] # noqa: E501
@classmethod
def teardown_class(cls) -> None:
# delete all the documents in the collection
collection.delete_many({}) # type: ignore[index]
@pytest.fixture(autouse=True)
def setup(self) -> None:
# delete all the documents in the collection
collection.delete_many({}) # type: ignore[index]
def test_from_documents(self, embedding_openai: Embeddings) -> None:
"""Test end to end construction and search."""
documents = [
Document(page_content="Dogs are tough.", metadata={"a": 1}),
Document(page_content="Cats have fluff.", metadata={"b": 1}),
Document(page_content="What is a sandwich?", metadata={"c": 1}),
Document(page_content="That fence is purple.", metadata={"d": 1, "e": 2}),
]
vectorstore = MongoDBAtlasVectorSearch.from_documents(
documents,
embedding_openai,
collection=collection,
index_name=INDEX_NAME,
)
sleep(1) # waits for mongot to update Lucene's index
output = vectorstore.similarity_search("Sandwich", k=1)
assert output[0].page_content == "What is a sandwich?"
assert output[0].metadata["c"] == 1
def test_from_texts(self, embedding_openai: Embeddings) -> None:
texts = [
"Dogs are tough.",
"Cats have fluff.",
"What is a sandwich?",
"That fence is purple.",
]
vectorstore = MongoDBAtlasVectorSearch.from_texts(
texts,
embedding_openai,
collection=collection,
index_name=INDEX_NAME,
)
sleep(1) # waits for mongot to update Lucene's index
output = vectorstore.similarity_search("Sandwich", k=1)
assert output[0].page_content == "What is a sandwich?"
def test_from_texts_with_metadatas(self, embedding_openai: Embeddings) -> None:
texts = [
"Dogs are tough.",
"Cats have fluff.",
"What is a sandwich?",
"The fence is purple.",
]
metadatas = [{"a": 1}, {"b": 1}, {"c": 1}, {"d": 1, "e": 2}]
vectorstore = MongoDBAtlasVectorSearch.from_texts(
texts,
embedding_openai,
metadatas=metadatas,
collection=collection,
index_name=INDEX_NAME,
)
sleep(1) # waits for mongot to update Lucene's index
output = vectorstore.similarity_search("Sandwich", k=1)
assert output[0].page_content == "What is a sandwich?"
assert output[0].metadata["c"] == 1
def test_from_texts_with_metadatas_and_pre_filter(
self, embedding_openai: Embeddings
) -> None:
texts = [
"Dogs are tough.",
"Cats have fluff.",
"What is a sandwich?",
"The fence is purple.",
]
metadatas = [{"a": 1}, {"b": 1}, {"c": 1}, {"d": 1, "e": 2}]
vectorstore = MongoDBAtlasVectorSearch.from_texts(
texts,
embedding_openai,
metadatas=metadatas,
collection=collection,
index_name=INDEX_NAME,
)
sleep(1) # waits for mongot to update Lucene's index
output = vectorstore.similarity_search(
"Sandwich", k=1, pre_filter={"range": {"lte": 0, "path": "c"}}
)
assert output == []
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~tools~gmail~create_draft.py | import base64
from email.message import EmailMessage
from typing import List, Optional, Type
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.gmail.base import GmailBaseTool
class CreateDraftSchema(BaseModel):
message: str = Field(
...,
description="The message to include in the draft.",
)
to: List[str] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[List[str]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[List[str]] = Field(
None,
description="The list of BCC recipients.",
)
class GmailCreateDraft(GmailBaseTool):
name: str = "create_gmail_draft"
description: str = (
"Use this tool to create a draft email with the provided message fields."
)
args_schema: Type[CreateDraftSchema] = CreateDraftSchema
def _prepare_draft_message(
self,
message: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
) -> dict:
draft_message = EmailMessage()
draft_message.set_content(message)
draft_message["To"] = ", ".join(to)
draft_message["Subject"] = subject
if cc is not None:
draft_message["Cc"] = ", ".join(cc)
if bcc is not None:
draft_message["Bcc"] = ", ".join(bcc)
encoded_message = base64.urlsafe_b64encode(draft_message.as_bytes()).decode()
return {"message": {"raw": encoded_message}}
def _run(
self,
message: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
create_message = self._prepare_draft_message(message, to, subject, cc, bcc)
draft = (
self.api_resource.users()
.drafts()
.create(userId="me", body=create_message)
.execute()
)
output = f'Draft created. Draft Id: {draft["id"]}'
return output
except Exception as e:
raise Exception(f"An error occurred: {e}")
async def _arun(
self,
message: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
raise NotImplementedError(f"The tool {self.name} does not support async yet.")
| [] |
2024-01-10 | ktr03rtk/langchain | tests~integration_tests~llms~test_self_hosted_llm.py | """Test Self-hosted LLMs."""
import pickle
from typing import Any, List, Optional
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline
model_reqs = ["pip:./", "transformers", "torch"]
def get_remote_instance() -> Any:
"""Get remote instance for testing."""
import runhouse as rh
return rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
def test_self_hosted_huggingface_pipeline_text_generation() -> None:
"""Test valid call to self-hosted HuggingFace text generation model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="gpt2",
task="text-generation",
model_kwargs={"n_positions": 1024},
hardware=gpu,
model_reqs=model_reqs,
)
output = llm("Say foo:") # type: ignore
assert isinstance(output, str)
def test_self_hosted_huggingface_pipeline_text2text_generation() -> None:
"""Test valid call to self-hosted HuggingFace text2text generation model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-small",
task="text2text-generation",
hardware=gpu,
model_reqs=model_reqs,
)
output = llm("Say foo:") # type: ignore
assert isinstance(output, str)
def test_self_hosted_huggingface_pipeline_summarization() -> None:
"""Test valid call to self-hosted HuggingFace summarization model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="facebook/bart-large-cnn",
task="summarization",
hardware=gpu,
model_reqs=model_reqs,
)
output = llm("Say foo:")
assert isinstance(output, str)
def load_pipeline() -> Any:
"""Load pipeline for testing."""
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
return pipe
def inference_fn(pipeline: Any, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Inference function for testing."""
return pipeline(prompt)[0]["generated_text"]
def test_init_with_local_pipeline() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
llm = SelfHostedPipeline.from_pipeline(
pipeline=pipeline,
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm("Say foo:") # type: ignore
assert isinstance(output, str)
def test_init_with_pipeline_path() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
import runhouse as rh
rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(
gpu, path="models"
)
llm = SelfHostedPipeline.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm("Say foo:") # type: ignore
assert isinstance(output, str)
def test_init_with_pipeline_fn() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
llm = SelfHostedPipeline(
model_load_fn=load_pipeline,
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm("Say foo:") # type: ignore
assert isinstance(output, str)
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~tools~playwright~extract_text.py | from __future__ import annotations
from typing import Optional, Type
from pydantic import BaseModel, root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import aget_current_page, get_current_page
class ExtractTextTool(BaseBrowserTool):
name: str = "extract_text"
description: str = "Extract all the text on the current webpage"
args_schema: Type[BaseModel] = BaseModel
@root_validator
def check_bs_import(cls, values: dict) -> dict:
"""Check that the beautifulsoup4 package is installed."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ValueError(
"The 'beautifulsoup4' package is required to use this tool."
" Please install it with 'pip install beautifulsoup4'."
)
return values
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
"""Use the tool."""
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
html_content = page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
async def _arun(
self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
page = await aget_current_page(self.async_browser)
html_content = await page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
| [
"Extract all the text on the current webpage"
] |
2024-01-10 | ktr03rtk/langchain | tests~unit_tests~agents~test_public_api.py | from langchain.agents import __all__ as agents_all
_EXPECTED = [
"Agent",
"AgentExecutor",
"AgentOutputParser",
"AgentType",
"BaseMultiActionAgent",
"BaseSingleActionAgent",
"ConversationalAgent",
"ConversationalChatAgent",
"LLMSingleActionAgent",
"MRKLChain",
"OpenAIFunctionsAgent",
"OpenAIMultiFunctionsAgent",
"ReActChain",
"ReActTextWorldAgent",
"SelfAskWithSearchChain",
"StructuredChatAgent",
"Tool",
"ZeroShotAgent",
"create_csv_agent",
"create_json_agent",
"create_openapi_agent",
"create_pandas_dataframe_agent",
"create_pbi_agent",
"create_pbi_chat_agent",
"create_spark_dataframe_agent",
"create_spark_sql_agent",
"create_sql_agent",
"create_vectorstore_agent",
"create_vectorstore_router_agent",
"get_all_tool_names",
"initialize_agent",
"load_agent",
"load_huggingface_tool",
"load_tools",
"tool",
]
def test_public_api() -> None:
"""Test for regressions or changes in the agents public API."""
assert sorted(agents_all) == sorted(_EXPECTED)
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~utilities~bing_search.py | """Util that calls Bing Search.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
"""
from typing import Dict, List
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class BingSearchAPIWrapper(BaseModel):
"""Wrapper for Bing Search API.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
"""
bing_subscription_key: str
bing_search_url: str
k: int = 10
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _bing_search_results(self, search_term: str, count: int) -> List[dict]:
headers = {"Ocp-Apim-Subscription-Key": self.bing_subscription_key}
params = {
"q": search_term,
"count": count,
"textDecorations": True,
"textFormat": "HTML",
}
response = requests.get(
self.bing_search_url, headers=headers, params=params # type: ignore
)
response.raise_for_status()
search_results = response.json()
return search_results["webPages"]["value"]
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
bing_subscription_key = get_from_dict_or_env(
values, "bing_subscription_key", "BING_SUBSCRIPTION_KEY"
)
values["bing_subscription_key"] = bing_subscription_key
bing_search_url = get_from_dict_or_env(
values,
"bing_search_url",
"BING_SEARCH_URL",
# default="https://api.bing.microsoft.com/v7.0/search",
)
values["bing_search_url"] = bing_search_url
return values
def run(self, query: str) -> str:
"""Run query through BingSearch and parse result."""
snippets = []
results = self._bing_search_results(query, count=self.k)
if len(results) == 0:
return "No good Bing Search Result was found"
for result in results:
snippets.append(result["snippet"])
return " ".join(snippets)
def results(self, query: str, num_results: int) -> List[Dict]:
"""Run query through BingSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._bing_search_results(query, count=num_results)
if len(results) == 0:
return [{"Result": "No good Bing Search Result was found"}]
for result in results:
metadata_result = {
"snippet": result["snippet"],
"title": result["name"],
"link": result["url"],
}
metadata_results.append(metadata_result)
return metadata_results
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~vectorstores~tigris.py | from __future__ import annotations
import itertools
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import VectorStore
if TYPE_CHECKING:
from tigrisdb import TigrisClient
from tigrisdb import VectorStore as TigrisVectorStore
from tigrisdb.types.filters import Filter as TigrisFilter
from tigrisdb.types.vector import Document as TigrisDocument
class Tigris(VectorStore):
def __init__(self, client: TigrisClient, embeddings: Embeddings, index_name: str):
"""Initialize Tigris vector store"""
try:
import tigrisdb # noqa: F401
except ImportError:
raise ValueError(
"Could not import tigrisdb python package. "
"Please install it with `pip install tigrisdb`"
)
self._embed_fn = embeddings
self._vector_store = TigrisVectorStore(client.get_search(), index_name)
@property
def search_index(self) -> TigrisVectorStore:
return self._vector_store
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids for documents.
Ids will be autogenerated if not provided.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
docs = self._prep_docs(texts, metadatas, ids)
result = self.search_index.add_documents(docs)
return [r.id for r in result]
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[TigrisFilter] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query."""
docs_with_scores = self.similarity_search_with_score(query, k, filter)
return [doc for doc, _ in docs_with_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[TigrisFilter] = None,
) -> List[Tuple[Document, float]]:
"""Run similarity search with Chroma with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[TigrisFilter]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float.
"""
vector = self._embed_fn.embed_query(query)
result = self.search_index.similarity_search(
vector=vector, k=k, filter_by=filter
)
docs: List[Tuple[Document, float]] = []
for r in result:
docs.append(
(
Document(
page_content=r.doc["text"], metadata=r.doc.get("metadata")
),
r.score,
)
)
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
client: Optional[TigrisClient] = None,
index_name: Optional[str] = None,
**kwargs: Any,
) -> Tigris:
"""Return VectorStore initialized from texts and embeddings."""
if not index_name:
raise ValueError("`index_name` is required")
if not client:
client = TigrisClient()
store = cls(client, embedding, index_name)
store.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return store
def _prep_docs(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]],
ids: Optional[List[str]],
) -> List[TigrisDocument]:
embeddings: List[List[float]] = self._embed_fn.embed_documents(list(texts))
docs: List[TigrisDocument] = []
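# zip_longest pads missing metadatas/embeddings/ids with None; the "or" defaults below keep each document well-formed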
for t, m, e, _id in itertools.zip_longest(
texts, metadatas or [], embeddings or [], ids or []
):
doc: TigrisDocument = {
"text": t,
"embeddings": e or [],
"metadata": m or {},
}
if _id:
doc["id"] = _id
docs.append(doc)
return docs
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~embeddings~modelscope_hub.py | """Wrapper around ModelScopeHub embedding models."""
from typing import Any, List
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
class ModelScopeEmbeddings(BaseModel, Embeddings):
"""Wrapper around modelscope_hub embedding models.
To use, you should have the ``modelscope`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import ModelScopeEmbeddings
model_id = "damo/nlp_corom_sentence-embedding_english-base"
embed = ModelScopeEmbeddings(model_id=model_id)
"""
embed: Any
model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the modelscope"""
super().__init__(**kwargs)
try:
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id)
except ImportError as e:
raise ImportError(
"Could not import some python packages."
"Please install it with `pip install modelscope`."
) from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a modelscope embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
inputs = {"source_sentence": texts}
embeddings = self.embed(input=inputs)["text_embedding"]
return embeddings.tolist()
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
inputs = {"source_sentence": [text]}
embedding = self.embed(input=inputs)["text_embedding"][0]
return embedding.tolist()
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~docstore~wikipedia.py | """Wrapper around wikipedia API."""
from typing import Union
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
class Wikipedia(Docstore):
"""Wrapper around wikipedia API."""
def __init__(self) -> None:
"""Check that wikipedia package is installed."""
try:
import wikipedia # noqa: F401
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
def search(self, search: str) -> Union[str, Document]:
"""Try to search for wiki page.
If the page exists, return its content and URL wrapped in a Document.
If page does not exist, return similar entries.
"""
import wikipedia
try:
page_content = wikipedia.page(search).content
url = wikipedia.page(search).url
result: Union[str, Document] = Document(
page_content=page_content, metadata={"page": url}
)
except wikipedia.PageError:
result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
except wikipedia.DisambiguationError:
result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
return result
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~output_parsers~loading.py | from langchain.output_parsers.regex import RegexParser
def load_output_parser(config: dict) -> dict:
"""Load output parser."""
if "output_parsers" in config:
if config["output_parsers"] is not None:
_config = config["output_parsers"]
output_parser_type = _config["_type"]
if output_parser_type == "regex_parser":
output_parser = RegexParser(**_config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
config["output_parsers"] = output_parser
return config
| [] |
2024-01-10 | ktr03rtk/langchain | langchain~embeddings~cohere.py | """Wrapper around Cohere embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
class CohereEmbeddings(BaseModel, Embeddings):
"""Wrapper around Cohere embedding models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v2.0", cohere_api_key="my-api-key"
)
"""
client: Any #: :meta private:
model: str = "embed-english-v2.0"
"""Model name to use."""
truncate: Optional[str] = None
"""Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
cohere_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ValueError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = self.client.embed(
model=self.model, texts=texts, truncate=self.truncate
).embeddings
return [list(map(float, e)) for e in embeddings]
def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(
model=self.model, texts=[text], truncate=self.truncate
).embeddings[0]
return list(map(float, embedding))
| [] |
2024-01-10 | COS301-SE-2023/AI-Photo-Editor | src~electron~lib~ai~python~models~functions~graphFunc.py | from typing import Optional, Type
from langchain.tools import format_tool_to_openai_function
from pydantic import BaseModel, Field
from typing import Type, List
from langchain.tools.base import BaseTool,ToolException
def _handle_error(error: ToolException) -> str:
return (
"The following errors occurred during tool execution:"
+ error.args[0]
+ "Please try another tool."
)
class addNodeInput(BaseModel):
signature : str = Field(description="Signature/type of the node e.g 'math-plugin.binary', 'math-plugin.unary'")
class removeNodeInput(BaseModel):
id : str = Field(description="id of the node to be deleted e.g '15s2k3', '1m9j0kl'")
class addEdgeInput(BaseModel):
output : str = Field(description="Id of the output anchor connected to the edge. e.g 'l40plq', 'j5nm33'")
input : str = Field(description="Id of the input anchor connected to the edge. e.g 'az22m3', '0lpm5i'")
class removeEdgeInput(BaseModel):
id : str = Field(description="Id of the edge to be removed. e.g '8kn5la', '1m9j0kl'")
class addNodesInput(BaseModel):
signatures: List[str] = Field(
...,
description="List of node signatures to be added to the graph. e.g math.sum, math.divide",
)
class removeNodesInput(BaseModel):
ids: List[str] = Field(
...,
description="List of ids of nodes to be removed. e.g 2de2d4",
)
class addEdgesInput(BaseModel):
edges: List[addEdgeInput] = Field(
...,
description="List of of objects representing edges to be added",
)
class removeEdgesInput(BaseModel):
ids: List[str] = Field(
...,
description="List of ids of edges to be removed. e.g 2de2d4",
)
class Functions:
api = None
class addNodeTool(BaseTool):
"""
Class to represent addNode function to language model
...
Attributes
----------
name : str
a formatted string to display the name of the function
description : str
a string that provides a descriptive summary of the function
Methods
-------
run(self,signature)
Pushes the command to add a node to the graph to the commands list
_arun(self)
Raises an error as this function does not support async execution
"""
name: str = "addNode"
description: str = "Adds a new node to the graph"
def _run(
self,
signature: str = "hello-plugin.Jake",
) -> str:
response = Functions.api.commands.addNode(signature)
# Functions.api.logs.append(response["message"] + "\n Parameters: input anchor ids: " + ''.join(response["data"]["inputs"]) +"\n output anchor ids: "+''.join(response["data"]["outputs"])+"\n")
if(response["status"] == "error"):
return response["message"]
else:
return response["message"] + "\n Parameters: input anchor ids: " + ','.join(response["data"]["inputs"]) +"\n output anchor ids: "+','.join(response["data"]["outputs"])+"\n"
async def _arun(
self,
) -> str:
raise NotImplementedError("This tool does not support async execution yet")
args_schema: Optional[Type[BaseModel]] = addNodeInput
class removeNodeTool( BaseTool):
"""
Class to represent removeNode function to language model
...
Attributes
----------
name : str
a formatted string to display the name of the function
description : str
a string that provides a descriptive summary of the function
Methods
-------
run(self, id)
Pushes the command to remove a node from the graph to the commands list
_arun(self)
Raises an error as this function does not support async execution
"""
name: str = "removeNode"
description: str = "Remove a node from the graph"
def _run(
self,
id: str,
) -> str:
Functions.api.logs.append("removeNode command added\n")
return Functions.api.commands.removeNode(id)
async def _arun(
self,
) -> str:
raise NotImplementedError("This tool does not support async execution yet")
args_schema: Optional[Type[BaseModel]] = removeNodeInput
class addEdgeTool( BaseTool):
"""
Class to represent addEdge function to language model
...
Attributes
----------
name : str
a formatted string to display the name of the function
description : str
a string that provides a descriptive summary of the function
Methods
-------
run(self, output, input)
Pushes the command to add an edge (from output to input) to the graph to the commands list
_arun(self)
Raises an error as this function does not support async execution
"""
name: str = "addEdge"
description: str = "Adds an edge between an output anchor of a node and an input anchor of another node using these anchor id's"
def _run(
self,
output : str,
input : str,
) -> str:
response = Functions.api.commands.addEdge(output,input)
# Functions.api.logs.append(response["message"] + "\n Parameters: nodeids : " + ''.join(response["data"]["edgeId"]) +"\n")
if(response["status"] == "error"):
return response["message"]
else:
Functions.api.logs.append(response["message"] + "\n Parameters: nodeids : " + ''.join(response["data"]["edgeId"]) +"\n")
return response["message"] + "\n Parameters: nodeids : " + ''.join(response["data"]["edgeId"]) +"\n"
async def _arun(
self,
) -> str:
raise NotImplementedError("This tool does not support async execution yet")
args_schema: Optional[Type[BaseModel]] = addEdgeInput
class removeEdgeTool( BaseTool):
"""
Class to represent removeEdge function to language model
...
Attributes
----------
name : str
a formatted string to display the name of the function
description : str
a string that provides a descriptive summary of the function
Methods
-------
run(self, id)
Pushes the command to remove an Edge from the graph to the commands list
_arun(self)
Raises an error as this function does not support async execution
"""
name: str = "removeEdge"
description: str = "Removes an edge between an output anchor of a node and an input anchor of another node using the edge id"
def _run(
self,
id: str,
) -> str:
Functions.api.logs.append("removeEdge command added\n")
return Functions.api.commands.removeEdge(id)
async def _arun(
self,
) -> str:
raise NotImplementedError("This tool does not support async execution yet")
args_schema: Optional[Type[BaseModel]] = removeEdgeInput
class addNodes(BaseTool):
"""
Class to represent addNodes function to language model
...
Attributes
----------
name : str
a formatted string to display the name of the function
description : str
a string that provides a descriptive summary of the function
Methods
-------
run(self, signatures)
Pushes the command to add Nodes to the graph to the commands list
_arun(self)
Raises an error as this function does not support async execution
"""
name: str = "addNodes"
description: str = "Add new nodes to the graph"
args_schema: Type[BaseModel] = addNodesInput
handle_tool_error = _handle_error
def _run(self, signatures: List[str]):
# commands.append({ "command": "addNodes", "signatures": signatures})
return "addNodes command added\n"
async def _arun(self) -> str:
raise NotImplementedError("This tool does not support async execution")
class removeNodes(BaseTool):
"""
Class to represent removeNodes function to language model
...
Attributes
----------
name : str
a formatted string to display the name of the function
description : str
a string that provides a descriptive summary of the function
args_schema : Type[BaseModel]
handle_tool_error : Callable[[ToolException], str]
Methods
-------
run(self,ids)
Pushes the command to remove Nodes from the graph to the commands list
_arun(self)
Raises an error as this function does not support async execution
"""
name: str = "removeNodes"
description: str = "Remove nodes from the graph"
args_schema: Type[BaseModel] = removeNodesInput
handle_tool_error = _handle_error
def _run(self, ids: List[str]):
# commands.append({ "command": "removeNodes", "ids": ids})
return "removeNodes command added\n"
async def _arun(self) -> str:
raise NotImplementedError("This tool does not support async execution")
class addEdges(BaseTool):
"""
Class to represent addEdges function to language model
...
Attributes
----------
name : str
a formatted string to display the name of the function
description : str
a string that provides a descriptive summary of the function
args_schema : Type[BaseModel]
handle_tool_error : Callable[[ToolException], str]
Methods
-------
run(self, edges)
Pushes the command to addEdges to the graph to the commands list
_arun(self)
Raises an error as this function does not support async execution
"""
name: str = "addEdges"
description: str = (
"Used to connect the output anchor from one node to the input anchor of another node."
)
args_schema: Type[BaseModel] = addEdgesInput
handle_tool_error = _handle_error
def _run(self, edges: List[addEdgeInput]):
# commands.append({ "command": "addEdges", "edges": edges})
return "addEdges command added\n"
async def _arun(self) -> str:
raise NotImplementedError("This tool does not support async execution")
class removeEdges(BaseTool):
"""
Class to represent removeEdges function to language model
...
Attributes
----------
name : str
a formatted string to display the name of the function
description : str
a string that provides a descriptive summary of the function
args_schema : Type[BaseModel]
handle_tool_error : Callable[[ToolException], str]
Methods
-------
run(self,ids)
Pushes the command to removeEdges from the graph to the commands list
_arun(self)
Raises an error as this function does not support async execution
"""
name: str = "removeEdges"
description: str = "Remove edges from the graph"
args_schema: Type[BaseModel] = removeEdgesInput
handle_tool_error = _handle_error
def _run(self, ids: List[str]):
# commands.append({ "command": "removeEdges", "ids": ids})
return "removeEdges command added\n"
async def _arun(self) -> str:
raise NotImplementedError("This tool does not support async execution")
tools = [addNodeTool(),removeNodeTool(),addEdgeTool(),removeEdgeTool()]
# To view the functions as scheme
# def write_dict_to_file(dict, path):
# with open(path, "w") as f:
# f.write(json.dumps(dict, indent=2))
# write_dict_to_file(functions, "functions.json")  # This will write to root | [
"Adds a new node to the graph",
"Remove nodes from the graph",
"Remove edges from the graph",
"Removes an edge between an output anchor of a node and an input anchor of another node using the edge id",
"Remove a node from the graph",
"Add new nodes to the graph",
"Adds an edge between an output anchor of a node and an input anchor of another node using these anchor id's"
] |
2024-01-10 | COS301-SE-2023/AI-Photo-Editor | src~electron~lib~ai~python~prompts~generic.py | from langchain.prompts import PromptTemplate
template = """
You are a helpful assistant that can manipulate a graph by calling some functions. You are only allowed to fulfill this role and nothing else.
The graph consist of nodes and edges. Each node executes some sort of operation on the graph as an output.
Each node has input and output anchors that are used to connect edges, when asked to connect nodes, always connect the output anchor of one node to the input anchor of another node.
One output anchor can map to multiple input anchors, but the input anchor can only map to one output anchor.
An edge can ONLY connect anchors that are of the same type.
No input anchor may be used twice, and not all anchors have to always be connected.
Do not tell the user how to use the provided data, instead you must use the data to assist the user.
If you receive an error message, retry with different parameters.
To create a functioning graph the graph must contain at least one input node and one output node that is connected to the graph.
If there is no output node connected to the graph, always add it unless the user specifically asks you not to.
Always add all nodes before adding edges.
Provided is the graph's nodes :
{nodes}
For math nodes create multiple nodes for binary operations, for example to add two numbers, create a node for each number and then connect them to a node that adds them together.
For image manipulation, the image must be connected to type sharp first and then back to image for the output node
Edges can only be connected from the output anchors of one node with the input anchors of another node, provided they are of the same type. Thus no cycles are allowed
Additionally the following edges are provided :
{edges}
The following nodes are relevant to you :
{plugins}
The user provides the following prompt :
{prompt}
"""
prompt_template = PromptTemplate(input_variables=["prompt","nodes","edges","plugins"],template=template) | [
"plugins",
"\nYou are a helpful assistant that can manipulate a graph by calling some functions. You are only allowed to fulfill this role and nothing else.\n\nThe graph consist of nodes and edges. Each node executes some sort of operation on the graph as an output. \nEach node has input and output anchors that are used to connect edges, when asked to connect nodes, always connect the output anchor of one node to the input anchor of another node.\nOne output anchor can map to multiple input anchors, but the input anchor can only map to one output anchor.\nAn edge can ONLY connect anchors that are of the same type.\nNo input anchor may be used twice, and not all anchors have to always be connected.\n\nDo not tell the user how to use the provided data, instead you must use the data to assist the user.\nIf you receive an error message, retry with different parameters.\nTo create a functioning graph the graph must contain at least one input node and one output node that is connected to the graph.\nIf there is no output node connected to the graph, always add it unless the user specifically asks you not to.\n\nAlways add all nodes before adding edges.\n\nProvided is the graph's nodes : \n{nodes}\nFor math nodes create multiple nodes for binary operations, for example to add two numbers, create a node for each number and then connect them to a node that adds them together.\nFor image manipulation, the image must be connected to type sharp first and then back to image for the output node\n\nEdges can only be connected from the output anchors of one node with the input anchors of another node, provided they are of the same type. Thus no cycles are allowed\nAdditionally the following edges are provided :\n{edges}\n\n\nThe following nodes are relevant to you :\n{plugins}\n\nThe user provides the following prompt :\n{prompt}\n\n",
"nodes"
] |
2024-01-10 | AlexApps99/naskar | naskar~clean.py | def clean(gpt, prune=True, log_pruned=True):
'''
takes output from OpenAI and cleans and filters for valid sonnets
'''
# half-finished sonnets are no good, and if it isn't finished it's too long
sonnets = [s["text"] for s in gpt["choices"] if s["finish_reason"] == "stop"]
# strip each line
sonnets = ["\n".join([l.strip() for l in s.splitlines()]) for s in sonnets]
# split into stanzas
sonnets = [[q.strip() for q in s.split("\n\n") if q.strip()] for s in sonnets]
# split stanzas into lines
sonnets = [[q.split("\n") for q in s] for s in sonnets]
def validate_sonnet(sonnet):
# Too few/many stanzas
if len(sonnet) < 3 or len(sonnet) > 5: return False
for stanza in sonnet:
# Stanza is too long
if len(stanza) > 4: return False
# It's probably good enough
return True
def regroup_sonnet(sonnet, n):
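# greedily merge adjacent stanzas while their combined line count stays within n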
i = 0
while i < len(sonnet) - 1:
while (i < len(sonnet) - 1) and len(sonnet[i]) + len(sonnet[i+1]) <= n:
sonnet[i:i+2] = [sonnet[i] + sonnet[i+1]]
i += 1
return sonnet
# merge stanzas together until there would be more than 4 (or 3) lines per stanza
def try_regroup_sonnet(sonnet):
for n in [4, 3]:
new_sonnet = sonnet.copy()
new_sonnet = regroup_sonnet(new_sonnet, n)
if validate_sonnet(new_sonnet):
return new_sonnet
return None
def reconstitute(sonnet):
return "\n\n".join(["\n".join(q) for q in sonnet])
regrouped_sonnets = [try_regroup_sonnet(s) for s in sonnets]
if prune:
def pruned(sonnet):
v = not bool(sonnet)
if log_pruned and v: print(sonnet)
return v
return [reconstitute(s) for s in regrouped_sonnets if not pruned(s)]
else:
return [{
"text": reconstitute(s if s else sonnets[i]),
"clean": bool(s),
} for i, s in enumerate(regrouped_sonnets)]
| [] |
2024-01-10 | lichuanqi/Python_Learn_Note | packages~langchain_~chains_combine_documents.py | from langchain.chains import StuffDocumentsChain, LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms.fake import FakeListLLM
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"],
template="{page_content}"
)
document_variable_name = "context"
llm = FakeListLLM(responses=['111','222','333'], verbose=True)
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template(
"Summarize this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
result = llm_chain.run('你好')
print(result)
chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name)
chain.run('你好啊') | [
"{page_content}",
"Summarize this content: {context}",
"page_content"
] |
2024-01-10 | lichuanqi/Python_Learn_Note | packages~langchain_~vectorstores_chroma.py | import sys
from datetime import datetime
from pathlib import Path
from pprint import pprint
from dataclasses import dataclass, field
import pandas as pd
from langchain.document_loaders import TextLoader
from langchain.text_splitter import TokenTextSplitter, RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
embedding_model_name = 'packages/langchain_/models/shibing624_text2vec-base-chinese'
persist_directory = 'packages/langchain_/vectordb'
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)
vectordb = Chroma(embedding_function=embeddings,
persist_directory=persist_directory)
# Add a document to the vector store
file_path = 'packages/langchain_/test.txt'
loader = TextLoader(file_path=file_path, encoding='utf-8')
text_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=0)
doc_texts = loader.load_and_split(text_splitter=text_splitter)
docids = vectordb.add_documents(documents=doc_texts)
vectordb.persist()
# Similarity search for the query
query = "用2-3句话解释一下寄递市场库"
docs = vectordb.similarity_search_with_score(query)
print('检索问题: %s'%query)
pprint('检索结果: \n%s'%docs) | [] |
2024-01-10 | lichuanqi/Python_Learn_Note | packages~langchain_~memory_.py | from abc import ABC, abstractmethod
from typing import Dict,List,Any,Optional,Tuple,Union,Mapping
from pprint import pprint
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
from langchain.schema import BaseMessage, HumanMessage, AIMessage
class NewMessageHistory(ChatMessageHistory):
def wenxin_format(self):
"""把历史消息记录转换为百度文心格式
格式 - 单轮请求
[
{"role":"user","content":"介绍一下你自己"}
]
Format - multi-turn request example
[
{"role":"user","content":"请介绍一下你自己"},
{"role":"assistant","content":"我是百度公司开发的人工智能语言模型,我的中文名是文心一言,英文名是ERNIE Bot,可以协助您完成范围广泛的任务并提供有关各种主题的信息,比如回答问题,提供定义和解释及建议。如果您有任何问题,请随时向我提问。"},
{"role":"user","content": "我在上海,周末可以去哪里玩?"}
]
"""
wenxins = []
if len(self.messages)%2 != 1:
print('请确保消息数量为奇数')
return
for i in range(0, len(self.messages)-1, 2):
wenxins.append({"role": "user", "content": self.messages[i].content})
wenxins.append({"role": "assistant", "content": self.messages[i+1].content})
# Append the last (still unanswered) user question
wenxins.append({"role": "user", "content": self.messages[-1].content})
return wenxins
def chatbot_format(self):
"""把历史消息记录转换为Gradio的Chatbot格式
格式
[
["请介绍一下你自己", "百度公司开发的人工智能语言模型"],
[..., ...]
]
"""
chatbots = []
if len(self.messages)%2 == 1:
print('请确保消息数量为偶数')
return
for i in range(0, len(self.messages), 2):
chatbots.append([self.messages[i].content, self.messages[i+1].content])
return chatbots
class VectorySearchMessage(BaseMessage):
"""Type of message that is spoken by the human."""
example: bool = False
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "vectory search"
class NewMessageHistoryV2(ChatMessageHistory):
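# History variant that stores (user question, vector-search result, AI answer) triplets instead of pairs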
def add_vectory_search_message(self, message):
self.messages.append(VectorySearchMessage(content=message))
def wenxin_format(self):
"""把历史消息记录转换为百度文心格式
格式 - 单轮请求
[
{"role":"user","content":"介绍一下你自己"}
]
Format - multi-turn request example
[
{"role":"user","content":"请介绍一下你自己"},
{"role":"assistant","content":"我是百度公司开发的人工智能语言模型,我的中文名是文心一言,英文名是ERNIE Bot,可以协助您完成范围广泛的任务并提供有关各种主题的信息,比如回答问题,提供定义和解释及建议。如果您有任何问题,请随时向我提问。"},
{"role":"user","content": "我在上海,周末可以去哪里玩?"}
]
"""
wenxins = []
if len(self.messages)%3 != 2:
print('请确保消息数量为3n+2条')
return
for i in range(0, len(self.messages)-2, 3):
wenxins.append({"role": "user", "content": self.messages[i].content})
wenxins.append({"role": "assistant", "content": self.messages[i+2].content})
# Append the last (still unanswered) user question
wenxins.append({"role": "user", "content": self.messages[-2].content})
return wenxins
def chatbot_format(self):
"""把历史消息记录转换为Gradio的Chatbot格式
格式
[
["请介绍一下你自己", "百度公司开发的人工智能语言模型"],
[..., ...]
]
"""
chatbots = []
if len(self.messages)%3 != 0:
print('请确保消息数量为3n条')
return
for i in range(0, len(self.messages), 3):
chatbots.append([self.messages[i].content, self.messages[i+2].content])
return chatbots
def test_message_history():
# messageHistory = NewMessageHistory()
messageHistory = NewMessageHistoryV2()
# Add some test data
messageHistory.add_user_message('你好啊')
messageHistory.add_vectory_search_message('检索内容1')
messageHistory.add_ai_message('还行')
messageHistory.add_user_message('你好啊2')
messageHistory.add_vectory_search_message('检索内容2')
messageHistory.add_ai_message('还行2')
# Add a new user question
question = '我又来了'
messageHistory.add_user_message(question)
messageHistory.add_vectory_search_message('检索内容3')
# Convert to the Baidu Wenxin request format
pprint(messageHistory.wenxin_format())
# Convert to chatbot format for display
# pprint(messageHistory.chatbot_format())
# After getting the reply, add it as an AI message
answer = '还是欢迎'
messageHistory.add_ai_message(answer)
# Convert to chatbot format for display
pprint(messageHistory.chatbot_format())
if __name__ == '__main__':
test_message_history() | [] |
2024-01-10 | lichuanqi/Python_Learn_Note | packages~langchain_~chain_.py | import sys
sys.path.append('packages/langchain_')
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain,RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.memory import ConversationBufferMemory,ConversationBufferWindowMemory
from llm_ import ChatGlmLLM
def test_base_chain():
prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题,问题:{question}"""
prompt = PromptTemplate(template=prompt_template,
input_variables=["question"])
llm = ChatGlmLLM()
chain = LLMChain(llm=llm, prompt=prompt)
result = chain.run({"question":'你好'})
print(result)
def memory_chain():
template = """You are a chatbot having a conversation with a human.
{chat_history}
Human: {human_input}
Chatbot:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input"], template=template)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
llm_chain = LLMChain(
llm=ChatGlmLLM(),
prompt=prompt,
# verbose=True,
memory=memory)
llm_chain.predict(human_input="Hi there my friend 11")
llm_chain.predict(human_input="Hi there my friend 22")
llm_chain.predict(human_input="Hi there my friend 33")
print(memory.chat_memory.messages)
def vectordb_chain():
embedding_model_name='packages/langchain_/models/shibing624_text2vec-base-chinese'
persist_directory = 'packages/langchain_/vectordb'
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)
vectordb = Chroma(embedding_function=embeddings,
persist_directory=persist_directory)
llm = ChatGlmLLM()
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=vectordb.as_retriever(),
return_source_documents=True)
query = "中央主题教育工作会议什么时候召开的"
result = qa(query)
print(result['source_documents'])
# test_base_chain()
memory_chain()
# vectordb_chain()
| [
"chat_history",
"You are a chatbot having a conversation with a human.\n\n {chat_history}\n Human: {human_input}\n Chatbot:",
"question",
"human_input",
"基于以下已知信息,请简洁并专业地回答用户的问题,问题:{question}"
] |
2024-01-10 | voynow/leetcode-analysis | scripts~code_to_embeddings.py | import logging
import concurrent
from datasets import load_dataset
from dotenv import load_dotenv
import openai
import os
import pickle
from concurrent.futures import ThreadPoolExecutor
# Configure logging
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logging.info("Application started")
load_dotenv()
if not os.environ.get("OPENAI_API_KEY"):
raise ValueError(
"Set your OPENAI_API_KEY via export OPENAI_API_KEY=... or in a .env file"
)
openai.api_key = os.environ["OPENAI_API_KEY"]
logging.info("OpenAI API key loaded")
EMBEDDINGS_MODEL = "text-embedding-ada-002"
def extract_texts_from_folder(folder_path):
logging.info(f"Extracting texts from folder: {folder_path}")
texts = {}
for filename in os.listdir(folder_path):
if filename.endswith(".py"):
with open(os.path.join(folder_path, filename), "r") as file:
texts[filename] = file.read()
return texts
def get_embedding_for_text(filename, text):
logging.info(f"Getting embedding for {filename}")
response = openai.Embedding.create(input=text, model=EMBEDDINGS_MODEL)
embedding = response["data"][0]["embedding"]
return {filename: {"embedding": embedding, "text": text}}
def get_embeddings(texts):
embeddings_data = {}
with ThreadPoolExecutor() as executor:
future_to_file = {
executor.submit(get_embedding_for_text, filename, text): filename
for filename, text in texts.items()
}
for future in concurrent.futures.as_completed(future_to_file):
embeddings_data.update(future.result())
return embeddings_data
def save_embeddings_to_file(embeddings, file_path):
logging.info(f"Saving embeddings to file: {file_path}")
pickle.dump(embeddings, open(file_path, "wb"))
def process_my_solutions():
folder_path = "solutions"
texts = extract_texts_from_folder(folder_path)
embeddings = get_embeddings(texts)
save_embeddings_to_file(embeddings, "data/embeddings.pkl")
def process_huggingface_solutions():
dataset = load_dataset("mhhmm/leetcode-solutions-python")
df = dataset["train"].to_pandas()
texts = df["code_with_problem"].to_dict()
embeddings_data = get_embeddings(texts)
save_embeddings_to_file(embeddings_data, "data/hf_embeddings.pkl")
process_my_solutions()
process_huggingface_solutions()
logging.info("Application completed successfully")
| [] |
2024-01-10 | FracturedPlane/SMBAE | util~SimulationUtil.py | import copy
import sys
sys.setrecursionlimit(50000)
import os
import json
from numpy import dtype
sys.path.append("../")
sys.path.append("../env")
sys.path.append("../characterSimAdapter/")
sys.path.append("../simbiconAdapter/")
sys.path.append("../simAdapter/")
import math
import numpy as np
from pydoc import locate
from util.ExperienceMemory import ExperienceMemory
# from ModelEvaluation import *
# from RLVisualize import RLVisualize
# from NNVisualize import NNVisualize
import random
# import cPickle
import dill
import dill as pickle
import dill as cPickle
# import cProfile, pstats, io
# import memory_profiler
# import psutil
import gc
# from guppy import hpy; h=hpy()
# from memprof import memprof
# import pathos.multiprocessing
# import multiprocessing
from model.ModelUtil import scale_action
def loadNetwork(net_file_path):
print("Loading model: ", net_file_path)
f = open(net_file_path, 'rb')
model = dill.load(f)
f.close()
return model
def getDataDirectory(settings):
return getBaseDataDirectory(settings)+settings["model_type"]+"/"
def getBaseDataDirectory(settings):
return getRootDataDirectory(settings)+"/"+settings["data_folder"]+"/"
def getRootDataDirectory(settings):
return settings["environment_type"]+"/"+settings["agent_name"]
def getAgentName(settings=None):
return 'agent'
def getTaskDataDirectory(settings):
return settings["environment_type"]+"/"+settings["agent_name"]+"/"+settings["task_data_folder"]+"/"+settings["model_type"]+"/"
def addDataToTarBall(tarfile_, settings, fileName=None):
import os
import tarfile
## add all json and py files
if ( fileName is None ):
dir = getDataDirectory(settings)
for filename_tmp in os.listdir(dir):
print("Possible include file: ", os.path.splitext(filename_tmp))
split_ = os.path.splitext(filename_tmp)
if (split_[1] in ['.py', '.json']):
print("Adding file: ", filename_tmp)
tarfile_.add(dir+filename_tmp)
"""
fileName_ = dir+"trainingData_" + str(settings['agent_name']) + ".json"
if os.path.exists(fileName_):
tarfile_.add(fileName_)
else:
print ( "File does not exists: ", fileName_)
"""
if ( not ( fileName is None) ):
if os.path.exists(fileName):
tarfile_.add(fileName)
else:
print ( "File does not exists: ", fileName)
# tarfile.add('/README.md')
def addPicturesToTarBall(tarfile_, settings, fileName=None, data_folder=None):
import os
import tarfile
## add all json and py files
if ( fileName is None ):
if (data_folder is not None):
dir = data_folder
else:
dir = getRootDataDirectory(settings)+"/" + settings['data_folder'] + "/"
# dir = getDataDirectory(settings)
for filename_tmp in os.listdir(dir):
print("Possible include file: ", os.path.splitext(filename_tmp))
split_ = os.path.splitext(filename_tmp)
if (split_[1] in ['.png', '.svg']):
print("Adding file: ", filename_tmp)
tarfile_.add(dir+filename_tmp)
# tarfile.add('/README.md')
def my_import(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def validateSettings(settings):
"""
This method is used to check and overwrite any settings that are not going to work properly
for example, check if there is a display screen
"""
"""
## This doesn't work as well as I was hoping...
if ( not ( "DISPLAY" in os.environ)): # No screen on this computer
settings['visulaize_forward_dynamics'] = False
settings['visualize_learning'] = False
"""
return settings
def createNetworkModel(model_type, state_bounds, action_bounds, reward_bounds, settings):
if settings['action_space_continuous']:
n_out_ = len(action_bounds[0])
else:
n_out_ = len(action_bounds)
if (settings['load_saved_model'] == True):
return None
elif (model_type == "Deep_Dropout" ):
from model.DeepDropout import DeepDropout
model = DeepDropout(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_NN" ):
from model.DeepNN import DeepNN
model = DeepNN(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_CNN" ):
from model.DeepCNN import DeepCNN
model = DeepCNN(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_CNN_2D" ):
from model.DeepCNN2D import DeepCNN2D
model = DeepCNN2D(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_CNN_Dropout" ):
from model.DeepCNNDropout import DeepCNNDropout
model = DeepCNNDropout(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_NN_Dropout" ):
from model.DeepNNDropout import DeepNNDropout
model = DeepNNDropout(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_NN_SingleNet" ):
from model.DeepNNSingleNet import DeepNNSingleNet
model = DeepNNSingleNet(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_CNN_SingleNet" ):
from model.DeepCNNSingleNet import DeepCNNSingleNet
model = DeepCNNSingleNet(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_NN_SingleNet_Dropout" ):
from model.DeepNNSingleNetDropout import DeepNNSingleNetDropout
model = DeepNNSingleNetDropout(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_NN_Wide" ):
from model.DeepNNWide import DeepNNWide
model = DeepNNWide(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (model_type == "Deep_CNN_KERAS" ):
from model.DeepCNNKeras import DeepCNNKeras
print("Creating network model: ", model_type)
model = DeepCNNKeras(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
return model
elif (model_type == "Deep_NN_KERAS" ):
from model.DeepNNKeras import DeepNNKeras
print("Creating network model: ", model_type)
model = DeepNNKeras(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
return model
elif (model_type == "Deep_NN_Dropout_Critic" ):
from model.DeepNNDropoutCritic import DeepNNDropoutCritic
print("Creating network model: ", model_type)
model = DeepNNDropoutCritic(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
return model
elif (model_type == "Deep_CNN_Dropout_Critic" ):
from model.DeepCNNDropoutCritic import DeepCNNDropoutCritic
print("Creating network model: ", model_type)
model = DeepCNNDropoutCritic(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
return model
elif (model_type == "Deep_NN_Wide_Dropout_Critic" ):
from model.DeepNNWideDropoutCritic import DeepNNWideDropoutCritic
print("Creating network model: ", model_type)
model = DeepNNWideDropoutCritic(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
return model
elif (model_type == "Deep_NN_TanH" ):
from model.DeepNNTanH import DeepNNTanH
print("Creating network model: ", model_type)
model = DeepNNTanH(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
return model
elif (model_type == "Deep_NN_TanH_SingleNet" ):
from model.DeepNNTanHSingleNet import DeepNNTanHSingleNet
print("Creating network model: ", model_type)
model = DeepNNTanHSingleNet(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
return model
elif (model_type == "Deep_CNN_TanH_SingleNet" ):
from model.DeepCNNTanHSingleNet import DeepCNNTanHSingleNet
print("Creating network model: ", model_type)
model = DeepCNNTanHSingleNet(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
return model
elif (model_type == "Deep_CNN_SingleNet_Big" ):
from model.DeepCNNSingleNetBig import DeepCNNSingleNetBig
print("Creating network model: ", model_type)
model = DeepCNNSingleNetBig(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
return model
elif (model_type == "DumbModel" ):
model = DumbModel(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
else:
from model.ModelInterface import ModelInterface
# modelClass = my_import(path_)
modelClass = locate(model_type)
if ( issubclass(modelClass, ModelInterface)): ## Double check this load will work
model = modelClass(n_in=len(state_bounds[0]), n_out=n_out_, state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
print("Created model: ", model)
return model
else:
print ("Unknown network model type: ", str(model_type), " I hope you know what you are doing....")
# sys.exit(2)
return
import lasagne
print (" network type: ", model_type, " : ", model)
print ("Number of Critic network parameters", lasagne.layers.count_params(model.getCriticNetwork()))
print ("Number of Actor network parameters", lasagne.layers.count_params(model.getActorNetwork()))
if (settings['train_forward_dynamics'] and (settings['forward_dynamics_model_type'] == 'SingleNet')):
print ("Number of Forward Dynamics network parameters", lasagne.layers.count_params(model.getForwardDynamicsNetwork()))
print ("Number of Reward predictor network parameters", lasagne.layers.count_params(model.getRewardNetwork()))
return model
def createRLAgent(algorihtm_type, state_bounds, discrete_actions, reward_bounds, settings):
action_bounds = np.array(settings['action_bounds'])
networkModel = createNetworkModel(settings["model_type"], state_bounds, action_bounds, reward_bounds, settings)
num_actions= discrete_actions.shape[0] # number of rows
if settings['action_space_continuous']:
action_bounds = np.array(settings["action_bounds"], dtype=float)
num_actions = action_bounds.shape[1]
if (settings['load_saved_model'] == True):
directory= getDataDirectory(settings)
print ("Loading pre compiled network")
file_name=directory+getAgentName()+"_Best.pkl"
f = open(file_name, 'rb')
model = dill.load(f)
model.setSettings(settings)
f.close()
elif ( "Deep_NN2" == algorihtm_type):
from model.RLDeepNet import RLDeepNet
model = RLDeepNet(n_in=len(state_bounds[0]), n_out=num_actions, state_bounds=state_bounds,
action_bounds=None, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "Deep_NN3" ):
from model.DeepRLNet3 import DeepRLNet3
model = DeepRLNet3(n_in=len(state_bounds[0]), n_out=num_actions, state_bounds=state_bounds,
action_bounds=None, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "Deep_CACLA" ):
from model.DeepCACLA import DeepCACLA
model = DeepCACLA(n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "Deep_CACLA2" ):
from model.DeepCACLA2 import DeepCACLA2
model = DeepCACLA2(n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "Deep_CACLA_Dropout" ):
from model.DeepCACLADropout import DeepCACLADropout
model = DeepCACLADropout(n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "Deep_CACLA_DQ" ):
from model.DeepCACLADQ import DeepCACLADQ
model = DeepCACLADQ(n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "DeepCACLADV" ):
from model.DeepCACLADV import DeepCACLADV
model = DeepCACLADV(n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "Deep_DPG" ):
from model.DeepDPG import DeepDPG
model = DeepDPG(n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "Deep_DPG_DQ" ):
from model.DeepDPGDQ import DeepDPGDQ
model = DeepDPGDQ(n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "Deep_DPG_2" ):
from model.DeepDPG2 import DeepDPG2
model = DeepDPG2(n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "CACLA" ):
from algorithm.CACLA import CACLA
model = CACLA(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "CACLA2" ):
        from algorithm.CACLA2 import CACLA2  # assumed module path; the original imported CACLA here, which left CACLA2 undefined
model = CACLA2(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "CACLADV" ):
from algorithm.CACLADV import CACLADV
model = CACLADV(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "CACLADVTarget" ):
from algorithm.CACLADVTarget import CACLADVTarget
model = CACLADVTarget(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "DeepQNetwork" ):
from algorithm.DeepQNetwork import DeepQNetwork
print ("Using model type ", algorihtm_type , " with ", len(action_bounds), " actions")
model = DeepQNetwork(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "DoubleDeepQNetwork" ):
from algorithm.DoubleDeepQNetwork import DoubleDeepQNetwork
print ("Using model type ", algorihtm_type , " with ", len(action_bounds), " actions")
model = DoubleDeepQNetwork(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "A_CACLA" ):
from algorithm.A_CACLA import A_CACLA
model = A_CACLA(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "A3C2" ):
from algorithm.A3C2 import A3C2
model = A3C2(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "TRPO" ):
from algorithm.TRPO import TRPO
model = TRPO(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "PPO" ):
from algorithm.PPO import PPO
model = PPO(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "AP_CACLA" ):
from algorithm.AP_CACLA import AP_CACLA
model = AP_CACLA(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "PPO_Critic" ):
from algorithm.PPOCritic import PPOCritic
model = PPOCritic(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "PPO_Critic_2" ):
from algorithm.PPOCritic2 import PPOCritic2
model = PPOCritic2(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "TRPO_Critic" ):
from algorithm.TRPOCritic import TRPOCritic
model = TRPOCritic(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "CACLA_KERAS" ):
from algorithm.CACLA_KERAS import CACLA_KERAS
model = CACLA_KERAS(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "CACLA_Entropy" ):
from algorithm.CACLAEntropy import CACLAEntropy
model = CACLAEntropy(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
elif (algorihtm_type == "Distillation" ):
from algorithm.Distillation import Distillation
model = Distillation(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
else:
from algorithm.AlgorithmInterface import AlgorithmInterface
# modelClass = my_import(path_)
modelAlgorithm = locate(algorihtm_type)
if ( issubclass(modelAlgorithm, AlgorithmInterface)): ## Double check this load will work
model = modelAlgorithm(networkModel, n_in=len(state_bounds[0]), n_out=len(action_bounds[0]), state_bounds=state_bounds,
action_bounds=action_bounds, reward_bound=reward_bounds, settings_=settings)
print("Loaded algorithm: ", model)
# return model
else:
print ("Unknown learning algorithm type: " + str(algorihtm_type))
raise ValueError("Unknown learning algorithm type: " + str(algorihtm_type))
# sys.exit(2)
if (settings['load_saved_model'] == "network_and_scales"):
### In this case we want to change algroithm but want to keep the policy network
directory= getDataDirectory(settings)
print ("Loading pre compiled network and scaling values, not learing algorithm.")
file_name=directory+getAgentName()+"_Best.pkl"
f = open(file_name, 'rb')
model_ = dill.load(f)
model_.setSettings(settings)
model.setNetworkParameters(model_.getNetworkParameters())
# model.setTargetModel(model_.getTargetModel())
model.setStateBounds(model_.getStateBounds())
model.setActionBounds(model_.getActionBounds())
model.setRewardBounds(model_.getRewardBounds())
f.close()
print ("Using model type ", algorihtm_type , " : ", model)
return model
def createEnvironment(config_file, env_type, settings, render=False, index=None):
### For multitasking, can specify a list of config files
# if ( isinstance(config_file, list ) ):
if type(config_file) is list:
config_file = config_file[index]
print ("Using config file: ", config_file)
else:
print("Not a list hoser, it is a ", type(config_file), " for ", config_file)
print (config_file[0])
print("Creating sim Type: ", env_type)
if env_type == 'ballgame_2d':
from env.BallGame2D import BallGame2D
from sim.BallGame2DEnv import BallGame2DEnv
file = open(config_file)
conf = json.load(file)
# print ("Settings: " + str(json.dumps(conf)))
file.close()
conf['render'] = render
exp = BallGame2D(conf)
exp = BallGame2DEnv(exp, settings)
return exp
elif env_type == 'ballgame_1d':
from env.BallGame1D import BallGame1D
from sim.BallGame1DEnv import BallGame1DEnv
file = open(config_file)
conf = json.load(file)
# print ("Settings: " + str(json.dumps(conf)))
file.close()
conf['render'] = render
exp = BallGame1D(conf)
exp = BallGame1DEnv(exp, settings)
return exp
elif env_type == 'gapgame_1d':
from env.GapGame1D import GapGame1D
from sim.GapGame1DEnv import GapGame1DEnv
file = open(config_file)
conf = json.load(file)
# print ("Settings: " + str(json.dumps(conf)))
file.close()
conf['render'] = render
exp = GapGame1D(conf)
exp = GapGame1DEnv(exp, settings)
return exp
elif env_type == 'gapgame_2d':
from env.GapGame2D import GapGame2D
from sim.GapGame2DEnv import GapGame2DEnv
file = open(config_file)
conf = json.load(file)
# print ("Settings: " + str(json.dumps(conf)))
file.close()
conf['render'] = render
exp = GapGame2D(conf)
exp = GapGame2DEnv(exp, settings)
return exp
elif env_type == 'nav_Game':
from env.NavGame import NavGame
from sim.NavGameEnv import NavGameEnv
# file = open(config_file)
# conf = json.load(file)
conf = copy.deepcopy(settings)
# print ("Settings: " + str(json.dumps(conf)))
# file.close()
conf['render'] = render
exp = NavGame(conf)
exp = NavGameEnv(exp, settings)
return exp
elif env_type == 'Particle_Sim':
from env.ParticleGame import ParticleGame
from sim.ParticleSimEnv import ParticleSimEnv
# file = open(config_file)
# conf = json.load(file)
conf = copy.deepcopy(settings)
# print ("Settings: " + str(json.dumps(conf)))
# file.close()
conf['render'] = render
exp = ParticleGame(conf)
exp = ParticleSimEnv(exp, settings)
return exp
elif env_type == 'open_AI_Gym':
import gym
from gym import wrappers
from gym import envs
from sim.OpenAIGymEnv import OpenAIGymEnv
try:
import roboschool
except:
print ("roboschool not installed")
pass
try:
import gymdrl
except:
print ("Membrane/gymdrl not installed")
pass
from OpenGL import GL
# load_roboschool
# print(envs.registry.all())
# env = gym.make('CartPole-v0')
env_name = config_file
env = gym.make(env_name)
# file = open(config_file)
# conf = json.load(file)
conf = copy.deepcopy(settings)
conf['render'] = render
exp = OpenAIGymEnv(env, conf)
exp = exp
return exp
elif ((env_type == 'simbiconBiped2D') or (env_type == 'simbiconBiped3D') or (env_type == 'Imitate3D') or
(env_type == 'simbiconBiped2DTerrain') or (env_type == 'hopper_2D')):
import simbiconAdapter
from sim.SimbiconEnv import SimbiconEnv
c = simbiconAdapter.Configuration(config_file)
print ("Num state: ", c._NUMBER_OF_STATES)
c._RENDER = render
sim = simbiconAdapter.SimbiconWrapper(c)
print ("Using Environment Type: " + str(env_type))
exp = SimbiconEnv(sim, settings)
exp._conf = c # OMFG HACK so that python does not garbage collect the configuration and F everything up!
return exp
elif ((env_type == 'mocapImitation2D') or (env_type == 'mocapImitation3D')):
import simbiconAdapter
from sim.MocapImitationEnv import MocapImitationEnv
c = simbiconAdapter.Configuration(config_file)
print ("Num state: ", c._NUMBER_OF_STATES)
c._RENDER = render
sim = simbiconAdapter.SimbiconWrapper(c)
print ("Using Environment Type: " + str(env_type))
exp = MocapImitationEnv(sim, settings)
exp._conf = c # OMFG HACK so that python does not garbage collect the configuration and F everything up!
return exp
elif env_type == 'terrainRLSimOld':
# terrainRL_PATH = os.environ['TERRAINRL_PATH']
# sys.path.append(terrainRL_PATH+'/lib')
# from simAdapter import terrainRLAdapter
# from sim.TerrainRLEnv import TerrainRLEnv
from simAdapter import terrainRLSim
from sim.OpenAIGymEnv import OpenAIGymEnv
env = terrainRLSim.getEnv(env_name=config_file, render=render)
print ("Using Environment Type: " + str(env_type) + ", " + str(config_file))
# sim.setRender(render)
# sim.init()
conf = copy.deepcopy(settings)
conf['render'] = render
exp = OpenAIGymEnv(env, conf)
# env.getEnv().setRender(render)
# exp = TerrainRLEnv(env.getEnv(), settings)
return exp
elif ( (env_type == 'GymMultiChar')
or (env_type == 'terrainRLSim')
):
# terrainRL_PATH = os.environ['TERRAINRL_PATH']
# sys.path.append(terrainRL_PATH+'/lib')
# from simAdapter import terrainRLAdapter
# from sim.TerrainRLEnv import TerrainRLEnv
from simAdapter import terrainRLSim
from sim.GymMultiCharEnv import GymMultiCharEnv
env = terrainRLSim.getEnv(env_name=config_file, render=render)
print ("Using Environment Type: " + str(env_type) + ", " + str(config_file))
# sim.setRender(render)
# sim.init()
conf = copy.deepcopy(settings)
conf['render'] = render
exp = GymMultiCharEnv(env, conf)
# env.getEnv().setRender(render)
# exp = TerrainRLEnv(env.getEnv(), settings)
return exp
elif env_type == 'terrainRLBiped2D':
terrainRL_PATH = os.environ['TERRAINRL_PATH']
sys.path.append(terrainRL_PATH+'/lib')
from simAdapter import terrainRLAdapter
from sim.TerrainRLEnv import TerrainRLEnv
sim = terrainRLAdapter.cSimAdapter(['train', '-arg_file=', terrainRL_PATH+'/'+config_file, '-relative_file_path=', terrainRL_PATH+'/'])
sim.setRender(render)
# sim.init(['train', '-arg_file=', config_file])
# print ("Num state: ", c._NUMBER_OF_STATES)
# sim = simbiconAdapter.SimbiconWrapper(c)
print ("Using Environment Type: " + str(env_type))
exp = TerrainRLEnv(sim, settings)
# exp._conf = c # OMFG HACK so that python does not garbage collect the configuration and F everything up!
return exp
elif env_type == 'terrainRLFlatBiped2D':
terrainRL_PATH = os.environ['TERRAINRL_PATH']
sys.path.append(terrainRL_PATH+'/lib')
from simAdapter import terrainRLAdapter
from sim.TerrainRLFlatEnv import TerrainRLFlatEnv
sim = terrainRLAdapter.cSimAdapter(['train', '-arg_file=', terrainRL_PATH+'/'+config_file, '-relative_file_path=', terrainRL_PATH+'/'])
sim.setRender(render)
# sim.init(['train', '-arg_file=', config_file])
# print ("Num state: ", c._NUMBER_OF_STATES)
# sim = simbiconAdapter.SimbiconWrapper(c)
print ("Using Environment Type: " + str(env_type))
exp = TerrainRLFlatEnv(sim, settings)
# exp._conf = c # OMFG HACK so that python does not garbage collect the configuration and F everything up!
return exp
elif (env_type == 'terrainRLImitateBiped2D' or (env_type == 'terrainRLImitateBiped3D')):
terrainRL_PATH = os.environ['TERRAINRL_PATH']
sys.path.append(terrainRL_PATH+'/lib')
from simAdapter import terrainRLAdapter
from sim.TerrainRLImitateEnv import TerrainRLImitateEnv
sim = terrainRLAdapter.cSimAdapter(['train', '-arg_file=', terrainRL_PATH+'/'+config_file, '-relative_file_path=', terrainRL_PATH+'/'])
sim.setRender(render)
# sim.init(['train', '-arg_file=', config_file])
# print ("Num state: ", c._NUMBER_OF_STATES)
# sim = simbiconAdapter.SimbiconWrapper(c)
print ("Using Environment Type: " + str(env_type))
exp = TerrainRLImitateEnv(sim, settings)
# exp._conf = c # OMFG HACK so that python does not garbage collect the configuration and F everything up!
return exp
elif ((env_type == 'terrainRLHLCBiped3D')):
terrainRL_PATH = os.environ['TERRAINRL_PATH']
sys.path.append(terrainRL_PATH+'/lib')
from simAdapter import terrainRLAdapter
from sim.TerrainRLHLCEnv import TerrainRLHLCEnv
sim = terrainRLAdapter.cSimAdapter(['train', '-arg_file=', terrainRL_PATH+'/'+config_file, '-relative_file_path=', terrainRL_PATH+'/'])
sim.setRender(render)
# sim.init(['train', '-arg_file=', config_file])
# print ("Num state: ", c._NUMBER_OF_STATES)
# sim = simbiconAdapter.SimbiconWrapper(c)
print ("Using Environment Type: " + str(env_type))
exp = TerrainRLHLCEnv(sim, settings)
# exp._conf = c # OMFG HACK so that python does not garbage collect the configuration and F everything up!
return exp
import characterSim
c = characterSim.Configuration(config_file)
# print ("Num state: ", c._NUMBER_OF_STATES)
c._RENDER = render
exp = characterSim.Experiment(c)
# print ("Num state: ", exp._config._NUMBER_OF_STATES)
if env_type == 'pendulum_env_state':
from sim.PendulumEnvState import PendulumEnvState
print ("Using Environment Type: " + str(env_type))
exp = PendulumEnvState(exp, settings)
elif env_type == 'pendulum_env':
from sim.PendulumEnv import PendulumEnv
print ("Using Environment Type: " + str(env_type))
exp = PendulumEnv(exp, settings)
elif env_type == 'pendulum3D_env':
from sim.PendulumEnv import PendulumEnv
print ("Using Environment Type: " + str(env_type))
exp = PendulumEnv(exp, settings)
elif env_type == 'pendulum_3D_env':
from sim.PendulumEnv import PendulumEnv
print ("Using Environment Type: " + str(env_type))
exp = PendulumEnv(exp, settings)
elif env_type == 'paperGibbon_env':
from sim.PaperGibbonEnv import PaperGibbonEnv
print ("Using Environment Type: " + str(env_type))
exp = PaperGibbonEnv(exp, settings)
else:
print ("Invalid environment type: " + str(env_type))
raise ValueError("Invalid environment type: " + str(env_type))
# sys.exit()
exp._conf = c # OMFG HACK so that python does not garbage collect the configuration and F everything up!
return exp
def createActor(env_type, settings, experience):
actor=None
if env_type == 'ballgame_2d':
from actor.BallGame2DActor import BallGame2DActor
actor = BallGame2DActor(settings, experience)
elif env_type == 'ballgame_1d':
from actor.BallGame1DActor import BallGame1DActor
actor = BallGame1DActor(settings, experience)
elif env_type == 'gapgame_1d':
from actor.GapGame1DActor import GapGame1DActor
actor = GapGame1DActor(settings, experience)
elif env_type == 'gapgame_2d':
from actor.GapGame2DActor import GapGame2DActor
actor = GapGame2DActor(settings, experience)
elif (env_type == 'nav_Game'):
from actor.NavGameActor import NavGameActor
actor = NavGameActor(settings, experience)
elif (env_type == 'Particle_Sim'):
from actor.ParticleSimActor import ParticleSimActor
actor = ParticleSimActor(settings, experience)
elif ((env_type == 'simbiconBiped2D') or (env_type == 'simbiconBiped3D') or
(env_type == 'simbiconBiped2DTerrain')):
from actor.SimbiconActor import SimbiconActor
actor = SimbiconActor(settings, experience)
elif ((env_type == 'mocapImitation2D') or (env_type == 'mocapImitation3D')):
from actor.MocapImitationActor import MocapImitationActor
actor = MocapImitationActor(settings, experience)
elif ((env_type == 'hopper_2D')):
from actor.Hopper2DActor import Hopper2DActor
actor = Hopper2DActor(settings, experience)
elif (env_type == 'Imitate3D') :
from actor.ImitationActor import ImitationActor
actor = ImitationActor(settings, experience)
elif env_type == 'terrainRLBiped2D' or (env_type == 'terrainRLFlatBiped2D'):
from actor.TerrainRLActor import TerrainRLActor
actor = TerrainRLActor(settings, experience)
elif ( env_type == 'terrainRLImitateBiped2D' or (env_type == 'terrainRLImitateBiped3D')
# or (env_type == 'terrainRLSim')
):
from actor.TerrainRLImitationActor import TerrainRLImitationActor
actor = TerrainRLImitationActor(settings, experience)
elif (env_type == 'terrainRLHLCBiped3D'):
from actor.TerrainRLHLCActor import TerrainRLHLCActor
actor = TerrainRLHLCActor(settings, experience)
elif (env_type == 'paperGibbon_env'):
from actor.PaperGibbonAgent import PaperGibbonAgent
actor = PaperGibbonAgent(settings, experience)
elif (env_type == 'pendulum'):
from actor.ActorInterface import ActorInterface
actor = ActorInterface(settings, experience)
elif (env_type == 'open_AI_Gym'
or (env_type == 'terrainRLSim')
):
from actor.OpenAIGymActor import OpenAIGymActor
actor = OpenAIGymActor(settings, experience)
elif env_type == 'GymMultiChar':
from actor.GymMultiCharActor import GymMultiCharActor
actor = GymMultiCharActor(settings, experience)
else:
print("Error actor type unknown: ", env_type)
raise ValueError("Error actor type unknown: ", env_type)
# sys.exit()
return actor
def createSampler(settings, exp):
actor=None
if (settings['sampling_method'] == 'simple'):
print ("Using Sampling Method: " + str(settings['sampling_method']))
sampler = Sampler(settings)
elif (settings['sampling_method'] == 'bruteForce'):
print ("Using Sampling Method: " + str(settings['sampling_method']))
sampler = BruteForceSampler()
elif (settings['sampling_method'] == 'SequentialMC'):
print ("Using Sampling Method: " + str(settings['sampling_method']))
sampler = SequentialMCSampler(exp, settings['look_ahead_planning_steps'], settings)
elif (settings['sampling_method'] == 'ForwardPlanner'):
print ("Using Sampling Method: " + str(settings['sampling_method']))
sampler = ForwardPlanner(exp, settings['look_ahead_planning_steps'])
else:
print ("Sampler method not supported: " + str(settings['sampling_method']) )
sys.exit()
return sampler
def createForwardDynamicsModel(settings, state_bounds, action_bounds, actor, exp, agentModel):
if settings["forward_dynamics_predictor"] == "simulator":
from model.ForwardDynamicsSimulator import ForwardDynamicsSimulator
print ("Using forward dynamics method: " + str(settings["forward_dynamics_predictor"]))
forwardDynamicsModel = ForwardDynamicsSimulator(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, actor, exp, settings)
elif settings["forward_dynamics_predictor"] == "simulator_parallel":
from model.ForwardDynamicsSimulatorParallel import ForwardDynamicsSimulatorParallel
print ("Using forward dynamics method: " + str(settings["forward_dynamics_predictor"]))
forwardDynamicsModel = ForwardDynamicsSimulatorParallel(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, actor, exp, settings)
forwardDynamicsModel.init(len(state_bounds[0]), len(action_bounds[0]), state_bounds, action_bounds, actor, exp, settings)
elif settings["forward_dynamics_predictor"] == "saved_network":
# from model.ForwardDynamicsNetwork import ForwardDynamicsNetwork
print ("Using forward dynamics method: " + str(settings["forward_dynamics_predictor"]))
        directory = getDataDirectory(settings)  # 'data_folder' was undefined in this scope; use the standard data directory
        file_name_dynamics = directory+"forward_dynamics_"+"_Best.pkl"
forwardDynamicsModel = dill.load(open(file_name_dynamics))
elif settings["forward_dynamics_predictor"] == "network":
print ("Using forward dynamics method: " + str(settings["forward_dynamics_predictor"]))
if (settings['load_saved_model'] == True):
print ("Loading pre compiled network")
directory= getDataDirectory(settings)
file_name_dynamics=directory+"forward_dynamics_"+"_Best.pkl"
f = open(file_name_dynamics, 'rb')
forwardDynamicsModel = dill.load(f)
f.close()
elif (('load_saved_fd_model' in settings and
(settings['load_saved_fd_model']))):
print ("Loading pre trained network")
directory= getDataDirectory(settings)
file_name_dynamics=directory+"forward_dynamics_"+"_Best_pretrain.pkl"
f = open(file_name_dynamics, 'rb')
forwardDynamicsModel = dill.load(f)
f.close()
else:
if ( settings['forward_dynamics_model_type'] == "SingleNet"):
## Hopefully this will allow for parameter sharing across both models...
fd_net = agentModel.getModel()
else:
fd_net = createForwardDynamicsNetwork(state_bounds, action_bounds, settings)
if ('fd_algorithm' in settings ):
from algorithm.AlgorithmInterface import AlgorithmInterface
algorihtm_type = settings['fd_algorithm']
# modelClass = my_import(path_)
modelAlgorithm = locate(algorihtm_type)
if ( issubclass(modelAlgorithm, AlgorithmInterface)): ## Double check this load will work
forwardDynamicsModel = modelAlgorithm(fd_net, state_length=len(state_bounds[0]), action_length=len(action_bounds[0]),
state_bounds=state_bounds,
action_bounds=action_bounds, settings_=settings)
print("Loaded FD algorithm: ", forwardDynamicsModel)
# return model
else:
print ("Unknown learning algorithm type: " + str(algorihtm_type))
raise ValueError("Unknown learning algorithm type: " + str(algorihtm_type))
# sys.exit(2)
elif ('train_state_encoding' in settings and (settings['train_state_encoding'])):
from algorithm.EncodingModel import EncodingModel
forwardDynamicsModel = EncodingModel(fd_net, state_length=len(state_bounds[0]), action_length=len(action_bounds[0]),
state_bounds=state_bounds, action_bounds=action_bounds, settings_=settings)
elif ('train_gan' in settings and (settings['train_gan'])):
from algorithm.GAN import GAN
forwardDynamicsModel = GAN(fd_net, state_length=len(state_bounds[0]), action_length=len(action_bounds[0]),
state_bounds=state_bounds, action_bounds=action_bounds, settings_=settings)
else:
from algorithm.ForwardDynamics import ForwardDynamics
forwardDynamicsModel = ForwardDynamics(fd_net, state_length=len(state_bounds[0]), action_length=len(action_bounds[0]),
state_bounds=state_bounds, action_bounds=action_bounds, settings_=settings)
else:
print ("Unrecognized forward dynamics method: " + str(settings["forward_dynamics_predictor"]))
raise ValueError("Unrecognized forward dynamics method: " + str(settings["forward_dynamics_predictor"]))
# sys.exit()
return forwardDynamicsModel
def createForwardDynamicsNetwork(state_bounds, action_bounds, settings):
if settings["forward_dynamics_model_type"] == "Deep_NN":
from model.ForwardDynamicsNetwork import ForwardDynamicsNetwork
print ("Using forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
forwardDynamicsNetwork = ForwardDynamicsNetwork(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, settings)
elif settings["forward_dynamics_model_type"] == "Deep_NN_Dropout":
from model.ForwardDynamicsNNDropout import ForwardDynamicsNNDropout
print ("Using forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
forwardDynamicsNetwork = ForwardDynamicsNNDropout(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, settings)
elif settings["forward_dynamics_model_type"] == "Deep_CNN":
from model.ForwardDynamicsCNN import ForwardDynamicsCNN
print ("Using forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
forwardDynamicsNetwork = ForwardDynamicsCNN(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, settings)
elif settings["forward_dynamics_model_type"] == "Deep_CNN_Tile":
from model.ForwardDynamicsCNNTile import ForwardDynamicsCNNTile
print ("Using forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
forwardDynamicsNetwork = ForwardDynamicsCNNTile(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, settings)
elif settings["forward_dynamics_model_type"] == "Deep_CNN2":
from model.ForwardDynamicsCNN2 import ForwardDynamicsCNN2
print ("Using forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
forwardDynamicsNetwork = ForwardDynamicsCNN2(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, settings)
elif settings["forward_dynamics_model_type"] == "Deep_CNN3":
from model.ForwardDynamicsCNN3 import ForwardDynamicsCNN3
print ("Using forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
forwardDynamicsNetwork = ForwardDynamicsCNN3(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, settings)
elif settings["forward_dynamics_model_type"] == "Deep_CNN_Dropout":
from model.ForwardDynamicsCNNDropout import ForwardDynamicsCNNDropout
print ("Using forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
forwardDynamicsNetwork = ForwardDynamicsCNNDropout(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, settings)
elif settings["forward_dynamics_model_type"] == "Deep_Dense_NN_Dropout":
from model.ForwardDynamicsDenseNetworkDropout import ForwardDynamicsDenseNetworkDropout
print ("Using forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
forwardDynamicsNetwork = ForwardDynamicsDenseNetworkDropout(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, settings)
else:
from model.ModelInterface import ModelInterface
# modelClass = my_import(path_)
print("Loading FD model type:", settings["forward_dynamics_model_type"])
modelClass = locate(settings["forward_dynamics_model_type"])
if ( issubclass(modelClass, ModelInterface)): ## Double check this load will work
model = modelClass(len(state_bounds[0]), len(action_bounds[0]),
state_bounds, action_bounds, settings)
print("Created model: ", model)
return model
else:
print ("Unrecognized forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
raise ValueError("Unrecognized forward dynamics network type: " + str(settings["forward_dynamics_model_type"]))
# sys.exit()
import lasagne
print ("Number of Forward Dynamics network parameters", lasagne.layers.count_params(forwardDynamicsNetwork.getForwardDynamicsNetwork()))
print ("Number of Reward predictor network parameters", lasagne.layers.count_params(forwardDynamicsNetwork.getRewardNetwork()))
return forwardDynamicsNetwork
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~weightguidance.py | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
weightguidance plans ahead: it searches bounded-length paths from the current
state and suggests the path that improves coverage the most, where every
transition on a path can also be penalized by a configurable weight.
Parameters:
'searchdepth':
The maximum length of a path to search.
default: 10
'searchorder':
'shortestfirst': Always search the shortest unfinished path next.
'bestfirst': Always search the best unfinished path next.
default: 'shortestfirst'
'maxtransitions':
The maximum number of transitions to go through in the search.
default: 10000
'greedy':
0: Search until it's time to stop. Then select the best path.
1: Choose the very first coverage-improving path found.
default: 0
'searchconstraint':
        searchconstraint restricts which paths will be searched.
^
B|
o<---,
^ |E
A| C |
-->o--->o
^ |
F| |D
o<---'
'nocrossingpaths':
Breadth-first kind of search. If a transition has already been
searched, it won't be searched again as a part of any other path.
Eg. if a path A-B has already been searched, C-E-B won't be searched
'noloops':
A transition can appear only once in a path.
            Eg. C-D-F-C won't be searched (C-D-F will be, though).
'noconstraint':
No constraints, all the paths can be searched.
Eg. C-D-F-C-D-F-C-E is possible
default: 'noloops'
'transitionweight':
        A penalty subtracted from the coverage points of every transition in a
        candidate path. Either a number applied to all transitions, or a
        string of the form 'kw:<weight>' which penalizes only keyword ('kw_')
        transitions.
        default: 0
"""
import random
from heapq import heappush, heappop
from tema.guidance.guidance import Guidance as GuidanceBase
version = '0.1'
SEARCH_CONSTRAINTS = (NONE,NO_LOOPS,NO_CROSSING_PATHS) = range(3)
class Guidance(GuidanceBase):
def __init__(self):
GuidanceBase.__init__(self)
# default parameters:
self.setParameter("transitionweight",0)
self.setParameter("searchdepth",10)
self.setParameter("searchorder","shortestfirst")
self.setParameter("maxtransitions",10000)
self.setParameter("greedy",0)
self.setParameter("searchconstraint","noloops")
def setParameter(self,name,value):
accepted = ("transitionweight","searchorder","searchdepth",
"maxtransitions","searchconstraint")
if name == "transitionweight":
if isinstance(value,str) and value.startswith('kw:'):
kww = float(value[3:])
self._transitionweight = \
lambda t: kww if 'kw_' in str(t.getAction()) else 0
else:
if value < 0:
self.log("WARNING! Negative transition weight "+
"doesn't make sense!")
self._transitionweight = lambda t: value
elif name == "searchorder":
if value == "bestfirst":
self._toHeap = lambda p,badness: (badness,len(p),p)
self._fromHeap = lambda values: (values[2],values[0])
elif value == "shortestfirst":
self._toHeap = lambda p,badness: (len(p),badness,p)
self._fromHeap = lambda values: (values[2],values[1])
else:
raise ValueError("Invalid searchorder: '%s'" % (value,))
elif name in ("searchdepth", "searchradius"):
self._searchDepth = value
elif name == "maxtransitions":
self._maxTransitions = value
elif name == "greedy":
self._greedy = value
elif name == "searchconstraint":
if value == "nocrossingpaths":
self._seco = NO_CROSSING_PATHS
elif value == "noloops":
self._seco = NO_LOOPS
elif value == "noconstraint":
self._seco = NONE
else:
raise ValueError("Invalid searchconstraint '%s'"%value)
else:
print __doc__
raise ValueError("Invalid parameter '%s' for newguidance. "%name +
"Accepted parameters: %s" % ",".join(accepted))
GuidanceBase.setParameter(self,name,value)
def _kwWeight(self,transition):
if "kw_" in str(transition.getAction()):
return 1
return 0
def prepareForRun(self):
self._thePlan = []
def suggestAction(self, fromState):
if not self._thePlan:
self.log("Computing a new path...")
# reverse the path so we can pop() the next transition...
self._thePlan = [t for t in reversed(self._search(fromState))]
self._testmodel.clearCache()
nextTrans = self._thePlan.pop()
# != operator not defined for States!
if not nextTrans.getSourceState() == fromState:
# we ended up in a state that wasn't in _thePlan.
# usually (always?) this happens when we suggested a path with
# action A but ~A was actually executed, or vice versa.
# TODO: something to deal with this "nondetermism" in the search,
# or no?
self.log("We've fallen off the path I once suggested! "+\
"I'll suggest a new path.")
self._thePlan = []
return self.suggestAction(fromState)
return nextTrans.getAction()
def _search(self,fromState):
""" Searches from the given state until:
- all the paths with length 'searchdepth' have been searched
- OR 'maxtransitions' transitions seen
- OR 'greedy' is enabled and any path improving coverage is found.
Returns the best path found.
            Goodness of a path = the sum of
                covreq.transitionPoints(t) - _transitionweight(t)
            over all transitions t in the path.
            If covreq does not define transitionPoints, then goodness is the
            difference in covreq.getPercentage() between the end and the
            beginning of the path.
'searchorder'=='shortestfirst':
always check the shortest unfinished path next
'searchorder'=='bestfirst':
always check the best unfinished path found so far
"""
if len(self._requirements) > 1:
raise NotImplementedError("Only one requirement, please.")
req = self._requirements[0]
startCov = req.getPercentage()
# If the req has transitionPoints method, we'll use that.
# Otherwise, using getPercentage()
useTP = hasattr(req,"transitionPoints")
# pathHeap contains the paths whose search is in progress.
# the goodness of the last transition of each of the paths has not been
# determined yet.
startingTrans = [t for t in fromState.getOutTransitions()]
pathHeap = [self._toHeap((t,),0) for t in startingTrans]
seenTrans = set(startingTrans)
# because heapq is smallest-first, measuring the badness instead of
# goodness of path...
bestPaths = []
leastBadness = 0
SEARCH_TRANSITIONS = self._maxTransitions
MAX_LENGTH = self._searchDepth
# the paths whose length is max. their search has been thus stopped.
maxLenPaths = []
# the paths that can't be continued, even though their length < max.
deadEnds = []
transitionsSearched = 0
while True: # searching until there's some reason to stop (break).
if not pathHeap or MAX_LENGTH==0:
self.log("Search ended: searched all the paths "+
"up to length %i." % MAX_LENGTH)
break
if transitionsSearched >= SEARCH_TRANSITIONS:
self.log("Search ended: hit the maximum transitions limit "+
"of %i transitions" % SEARCH_TRANSITIONS)
break
# always taking one path from pathHeap and increasing its length by
# the outgoing transitions of its last state. the increased paths
# are again put to pathHeap.
path,badness = self._fromHeap( heappop(pathHeap) )
# push the req and mark the path executed.
req.push()
last = path[-1]
# If the req has transitionPoints method, we'll use that.
# Otherwise, using getPercentage (all reqs should have that).
if useTP:
for t in path[:-1]:
req.markExecuted(t)
badness -= req.transitionPoints(last)
badness += self._transitionweight(last)
else:
for t in path:
req.markExecuted(t)
# adding a nonpositive number
badness = startCov - req.getPercentage()
# popping the req resets the changes done after push.
req.pop()
# is this the best path so far?
if badness < leastBadness:
leastBadness = badness
bestPaths = [path]
if self._greedy:
# we've found a path that's better than nothing.
# if we're greedy, that's all we need.
break
elif badness == leastBadness:
# this is equally good as the best path
bestPaths.append(path)
if len(path) < MAX_LENGTH:
isDeadEnd = True # dead end until proven otherwise
for t in last.getDestState().getOutTransitions():
if self._tranShouldBeSearched(t,path,seenTrans):
# add an one-transition-longer path to pathHeap
heappush(pathHeap, self._toHeap(path+(t,),badness))
seenTrans.add(t)
isDeadEnd = False
if isDeadEnd:
deadEnds.append(path)
else:
maxLenPaths.append(path)
transitionsSearched += 1
if leastBadness == 0:
# no good paths found...
if pathHeap:
self.log("Returning a random unsearched path.")
p,unused = self._fromHeap(random.choice(pathHeap))
return p
elif maxLenPaths:
self.log("Returning a random max_len path (len = %i)" % (MAX_LENGTH,))
return random.choice( maxLenPaths )
elif deadEnds:
self.log("Returning a random dead end path.")
return random.choice( deadEnds )
else:
# found one or more good paths
shortestBestPathLen = min([len(q) for q in bestPaths])
shortestBestPaths = [p for p in bestPaths
if len(p) == shortestBestPathLen]
bestPath = random.choice(shortestBestPaths)
self.log("Returning a path whose length is %i, badness = %f" % (
len(bestPath),leastBadness) )
return bestPath
def _tranShouldBeSearched(self,t,path,seenTrans):
return (self._seco == NO_CROSSING_PATHS and t not in seenTrans
or
self._seco == NO_LOOPS and t not in path
or
self._seco == NONE)
def _newPathCanBeCreated(self,pathHeap):
return not self._maxNumPaths or len(pathHeap) < self._maxNumPaths
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~oneafteranotherguidance.py | # Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
oneafteranotherguidance executes multiple Guidances. After one guidance
finishes (= reaches 100% coverage), another guidance takes over.
The guidances and coverage requirements are defined in a file given as
a 'file' parameter to oneafteranotherguidance. Example of such a file:
# First, find all 'keke' actions with gameguidance. Then start random walk.
--guidance=gameguidance --coverage=clparser --coveragereq='actions .*keke.*'
--guidance=randomguidance
That is, one line per one set of arguments. The guidances are executed in the
same order as the lines in the file. The syntax is the same as with testengine
command line arguments. Allowed arguments are:
--guidance (REQUIRED)
--guidance-args
--coverage
--coveragereq
--coveragereq-args
NOTE: some of the coverages (dummycoverage, some findnew coverages, etc.) may
never reach 100%. That means that they'll never stop, and the guidances after
them won't be executed.
"""
from tema.guidance.guidance import Guidance as GuidanceBase
import shlex # for parsing a '--XXX=yyy --ZZZ="a b"' kind of argument string
import getopt # for parsing a list of args given by shlex
version='0.1 oneafteranotherguidance'
class Guidance(GuidanceBase):
def __init__(self):
GuidanceBase.__init__(self)
self._guidances = []
self._guidanceOpts = []
self._covs = []
self._currIndex = -1
self._model = None
self._filename = None
def setTestModel(self,model):
self._model = model
for g in self._guidances:
g.setTestModel(model)
def setParameter(self,parametername,parametervalue):
if parametername=='file':
self._filename = parametervalue
else:
print __doc__
raise Exception("Invalid param '%s' for oneafteranotherguidance."
% (parametername,))
# GuidanceBase.setParameter(self,parametername,parametervalue)
def prepareForRun(self):
if self._filename is None:
raise ValueError("oneafteranotherguidance needs a 'file' param.")
self._addGuidancesFromFile(self._filename)
# This is a bit ugly...
# Setting the getPercentage method of the covreq that was given
# to me by addRequirement. After this, _totalReq will simply answer
# what I want it to answer (= my getPercentage()). It doesn't
# matter what the covreq was originally.
# i.e: we don't use the _totalReq to guide us, we'll just use it to
# report the current progress of the actually used covreqs (= those
# read from the file given as a 'file' parameter) to testengine.
self._totalReq.getPercentage = lambda: self.getPercentage()
self._startNextGuidance()
def markExecuted(self, t):
self._currGuidance.markExecuted(t)
if self._currCov.getPercentage() == 1:
self.log("Reached 100%% coverage: %s"
%(_guidanceOptsAsStr(self._guidanceOpts[self._currIndex],)))
if self._currIndex+1 < len(self._covs):
self._startNextGuidance()
def suggestAction(self, s):
return self._currGuidance.suggestAction(s)
def addRequirement(self, req):
self._totalReq = req
def getPercentage(self):
# E.g: the 3rd one of the total of 4 covreqs is 60% covered ->
# the total coverage is [2 * 100% * (1/4)] + [60% * (1/4)] = 65%
covsFinished = self._currIndex
shareOfOneCov = 1.0 / len(self._covs)
shareOfFinished = covsFinished * shareOfOneCov
shareOfCurr = self._currCov.getPercentage() * shareOfOneCov
return shareOfFinished + shareOfCurr
def _startNextGuidance(self):
self._currIndex += 1
self._currGuidance = self._guidances[self._currIndex]
self._currCov = self._covs[self._currIndex]
self._currGuidance.prepareForRun()
self.log("New guidance: %s"
%(_guidanceOptsAsStr(self._guidanceOpts[self._currIndex],)))
def _addGuidancesFromFile(self,filename):
f = file(filename)
for line in f:
strippedLine = _stripLine(line)
if not strippedLine:
continue
opts = _parseOptionsLine(strippedLine)
self._addGuidance(opts)
f.close()
def _addGuidance(self,opts):
cov = _createCoverage(opts['coverage'],opts['coveragereq'],self._model)
set_parameters(cov,opts['coveragereq-args'])
self._covs.append(cov)
guidance = _createGuidance(opts['guidance'])
set_parameters(guidance,opts['guidance-args'])
guidance.setTestModel(self._model)
guidance.addRequirement(cov)
self._guidances.append(guidance)
self._guidanceOpts.append(opts)
def _stripLine(line):
return line.split('#',1)[0].strip() # anything after '#' is comment
ARGS = ['guidance','guidance-args','coverage','coveragereq','coveragereq-args']
def _parseOptionsLine(line):
    opts, rest = getopt.getopt(shlex.split(line), "", ['%s='%a for a in ARGS])
if rest:
raise Exception("Invalid arguments: %s'" % (rest,))
opts = [(n[2:],v) for n,v in opts] # remove '--' from argument names
optsDict = dict(opts)
if 'guidance' not in optsDict:
raise Exception("guidance argument required.")
optsDict.update( dict((a,'') for a in ARGS if a not in optsDict) )
return optsDict
def _createGuidance(guidance):
guidancemodule=__import__("tema.guidance."+guidance,globals(),locals(),[''])
return guidancemodule.Guidance()
def _createCoverage(coverage,coveragereq,model):
if not coverage:
from tema.coverage.dummycoverage import CoverageRequirement
return CoverageRequirement('')
covmodule=__import__("tema.coverage."+coverage,globals(),locals(),[''])
return covmodule.requirement(coveragereq,model=model)
def _guidanceOptsAsStr(gopts):
s = gopts['guidance']
if gopts['guidance-args']:
s += '(%s)'%(gopts['guidance-args'],)
if gopts['coverage']:
s += " using %s" % (gopts['coverage'],)
if gopts['coveragereq-args']:
s += '(%s)'%(gopts['coveragereq-args'],)
if gopts['coveragereq']:
s += " with '%s'"%(gopts['coveragereq'],)
return s
# copy-pasted from testengine.
# couldn't import because importing testengine also runs it... :(
def set_parameters(object,argument_string):
"""Parse argument string and call setParameter-method of the
object accordingly. For example argument string
'port:9090,yellowflag,logger:adapterlog' implies calls
setParameter('port',9090), setParameter('yellowflag',None),
setParameter('logger',adapterlog_object)."""
# TODO: implement special object-type parameters (not needed so far)
for argpair in argument_string.split(","):
if not argpair: continue
if ":" in argpair:
name,value=argpair.split(":",1)
else:
name,value=argpair,None
try: object.setParameter(name,int(value))
except Exception,e:
if not (isinstance(e,TypeError) or isinstance(e,ValueError)): raise e
try: object.setParameter(name,float(value))
except Exception,e:
if not (isinstance(e,TypeError) or isinstance(e,ValueError)): raise e
object.setParameter(name,value)
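
# --- Illustrative sketch (not part of the original module) ---
# A minimal demonstration of how a single line of the 'file' parameter is
# parsed; the example line below is taken from the module docstring. It only
# exercises helpers defined in this module.
if __name__ == "__main__":
    example_line = ("--guidance=gameguidance --coverage=clparser"
                    " --coveragereq='actions .*keke.*'")
    parsed = _parseOptionsLine(_stripLine(example_line))
    print parsed['guidance']         # -> gameguidance
    print _guidanceOptsAsStr(parsed) # -> gameguidance using clparser with ...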
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~wrandomguidance.py | # Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
WGuidance (Weighted Random Test Selection) reads the following parameter
values:
- randomseed (any hashable object, default: None)
seed for random number generator
WRandom requires that Transition objects of the model implement
the getProbability() method.
"""
from tema.guidance.guidance import Guidance as GuidanceBase
import random
import time
# The sum of probabilities of transitions must not differ from 1 more
# than FLOAT_ERR, otherwise a warning is printed to log. For those
# who did not know, (0.3 + 0.3 + 0.3 + 0.1) != 1.
FLOAT_ERR=.0000001
version='0.1'
class GuidanceException(Exception): pass
class WGuidance(GuidanceBase):
def __init__(self):
GuidanceBase.__init__(self)
self.setParameter('randomseed',time.time())
def setParameter(self,parametername,parametervalue):
if not parametername in ['randomseed']:
print __doc__
raise Exception("Invalid parameter '%s' for gameguidance." % parametername)
GuidanceBase.setParameter(self,parametername,parametervalue)
if parametername=='randomseed':
self._rndchoose=random.Random(parametervalue).choice
def suggestAction(self,state_object):
# check that getProbability is implemented
try:
probabilities = [t.getProbability() for t in state_object.getOutTransitions()]
except AttributeError:
self.log("getProbability not implemented in a transition from state %s"
% state_object)
raise GuidanceException("getProbability not implemented")
maxvalue=sum(probabilities)
if not (1-FLOAT_ERR < maxvalue < 1+FLOAT_ERR):
self.log("Warning: weights not normalized to 1 (sum = %s) in state %s" % (maxvalue,state_object))
r=random.random()*maxvalue
integral=0
for i,p in enumerate(probabilities):
integral+=p
if r<=integral: return state_object.getOutTransitions()[i].getAction()
# this line should never be reached
raise GuidanceException("Failed to pick an action. Is this a deadlock: %s?"
% state_object)
Guidance=WGuidance
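
# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical demonstration of the weighted selection above. The
# stub classes mimic only the part of the model interface that
# WGuidance.suggestAction() relies on (getOutTransitions, getProbability,
# getAction); they are not the real tema model classes.
if __name__ == "__main__":
    class _StubTransition:
        def __init__(self, action, probability):
            self._action, self._probability = action, probability
        def getAction(self): return self._action
        def getProbability(self): return self._probability
    class _StubState:
        def __init__(self, transitions): self._transitions = transitions
        def getOutTransitions(self): return self._transitions
    state = _StubState([_StubTransition("kwA", 0.7), _StubTransition("kwB", 0.3)])
    g = WGuidance()
    # "kwA" should be suggested roughly 70% of the time
    print [g.suggestAction(state) for _unused in range(10)]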
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~greedyguidance.py | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Greedy guidance is a breadth-first search algorithm that
returns the shortest path improving coverage. If one of the search
limits is reached, a random path is selected.
Greedy guidance reads the following parameter values:
- max_states (positive integer, default: 10000)
The number of states the breadth-first search algorithm expands in a single
search round.
- max_seconds (positive value, default: 3600)
The maximum amount of time in seconds a single search can last.
"""
version='wormguidance based on greedyguidance: 0.beta'
from tema.guidance.guidance import Guidance as GuidanceBase
from tema.model.model import Transition
import random
import time
import re
GoodState, OnlyJump, UglyState, SelectJorS = range(4)
class StopCondition:
def __init__(self, prm_src, sized_dict, start_time):
self._dictionary = sized_dict
self._start_time = start_time
self._max_states = prm_src.getParameter("max_states")
self._time_limit = prm_src.getParameter("max_seconds",3600)
def __call__(self):
rval = (time.time()-self._start_time) >= self._time_limit
if self._max_states :
rval = rval or (len(self._dictionary) >= self._max_states)
return rval
class Guidance(GuidanceBase):
def __init__(self):
GuidanceBase.__init__(self)
self._stored_path=[]
self._random_select=random.Random(time.time()).choice
self._sleep_ts_re = re.compile(r"SLEEPts.*")
def _search_transition_by_name(self, from_state, a_name):
for trs in from_state.getOutTransitions() :
if str( trs.getAction()) == a_name :
return trs
return None
def _get_select_set(self, state, closed):
rval=[]
for trs in state.getOutTransitions():
if str(trs.getDestState()) not in closed:
rval.append(trs)
return rval
def _construct_path_to(self, transition, closed):
rval=[transition]
s=rval[0].getSourceState()
while s :
rval[0:0]=[closed[str(s)]]
s=rval[0].getSourceState()
return rval[1:]
def _breadth_first_search(self, from_state, target_actions):
self.setParameter("max_states",self.getParameter("max_states",10000))
closed={}
waiting=[Transition(None,None,from_state)]
stop_condition=StopCondition(self,closed,self._start_time)
while waiting and not stop_condition() :
current_trans = waiting.pop(0)
current_state = current_trans.getDestState()
if not closed.has_key(str(current_state)) :
closed[str(current_state)] = current_trans
for trs in current_state.getOutTransitions():
if str(trs.getAction()) in target_actions :
self._forbiden_set=set()
return (self._construct_path_to(trs, closed), True)
elif str(trs.getDestState()) in self._forbiden_set:
pass
elif closed.has_key(str(trs.getDestState())) :
pass
else:
waiting.append(trs)
if waiting :
trs=self._random_select(waiting)
#self._forbiden_set = self._forbiden_set | set(closed.keys())
self._forbiden_set = set(closed.keys())
self.log("Forbiden set: %s" % len(self._forbiden_set))
return (self._construct_path_to(trs, closed), False)
self._forbiden_set=set()
return (None, False)
def _search_engine(self, from_state, target_actions):
self._stored_path, success = self._breadth_first_search (from_state,\
target_actions)
if success :
self._search_state = GoodState
elif ( self._search_state == UglyState and random.random() < 0.25) \
or not self._stored_path :
back_path, success = self._breadth_first_search (from_state,\
self._to_sleep_actions)
if success :
self._stored_path = back_path
self._search_state = GoodState
self.log("Moves backwards")
else :
self._search_state = UglyState
if self._search_state == UglyState :
self.log("Jumps randomly forward")
def prepareForRun(self):
nonexit="Nonexisting string"
if self.getParameter("help", nonexit) != nonexit:
print __doc__
raise Exception("Asked only for help")
GuidanceBase.prepareForRun(self)
if len(self._requirements) != 1 :
raise Exception("Needs exactly one requirement")
if not self._testmodel :
raise Exception("Model should be given")
self._stored_path=[]
self._to_sleep_actions =\
self._testmodel.matchedActions(set([self._sleep_ts_re]))
self._last_go_back = False
self._search_state = GoodState
self._forbiden_set = set()
self.log("Wormguidance ready for rocking")
def _trslist_to_str(self,path):
return str([ str(t.getAction()) for t in path])
def suggestAction(self, from_state):
# self.log("DEBUG: new search beginning")
self._start_time=time.time()
if self._stored_path :
if str(self._stored_path[0].getSourceState()) != str(from_state) :
self.log("Throw away: %s"\
% self._trslist_to_str(self._stored_path) )
self._stored_path=[]
self._forbiden_set=set()
# self.log("DEBUG: Ok, käynnistellään etsintää")
if not self._stored_path :
cov_obj=self._requirements[0]
test_model=self._testmodel
# self.log("DEBUG: about to hint")
rex, d = cov_obj.getExecutionHint()
# self.log("DEBUG: about to degrypt")
actions = test_model.matchedActions(rex)
# self.log("DEBUG: tapahtumanimet "+str(actions))
if len(actions) > 0 :
self._search_engine(from_state, actions)
test_model.clearCache()
self.log("Path: %s"\
% self._trslist_to_str(self._stored_path) )
if self._stored_path :
trs = self._stored_path.pop(0)
self.log("Search has been ended")
return trs.getAction()
else:
raise Exception ("Next action can not be found")
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~sharedtabuguidance.py | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
sharedtabuguidance - a guidance that shares a tabulist between processes.
Uses multiprocessing -> requires python 2.6+
The tabulist is in a separate process which can be started
by giving 'startandconnect:PORT' arg. It listens to localhost:PORT.
After that, other sharedtabuguidances with arg 'connect:PORT' will
use the same tabulist.
When the starter sharedtabuguidance stops, the tabulist process stops also. :(
Accepted guidance-args:
'startandconnect:PORT'
Starts a shared tabulist on localhost:PORT, and connects to it.
'connect:PORT'
Connects to an existing shared tabulist on localhost:PORT.
Other guidance-args, only accepted with 'startandconnect'.
('connect'ing guidances will use those same args)
'tabuitems:TABUITEMTYPE'
One of the following:
state (default)
statecomponent
transition
NOTE:
The tabulist is updated only after a transition has been actually
executed (= markExecuted(t) is called).
Example: (processes P1 and P2)
P1: non-tabu transition T is suggested.
P2: the same non-tabu transition T is suggested.
P1: T is executed
P1: T is added to the tabulist.
P2: T is executed (it's actually tabu now, but P2 doesn't know it)
P2: T is added to the tabulist. (useless but harmless)
Don't think this is a serious issue. Could be fixed by implementing some
kind of nextToBeExecutedTransitions tabulist alongside the main tabulist.
"""
# TODO: maybe get rid of this and
# do a more general kinda shared guidance thing or something...
version="0.01"
from tema.guidance.guidance import Guidance as GuidanceBase
from tema.coverage.tabulist import TabuList
import random
try:
from multiprocessing.managers import SyncManager
from multiprocessing import Lock
except ImportError,e:
from processing.managers import SyncManager
from processing import Lock
class TabuListManager(SyncManager):
pass
class TabuListUser:
# The tabulist.
# There's only 1 tabulist per process (it's a class variable).
_THE_TABULIST = TabuList()
# Locked when somebody's using the tabulist.
_TABULIST_LOCK = Lock()
# Number of connected TabuListUsers.
_CONNECTED = 0
_CONN_LOCK = Lock()
_PARAMS = None
def __init__(self,params=None):
TabuListUser._CONN_LOCK.acquire()
TabuListUser._CONNECTED += 1
self._connNum = TabuListUser._CONNECTED
if params is not None:
TabuListUser._PARAMS = params
TabuListUser._CONN_LOCK.release()
def getParameters(self):
return TabuListUser._PARAMS
def connNum(self):
return self._connNum
def len(self):
TabuListUser._TABULIST_LOCK.acquire()
le = len(TabuListUser._THE_TABULIST)
TabuListUser._TABULIST_LOCK.release()
return le
def add(self, item):
TabuListUser._TABULIST_LOCK.acquire()
TabuListUser._THE_TABULIST.add(item)
TabuListUser._TABULIST_LOCK.release()
def addMany(self, items):
TabuListUser._TABULIST_LOCK.acquire()
for item in items:
TabuListUser._THE_TABULIST.add(item)
TabuListUser._TABULIST_LOCK.release()
def tabunessOf(self, items):
""" Eg. If the 3 first items are tabu and the last one is not,
returns: (True,True,True,False)
"""
TabuListUser._TABULIST_LOCK.acquire()
to = tuple([i in TabuListUser._THE_TABULIST for i in items])
TabuListUser._TABULIST_LOCK.release()
return to
TabuListManager.register('TabuList',TabuListUser)
def _getTabuListManager(port):
manager = TabuListManager( ('127.0.0.1', port),
authkey='tema_shared_tabulist_%s'%(version,) )
return manager
class Guidance(GuidanceBase):
def __init__(self):
GuidanceBase.__init__(self)
self._port = None
self._manager = None
self._iAmTheManagerStarter = False
self._sgParams = []
def setParameter(self,name,value):
if name == 'help':
print __doc__
raise Exception()
elif name == 'connect':
self._port = value
elif name == 'startandconnect':
self._port = value
self._iAmTheManagerStarter = True
else:
self._sgParams.append( (name,value) )
# GuidanceBase.setParameter(self,name,value)
def _setParameterForReal(self,name,value):
if name in ('tabuitems','tabuitem'):
if value.startswith('statecomp'):
self.markExecuted = self._markExecuted_destStateComps
self.suggestAction = self._suggestAction_destStateComps
elif value in ('state','states'):
self.markExecuted = self._markExecuted_destState
self.suggestAction = self._suggestAction_destState
elif value in ('transition','transitions'):
self.markExecuted = self._markExecuted_transition
self.suggestAction = self._suggestAction_transition
else:
raise ValueError("Invalid tabuitems: %s" % (value,))
else:
raise ValueError("Invalid argument: %s" % (name,))
def prepareForRun(self):
if self._port is None:
raise ValueError("'connect' or 'startandconnect' must be given!")
if self._sgParams and not self._iAmTheManagerStarter:
raise ValueError("Setting parameters are only allowed "+
"with 'startandconnect'. When connecting, "+
"we just use existing params.")
self._manager = _getTabuListManager(self._port)
if self._iAmTheManagerStarter:
for (n,v) in self._sgParams:
self._setParameterForReal(n,v)
self.log("Starting a new shared tabulist on port %i."%(self._port))
self._manager.start()
self.log("Started.")
self._remoteTabuList = self._manager.TabuList(self._sgParams)
else:
self.log("Connecting to an existing shared tabulist on port %i"%(
self._port))
self._manager.connect()
self.log("Connected.")
self._remoteTabuList = self._manager.TabuList()
self._sgParams = self._remoteTabuList.getParameters()
for (n,v) in self._sgParams:
self._setParameterForReal(n,v)
self.log("The guidance params are: %s" % (self._sgParams,))
le = self._remoteTabuList.len()
connNum = self._remoteTabuList.connNum()
self.log(("I was the guidance number %i to connect to this tabulist."+
" It already contains %i items.")%(connNum,le))
def _markExecuted_destState(self, transition):
s = str(transition.getDestState())
self._remoteTabuList.add(s)
def _suggestAction_destState(self, from_state):
trans = from_state.getOutTransitions()
acts = [t.getAction() for t in trans]
dests = [str(t.getDestState()) for t in trans]
tabus = self._remoteTabuList.tabunessOf(dests)
nonTabuActs = [a for i,a in enumerate(acts) if not tabus[i]]
self.log("%i/%i of possible actions are non-tabu."%(
len(nonTabuActs),len(acts)))
if nonTabuActs:
a = random.choice(nonTabuActs)
self.log("Returning a non-tabu action %s"%(a,))
else:
a = random.choice(acts)
self.log("Returning a tabu action %s"%(a,))
return a
markExecuted = _markExecuted_destState
suggestAction = _suggestAction_destState
def _markExecuted_destStateComps(self, transition):
self._remoteTabuList.addMany(_compStates(transition))
def _suggestAction_destStateComps(self, from_state):
actNont = [(t.getAction(),self._nontabunessOfDestStateComps(t)) for
t in from_state.getOutTransitions()]
maxNont = max([nont for a,nont in actNont])
bestActions = [a for (a,nont) in actNont if nont==maxNont]
self.log("There are %i actions with %i non-tabuness."%(
len(bestActions),maxNont))
a = random.choice(bestActions)
return a
def _nontabunessOfDestStateComps(self,transition):
tabunesses = self._remoteTabuList.tabunessOf(_compStates(transition))
return tabunesses.count(False)
def _markExecuted_transition(self, transition):
self._remoteTabuList.add(_transitionAsPicklable(transition))
def _suggestAction_transition(self, from_state):
trans = from_state.getOutTransitions()
picklTrans = [_transitionAsPicklable(t) for t in trans]
acts = [t.getAction() for t in trans]
tabus = self._remoteTabuList.tabunessOf(picklTrans)
nonTabuActs = [a for i,a in enumerate(acts) if not tabus[i]]
self.log("%i/%i of possible actions are non-tabu."%(
len(nonTabuActs),len(acts)))
if nonTabuActs:
a = random.choice(nonTabuActs)
self.log("Returning a non-tabu action %s"%(a,))
else:
a = random.choice(acts)
self.log("Returning a tabu action %s"%(a,))
return a
def isThreadable(self):
# sharedtabuguidance won't work as threaded! (don't really know why)
return False
def _compStates(transition):
    comp_ids = [s._id for s in transition.getDestState()._id]
    return tuple([(i,s) for i,s in enumerate(comp_ids)])
def _transitionAsPicklable(transition):
return (str(transition.getSourceState()),
str(transition.getAction()),
str(transition.getDestState()))
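
# --- Illustrative configuration sketch (not part of the original module) ---
# A hedged example of how the guidance-args documented above map onto this
# class. In a real run testengine passes these values; the port number is
# arbitrary. prepareForRun() is left commented out because it would actually
# start (or connect to) the shared tabulist manager process.
if __name__ == "__main__":
    starter = Guidance()
    starter.setParameter('startandconnect', 9123)
    starter.setParameter('tabuitems', 'state')
    # starter.prepareForRun()   # would launch the tabulist manager
    follower = Guidance()
    follower.setParameter('connect', 9123)
    # follower.prepareForRun()  # would connect to the manager started above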
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~guiguidance.py | #!/usr/bin/env python
# coding: iso-8859-1
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
guiguidance lets the user decide (via ModelGui) what to execute next.
"""
from tema.guidance.guidance import Guidance as GuidanceBase
from tema.validator.simulation.modelgui import getTheModelGui
class Guidance(GuidanceBase):
def __init__(self):
GuidanceBase.__init__(self)
def setParameter(self,paramname,paramvalue):
print __doc__
raise Exception("Invalid parameter '%s' for guiguidance." % paramname)
# GuidanceBase.setParameter(self,paramname,paramvalue)
def prepareForRun(self):
self._path = []
def markExecuted(self, transition):
self._path = self._path[1:]
GuidanceBase.markExecuted(self,transition)
def suggestAction(self, from_state):
if not self._path or not self._path[0].getSourceState()==from_state:
# Ask the gui (=user) for a path
self._path = getTheModelGui().selectPath(from_state)
else:
# we still have path left, just drawing the current position
getTheModelGui().stepTo(from_state)
return self._path[0].getAction()
def isThreadable(self):
# Afaik, Tkinter gui doesn't tolerate calls from threads
# other than the one that created it. That's why we can't use
# modelgui from another thread.
return False
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~tabuguidance.py | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
A guidance that tries to find actions/states/transitions that are not in
a tabulist.
Params:
'numtabuactions' (nonnegative integer or "infinite"):
Size of action tabu list.
'numtabustates' (nonnegative integer or "infinite"):
Size of state tabu list.
'numtabutransitions' (nonnegative integer or "infinite"):
Size of transition tabu list.
If none of these are given, using infinite state tabulist.
If more than one numtabu params given, tabuguidance'll search the tabulists
in this order: action tabulist -> state tabulist -> transition tabulist.
When an outgoing transition whose action/state/transition is not in the
tabulist is found, the corresponding action is suggested.
If no such transition is found after searching through all the tabulists,
a random action is suggested.
If there are many possible actions to execute, one of them is chosen randomly.
"""
version="tabuguidance 0.21"
from tema.guidance.guidance import Guidance as GuidanceBase
from tema.coverage.tabulist import TabuList
import random
INFINITY = () # () is a good choice for INFINITY since () > anything...
class Guidance(GuidanceBase):
def __init__(self):
GuidanceBase.__init__(self)
self._NUM_TABU_ACTIONS = None
self._NUM_TABU_STATES = None
self._NUM_TABU_TRANSITIONS = None
self._tabulist_action = None
self._tabulist_state = None
self._tabulist_transition = None
def setParameter(self,paramname,paramvalue):
accepted = ("numtabuactions","numtabustates","numtabutransitions")
if paramname=='numtabuactions':
self._NUM_TABU_ACTIONS = self._parseSize(paramname,paramvalue)
elif paramname=='numtabustates':
self._NUM_TABU_STATES = self._parseSize(paramname,paramvalue)
elif paramname=='numtabutransitions':
self._NUM_TABU_TRANSITIONS = self._parseSize(paramname,paramvalue)
else:
print __doc__
raise Exception("Invalid parameter '%s' for tabuguidance. Accepted parameters: %s" % paramname, accepted)
GuidanceBase.setParameter(self,paramname,paramvalue)
def _parseSize(self,paramname,paramvalue):
if paramvalue == float("infinity") or paramvalue is INFINITY or \
paramvalue in ("inf","infinite","infinity"):
return INFINITY
try:
return int(paramvalue)
except ValueError:
raise Exception("Tabuguidance: invalid '%s' value: %s. It should be a positive integer or 'infinite'." % (paramname,paramvalue))
def prepareForRun(self):
# if no numtabu* params given, use infinite state tabulist
if (self._NUM_TABU_ACTIONS is None and
self._NUM_TABU_STATES is None and
self._NUM_TABU_TRANSITIONS is None):
self.log("Using default: 'numtabustates:infinite'")
self._NUM_TABU_STATES = INFINITY
self._suggesters = [] # the funcs that suggest an action
# order: action, state, transition
if self._NUM_TABU_ACTIONS is not None:
self._tabulist_action = TabuList(self._NUM_TABU_ACTIONS)
self._suggesters.append(self._newAction)
if self._NUM_TABU_STATES is not None:
self._tabulist_state = TabuList(self._NUM_TABU_STATES)
self._suggesters.append(self._newStateAction)
if self._NUM_TABU_TRANSITIONS is not None:
self._tabulist_transition = TabuList(self._NUM_TABU_TRANSITIONS)
self._suggesters.append(self._newTransitionAction)
def markExecuted(self, transition):
# special case: add the very first (source) state to the tabu-list
statelist = self._tabulist_state
        if statelist is not None and len(statelist) == 0:
            statelist.add( str(transition.getSourceState()) )
# add actions/states/transitions to tabulists if given tabulist exists
if self._tabulist_action is not None:
self._tabulist_action.add( str(transition.getAction()) )
if self._tabulist_state is not None:
self._tabulist_state.add( str(transition.getDestState()) )
if self._tabulist_transition is not None:
self._tabulist_transition.add( str(transition) )
GuidanceBase.markExecuted(self,transition)
def suggestAction(self, from_state):
out_trans = from_state.getOutTransitions()
random.shuffle(out_trans)
for suggester in self._suggesters:
action = suggester(out_trans)
if action is not None:
return action
# no non-tabu actions found, a random action is our best suggestion...
return out_trans[0].getAction() # out_trans has been shuffled
def _newAction(self, trans):
"""returns a non-tabu action, or None"""
for t in trans:
if str(t.getAction()) not in self._tabulist_action:
return t.getAction()
return None
def _newStateAction(self, trans):
"""returns an action leading to a non-tabu state, or None"""
for t in trans:
if str(t.getDestState()) not in self._tabulist_state:
return t.getAction()
return None
def _newTransitionAction(self, trans):
"""returns an action of a non-tabu transition, or None"""
for t in trans:
if str(t) not in self._tabulist_transition:
return t.getAction()
return None
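
# --- Illustrative configuration sketch (not part of the original module) ---
# A hedged example of the parameters documented in the module docstring; in a
# real run testengine passes them via --guidance-args. After prepareForRun()
# the guidance prefers actions not executed lately and states never visited.
if __name__ == "__main__":
    g = Guidance()
    g.setParameter('numtabuactions', 100)        # remember the last 100 actions
    g.setParameter('numtabustates', 'infinite')  # never forget a visited state
    g.prepareForRun()
    # g.suggestAction(some_state) would now favour non-tabu actions/states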
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~gameguidance-t.py | # Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
GameGuidance-Threading Guidance module
Notes:
- Do not use this guidance with test models that include deadlocks!
GameGuidance-Threading reads the following parameter values:
- maxdepth (natural number, default: 100)
the search depth to which the algorithm explores the state space
at maximum.
- mindepth (natural number, default: 0)
the lower bound for the search depth before an action can be
suggested.
- randomseed (any hashable object, default: None)
seed for random number generator.
"""
from tema.guidance.guidance import Guidance as GuidanceBase
import random
import time
import thread
import copy
version='0.1 very simple player'
class GuidanceT(GuidanceBase):
FINISHPOINTS="found goal"
def __init__(self):
GuidanceBase.__init__(self)
self.setParameter('maxdepth',100)
self.setParameter('mindepth',1)
self.setParameter('randomseed',time.time())
self._lastroute=[]
self._steps_to_reroute=0
# search front is a list of triplets: (score, path, coverage)
# where score is a pair: (coverage_percentage, steps since
# last change coverage_percentage). Thus, the bigger the
# number of steps, the faster the final coverage_percentage is
# achieved.
self.search_front=[]
self._thread_id=None
self._front_shortened=0 # msg from markExecuted to thread
def setParameter(self,parametername,parametervalue):
if not parametername in ['maxdepth','mindepth','randomseed']:
print __doc__
raise Exception("Invalid parameter '%s' for gameguidance." % parametername)
GuidanceBase.setParameter(self,parametername,parametervalue)
if parametername=='randomseed':
self._rndchoose=random.Random(parametervalue).choice
def prepareForRun(self):
GuidanceBase.prepareForRun(self)
self.search_front_lock=thread.allocate_lock()
def markExecuted(self,transition_object):
        locked=0 # tracks whether this function has acquired search_front_lock
# Update advances to the 'official' coverage object
GuidanceBase.markExecuted(self,transition_object)
# Then cleanup search front: remove every entry from the
# search front if it starts with some other than the executed
# transition_object, and shorten entries starting with the
# transition.
# Shortening cannot be done if paths are too short, therefore,
# if the thread is running, let it do its job
while 1:
self.search_front_lock.acquire()
locked=1
if len(self.search_front[0][1])<2:
# There is just one action in the search front, it can
# be safely removed only if the thread is no more
# running, that is, time_to_quit signal has been
# given.
if self.time_to_quit: break
else:
self.search_front_lock.release()
locked=0
time.sleep(1) # give some time to the thread
continue
# NOTE: This may cause a livelock if there are
# deadlocks in the model: search front is not getting
# any deeper. There should not be deadlocks!
else:
break
# If the thread is quitting, there is no reason to
# cleanup the search front
#if self.time_to_quit:
# if locked: self.search_front_lock.release()
# return
# This function must own the lock now, search_front can be
# edited.
new_search_front=[]
for points,path,reqs in self.search_front:
if path[0]==transition_object:
self._front_shortened=1 # message to the thread
new_search_front.append([points,path[1:],reqs])
self.search_front=new_search_front
self.log("Search front reduced to length %s and depth %s" %
(len(self.search_front),len(self.search_front[0][1])))
self.search_front_lock.release()
def suggestAction(self,state_object):
# If a thread has not been started yet, start it now and give
# it some time to find something. The first depth is reached
# very fast.
if self._thread_id==None:
self.time_to_quit=0
self._thread_id=thread.start_new_thread(
self._route_planner_thread,(state_object,))
time.sleep(1)
# Choose randomly one the transitions that start the paths
# with the best score.
self.search_front_lock.acquire()
if len(self.search_front)==0:
# The search front should not be empty, because
# suggestAction and markExecuted are executed one after
# another, and markExecuted never finishes with an empty
# search front.
self.log("Strange! Search front should never be empty, but it is.")
raise Exception("suggestAction found an empty search front")
# If necessary, give the algorithm some time to reach the
# minimal search depth.
if self.search_front[0][0][0] < 1.0: # not finished yet
while len(self.search_front[0][1]) < self._params['mindepth']:
self.search_front_lock.release()
time.sleep(1)
self.search_front_lock.acquire()
max_points=self.search_front[-1][0]
best_transition=self._rndchoose(
[path[0] for po,path,tr in self.search_front if po==max_points])
self.search_front_lock.release()
return best_transition.getAction()
def _route_planner_thread(self,starting_state):
self.log("Route planner thread started")
# initialize search front
self.search_front_lock.acquire()
for t in starting_state.getOutTransitions():
reqs=copy.deepcopy(self._requirements)
for r in reqs: r.markExecuted(t)
self.search_front.append(
[(sum([r.getPercentage() for r in reqs]),0),[t],reqs])
self.search_front_lock.release()
while not self.time_to_quit:
# let someone else use search_front...
time.sleep(0.05)
# Increase search front depth by one level:
# 1. Go through a copy of the search front
self.search_front_lock.acquire()
shallow_copy_of_search_front=copy.copy(self.search_front)
self.search_front_lock.release()
if len(shallow_copy_of_search_front[0][1])>=self.getParameter('maxdepth'):
time.sleep(1)
continue # maximum depth reached, do not calculate more
new_search_front=[]
for points,path,reqs in shallow_copy_of_search_front:
if self._front_shortened==1:
# markExecuted touched the front, forget
# the update of this front....
break
for t in path[-1].getDestState().getOutTransitions():
nreqs=copy.deepcopy(reqs)
npath=copy.copy(path)
npath.append(t)
for r in nreqs: r.markExecuted(t)
new_perc=sum([r.getPercentage() for r in nreqs])
if new_perc==points[0]: new_steps=points[1]+1
else: new_steps=0 # the percentage has grown!
new_search_front.append([
(new_perc,new_steps),
npath,
nreqs])
new_search_front.sort()
# 2. If the search front has not been changed during the
# search, it can be updated. Otherwise, forget the results
# and try to update the new search front.
self.search_front_lock.acquire()
if self._front_shortened==0:
self.search_front=new_search_front
self.log("New search front length %s, depth %s, score %s" %
(len(new_search_front),
len(new_search_front[-1][1]),
new_search_front[-1][0]))
else:
self._front_shortened=0
self.log("Throwing away depth %s, rerouting from depth %s" %
(len(shallow_copy_of_search_front[0][1]),
len(self.search_front[0][1])))
if self.search_front[0][0][0]>=1.0:
self.time_to_quit=1
self.log("Nothing can possibly go wrong anymore")
self.search_front_lock.release()
Guidance=GuidanceT
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~randomguidance.py | # Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
RandomGuidance reads the following parameter values:
- randomseed (any hashable object, default: None)
seed for random number generator
"""
from tema.guidance.guidance import Guidance as GuidanceBase
import random
import time # for random seed initialization
version='0.1 random walk'
class Guidance(GuidanceBase):
def __init__(self):
GuidanceBase.__init__(self)
self.setParameter('randomseed',time.time())
def setParameter(self,parametername,parametervalue):
if not parametername in ['randomseed']:
print __doc__
raise Exception("Invalid parameter '%s' for gameguidance." % parametername)
GuidanceBase.setParameter(self,parametername,parametervalue)
if parametername=='randomseed':
self._rndchoose=random.Random(parametervalue).choice
def suggestAction(self,state_object):
return self._rndchoose(state_object.getOutTransitions()).getAction()
| [] |
2024-01-10 | tema-tut/tema-tg | TemaLib~tema~guidance~gameguidance.py | # Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
GameGuidance reads the following parameter values:
- lookahead (natural number, default: 15)
the search depth to which the algorithm explores the state space
before answering.
- randomseed (any hashable object, default: None)
seed for random number generator
- rerouteafter (natural number, default: 1)
route will be recalculated when rerouteafter steps have been taken
(or when execution has run out of the previous route in any case)
"""
# TODO
#
# Do not reroute at all when:
#
# - transitions from the current state cause the same function call in
# keyword adapter (vwVerifyText, ~vwVerifyText)
#
# Do not reroute too long when:
#
# - all transitions are about verification, not sending real input
# events
from tema.guidance.guidance import Guidance as GuidanceBase
import random
import time # for random seed initialization
version='0.15 very simple player, chooses a route with max points with min steps'
class Guidance(GuidanceBase):
FINISHPOINTS="found goal"
def __init__(self):
GuidanceBase.__init__(self)
self.setParameter('lookahead',15)
self.setParameter('randomseed',time.time())
self.setParameter('rerouteafter',1)
self._lastroute=[]
self._steps_to_reroute=0
def setParameter(self,parametername,parametervalue):
if not parametername in ['lookahead','randomseed','rerouteafter']:
print __doc__
raise Exception("Invalid parameter '%s' for gameguidance." % parametername)
GuidanceBase.setParameter(self,parametername,parametervalue)
if parametername=='randomseed':
self._rndchoose=random.Random(parametervalue).choice
elif parametername=='rerouteafter':
self._steps_to_reroute=self.getParameter(parametername)
def suggestAction(self,state_object):
if self._steps_to_reroute<=0 \
or self._lastroute==[] \
or not state_object==self._lastroute[-1].getSourceState():
# We need to calculate new route. It will be written to
# self._lastroute.
if len(state_object.getOutTransitions())==1:
# If there is only one possible transition, forget the
# routing for now. Next suggestAction call causes
# rerouting anyway, because then self._lastroute will
# be empty
self._lastroute=[state_object.getOutTransitions()[0]]
self.log("There is only one possible action: %s" % self._lastroute[-1].getAction())
else:
self.log("Rerouting...")
points,self._lastroute = self._plan_route(state_object,self.getParameter('lookahead'))
self._steps_to_reroute=self.getParameter('rerouteafter')
log_actions=[t.getAction().toString() for t in self._lastroute[::-1]]
self.log("New route: points: %s, route: %s" % (points,log_actions))
else:
self.log("Using the next action in the planned route: %s" %
self._lastroute[-1].getAction())
next_transition=self._lastroute.pop()
self._steps_to_reroute-=1
return next_transition.getAction()
def _plan_route(self,state_object,depth):
"""Returns a pair (points, path) where length of path is the
parameter depth+1 and points is a pair
(points_in_the_end_of_path,
number_of_unnecessary_depth_in_the_end_of_the_path).
The unnecessary steps do not increase the points.
"""
# if no look-ahead, return zero points and any out transition
if depth<=0:
try:
transition=[self._rndchoose(
state_object.getOutTransitions() )]
return ([sum([r.getPercentage() for r in self._requirements]),0],
transition)
except:
self.log("Deadlock detected, gameguidance cannot continue.")
self.log("Deadlock state: %s" % state_object)
raise Exception("Unexpected deadlock in the test model.")
outtrans=state_object.getOutTransitions()
# Initialize transition point table of length of
# outtransitions with pairs of zeros. The table contains the
# coverage points after execution the transition.
points=[ [0.0,0] for t in outtrans]
nonfinishing_routes=[None]*len(outtrans)
finishing_routes=[]
shortest_finishing_length=depth
for transition_index,t in enumerate(outtrans):
# mark transition t executed in every requirement and calc points
for r in self._requirements:
r.push()
r.markExecuted(t)
points[transition_index][0]+=r.getPercentage()
if int(points[transition_index][0])>=len(self._requirements):
# every requirement fulfilled
finishing_routes.append([t])
shortest_finishing_length=0
elif shortest_finishing_length>0:
future_points,route = self._plan_route(t.getDestState(),shortest_finishing_length-1)
route.append(t)
if future_points[0]==Guidance.FINISHPOINTS:
finishing_routes.append(route)
shortest_finishing_length=min(shortest_finishing_length,len(route))
else:
if points[transition_index][0]==future_points[0]:
# there will be no increase in points in the future =>
# the search depth after which nothing happens increases
points[transition_index][1]=depth
else:
# future looks bright, wasted depth does not increase
# copy points and the depth
points[transition_index]=future_points
nonfinishing_routes[transition_index]=route
# restore the transition execution status in every requirement
for r in self._requirements:
r.pop()
# if there are finishing routes, return one of the shortest:
if finishing_routes:
route_lengths=[ len(r) for r in finishing_routes ]
minlen=min(route_lengths)
best_route_indexes=[ i for i,rl in enumerate(route_lengths) if rl==minlen ]
chosen_route_index=self._rndchoose(best_route_indexes)
return [Guidance.FINISHPOINTS,0],finishing_routes[ chosen_route_index ]
else:
# return any of the routes with maximum points
# that give the maximum points with the smallest number of steps
maximumpoints=max(points) # max ([ [1,9], [2,8], [2,8], [2,1] ]) == [2,8]
best_route_indexes=[i for i,p in enumerate(points) if p==maximumpoints]
return maximumpoints, nonfinishing_routes[ self._rndchoose(best_route_indexes) ]
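
# --- Illustrative configuration sketch (not part of the original module) ---
# A hedged example of the parameters documented in the module docstring: look
# 10 transitions ahead, follow a planned route for 5 steps before rerouting,
# and seed the random number generator for repeatability.
if __name__ == "__main__":
    g = Guidance()
    g.setParameter('lookahead', 10)
    g.setParameter('rerouteafter', 5)
    g.setParameter('randomseed', 42)
    # A coverage requirement and a current state are still needed before
    # g.addRequirement(...) and g.suggestAction(...) can do anything useful.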
| [] |
2024-01-10 | shank250/codyaan-sih | testing-langchain.py |
import os
from langchain.memory import ConversationSummaryBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
import json
os.environ['OPENAI_API_KEY'] = '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'
user_chat = []
ai_chat = []
'''
1. getting all the details of the user's complaint
2. using that complaint to map to the best suitable government employee
a. initial complaint -> llm routing -> trying to get the best fitted \
employees list -> vector database + langchain query
b. giving the user this freedom to select any one of the filtered employees \
or we may even automate this as using Map re-rank concept available in langchain docs\
or any other better way
3. then re-processing the complaint and connecting it with the employee
'''
user_query = input("Hi, How can i help you ? \n")
user_chat.append(user_query)
chat_summary = ""
chat_summary_status = False
user_query_ = """Dear Mr. Smith,
I am writing to express my dissatisfaction with the service I received from your bank on September 21, 2023. I visited your Greater Noida branch to withdraw cash from my savings account, but I was told that the system was down and that I would have to come back later. I returned to the branch the next day, but I was still unable to withdraw my money. The bank staff was unable to provide me with a clear explanation for the problem, and they were also rude and dismissive.
I am very disappointed with the way this situation was handled. I am a loyal customer of your bank, and I have never had any problems with my account before. However, this recent experience has left me feeling frustrated and undervalued.
I would like to request that you investigate this matter and take appropriate action to ensure that this does not happen again. I would also like to receive a refund for the time and inconvenience that I have experienced.
Thank you for your time and attention to this matter.
Sincerely,
John Doe"""
# now giving my chat description of the llm for vector search
def vector_search(chat_summary = chat_summary, user_chat = user_chat):
llm = ChatOpenAI(temperature=0.5)
prompt = ChatPromptTemplate.from_template(
"You are a grivance complaint registration bot\
now create a best suitalbe json object with following fields : \
'vector-search' = this key will contain a string which would be \
best suitalbe for vector secrch for the given problem statement from the user \
try to make it concise and accurate\
here is the user detailed problem : {summary}"
)
summary = str(user_chat) + chat_summary
print(summary)
chain = LLMChain(llm=llm, prompt=prompt)
vector_search_query = chain.run(summary)
print(vector_search_query)
return vector_search_query
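
# --- Illustrative sketch (an assumption, not part of the original script) ---
# Step 2a of the plan above ("vector database + langchain query") is not
# implemented in this script. A minimal version could feed the string built by
# vector_search() into a vector store of employee profiles. The Chroma store,
# the OpenAIEmbeddings model and the "employee_profiles" collection below are
# assumptions for illustration only.
def find_candidate_employees(search_text, k=3):
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Chroma
    db = Chroma(collection_name="employee_profiles",
                embedding_function=OpenAIEmbeddings())
    # returns the k employee profiles closest to the complaint summary
    return db.similarity_search(search_text, k=k)
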
# trying to add router
def complaint_completion(user_query):
global chat_summary
from langchain.memory import ConversationSummaryBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
unfilable_template = """You are a very smart grivance complaint reciever which forwards the \
grivance to best suitable employee which can solve this problem \
You are great at analysing the grivances \
When you analyse Grivance or Complaint from user just check \
if user has forgot to enter any important info about his grivance \
then ask for that information for example his name, location, bank name, transaction id and all the other relevant details for specific problem \
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
{{{{
"chat-reply": string \ reply tobe send to the user telling about all the informations which are required
"STATUS": string \ This should be "More info required"
}}}}
Here is a Grivance / Complaint from user : \
{input}"""
    # REMEMBER: give the response in list format with the different pieces of
    # information the person will require for solving the grievance
complaint_filable_template = """You are a very smart grivance complaint reciever which forwards the \
grivance to best suitable employee which can solve this problem \
You are great at analysing the grivances \
When you analyse Grivance or Complaint from user just check if he has \
entered all the required information about himself which would be required for \
solving the problem by the respective officer \
if user has entered all the information required for the complaint filing \
like name, transaction details, and all the relevant details related to the banking grivance
then respond with [COMPLAINT-FILABLE] no other words
Here is a Grivance / Complaint from user : \
{input}"""
prompt_infos = [
{
"name": "UNFILABLE",
"description": "if info like name, place, transaction details, account details and all the other details relatedd to the user query are not provided by the user ",
"prompt_template": unfilable_template
},
{
"name": "FILABLE",
"description": "if the user has given a complete description about the grivance he is having or facing",
"prompt_template": complaint_filable_template
}
]
from langchain.chains.router import MultiPromptChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.prompts import PromptTemplate
llm = ChatOpenAI(temperature=0.3)
destination_chains = {}
for p_info in prompt_infos:
name = p_info["name"]
prompt_template = p_info["prompt_template"]
prompt = ChatPromptTemplate.from_template(template=prompt_template)
chain = LLMChain(llm=llm, prompt=prompt)
destination_chains[name] = chain
destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
destinations_str = "\n".join(destinations)
default_prompt = ChatPromptTemplate.from_template("{input}")
default_chain = LLMChain(llm=llm, prompt=default_prompt)
MULTI_PROMPT_ROUTER_TEMPLATE = """Given a raw text input to a \
language model select the model prompt best suited for the input. \
You will be given the names of the available prompts and a \
description of what the prompt is best suited for. \
You may also revise the original input if you think that revising\
it will ultimately lead to a better response from the language model.
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
```json
{{{{
"destination": string \ name of the prompt to use
"next_inputs": string \ a potentially modified version of the original input with all the correct facts
}}}}
```
REMEMBER: "destination" MUST be one of the candidate prompt \
names specified below or can be "IRRELEVANT" if the input is not\
well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input \
if you don't think any modifications are needed.
<< CANDIDATE PROMPTS >>
{destinations}
<< INPUT >>
{{input}}
<< OUTPUT (remember to include the ```json)>>"""
# or "DEFAULT" this was added after the term prompt to use
# OR it can be "DEFAULT" was addedin remember section
router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
destinations=destinations_str
)
router_prompt = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(),
)
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
# memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=100)
chain = MultiPromptChain(router_chain=router_chain,
destination_chains=destination_chains,
default_chain=default_chain,
verbose=False
)
response = chain.run(user_query)
ai_chat.append(response)
print(response)
# creating a chat summary
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
llm = ChatOpenAI(temperature=0.3)
prompt = ChatPromptTemplate.from_template(
"Make a chat summary : \
keeping all the facts correct and without missing any important information \
here is the complete chat : {chat}?"
)
chain = LLMChain(llm=llm, prompt=prompt)
chat = chat_summary + user_chat[-1] + ai_chat[-1]
chat_summary = chain.run(chat)
# memory.save_context({"input": f"{user_query}"},
# {"output": f"{response}"})
# print(memory.buffer)
# response = complaint_completion(user_query)
if response == "[COMPLAINT-FILABLE]":
status = "done"
print("moving it to create a vector search prompt")
vector_search()
elif response == "IRRELEVANT":
print("not sure what you are talking about")
status = "new-chat"
else:
print("\n oopes trying to get more data")
status = "more-data-req"
dictionary_response = json.loads(response)
user_query = input("\nuser : "+dictionary_response["chat-reply"])
complaint_completion(chat_summary + user_query)
return response
complaint_completion(user_query)
| [
"{input}",
"Given a raw text input to a language model select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.\n \n << FORMATTING >>\n Return a markdown code snippet with a JSON object formatted to look like:\n ```json\n {{{{\n \"destination\": string \\ name of the prompt to use \n \"next_inputs\": string \\ a potentially modified version of the original input with all the correct facts\n }}}}\n ```\n \n REMEMBER: \"destination\" MUST be one of the candidate prompt names specified below or can be \"IRRELEVANT\" if the input is not well suited for any of the candidate prompts. \n REMEMBER: \"next_inputs\" can just be the original input if you don't think any modifications are needed.\n \n << CANDIDATE PROMPTS >>\n {destinations}\n \n << INPUT >>\n {{input}}\n \n << OUTPUT (remember to include the ```json)>>",
"You are a grivance complaint registration bot now create a best suitalbe json object with following fields : 'vector-search' = this key will contain a string which would be best suitalbe for vector secrch for the given problem statement from the user try to make it concise and accurate here is the user detailed problem : {summary}",
"[{'name': 'UNFILABLE', 'description': 'if info like name, place, transaction details, account details and all the other details relatedd to the user query are not provided by the user ', 'prompt_template': PLACEHOLDER}, {'name': 'FILABLE', 'description': 'if the user has given a complete description about the grivance he is having or facing', 'prompt_template': PLACEHOLDER}]",
"You are a very smart grivance complaint reciever which forwards the grivance to best suitable employee which can solve this problem You are great at analysing the grivances \n \n When you analyse Grivance or Complaint from user just check if user has forgot to enter any important info about his grivance then ask for that information for example his name, location, bank name, transaction id and all the other relevant details for specific problem \n\n << FORMATTING >>\n Return a markdown code snippet with a JSON object formatted to look like:\n \n {{{{\n \"chat-reply\": string \\ reply tobe send to the user telling about all the informations which are required\n \"STATUS\": string \\ This should be \"More info required\"\n }}}}\n \n \n Here is a Grivance / Complaint from user : {input}",
"Make a chat summary : keeping all the facts correct and without missing any important information here is the complete chat : {chat}?",
"You are a very smart grivance complaint reciever which forwards the grivance to best suitable employee which can solve this problem You are great at analysing the grivances When you analyse Grivance or Complaint from user just check if he has entered all the required information about himself which would be required for solving the problem by the respective officer \n if user has entered all the information required for the complaint filing like name, transaction details, and all the relevant details related to the banking grivance \n then respond with [COMPLAINT-FILABLE] no other words\n \n Here is a Grivance / Complaint from user : {input}",
"input",
"prompt_template"
] |
2024-01-10 | thatguylah/woaiai_hackathon | botv2.py | """
Image-generating Bot that assists with image design ideas and image generation based on its own text-to-image prompt
Usage:
Sequence of automated questions to answer and then generating an image based on suggested image design.
Press Ctrl-C on the command line or send a signal to the process to stop the bot.
"""
import openai
import logging
from dotenv import dotenv_values
import argparse
import os
from api.conversation import (
start_command,
cancel_command,
get_image_purpose,
select_theme,
select_image_design,
get_image_prompt,
generate_image,
)
from api.utils import run_in_threadpool_decorator
from api.outpainting import outpainting_handler
from telegram import __version__ as TG_VER
from telegram import Update
from telegram.ext import (
Application,
CommandHandler,
ContextTypes,
MessageHandler,
filters,
ConversationHandler,
PicklePersistence,
)
try:
from telegram import __version_info__
except ImportError:
__version_info__ = (0, 0, 0, 0, 0) # type: ignore[assignment]
if __version_info__ < (20, 0, 0, "alpha", 1):
raise RuntimeError(
f"This example is not compatible with your current PTB version {TG_VER}. To view the "
f"{TG_VER} version of this example, "
f"visit https://docs.python-telegram-bot.org/en/v{TG_VER}/examples.html"
)
# get config
config = dotenv_values(".env")
# get API tokens
HF_TOKEN = config["HF_API_KEY"]
openai.api_key = config["OPENAI_API_KEY"]
# TELEBOT_TOKEN = config['TELEBOT_TOKEN']
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(processName)s - %(threadName)s - [%(thread)d] - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
# set third-party library log levels; setLevel returns None, so no logger references are kept
logging.getLogger("huggingface_hub").setLevel(logging.INFO)
logging.getLogger("telegram.bot").setLevel(logging.INFO)
logging.getLogger("telegram.ext").setLevel(logging.INFO)
logging.getLogger("openai").setLevel(logging.INFO)
# assign variable name for each integer in sequence for easy tracking of conversation
(
IMAGE_TYPE,
IMAGE_PURPOSE,
SELECTED_THEME,
SELECTED_IMAGE_DESIGN,
TEXT_TO_IMAGE_PROMPT,
GENERATED_IMAGE,
) = range(6)
async def pong(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""serves a health check status for debugging
Args:
        update (Update): the incoming Telegram update
        context (ContextTypes.DEFAULT_TYPE): the callback context for this handler
Returns:
Pong to the user
"""
await update.message.reply_text("Pong")
# function to start the bot
def main(dev_mode) -> None:
if dev_mode:
TELEBOT_TOKEN = config["TELEBOT_DEV_TOKEN"]
else:
TELEBOT_TOKEN = config["TELEBOT_TOKEN"]
# create folders for outputs
if not os.path.exists("data"):
os.mkdir("data")
if not os.path.exists("data/image_output"):
os.mkdir("data/image_output")
# configure chatbot's persistence
persistence = PicklePersistence(filepath="data/conversation")
    # create the Application and pass the telebot token to it
application = (
Application.builder()
.token(TELEBOT_TOKEN)
.concurrent_updates(True)
.persistence(persistence)
.build()
)
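    # PicklePersistence keeps conversation state on disk across restarts, and
    # concurrent_updates(True) lets the bot handle several incoming updates concurrently.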
    # Conversation Handler with the states IMAGE_TYPE, IMAGE_PURPOSE, SELECTED_THEME, SELECTED_IMAGE_DESIGN, TEXT_TO_IMAGE_PROMPT and GENERATED_IMAGE
conv_handler = ConversationHandler(
entry_points=[CommandHandler("start", start_command)],
states={
IMAGE_TYPE: [
CommandHandler("Yes", start_command),
CommandHandler("No", cancel_command),
],
IMAGE_PURPOSE: [
MessageHandler(filters.TEXT, get_image_purpose, block=False)
],
SELECTED_THEME: [MessageHandler(filters.TEXT, select_theme, block=False)],
SELECTED_IMAGE_DESIGN: [
MessageHandler(filters.TEXT, select_image_design, block=False)
],
TEXT_TO_IMAGE_PROMPT: [
MessageHandler(filters.TEXT, get_image_prompt, block=False)
],
GENERATED_IMAGE: [
MessageHandler(filters.TEXT, generate_image, block=False)
],
},
fallbacks=[CommandHandler("cancel", cancel_command)],
allow_reentry=True, # allow user to enter back any state of the ConversationHandler
name="ImageGeneratingBot",
persistent=True,
block=False,
)
ping_handler = CommandHandler("ping", pong, block=False)
# add conversation handler to application
application.add_handler(conv_handler)
application.add_handler(ping_handler)
application.add_handler(outpainting_handler)
application.run_polling(allowed_updates=Update.ALL_TYPES)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-DEV", "--dev", action="store_true", help="Run with local Tele API token"
)
args = parser.parse_args()
main(args.dev)
| [] |
2024-01-10 | thatguylah/woaiai_hackathon | botv3.py | """
Image-generating Bot that assists with image design ideas and image generation based on its own text-to-image prompt
Usage:
1. Sequence of automated questions to answer and then generating an image based on suggested image design.
2. Editing existing images to remove any object (inpainting) or extending out the image (outpainting)
Press Ctrl-C on the command line or send a signal to the process to stop the bot.
"""
import openai
import logging
from dotenv import dotenv_values
import requests
import json
import argparse
import os
from huggingface_hub import InferenceClient
from api.conversation import *
from api.inpainting import inpainting_handler
from api.outpainting import outpainting_handler
from telegram import __version__ as TG_VER
from telegram import (
ForceReply,
Update,
ReplyKeyboardMarkup,
InlineKeyboardMarkup,
InlineKeyboardButton,
ReplyKeyboardRemove,
)
from telegram.ext import (
Application,
CommandHandler,
ContextTypes,
MessageHandler,
filters,
ConversationHandler,
PicklePersistence,
)
try:
from telegram import __version_info__
except ImportError:
__version_info__ = (0, 0, 0, 0, 0) # type: ignore[assignment]
if __version_info__ < (20, 0, 0, "alpha", 1):
raise RuntimeError(
f"This example is not compatible with your current PTB version {TG_VER}. To view the "
f"{TG_VER} version of this example, "
f"visit https://docs.python-telegram-bot.org/en/v{TG_VER}/examples.html"
)
# get config
config = dotenv_values(".env")
# get API tokens
HF_TOKEN = config["HF_API_KEY"]
openai.api_key = config["OPENAI_API_KEY"]
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(processName)s - %(threadName)s - [%(thread)d] - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
# set third-party library log levels; setLevel returns None, so no logger references are kept
logging.getLogger("huggingface_hub").setLevel(logging.DEBUG)
logging.getLogger("telegram.bot").setLevel(logging.DEBUG)
logging.getLogger("telegram.ext").setLevel(logging.DEBUG)
# assign variable name for each integer in sequence for easy tracking of conversation
(
RESET_CHAT,
VALIDATE_USER,
USER_COMPANY,
EDIT_COMPANY,
IMAGE_TYPE,
IMAGE_PURPOSE,
SELECT_THEME,
SELECT_IMAGE_DESIGN,
CUSTOM_IMAGE_PROMPT,
GENERATE_PROMPT_AND_IMAGE,
GENERATE_IMAGE,
) = range(11)
# list of selected government agencies
lst_govt_agencies = [
"Housing Development Board (HDB)",
"Government Technology Agency (GovTech)",
"Others",
]
# function to check bot's health status (CommandHandler type)
async def pong(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""serves a health check status for debugging
Args:
        update (Update): the incoming Telegram update
        context (ContextTypes.DEFAULT_TYPE): the callback context for this handler
Returns:
Pong to the user
"""
await update.message.reply_text("Pong")
# function to start the bot
def main(dev_mode) -> None:
if dev_mode:
TELEBOT_TOKEN = config["TELEBOT_DEV_TOKEN"]
else:
TELEBOT_TOKEN = config["TELEBOT_TOKEN"]
# create folders for outputs
if not os.path.exists("data"):
os.mkdir("data")
if not os.path.exists("data/image_output"):
os.mkdir("data/image_output")
# configure chatbot's persistence
persistence = PicklePersistence(filepath="data/conversation")
    # create the Application and pass the telebot token to it
application = (
Application.builder().token(TELEBOT_TOKEN).persistence(persistence).build()
)
    # Conversation Handler covering the states defined above (RESET_CHAT through GENERATE_IMAGE)
conv_handler = ConversationHandler(
entry_points=[
CommandHandler("start", start_command),
CommandHandler("editcompany", edit_company_command),
CommandHandler("choosetheme", get_previous_themes),
CommandHandler("choosedesign", get_previous_image_designs),
inpainting_handler,
outpainting_handler,
],
states={
RESET_CHAT: [
MessageHandler(
filters.Regex("(Generate Image Again)"), generate_image, block=False
),
MessageHandler(
filters.Regex("(Generate New Image: Step-by-step Process)"),
validate_user,
block=False,
),
MessageHandler(
filters.Regex("(Generate New Image: Use Custom Prompt)"),
get_user_custom_image_prompt,
block=False,
),
MessageHandler(
filters.Regex("(Edit Existing Image)"), validate_user, block=False
),
],
VALIDATE_USER: [
MessageHandler(
filters.TEXT
& ~filters.Regex("(Generate Image: Use Custom Prompt)")
& ~filters.COMMAND,
validate_user,
block=False,
),
MessageHandler(
filters.Regex("(Generate Image: Use Custom Prompt)"),
get_user_custom_image_prompt,
block=False,
),
],
USER_COMPANY: [
MessageHandler(
filters.TEXT
& ~filters.Regex(
"(Edit Existing Image|Generate Image: Use Custom Prompt|Yes|No)"
)
& ~filters.COMMAND,
get_user_company,
block=False,
),
MessageHandler(
filters.Regex("(Edit Existing Image)"), validate_user, block=False
),
MessageHandler(filters.Regex("(Yes)"), get_user_company, block=False),
MessageHandler(filters.Regex("(No)"), validate_user, block=False),
MessageHandler(
filters.Regex("(Generate Image: Use Custom Prompt)"),
get_user_custom_image_prompt,
block=False,
),
],
IMAGE_TYPE: [
MessageHandler(
filters.Regex("(Continue)")
& ~filters.Regex("(/quit|Edit Company Name)"),
get_image_type,
block=False,
),
MessageHandler(
filters.Regex("(Edit Company Name)"),
edit_company_command,
block=False,
),
],
IMAGE_PURPOSE: [
MessageHandler(
filters.Regex("(Poster|Realistic Photo|Illustration)"),
get_image_purpose,
block=False,
)
],
SELECT_THEME: [
MessageHandler(filters.TEXT & ~filters.COMMAND, get_theme, block=False)
],
SELECT_IMAGE_DESIGN: [
MessageHandler(
filters.TEXT
& ~filters.Regex("(Propose other themes|Write own theme)")
& ~filters.COMMAND,
select_image_design,
block=True,
),
MessageHandler(
filters.Regex("(Propose other themes)"), get_theme, block=False
),
MessageHandler(
filters.Regex("(Write own theme)"),
get_user_custom_theme,
block=False,
),
],
CUSTOM_IMAGE_PROMPT: [
MessageHandler(
filters.Regex("(Generate Image: Use Custom Prompt|Continue)"),
get_user_custom_image_prompt,
block=False,
)
],
GENERATE_PROMPT_AND_IMAGE: [
MessageHandler(
filters.TEXT
& ~filters.Regex(
"(Propose other image designs|Write own image design)"
)
& ~filters.COMMAND,
generate_prompt_and_image,
block=False,
),
MessageHandler(
filters.Regex("(Propose other image designs)"),
select_image_design,
block=False,
),
MessageHandler(
filters.Regex("(Write own image design)"),
get_user_custom_image_design,
block=False,
),
],
GENERATE_IMAGE: [
MessageHandler(
filters.TEXT & ~filters.COMMAND, generate_image, block=False
)
],
},
fallbacks=[CommandHandler("quit", quit_command)],
allow_reentry=True, # allow user to enter back any state of the ConversationHandler
name="ImageGeneratingBot",
persistent=True,
block=False,
)
# handler to check bot's health status
ping_handler = CommandHandler("ping", pong, block=False)
# add handlers to application
application.add_handler(conv_handler)
application.add_handler(ping_handler)
application.run_polling(allowed_updates=Update.ALL_TYPES)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-DEV", "--dev", action="store_true", help="Run with local Tele API token"
)
args = parser.parse_args()
main(args.dev)
| [] |
2024-01-10 | thatguylah/woaiai_hackathon | api~outpainting.py | ## This function is used as an entry point to a conversation handler in telegram bot.
## It is called when the command /outpainting is issued by the user.
## It then receives an image from the user, whilst rejecting any invalid messages (non images)
## It then stores that image in an s3 bucket in aws and returns a message to the user.
## The conversation handler then continues and prompts the user for a second image, again to be stored in s3.
## The conversation handler then calls the outpainting function, which is left to be defined for now.
import openai
import logging
from dotenv import dotenv_values
import json
import boto3
import unicodedata
from datetime import datetime
import io
from .utils import run_in_threadpool_decorator
from telegram import __version__ as TG_VER
from telegram.ext import (
ContextTypes,
ConversationHandler,
CommandHandler,
MessageHandler,
filters,
)
from telegram import (
Update,
ReplyKeyboardMarkup,
ReplyKeyboardRemove,
)
try:
from telegram import __version_info__
except ImportError:
__version_info__ = (0, 0, 0, 0, 0) # type: ignore[assignment]
if __version_info__ < (20, 0, 0, "alpha", 1):
raise RuntimeError(
f"This example is not compatible with your current PTB version {TG_VER}. To view the "
f"{TG_VER} version of this example, "
f"visit https://docs.python-telegram-bot.org/en/v{TG_VER}/examples.html"
)
# get config
config = dotenv_values(".env")
# get API tokens
HF_TOKEN = config["HF_API_KEY"]
openai.api_key = config["OPENAI_API_KEY"]
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(processName)s - %(threadName)s - [%(thread)d] - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
UPLOAD_IMAGE, PROCESS_IMAGE = range(13, 15)
async def outpainting_process_start(update: Update, context: ContextTypes):
context.user_data["editing_image_job"] = {
"job_type": "outpainting",
"base_image_s3_key": None,
"outpaint_direction": None,
}
buttons_lst = [["Left"], ["Right"], ["Top"], ["Bottom"]]
output_text = "Hi! You have triggered an /outpainting workflow.\n\nWhich direction would you like to outpaint / expand your image?\nSelect an option below.\n\nSend /cancel to exit the outpainting workflow."
# ask user to select one of the options
await update.message.reply_html(
f"{output_text}",
reply_markup=ReplyKeyboardMarkup(
buttons_lst, resize_keyboard=True, one_time_keyboard=True
),
)
return UPLOAD_IMAGE
async def outpainting_process_upload_image(update: Update, context: ContextTypes):
selected_direction = update.message.text
logger.log(logging.INFO, f"selected_direction: {selected_direction}")
context.user_data["editing_image_job"][
"outpaint_direction"
] = selected_direction.lower()
await update.message.reply_text(
"Upload the image you would like to outpaint / expand and type out what you would like the expanded regions to contain in the caption (e.g., purple skies, blue background).\n\nSend /cancel to exit the outpainting workflow."
)
return PROCESS_IMAGE
async def outpainting_process_terminate(update: Update, context: ContextTypes):
del context.user_data["editing_image_job"]
await update.message.reply_text(
"You have terminated the outpainting workflow.\n\nPlease send /outpainting to start again or send /start for a new conversation."
)
return ConversationHandler.END
class ImageProcessor:
def __init__(self) -> None:
# Start s3 and sns clients
self.s3_client = boto3.client("s3")
self.sqs_client = boto3.client("sqs", region_name="ap-southeast-1")
self.QueueUrl = QUEUE_URL = config["SQS_URL"]
# self.base_image_s3_key = None
# self.mask_image_s3_key = None
self.bucket_name = BUCKET_NAME = config["BUCKET_NAME"]
self.state = None
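    # upload_to_s3 and put_to_sqs below wrap blocking boto3 calls; run_in_threadpool_decorator
    # (from api.utils) offloads them so the async Telegram handlers are not blocked.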
@run_in_threadpool_decorator(name="aws_io")
def upload_to_s3(self, file_stream, BUCKET_NAME, s3_key):
response = self.s3_client.upload_fileobj(file_stream, self.bucket_name, s3_key)
logger.log(logging.INFO, f"response: {response}")
return 0
@run_in_threadpool_decorator(name="aws_io")
def put_to_sqs(self, MessageBody):
MessageBody = json.dumps(MessageBody)
response = self.sqs_client.send_message(
QueueUrl=self.QueueUrl, MessageBody=MessageBody
)
logger.log(logging.INFO, f"response:{response}")
return 0
async def outpainting_process_image(self, update: Update, context: ContextTypes):
# self.state = ConversationHandler.END
update_as_dict = update.to_dict()
update_as_json = json.dumps(update_as_dict)
logger.log(logging.INFO, f"update_as_json: {update_as_json}")
if (
update.message.chat.username is None
): ## User does not have username. @handle on tele.
username = update.message.from_user.first_name
else:
username = update.message.chat.username
# clean_username = unicodedata.name(username)
clean_username = username
if (
update.message.photo
        ): # User uploaded an image. Put the image into the s3 bucket. Put update_as_json to the SQS queue
# Initialize timestamp for uniqueness and file stream buffer
timestamp_str = datetime.now().strftime("%Y%m%d%H%M%S")
file_stream = io.BytesIO()
# Get file name and file id from telegram update
file_id = update.message.photo[-1].file_id
file_name = f"{file_id}{timestamp_str}.jpg"
file = await update.message.photo[-1].get_file()
# Download file to file stream buffer
await file.download_to_memory(out=file_stream)
file_stream.seek(0) # Reset file stream buffer pointer to start of buffer
s3_key = f"input/outpaint-image/{clean_username}/{file_name}"
await self.upload_to_s3(file_stream, self.bucket_name, s3_key)
# self.mask_image_s3_key = s3_key
context.user_data["editing_image_job"]["base_image_s3_key"] = s3_key
try:
MessageBody = update_as_dict
MessageBody["editing_image_job"] = context.user_data[
"editing_image_job"
]
await self.put_to_sqs(MessageBody)
await update.message.reply_text(
"Your image has been received!🙂 Your request is currently being processed, the image will be sent to you once it is completed.\n\nThis conversation has ended. Please send /outpainting to process a new image or send /start for a new conversation."
)
return ConversationHandler.END
except Exception as e:
logger.log(logging.ERROR, f"Exception caught here:{e}")
await update.message.reply_text(
"Sorry, your job has failed to submit, please try again or contact woaiai.\n\nSend /outpainting to process a new image or /start for a new conversation."
)
return ConversationHandler.END
else:
await update.message.reply_text(
"Please upload an image 🙂\n\nSend /cancel to stop the outpainting workflow."
)
return PROCESS_IMAGE
image_processor_instance = ImageProcessor()
outpainting_handler = ConversationHandler(
entry_points=[CommandHandler("outpainting", outpainting_process_start)],
states={
UPLOAD_IMAGE: [
MessageHandler(
filters.Regex("(Left|Right|Top|Bottom)"),
outpainting_process_upload_image,
block=False,
)
],
PROCESS_IMAGE: [
MessageHandler(
filters.PHOTO,
image_processor_instance.outpainting_process_image,
block=False,
)
],
},
name="OutpaintingBot",
persistent=True,
block=False,
fallbacks=[
CommandHandler("cancel", outpainting_process_terminate),
CommandHandler("outpainting", outpainting_process_start),
],
)
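# Usage note: this handler is registered on the Application elsewhere, e.g.
#   application.add_handler(outpainting_handler)
# as done in botv2.py (botv3.py nests it as a ConversationHandler entry point instead).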
| [] |
2024-01-10 | thatguylah/woaiai_hackathon | api~inpainting.py | ## This function is used as an entry point to a conversation handler in telegram bot.
## It is called when the command /inpainting is issued by the user.
## It then receives an image from the user, whilst rejecting any invalid messages (non images)
## It then stores that image in an s3 bucket in aws and returns a message to the user.
## The conversation handler then continues and prompts the user for a second image, again to be stored in s3.
## The conversation handler then calls the inpainting function, which is left to be defined for now.
import openai
import logging
from dotenv import dotenv_values
import json
import boto3
import unicodedata
from datetime import datetime
import io
from .utils import run_in_threadpool_decorator
from telegram import __version__ as TG_VER
from telegram import Update
from telegram.ext import (
ContextTypes,
ConversationHandler,
CommandHandler,
MessageHandler,
filters,
)
try:
from telegram import __version_info__
except ImportError:
__version_info__ = (0, 0, 0, 0, 0) # type: ignore[assignment]
if __version_info__ < (20, 0, 0, "alpha", 1):
raise RuntimeError(
f"This example is not compatible with your current PTB version {TG_VER}. To view the "
f"{TG_VER} version of this example, "
f"visit https://docs.python-telegram-bot.org/en/v{TG_VER}/examples.html"
)
# get config
config = dotenv_values(".env")
# get API tokens
HF_TOKEN = config["HF_API_KEY"]
openai.api_key = config["OPENAI_API_KEY"]
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(processName)s - %(threadName)s - [%(thread)d] - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
(STAGE_0, STAGE_1) = range(11, 13)
async def inpainting_process_start(update: Update, context: ContextTypes):
context.user_data["inpainting_image_job"] = {
"job_type": "inpainting",
"base_image_s3_key": None,
"mask_image_s3_key": None,
}
await update.message.reply_text(
"Hi! You have triggered an /inpainting workflow, please follow the instructions below:\n\n1. Upload a base image you would like to inpaint\n2. Once base image is received, instructions will be provided to upload a masked image of the same base image\n\nSend /cancel to exit the inpainting workflow."
)
return STAGE_0
async def inpainting_process_terminate(update: Update, context: ContextTypes):
del context.user_data["inpainting_image_job"]
await update.message.reply_text(
"You have terminated the inpainting workflow.\n\nPlease send /inpainting to start again or send /start for a new conversation."
)
return ConversationHandler.END
class ImageProcessor:
def __init__(self) -> None:
# Start s3 and sns clients
self.s3_client = boto3.client("s3")
self.sqs_client = boto3.client("sqs", region_name="ap-southeast-1")
self.QueueUrl = QUEUE_URL = config["SQS_URL"]
# self.base_image_s3_key = None
# self.mask_image_s3_key = None
self.bucket_name = BUCKET_NAME = config["BUCKET_NAME"]
self.state = None
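    # Base and mask images are uploaded under input/base-image/<username>/ and
    # input/mask-image/<username>/; both S3 keys travel with the SQS message so that
    # whatever consumes the queue can run the actual inpainting job.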
@run_in_threadpool_decorator(name="aws_io")
def upload_to_s3(self, file_stream, BUCKET_NAME, s3_key):
response = self.s3_client.upload_fileobj(file_stream, self.bucket_name, s3_key)
logger.log(logging.INFO, f"response: {response}")
return 0
@run_in_threadpool_decorator(name="aws_io")
def put_to_sqs(self, MessageBody):
MessageBody = json.dumps(MessageBody)
response = self.sqs_client.send_message(
QueueUrl=self.QueueUrl, MessageBody=MessageBody
)
logger.log(logging.INFO, f"response:{response}")
return 0
async def inpainting_process_base_image(
self, update: Update, context: ContextTypes
):
update_as_dict = update.to_dict()
update_as_json = json.dumps(update_as_dict)
logger.log(logging.INFO, f"update_as_json: {update_as_json}")
if (
update.message.chat.username is None
): ## User does not have username. @handle on tele.
username = update.message.from_user.first_name
else:
username = update.message.chat.username
# clean_username = unicodedata.name(username)
clean_username = username
if (
update.message.photo
        ): # User uploaded an image. Put the image into the s3 bucket. Put update_as_json to the SQS queue
# Initialize timestamp for uniqueness and file stream buffer
timestamp_str = datetime.now().strftime("%Y%m%d%H%M%S")
file_stream = io.BytesIO()
# Get file name and file id from telegram update
file_id = update.message.photo[-1].file_id
file_name = f"{file_id}{timestamp_str}.jpg"
file = await update.message.photo[-1].get_file()
# Download file to file stream buffer
await file.download_to_memory(out=file_stream)
file_stream.seek(0) # Reset file stream buffer pointer to start of buffer
s3_key = f"input/base-image/{clean_username}/{file_name}"
await self.upload_to_s3(file_stream, self.bucket_name, s3_key)
else:
await update.message.reply_text(
"Please upload an image 🙂\n\nSend /cancel to exit the inpainting workflow."
)
return STAGE_0
# self.base_image_s3_key = s3_key
context.user_data["inpainting_image_job"]["base_image_s3_key"] = s3_key
await update.message.reply_text(
"Your base image has been received!🙂 Please use telegram's inbuilt brush feature to brush over the portion you would like to change.\n\nOptionally, type out a caption to guide the removal based on what you'd like the masked region to be replaced with (Eg. 'Blue Background', 'A tree')\n\nSend /cancel to exit the inpainting workflow."
)
return STAGE_1
async def inpainting_process_mask_image(
self, update: Update, context: ContextTypes
):
# self.state = ConversationHandler.END
update_as_dict = update.to_dict()
update_as_json = json.dumps(update_as_dict)
logger.log(logging.INFO, f"update_as_json: {update_as_json}")
if (
update.message.chat.username is None
): ## User does not have username. @handle on tele.
username = update.message.from_user.first_name
else:
username = update.message.chat.username
# clean_username = unicodedata.name(username)
clean_username = username
if (
update.message.photo
        ): # User uploaded an image. Put the image into the s3 bucket. Put update_as_json to the SQS queue
# Initialize timestamp for uniqueness and file stream buffer
timestamp_str = datetime.now().strftime("%Y%m%d%H%M%S")
file_stream = io.BytesIO()
# Get file name and file id from telegram update
file_id = update.message.photo[-1].file_id
file_name = f"{file_id}{timestamp_str}.jpg"
file = await update.message.photo[-1].get_file()
# Download file to file stream buffer
await file.download_to_memory(out=file_stream)
file_stream.seek(0) # Reset file stream buffer pointer to start of buffer
s3_key = f"input/mask-image/{clean_username}/{file_name}"
await self.upload_to_s3(file_stream, self.bucket_name, s3_key)
# self.mask_image_s3_key = s3_key
context.user_data["inpainting_image_job"]["mask_image_s3_key"] = s3_key
try:
MessageBody = update_as_dict
MessageBody["editing_image_job"] = context.user_data[
"inpainting_image_job"
]
await self.put_to_sqs(MessageBody)
await update.message.reply_text(
"Your masked image has been received!🙂 Your request is currently being processed, the image will be sent to you once it is completed.\n\nThis conversation has ended. Please send /inpainting to process a new image or send /start for a new conversation."
)
return ConversationHandler.END
except Exception as e:
logger.log(logging.ERROR, f"Exception caught here:{e}")
await update.message.reply_text(
"Sorry, your job has failed to submit, please try again or contact woaiai.\n\nSend /inpainting to process a new image or /start for a new conversation."
)
return ConversationHandler.END
else:
await update.message.reply_text(
"Please upload an image 🙂\n\nSend /cancel to stop the inpainting workflow."
)
return STAGE_1
image_processor_instance = ImageProcessor()
inpainting_handler = ConversationHandler(
entry_points=[CommandHandler("inpainting", inpainting_process_start)],
states={
STAGE_0: [
MessageHandler(
filters.PHOTO,
image_processor_instance.inpainting_process_base_image,
block=False,
)
],
STAGE_1: [
MessageHandler(
filters.PHOTO,
image_processor_instance.inpainting_process_mask_image,
block=False,
)
],
},
name="InpaintingBot",
persistent=True,
block=False,
fallbacks=[
CommandHandler("cancel", inpainting_process_terminate),
CommandHandler("inpainting", inpainting_process_start),
],
)
| [] |
2024-01-10 | krishna0306/ai_playground | langchain~learn_google.py | from llama_index import SimpleDirectoryReader, VectorStoreIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
from pprint import pprint; import IPython
import sys
import os
from pathlib import Path
# Check if the environment variable exists
if "OPENAIKEY" in os.environ:
# If it exists, get its value into a Python variable
api_key = os.environ["OPENAIKEY"]
else:
raise ValueError("Please set the OPENAIKEY environment variable")
os.environ["OPENAI_API_KEY"] = api_key
from llama_index import VectorStoreIndex, download_loader
from llama_hub.tools.google_search.base import GoogleSearchToolSpec
from llama_index.agent import OpenAIAgent
# The original file hard-coded a Google API key and engine ID here; read them from the
# environment instead (the variable names below are placeholders, not from the original).
tool_spec = GoogleSearchToolSpec(key=os.environ["GOOGLE_API_KEY"], engine=os.environ["GOOGLE_CSE_ID"])
pprint(tool_spec.google_search("weather today"))
agent = OpenAIAgent.from_tools(tool_spec.to_tool_list())
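# Note: the agent is only given the Google search tool above, so the calendar requests
# below cannot actually create or read events; they can only be answered via search.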
agent.chat("Please create an event on aug 13th, 2023 at 5pm for 1 hour and invite [email protected] to discuss tax laws")
r=agent.chat('What is on my calendar for today?')
pprint(r)
# index = VectorStoreIndex.from_documents([document])
# query_engine = index.as_query_engine()
# query_engine.query('how vulnerable are security protocols?')
IPython.embed()
| [
"What is on my calendar for today?",
"Please create an event on aug 13th, 2023 at 5pm for 1 hour and invite [email protected] to discuss tax laws"
] |
2024-01-10 | krishna0306/ai_playground | langchain~learn_pdf.py | from llama_index import SimpleDirectoryReader, VectorStoreIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
from pprint import pprint; import IPython
import sys
import os
from pathlib import Path
# Check if the environment variable exists
if "OPENAIKEY" in os.environ:
# If it exists, get its value into a Python variable
api_key = os.environ["OPENAIKEY"]
else:
raise ValueError("Please set the OPENAIKEY environment variable")
os.environ["OPENAI_API_KEY"] = api_key
from llama_index import VectorStoreIndex, download_loader
ImageReader = download_loader("ImageReader")
imageLoader = ImageReader(text_type="plain_text")
FlatPdfReader = download_loader("FlatPdfReader")
pdfLoader = FlatPdfReader(image_loader=imageLoader)
document = pdfLoader.load_data(file=Path('/Users/despiegk/Downloads/Threefold_tech_architecture_may2023_v1.2.pdf'))
# index = VectorStoreIndex.from_documents([document])
# query_engine = index.as_query_engine()
# query_engine.query('how vulnerable are security protocols?')
IPython.embed()
| [] |
2024-01-10 | krishna0306/ai_playground | langchain~learn_md.py | from llama_index import SimpleDirectoryReader, VectorStoreIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
from pprint import pprint; import IPython
import sys
import os
from pathlib import Path
# Check if the environment variable exists
if "OPENAIKEY" in os.environ:
# If it exists, get its value into a Python variable
api_key = os.environ["OPENAIKEY"]
else:
raise ValueError("Please set the OPENAIKEY environment variable")
os.environ["OPENAI_API_KEY"] = api_key
from llama_index import VectorStoreIndex, download_loader
MarkdownReader = download_loader("MarkdownReader")
loader = MarkdownReader()
documents = loader.load_data(file=Path('/Users/despiegk/code/github/despiegk/ai_playground/openai/readme.md'))
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
# query_engine.query('how vulnerable are security protocols?')
IPython.embed()
| [] |
2024-01-10 | NProkoptsev/gensim | gensim~test~test_coherencemodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import os
import tempfile
import unittest
from unittest import SkipTest
import multiprocessing as mp
import numpy as np
from gensim.corpora.dictionary import Dictionary
from gensim.matutils import argsort
from gensim.models.coherencemodel import CoherenceModel, BOOLEAN_DOCUMENT_BASED
from gensim.models.ldamodel import LdaModel
from gensim.models.wrappers import LdaMallet
from gensim.models.wrappers import LdaVowpalWabbit
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_models.tst')
class TestCoherenceModel(unittest.TestCase):
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
dictionary = Dictionary(texts)
@classmethod
def setUpClass(cls):
cls.corpus = [cls.dictionary.doc2bow(text) for text in cls.texts]
def setUp(self):
# Suppose given below are the topics which two different LdaModels come up with.
# `topics1` is clearly better as it has a clear distinction between system-human
# interaction and graphs. Hence both the coherence measures for `topics1` should be
# greater.
self.topics1 = [
['human', 'computer', 'system', 'interface'],
['graph', 'minors', 'trees', 'eps']
]
self.topics2 = [
['user', 'graph', 'minors', 'system'],
['time', 'graph', 'survey', 'minors']
]
self.ldamodel = LdaModel(
corpus=self.corpus, id2word=self.dictionary, num_topics=2,
passes=0, iterations=0
)
mallet_home = os.environ.get('MALLET_HOME', None)
self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None
if self.mallet_path:
self.malletmodel = LdaMallet(
mallet_path=self.mallet_path, corpus=self.corpus,
id2word=self.dictionary, num_topics=2, iterations=0
)
vw_path = os.environ.get('VOWPAL_WABBIT_PATH', None)
if not vw_path:
logging.info(
"Environment variable 'VOWPAL_WABBIT_PATH' not specified, skipping sanity checks for LDA Model"
)
self.vw_path = None
else:
self.vw_path = vw_path
self.vwmodel = LdaVowpalWabbit(
self.vw_path, corpus=self.corpus, id2word=self.dictionary,
num_topics=2, passes=0
)
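        # The Mallet and Vowpal Wabbit wrappers above are only built when MALLET_HOME /
        # VOWPAL_WABBIT_PATH are set; the corresponding tests raise SkipTest otherwise.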
def check_coherence_measure(self, coherence):
"""Check provided topic coherence algorithm on given topics"""
if coherence in BOOLEAN_DOCUMENT_BASED:
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence=coherence)
else:
kwargs = dict(texts=self.texts, dictionary=self.dictionary, coherence=coherence)
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm2 = CoherenceModel(topics=self.topics2, **kwargs)
self.assertGreater(cm1.get_coherence(), cm2.get_coherence())
def testUMass(self):
"""Test U_Mass topic coherence algorithm on given topics"""
self.check_coherence_measure('u_mass')
def testCv(self):
"""Test C_v topic coherence algorithm on given topics"""
self.check_coherence_measure('c_v')
def testCuci(self):
"""Test C_uci topic coherence algorithm on given topics"""
self.check_coherence_measure('c_uci')
def testCnpmi(self):
"""Test C_npmi topic coherence algorithm on given topics"""
self.check_coherence_measure('c_npmi')
def testUMassLdaModel(self):
"""Perform sanity check to see if u_mass coherence works with LDA Model"""
# Note that this is just a sanity check because LDA does not guarantee a better coherence
# value on the topics if iterations are increased. This can be seen here:
# https://gist.github.com/dsquareindia/60fd9ab65b673711c3fa00509287ddde
CoherenceModel(model=self.ldamodel, corpus=self.corpus, coherence='u_mass')
def testCvLdaModel(self):
"""Perform sanity check to see if c_v coherence works with LDA Model"""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_v')
def testCw2vLdaModel(self):
"""Perform sanity check to see if c_w2v coherence works with LDAModel."""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_w2v')
def testCuciLdaModel(self):
"""Perform sanity check to see if c_uci coherence works with LDA Model"""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_uci')
def testCnpmiLdaModel(self):
"""Perform sanity check to see if c_npmi coherence works with LDA Model"""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_npmi')
def testUMassMalletModel(self):
"""Perform sanity check to see if u_mass coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, corpus=self.corpus, coherence='u_mass')
def _check_for_mallet(self):
if not self.mallet_path:
raise SkipTest("Mallet not installed")
def testCvMalletModel(self):
"""Perform sanity check to see if c_v coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_v')
def testCw2vMalletModel(self):
"""Perform sanity check to see if c_w2v coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_w2v')
def testCuciMalletModel(self):
"""Perform sanity check to see if c_uci coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_uci')
def testCnpmiMalletModel(self):
"""Perform sanity check to see if c_npmi coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_npmi')
def testUMassVWModel(self):
"""Perform sanity check to see if u_mass coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, corpus=self.corpus, coherence='u_mass')
def _check_for_vw(self):
if not self.vw_path:
raise SkipTest("Vowpal Wabbit not installed")
def testCvVWModel(self):
"""Perform sanity check to see if c_v coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_v')
def testCw2vVWModel(self):
"""Perform sanity check to see if c_w2v coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_w2v')
def testCuciVWModel(self):
"""Perform sanity check to see if c_uci coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_uci')
def testCnpmiVWModel(self):
"""Perform sanity check to see if c_npmi coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_npmi')
def testErrors(self):
"""Test if errors are raised on bad input"""
# not providing dictionary
self.assertRaises(
ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
coherence='u_mass'
)
# not providing texts for c_v and instead providing corpus
self.assertRaises(
ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
dictionary=self.dictionary, coherence='c_v'
)
# not providing corpus or texts for u_mass
self.assertRaises(
ValueError, CoherenceModel, topics=self.topics1, dictionary=self.dictionary,
coherence='u_mass'
)
def testProcesses(self):
cpu = mp.cpu_count()
get_model = lambda p: CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass', processes=p,
)
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass',
)
self.assertEqual(model.processes, cpu - 1)
for p in range(-2, 1):
self.assertEqual(get_model(p).processes, cpu - 1)
for p in range(1, 4):
self.assertEqual(get_model(p).processes, p)
def testPersistence(self):
fname = testfile()
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testPersistenceAfterProbabilityEstimationUsingCorpus(self):
fname = testfile()
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model.estimate_probabilities()
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertIsNotNone(model2._accumulator)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testPersistenceAfterProbabilityEstimationUsingTexts(self):
fname = testfile()
model = CoherenceModel(
topics=self.topics1, texts=self.texts, dictionary=self.dictionary, coherence='c_v'
)
model.estimate_probabilities()
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertIsNotNone(model2._accumulator)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testAccumulatorCachingSameSizeTopics(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
accumulator = cm1._accumulator
self.assertIsNotNone(accumulator)
cm1.topics = self.topics1
self.assertEqual(accumulator, cm1._accumulator)
cm1.topics = self.topics2
self.assertEqual(None, cm1._accumulator)
def testAccumulatorCachingTopicSubsets(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
accumulator = cm1._accumulator
self.assertIsNotNone(accumulator)
cm1.topics = [t[:2] for t in self.topics1]
self.assertEqual(accumulator, cm1._accumulator)
cm1.topics = self.topics1
self.assertEqual(accumulator, cm1._accumulator)
def testAccumulatorCachingWithModelSetting(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
self.assertIsNotNone(cm1._accumulator)
cm1.model = self.ldamodel
topics = []
for topic in self.ldamodel.state.get_lambda():
bestn = argsort(topic, topn=cm1.topn, reverse=True)
topics.append(bestn)
self.assertTrue(np.array_equal(topics, cm1.topics))
self.assertIsNone(cm1._accumulator)
def testAccumulatorCachingWithTopnSettingGivenTopics(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, topn=5, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
self.assertIsNotNone(cm1._accumulator)
accumulator = cm1._accumulator
topics_before = cm1._topics
cm1.topn = 3
self.assertEqual(accumulator, cm1._accumulator)
self.assertEqual(3, len(cm1.topics[0]))
self.assertEqual(topics_before, cm1._topics)
# Topics should not have been truncated, so topn settings below 5 should work
cm1.topn = 4
self.assertEqual(accumulator, cm1._accumulator)
self.assertEqual(4, len(cm1.topics[0]))
self.assertEqual(topics_before, cm1._topics)
with self.assertRaises(ValueError):
cm1.topn = 6 # can't expand topics any further without model
def testAccumulatorCachingWithTopnSettingGivenModel(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, topn=5, coherence='u_mass')
cm1 = CoherenceModel(model=self.ldamodel, **kwargs)
cm1.estimate_probabilities()
self.assertIsNotNone(cm1._accumulator)
accumulator = cm1._accumulator
topics_before = cm1._topics
cm1.topn = 3
self.assertEqual(accumulator, cm1._accumulator)
self.assertEqual(3, len(cm1.topics[0]))
self.assertEqual(topics_before, cm1._topics)
cm1.topn = 6 # should be able to expand given the model
self.assertEqual(6, len(cm1.topics[0]))
def testCompareCoherenceForTopics(self):
topics = [self.topics1, self.topics2]
cm = CoherenceModel.for_topics(
topics, dictionary=self.dictionary, texts=self.texts, coherence='c_v')
self.assertIsNotNone(cm._accumulator)
# Accumulator should have all relevant IDs.
for topic_list in topics:
cm.topics = topic_list
self.assertIsNotNone(cm._accumulator)
(coherence_topics1, coherence1), (coherence_topics2, coherence2) = \
cm.compare_model_topics(topics)
self.assertAlmostEqual(np.mean(coherence_topics1), coherence1, 4)
self.assertAlmostEqual(np.mean(coherence_topics2), coherence2, 4)
self.assertGreater(coherence1, coherence2)
def testCompareCoherenceForModels(self):
models = [self.ldamodel, self.ldamodel]
cm = CoherenceModel.for_models(
models, dictionary=self.dictionary, texts=self.texts, coherence='c_v')
self.assertIsNotNone(cm._accumulator)
# Accumulator should have all relevant IDs.
for model in models:
cm.model = model
self.assertIsNotNone(cm._accumulator)
(coherence_topics1, coherence1), (coherence_topics2, coherence2) = \
cm.compare_models(models)
self.assertAlmostEqual(np.mean(coherence_topics1), coherence1, 4)
self.assertAlmostEqual(np.mean(coherence_topics2), coherence2, 4)
self.assertAlmostEqual(coherence1, coherence2, places=4)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| [] |
2024-01-10 | 079035/ARVO-079 | utils_GPT.py | from OpenAI_gpt import GPT4,GPT4_Preview
from pathlib import Path
from Diff import getDiff
REPORTS_DIR = Path("./Reports")
def oss_fuzz_get_patch(localID):
diff_file = getDiff(localID)
if diff_file == False:
return False
with open(diff_file, mode="rb") as f:
content = f.read()
try:
return content.decode()
except UnicodeDecodeError as e:
return content.decode('latin-1')
def oss_fuzz_vul_labeler(localID):
print(f"[+] Labeling case {localID}...")
diff = oss_fuzz_get_patch(localID)
if diff == False:
return False
message = f"Can you describe what vulnerability could be patched in following diff file?\nDiff information:\n```\n{diff}\n```"
res = GPT4().api_call(message)
if res == False:
res = GPT4_Preview().api_call(message)
print(res)
return res
if __name__ == "__main__":
res = oss_fuzz_vul_labeler(31585)
print(res) | [] |
2024-01-10 | 079035/ARVO-079 | fx.py | # Used to deal with the fix data (extracting, generating, and verifying patches)
import json
import time
import shutil
import OpenAI
import random
import re
import jsonlines
import starcoder
from Locator import *
from base58 import b58encode
from utils_GPT import *
from utils import *
from glob import glob
from unidiff import PatchSet
from reproducer import build_from_srcmap
from Diff import getVulCommit, getDiff
TEMP = 0.75
SAVE_MONEY = True
def get_test_dataset():
filter1 = "This model's maximum context length"
fs = glob("./PatchDesc/*")
res = []
for fname in fs:
with open(fname,'r') as f:
if filter1 not in f.read():
res.append(int(fname.split("/")[-1][:-4]))
return res
def _get_reports_id():
res = glob("./Reports/*")
return [int(x.split("/")[-1][:-5]) for x in res]
def get_all_single_mods(DEBUG=True):
reports = _get_reports_id()
res = []
for r in reports:
if(DEBUG):
print("[*] Testing localID: " + str(r))
diff_file = oss_fuzz_get_patch(r)
if diff_file == False:
continue
diff_content = str(diff_file).split("\n")
mod_cnt = 0
for z in range(len(diff_content)):
if(diff_content[z].startswith("@@ ")):
mod_cnt+=1
                if mod_cnt > 1: # stop once more than one hunk has been seen
break
if(DEBUG):
print("mod: "+str(mod_cnt))
if mod_cnt==1:
res.append(r) # localID
if(DEBUG):
print("[+] "+ str(r) +" added")
else:
if(DEBUG):
print("[-] "+ str(r) +" skipped")
print("[!] Done")
if(DEBUG):
print(res)
print("Total: "+str(len(res)))
return res
def get_vul_code(diff_file,repo_dir):
# Get the vul code
patch = PatchSet.from_filename(diff_file, encoding='utf-8')
mods = []
# parse the file
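    # mods pairs (source_file, hunk_start, hunk_length) with its PatchedFile; the second
    # loop below slices those lines out of the checked-out (vulnerable) sources.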
for _ in range(len(patch)):
target_set = patch[_]
if target_set.is_modified_file:
file_name = target_set.source_file[1:]
for mod in target_set:
tmp = (file_name,mod.source_start,mod.source_length)
mods.append([tmp,target_set])
vul_code = []
count = 0 # Counter for mod
for x,y in mods:
print(repo_dir/x[0][1:])
with open(repo_dir/x[0][1:],'rb') as f:
code = f.readlines()[x[1]-1:x[1]+x[2]-1]
# line info
diff_content = str(y).split("\n")
added_lines = []
removed_lines = []
        # parse the diff
tmp = count
for z in range(len(diff_content)):
if(diff_content[z].startswith("@@ ")):
if tmp !=0:
tmp-=1
else:
diff_content = diff_content[z+1:]
break
ct = 0
while(ct<len(diff_content)):
if diff_content[ct].startswith("-"):
removed_lines.append(ct)
elif diff_content[ct].startswith("+"):
added_lines.append(ct)
elif diff_content[ct].startswith("@@ "):
break
ct+=1
# store them
ori_code = b"".join(code)
item = [ori_code,removed_lines,added_lines,y.target_file]
vul_code.append(item)
count+=1
return vul_code
def get_bug_info(localId):
# Get the bug type
with open("./Reports/"+str(localId)+".json") as f:
bug_type = json.loads(f.read())['crash_type'].split(" ")[0]
return (" ".join(bug_type.split("-"))).lower()
def strategy_start_with(bug_info,start_with,mode='Modify'):
if mode == "Modify":
instruction = f'Fix the {bug_info} vulnerability on the lines beginning with "{start_with[:0x20].decode()}"'
elif mode == "Insert":
instruction = f'Fix the {bug_info} vulnerability by inserting code after the line beginning with "{start_with[:0x20].decode()}"'
return instruction
def getDescription(fname):
if fname.exists():
with open(fname,'r') as f:
patch_desc = f.readlines()
else:
return False
# parsing
x,y,z = 0,0,0
for _ in range(len(patch_desc)):
if x==0 and patch_desc[_].endswith("ity:\n"):
x=_+1
elif y==0 and patch_desc[_].endswith("ix:\n"):
y=_+1
elif z==0 and patch_desc[_].endswith("ix:\n"):
z=_+1
d = dict()
d['vul'] = ("\n".join(patch_desc[x:y-1])).strip(" \n")
d['summary'] = ("\n".join(patch_desc[y:z-1])).strip(" \n")
d['details'] = ("\n".join(patch_desc[z:])).strip(" \n")
if d['vul']=="":
return False
return d
def verify_FIX(localId,repo_dir,pname):
# TODO: Functional checking
print(localId)
# localId, int
    # returns True if the rebuilt target no longer crashes on the PoC input, False on any failure
def leave(result):
if CLEAN_TMP and case_dir:
clean_dir(case_dir)
if(RM_IMAGES):
remove_oss_fuzz_img(localId)
return result
srcmap,issue = getIssueTuple(localId)
case_dir = tmpDir()
try:
case_path = download_reproducer(issue,case_dir,"crash_case")
except:
return leave(False)
if not case_path or not case_path.exists():
return leave(False)
srcmap = srcmap[0]
build_res = \
build_from_srcmap(srcmap,issue,replace_dep=[pname,repo_dir])
if not build_res:
return leave(False)
not_crash = crashVerify(issue,case_path)
if not_crash == True:
return leave(True)
else:
return leave(False)
def getCrashType(localId):
return getIssue(localId)['crash_type']
def perform_fix(fix,ori_code,target_file):
with open(target_file,'rb') as f:
raw_code = f.read()
raw_code = raw_code.replace(ori_code,fix)
with open(target_file,'wb') as f:
f.write(raw_code)
def getDesc(localId):
localDesc = Path(f"./PatchDesc/{localId}.log")
if localDesc.exists() and SAVE_MONEY:
patch_desc = getDescription(localDesc)
else:
patch_desc = oss_fuzz_vul_labeler(localId)
assert(patch_desc != False)
with open(localDesc,'w') as f:
f.write(patch_desc)
return patch_desc
def get_GPT_fix(localId,vul_code,work_dir,model,lite=False,logDiff=False):
print("[*] Getting Fix Description")
code = vul_code [0]
if lite == True:
desciption = getCrashType(localId)
else:
patch_desc = getDesc(localId)
desciption = patch_desc['vul']
prompt = f"""
Can you fix the vulnerability in the following code:
```
{code.decode()}
```
There is a vulnerability description for the possible bug:
{desciption}
Please only return the code in the response. Do not include explanations in your reply.
"""
if logDiff != False:
dst= logDiff / str(localId)
dst.mkdir(exist_ok=True)
with open(dst/"prompt","w") as f:
f.write(prompt)
print("[+] Performing GPT Fixing..")
fixed_code = OpenAI.performChatFix(prompt,model)
print("[+] Recieved the result from ChatGPT")
# extract the code out
if "maximum context length" in fixed_code:
eventLog(f"[-] GPT failed to fix the bug: Inout OOB, {localId}")
exit(1)
fixed_code = re.sub(r'```.*\n', "\n_XxXSPLITTAGXxX_\n", fixed_code)
fixed_code = re.sub(r'```', "\n_XxXSPLITTAGXxX_\n", fixed_code)
if "_XxXSPLITTAGXxX_" in fixed_code:
tmp = fixed_code.split("_XxXSPLITTAGXxX_")
if(len(tmp)!=3):
eventLog(f"[X] get_GPT_fix: Odd return Value from GPT:\n\n {fixed_code} \n\n")
exit(1)
fixed_code = tmp[1]
return [fixed_code.encode()], code, work_dir/vul_code[3][2:]
def get_Codex_fix(localId,vul_code,work_dir,model,lite=False,logDiff=False):
print("[*] Getting Fix Description")
code = vul_code [0]
if lite == True:
desciption = getCrashType(localId)
else:
patch_desc = getDesc(localId)
desciption = patch_desc['vul']
prompt = f"""
Can you fix the vulnerability in the given code.
There is a vulnerability description for the possible bug:
{desciption}
"""
if logDiff != False:
dst= logDiff / str(localId)
dst.mkdir(exist_ok=True)
with open(dst/"prompt","w") as f:
f.write(prompt)
print("[+] Performing Codex Fixing..")
# "gpt-3.5-turbo-instruct",
# "code-davinci-edit-001"
if model not in ["gpt-3.5-turbo-instruct","code-davinci-edit-001"]:
panic(f"[X] Invalid Model {model}")
res = OpenAI.performCompletionFix(code.decode(),prompt,model=model,n=1,temperature=TEMP)
print(res)
fixed_code = list(set([ x['text'].encode() for x in res['choices'] if "error" not in x.keys() ]))
return fixed_code, code, work_dir/vul_code[3][2:]
def get_Wizard_fix(localId,vul_code,work_dir,model="Wizard-15B",lite=False,logDiff=False):
print("[*] Getting Wizard Fix Description")
code = vul_code [0]
target_file = vul_code[3]
print("[+] Getting Wizard Fix Code..")
fixed_code=""
if lite == True:
output_data = jsonlines.open(f"./_wizard_data/{model}_lite.jsonl", mode='r')
if logDiff != False:
pass
else:
output_data = jsonlines.open(f"./_wizard_data/{model}.jsonl", mode='r')
for line in output_data:
one_data = line
id = one_data["id"]
if id==localId:
fixed_code=one_data["wizardcoder"]
break
return [fixed_code.encode()], code, work_dir/target_file[2:]
def get_star_fix(localID,vul_code,work_dir,model="startcoder",lite=False):
print("[*] Getting Starcoder Fix Description")
code = vul_code [0]
target_file = vul_code[3]
print("[+] Getting Starcoder Fix Code..")
fixed_code=""
fixed_code = starcoder.start_coder_fix(localID)
return [fixed_code.encode()], code, work_dir/target_file[2:]
def _check_repo(target,localId):
if not target.exists():
if target.parent.exists():
shutil.rmtree(target.parent)
print("[-] Target repo doesn't exist, reproducing...")
if verify(localId):
if not Path(target).exists():
eventLog(f"[-] _check_repo: Main_repo Doesn't Exist after Reproducing {localId}")
return False
else:
return True
else:
eventLog(f"[-] _check_repo: Failed to verify {localId}")
return False
return True
def genDiff(ori,update):
filea = tmpFile()
fileb = tmpFile()
with open(filea,'wb') as f:
f.write(ori)
with open(fileb,'wb') as f:
f.write(update)
res= execute(["git",'diff',"-W",filea.absolute(),fileb.absolute()])
shutil.rmtree(filea.parent.absolute())
shutil.rmtree(fileb.parent.absolute())
return res
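# Illustrative usage of genDiff (an assumption: execute() returns the textual output of
# the git command it runs). The two byte strings are made-up snippets.
#   before = b"if (len > 0) memcpy(dst, src, len);\n"
#   after  = b"if (len > 0 && len <= dst_size) memcpy(dst, src, len);\n"
#   print(genDiff(before, after))   # expected: a unified diff between the two snippets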
def fixDiff(fix,dst,name):
with open(Path(dst)/name,'wb') as f:
f.write(fix)
def oracleDiff(src,dst,name):
with open(src) as f:
diff_content = f.read()
with open(Path(dst)/name,'w') as f:
f.write(diff_content)
def BenchMarkFuncExamp(localId,vul_code,work_dir,model="code-davinci-edit-001"):
print("[*] Getting Fix Description")
    code = vul_code[0]
    patch_desc = getDesc(localId)
    description = patch_desc['vul']
    prompt = f"""
Can you fix the vulnerability in the given code.
There is a vulnerability description for the possible bug:
{description}
"""
print("[+] Performing Codex Fixing..")
res = OpenAI.performCompletionFix(code.decode(),prompt,model=model,n=1,temperature=TEMP)
print(res)
fixed_code = list(set([ x['text'].encode() for x in res['choices'] if "error" not in x.keys() ]))
return fixed_code, code, work_dir/vul_code[3][2:]
def getMeta(localId):
work_dir = tmpDir()
# Get meta data
pname = getPname(localId)
diff_file = getDiff(localId)
vul_commit = getVulCommit(localId)
if vul_commit == False or diff_file == False:
return False
# Copy Repo
repo_dir = work_dir / pname
url = get_projectInfo(localId,pname)[0]['url']
if _check_repo(OSS_DB/b58encode(url).decode()/pname,localId) == False:
eventLog(f"[-] XxX: Failed to prepare the main repo: {localId}")
shutil.rmtree(work_dir)
return False
shutil.copytree( OSS_DB/b58encode(url).decode()/pname, repo_dir,symlinks=True)
# Check out to vul version
if check_call(['git','reset','--hard',vul_commit],repo_dir) == False:
shutil.rmtree(work_dir)
return False
    # Get code info; make sure there is only one case
vul_code = get_vul_code(diff_file,repo_dir)
res = []
for x in vul_code:
res.append(x[0])
# if(len(vul_code)!=1):
# print(f"[X] The case is a complex case. Please user python Functions as API")
# eventLog(f"[X] More than one modifications. The result could be not precise: {localId=}")
# return False
# else:
# vul_code = vul_code[0]
return res
def BenchMarkAPI(localId,fix):
work_dir = tmpDir()
# Get meta data
pname = getPname(localId)
diff_file = getDiff(localId)
vul_commit = getVulCommit(localId)
if vul_commit == False or diff_file == False:
return False
# Copy Repo
repo_dir = work_dir / pname
url = get_projectInfo(localId,pname)[0]['url']
if _check_repo(OSS_DB/b58encode(url).decode()/pname,localId) == False:
eventLog(f"[-] XxX: Failed to prepare the main repo: {localId}")
shutil.rmtree(work_dir)
return False
shutil.copytree( OSS_DB/b58encode(url).decode()/pname, repo_dir,symlinks=True)
# Check out to vul version
if check_call(['git','reset','--hard',vul_commit],repo_dir) == False:
shutil.rmtree(work_dir)
return False
    # Get code info; make sure there is only one case
vul_code = get_vul_code(diff_file,repo_dir)
if(len(vul_code)!=1):
        eventLog(f"[X] More than one modification. The result may be imprecise: {localId=}")
return False
else:
vul_code = vul_code[0]
target_file = vul_code[3]
# Perform Fixing
# try:
# fixes, ori_code, target_file = fixer(localId,vul_code,repo_dir)
# except:
# return False
    # Try to build and verify all possible fixes
perform_fix(fix,vul_code[0],repo_dir/target_file[2:])
res = verify_FIX(localId,repo_dir,pname)
if res:
print("[+] Successful fix: ")
print(fix)
shutil.rmtree(work_dir)
shutil.rmtree(os.path.dirname(diff_file))
if res:
print("[+] SUCCESS!")
return True
else:
print("[-] FAIL to FIX.")
return False
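# Illustrative sketch: getMeta() and BenchMarkAPI() are meant to be used as a pair --
# getMeta() exports the vulnerable snippet(s) for an external fixer and BenchMarkAPI()
# replays a candidate fix against the build and the crash reproducer. The issue id and
# the byte-level edit below are made-up placeholders.
#   snippets = getMeta(58086)                                # list of vulnerable code blobs, or False
#   candidate = snippets[0].replace(b"strcpy", b"strncpy")   # hypothetical hand-made fix
#   print(BenchMarkAPI(58086, candidate))                    # True if the patched project builds and passes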
def XxX(localId,module,chance=1,lite=False,logDiff=False,tag=""):
# logDiff False or a string of Path
# TODO: CLEAN TMP DIR
if logDiff == True:
logDiff = tmpDir()
if logDiff != False:
logDiff = logDiff/ f"{module}_{tag}"
logDiff.mkdir(exist_ok=True)
if module == "Codex":
module = "code-davinci-edit-001"
if module not in ["gpt-3.5-turbo-16k","Starcoder","gpt-3.5-turbo-instruct","code-davinci-edit-001","gpt-3.5-turbo","gpt-4","gpt-4-1106-preview"] and \
'Wizard' not in module:
panic(f"[X] Invalid Model {module}")
work_dir = tmpDir()
# Get meta data
pname = getPname(localId)
diff_file = getDiff(localId)
vul_commit = getVulCommit(localId)
if vul_commit==False or diff_file==False:
return False
# Copy Repo
repo_dir = work_dir / pname
url = get_projectInfo(localId,pname)[0]['url']
if _check_repo(OSS_DB/b58encode(url).decode()/pname,localId) == False:
eventLog(f"[-] XxX: Failed to prepare the main repo: {localId}")
shutil.rmtree(work_dir)
return False
shutil.copytree( OSS_DB/b58encode(url).decode()/pname, repo_dir,symlinks=True)
# Check out to vul version
if check_call(['git','reset','--hard',vul_commit],repo_dir) == False:
shutil.rmtree(work_dir)
return False
    # Get code info; make sure there is only one case
vul_code = get_vul_code(diff_file,repo_dir)
if(len(vul_code)!=1):
        eventLog(f"[X] More than one modification. The result may be imprecise: {localId=}")
return False
else:
vul_code = vul_code[0]
# Perform Fixing
    if(module in ["Codex","gpt-3.5-turbo-instruct","code-davinci-edit-001"]):
try:
fixes, ori_code, target_file = get_Codex_fix(localId,vul_code,repo_dir,model=module,lite=lite,logDiff=logDiff)
except:
            eventLog(f"[X] Failed to perform Codex fixing: {localId=}")
return False
elif("Wizard" in module):
try:
fixes, ori_code, target_file = get_Wizard_fix(localId,vul_code,repo_dir,model=module,lite=lite)
if fixes[0]==b'':
return False
except:
            eventLog(f"[X] Failed to perform Wizard fixing: {localId=}")
return False
elif module in ["gpt-3.5-turbo","gpt-4","gpt-4-1106-preview","gpt-3.5-turbo-16k"]:
try:
fixes, ori_code, target_file = get_GPT_fix(localId,vul_code,repo_dir,model=module,lite=lite,logDiff=logDiff)
except:
            eventLog(f"[X] Failed to perform GPT fixing: {localId=}")
return False
elif module == "Starcoder":
try:
fixes, ori_code, target_file = get_star_fix(localId,vul_code,repo_dir,model=module,lite=lite,)
except:
            eventLog(f"[X] Failed to perform Starcoder fixing: {localId=}")
return False
else:
panic("UNK Module")
for x in fixes[:chance]:
print("\n"+x.decode()+"\n")
    # Try to build and verify all possible fixes
for fix in fixes[:chance]:
perform_fix(fix,ori_code,target_file)
res = verify_FIX(localId,repo_dir,pname)
if res:
print("[+] Successful fix: ")
print(fix)
break
if chance == 1 and logDiff!=False:
dst= logDiff / str(localId)
dst.mkdir(exist_ok=True)
diff_content = genDiff(vul_code[0],fixes[0])
fixDiff(diff_content,dst,f"fix_{localId}.diff")
oracleDiff(diff_file,dst,f"ora_{localId}.diff")
shutil.rmtree(work_dir)
shutil.rmtree(os.path.dirname(diff_file))
if logDiff:
dst= logDiff / str(localId)
with open(dst/"res",'w') as f:
if res:
f.write(f"[+] SUCCESS!\n")
else:
f.write(f"[-] FAIL to FIX.\n")
if res:
print("[+] SUCCESS!")
return True
else:
print("[-] FAIL to FIX.")
return False
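# Illustrative sketch of driving the end-to-end repair entry point above. The issue id
# is a placeholder; "gpt-4" is one of the model names XxX() already accepts, and
# logDiff=True makes it log the prompt plus the generated and oracle diffs.
#   ok = XxX(58086, "gpt-4", chance=1, lite=False, logDiff=True)
#   print("fixed" if ok else "not fixed")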
def patchDesc(localId,update=False):
if not update and Path(f"./PatchDesc/{localId}.log").exists():
return
patch_desc = oss_fuzz_vul_labeler(localId)
if patch_desc == False:
        eventLog(f"[-] patchDesc: Failed to analyze the patch: {localId=}")
return
with open(f"./PatchDesc/{localId}.log","w") as f:
f.write(patch_desc)
print("Sleeping to cool-down...")
time.sleep(10)
return
def desc_allPatches(ids=None, update=False):
    if ids is None:
        ids = get_all_single_mods()
for x in ids:
print(f"[+] Generating the Desc for issue: {x}")
patchDesc(x,update)
return
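# Illustrative sketch: pre-generating patch descriptions so later fixing runs can read
# them from ./PatchDesc instead of calling the labeler again. The explicit id list is a
# made-up example; with ids=None it falls back to every single-modification issue.
#   desc_allPatches(ids=[58086], update=False)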
# Test Area
def TestBenchmark(round, module="GPT"):
logfile= f"./_fx_local_log/{module}_round_{round}.json"
logfile = Path(logfile)
# Init log files
if not json_file_check(logfile):
panic(f"[X] Failed to init {logfile}")
# Get tested issues
content = json.loads(open(logfile).read())
tested = [int(x.strip())for x in content.keys()]
# Do test
ids = get_all_single_mods(False)
    while len(tested) < len(ids):
        chosen = ids[random.randint(0, len(ids) - 1)]
        if chosen in tested:
            continue
        try:
            res = LMFix(chosen, module)
        except:
            res = False
        content[chosen] = res
        tested.append(chosen)  # mark as done so the loop terminates
        with open(logfile, 'w') as f:
            f.write(json.dumps(content, sort_keys=True, indent=4))  # persist progress after each issue
print("[!] Test ALL Done")
def Benchmark(round, module):
if module not in ['GPT','Codex','Wizard']:
print("[X] Select a module from 'GPT','Codex','Wizard'")
return False
logfile= f"./Log/BenchMark/{module}_round_{round}.json"
logfile = Path(logfile)
# Init log files
if not json_file_check(logfile):
panic(f"[X] Failed to init {logfile}")
# Get tested issues
content = json.loads(open(logfile).read())
tested = [int(x.strip())for x in content.keys()]
# Do test
ids = get_all_single_mods(False)
    while len(tested) < len(ids):
        chosen = ids[random.randint(0, len(ids) - 1)]
        if chosen in tested:
            continue
        try:
            res = XxX(chosen, module)
        except:
            res = False
        content[chosen] = res
        tested.append(chosen)  # mark as done so the loop terminates
        with open(logfile, 'w') as f:
            f.write(json.dumps(content, sort_keys=True, indent=4))  # persist progress after each issue
print("[!] Test ALL Done")
### returns prompt
def getWizardPrompt(localId, vul_code,lite=False):
    if lite == True:
        description = getCrashType(localId)
    else:
        patch_desc = getDesc(localId)
        description = patch_desc['vul']
    prompt = f"""
Instruction:
Rewrite this code to patch the bug:
```
{vul_code}
```
Bug description:
{description}
Always and only return the rewritten code.
"""
return prompt
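# Illustrative sketch: building a WizardCoder prompt for a hypothetical snippet. The
# code string is made up; lite=True uses the crash type from getCrashType() instead of
# the full patch description from getDesc().
#   p = getWizardPrompt(58086, "int idx = read_index();\nreturn buf[idx];", lite=True)
#   print(p)   # the prompt embeds the snippet in a code fence plus the bug description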
### Generate input.jsonl for HPC Wizard
def GenerateWizardInput(input_file_path, localIds=[58086], limit=1,lite=False,logDiff=False):
# Do test
input_file = jsonlines.open(input_file_path, mode='w')
cnt=0
for localId in localIds:
if(1):
vul_code = [b'']
# TODO: CLEAN TMP DIR
work_dir = tmpDir()
# Get meta data
pname = getPname(localId)
diff_file = getDiff(localId)
vul_commit = getVulCommit(localId)
if vul_commit==False or diff_file==False:
raise ValueError('Incorrect vul commit or diff file.')
# Copy Repo
repo_dir = work_dir / pname
url = get_projectInfo(localId,pname)[0]['url']
if _check_repo(OSS_DB/b58encode(url).decode()/pname,localId) == False:
eventLog(f"[-] XxX: Failed to prepare the main repo: {localId}")
shutil.rmtree(work_dir)
raise ValueError('Failed to prepare the main repo for the issue')
shutil.copytree( OSS_DB/b58encode(url).decode()/pname, repo_dir,symlinks=True)
# Check out to vul version
if check_call(['git','reset','--hard',vul_commit],repo_dir) == False:
eventLog(f"[-] XxX: Failed to reset {localId}")
shutil.rmtree(work_dir)
raise ValueError('Failed to git reset to vul commit')
            # Get code info; make sure there is only one case
vul_code = get_vul_code(diff_file,repo_dir)
# print("[+] Got vul code")
# print(vul_code)
            if len(vul_code) > limit:
                print("[X] More than one modification. The result could be imprecise.")
                vul_code = [b'']
else:
vul_code = vul_code[0]
# print(vul_code)
# except Exception as e:
# print("[-] Failed getting vul code:" + str(e))
# vul_code = [b'']
        if vul_code != [b'']:  # 'is not' on a fresh list literal is always True; compare by value instead
prompt = getWizardPrompt(localId,vul_code[0].decode(),lite)
input_data = {"idx":localId, "Instruction":prompt}
input_file.write(input_data)
cnt+=1
if logDiff != False:
dst= logDiff / str(localId)
dst.mkdir(exist_ok=True)
with open(dst/"prompt","w") as f:
f.write(prompt)
if cnt==100:
break
print(f"[+] Finished writing {str(cnt)} prompts to {input_file_path}")
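# Illustrative sketch (assumed invocation): write prompts for a couple of issues into a
# JSONL file for the HPC Wizard runner. The ids and output path are placeholders; each
# emitted line has the shape {"idx": <localId>, "Instruction": <prompt>}.
#   GenerateWizardInput("./_wizard_data/input.jsonl", localIds=[58086, 12345], lite=True)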
if __name__ == "__main__":
pass
| [
"\nCan you fix the vulnerability in the given code.\n \nThere is a vulnerability description for the possible bug:\n\nPLACEHOLDER\n\n",
"\nInstruction:\nRewrite this code to patch the bug:\n```\nPLACEHOLDER\n```\n \nBug description:\n\nPLACEHOLDER\n\nAlways and only return the rewritten code.\n"
] |