date_collected (string, 1 unique value) | repo_name (string, 6-116 chars) | file_name (string, 2-220 chars) | file_contents (string, 13-357k chars) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | joowon-dm-snu/fastcampus-chatgpt-intro-frameworks | part07~ch03_langchain~gen3~database~upload.py | import os
from dotenv import load_dotenv
from langchain.document_loaders import (
NotebookLoader,
TextLoader,
UnstructuredMarkdownLoader,
)
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
load_dotenv()
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(os.path.dirname(CUR_DIR), "dataset")
SK_CODE_DIR = os.path.join(DATA_DIR, "semantic-kernel", "python")
SK_SAMPLE_DIR = os.path.join(
DATA_DIR, "semantic-kernel", "samples", "notebooks", "python"
)
SK_DOC_DIR = os.path.join(DATA_DIR, "semantic-kernel-docs", "semantic-kernel")
CHROMA_PERSIST_DIR = os.path.join(CUR_DIR, "chroma-persist")
CHROMA_COLLECTION_NAME = "fastcampus-bot"
LOADER_DICT = {
"py": TextLoader,
"md": UnstructuredMarkdownLoader,
"ipynb": NotebookLoader,
}
def upload_embedding_from_file(file_path):
loader = LOADER_DICT.get(file_path.split(".")[-1])
if loader is None:
        raise ValueError("Unsupported file type")
documents = loader(file_path).load()
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=100)
docs = text_splitter.split_documents(documents)
Chroma.from_documents(
docs,
OpenAIEmbeddings(),
collection_name=CHROMA_COLLECTION_NAME,
persist_directory=CHROMA_PERSIST_DIR,
)
def upload_embeddings_from_dir(dir_path):
failed_upload_files = []
for root, dirs, files in os.walk(dir_path):
for file in files:
if file.endswith(".py") or file.endswith(".md") or file.endswith(".ipynb"):
file_path = os.path.join(root, file)
try:
upload_embedding_from_file(file_path)
print("SUCCESS: ", file_path)
except Exception:
print("FAILED: ", file_path)
failed_upload_files.append(file_path)
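# Illustrative addition (not part of the original script): a minimal sketch of how the
# persisted collection could be queried afterwards. It assumes the same collection name
# and persist directory used above; the query string is purely hypothetical.
def example_query_persisted_collection(query="How do I register a semantic function?"):
    db = Chroma(
        collection_name=CHROMA_COLLECTION_NAME,
        embedding_function=OpenAIEmbeddings(),
        persist_directory=CHROMA_PERSIST_DIR,
    )
    # Return the three chunks most similar to the query.
    return db.similarity_search(query, k=3)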
if __name__ == "__main__":
upload_embeddings_from_dir(SK_CODE_DIR)
upload_embeddings_from_dir(SK_SAMPLE_DIR)
upload_embeddings_from_dir(SK_DOC_DIR)
| [] |
2024-01-10 | joowon-dm-snu/fastcampus-chatgpt-intro-frameworks | part06~ch03_langchain~gen2~multi_prompt_chains.py | import os
from dotenv import load_dotenv
from langchain.chains import ConversationChain, LLMChain, LLMRouterChain
from langchain.chains.router import MultiPromptChain
from langchain.chains.router.llm_router import RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate
load_dotenv()
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
BUG_STEP1_PROMPT_TEMPLATE = os.path.join(
CUR_DIR, "prompt_templates", "bug_say_sorry.txt"
)
BUG_STEP2_PROMPT_TEMPLATE = os.path.join(
CUR_DIR, "prompt_templates", "bug_request_context.txt"
)
ENHANCE_STEP1_PROMPT_TEMPLATE = os.path.join(
CUR_DIR, "prompt_templates", "enhancement_say_thanks.txt"
)
def read_prompt_template(file_path: str) -> str:
with open(file_path, "r") as f:
prompt_template = f.read()
return prompt_template
def create_chain(llm, template_path, output_key):
return LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_template(
template=read_prompt_template(template_path)
),
output_key=output_key,
verbose=True,
)
llm = ChatOpenAI(temperature=0.1, max_tokens=200, model="gpt-3.5-turbo")
bug_step1_chain = create_chain(
llm=llm,
template_path=BUG_STEP1_PROMPT_TEMPLATE,
output_key="text",
)
enhance_step1_chain = create_chain(
llm=llm,
template_path=ENHANCE_STEP1_PROMPT_TEMPLATE,
output_key="text",
)
destinations = [
"bug: Related to a bug, vulnerability, unexpected error with an existing feature",
"documentation: Changes to documentation and examples, like .md, .rst, .ipynb files. Changes to the docs/ folder",
"enhancement: A large net-new component, integration, or chain. Use sparingly. The largest features",
"improvement: Medium size change to existing code to handle new use-cases",
"nit: Small modifications/deletions, fixes, deps or improvements to existing code or docs",
"question: A specific question about the codebase, product, project, or how to use a feature",
"refactor: A large refactor of a feature(s) or restructuring of many files",
]
destinations = "\n".join(destinations)
router_prompt_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=destinations)
router_prompt = PromptTemplate.from_template(
template=router_prompt_template, output_parser=RouterOutputParser()
)
router_chain = LLMRouterChain.from_llm(llm=llm, prompt=router_prompt, verbose=True)
multi_prompt_chain = MultiPromptChain(
router_chain=router_chain,
destination_chains={
"bug": bug_step1_chain,
"enhancement": enhance_step1_chain,
},
default_chain=ConversationChain(llm=llm, output_key="text"),
)
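# Illustrative addition (not part of the original script): a minimal sketch of how the
# router could be exercised. The issue text is hypothetical; the router dispatches to the
# "bug" or "enhancement" chain when it matches and otherwise falls back to the default chain.
def example_route_issue(issue_text="The app crashes when I upload an .ipynb file"):
    return multi_prompt_chain.run(issue_text)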
| [
"enhancement_say_thanks.txt",
"enhancement",
"bug_say_sorry.txt",
"prompt_templates",
"bug_request_context.txt"
] |
2024-01-10 | joowon-dm-snu/fastcampus-chatgpt-intro-frameworks | part07~ch04_semantic_kernel~gen3~database~upload.py | import os
from uuid import uuid4
import markdown
import nbformat
import semantic_kernel as sk
from bs4 import BeautifulSoup
from semantic_kernel.connectors.ai.open_ai import OpenAITextEmbedding
from semantic_kernel.connectors.memory.chroma import ChromaMemoryStore
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(CUR_DIR, "dataset")
SK_CODE_DIR = os.path.join(DATA_DIR, "semantic-kernel", "python")
SK_SAMPLE_DIR = os.path.join(DATA_DIR, "semantic-kernel", "samples", "python")
SK_DOC_DIR = os.path.join(DATA_DIR, "semantic-kernel-docs", "semantic-kernel")
CHROMA_PERSIST_DIR = os.path.join(CUR_DIR, "chroma-persist")
CHROMA_COLLECTION_NAME = "fastcampus-bot"
def read_file(file_path):
with open(file_path, "r") as f:
if file_path.endswith(".ipynb"):
nb = nbformat.read(f, as_version=4)
contents = ""
for cell in nb["cells"]:
if cell["cell_type"] in ["code", "markdown"]:
contents += cell["source"] + "\n"
else:
raise ValueError(f"Unknown cell type: {cell['cell_type']}")
else:
contents = f.read()
if file_path.endswith(".ipynb") or file_path.endswith(".md"):
contents = markdown.markdown(contents)
soup = BeautifulSoup(contents, "html.parser")
contents = soup.get_text()
return contents
async def upload_embeddings_from_file(file_path):
contents = read_file(file_path)
await kernel.memory.save_information_async(
collection=CHROMA_COLLECTION_NAME,
text=contents,
id=str(uuid4()),
description=os.path.relpath(file_path, DATA_DIR),
)
async def upload_embeddings_from_dir(dir):
failed_upload_files = []
for root, dirs, files in os.walk(dir):
for file in files:
ext = os.path.splitext(file)[1]
if ext in [".py", ".md", ".ipynb"]:
file_path = os.path.join(root, file)
try:
await upload_embeddings_from_file(file_path)
print("SUCCESS:", file_path)
except Exception:
print("FAILED:", file_path)
failed_upload_files.append(file_path)
return failed_upload_files
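# Illustrative addition (not part of the original script): a minimal sketch of how the
# uploaded memories could be searched afterwards. The query string is hypothetical and the
# exact search API may differ between semantic-kernel versions; the kernel is expected to be
# configured as in the __main__ block below.
async def example_search_memory(kernel, query="How do I create a semantic function?"):
    results = await kernel.memory.search_async(
        collection=CHROMA_COLLECTION_NAME,
        query=query,
        limit=3,
    )
    return [(r.description, r.relevance) for r in results]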
if __name__ == "__main__":
import asyncio
from dotenv import load_dotenv
load_dotenv()
kernel = sk.Kernel()
kernel.add_text_embedding_generation_service(
"ada",
OpenAITextEmbedding(
"text-embedding-ada-002",
os.getenv("OPENAI_API_KEY"),
),
)
kernel.register_memory_store(
memory_store=ChromaMemoryStore(persist_directory=CHROMA_PERSIST_DIR)
)
failed_doc_files = asyncio.run(upload_embeddings_from_dir(SK_DOC_DIR))
failed_codebase_files = asyncio.run(upload_embeddings_from_dir(SK_SAMPLE_DIR))
failed_sample_files = asyncio.run(upload_embeddings_from_dir(SK_CODE_DIR))
# print(failed_doc_files)
# print(failed_codebase_files)
| [] |
2024-01-10 | joowon-dm-snu/fastcampus-chatgpt-intro-frameworks | part06~ch03_langchain~gen3~chains.py | import os
from dotenv import load_dotenv
from langchain.chains import ConversationChain, LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import ChatPromptTemplate
load_dotenv()
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
BUG_STEP1_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "prompt_templates", "bug_analyze.txt")
BUG_STEP2_PROMPT_TEMPLATE = os.path.join(
CUR_DIR, "prompt_templates", "bug_solution.txt"
)
ENHANCE_STEP1_PROMPT_TEMPLATE = os.path.join(
CUR_DIR, "prompt_templates", "enhancement_say_thanks.txt"
)
INTENT_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "prompt_templates", "parse_intent.txt")
def read_prompt_template(file_path: str) -> str:
with open(file_path, "r") as f:
prompt_template = f.read()
return prompt_template
def create_chain(llm, template_path, output_key):
return LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_template(
template=read_prompt_template(template_path)
),
output_key=output_key,
verbose=True,
)
llm = ChatOpenAI(temperature=0.1, max_tokens=200, model="gpt-3.5-turbo")
bug_step1_chain = create_chain(
llm=llm,
template_path=BUG_STEP1_PROMPT_TEMPLATE,
output_key="bug_analysis",
)
bug_step2_chain = create_chain(
llm=llm,
template_path=BUG_STEP2_PROMPT_TEMPLATE,
output_key="output",
)
enhance_step1_chain = create_chain(
llm=llm,
template_path=ENHANCE_STEP1_PROMPT_TEMPLATE,
output_key="output",
)
parse_intent_chain = create_chain(
llm=llm,
template_path=INTENT_PROMPT_TEMPLATE,
output_key="intent",
)
default_chain = ConversationChain(llm=llm, output_key="output")
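# Illustrative addition (not part of the original script): a minimal sketch of how these
# chains might be dispatched at request time. It assumes each prompt template takes a single
# input variable and that parse_intent.txt answers with labels such as "bug" or "enhancement".
def example_answer(user_message):
    intent = parse_intent_chain.run(user_message).strip().lower()
    if intent == "bug":
        analysis = bug_step1_chain.run(user_message)
        return bug_step2_chain.run(analysis)
    if intent == "enhancement":
        return enhance_step1_chain.run(user_message)
    return default_chain.run(user_message)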
| [
"enhancement_say_thanks.txt",
"parse_intent.txt",
"bug_analyze.txt",
"prompt_templates",
"bug_solution.txt"
] |
2024-01-10 | skrashevich/ChatGPT | src~revChatGPT~Unofficial.py | import json
import logging
import re
import uuid
from time import sleep
import tls_client
import undetected_chromedriver as uc
from requests.exceptions import HTTPError
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# Disable all logging
logging.basicConfig(level=logging.ERROR)
BASE_URL = "https://chat.openai.com/"
class Chrome(uc.Chrome):
def __del__(self):
self.quit()
class Chatbot:
def __init__(
self,
config,
conversation_id=None,
parent_id=None,
no_refresh=False,
) -> None:
self.config = config
self.session = tls_client.Session(
client_identifier="chrome_108",
)
if "proxy" in config:
if type(config["proxy"]) != str:
raise Exception("Proxy must be a string!")
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
self.session.proxies.update(proxies)
if "verbose" in config:
if type(config["verbose"]) != bool:
raise Exception("Verbose must be a boolean!")
self.verbose = config["verbose"]
else:
self.verbose = False
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
self.isMicrosoftLogin = False
# stdout colors
self.GREEN = "\033[92m"
self.WARNING = "\033[93m"
self.ENDCOLOR = "\033[0m"
if "email" in config and "password" in config:
if type(config["email"]) != str:
raise Exception("Email must be a string!")
if type(config["password"]) != str:
raise Exception("Password must be a string!")
self.email = config["email"]
self.password = config["password"]
if "isMicrosoftLogin" in config and config["isMicrosoftLogin"] == True:
self.isMicrosoftLogin = True
self.__microsoft_login()
else:
self.__email_login()
elif "session_token" in config:
if no_refresh:
self.__get_cf_cookies()
return
if type(config["session_token"]) != str:
raise Exception("Session token must be a string!")
self.session_token = config["session_token"]
self.session.cookies.set(
"__Secure-next-auth.session-token",
config["session_token"],
)
self.__get_cf_cookies()
else:
raise Exception("Invalid config!")
self.__retry_refresh()
def __retry_refresh(self):
retries = 5
refresh = True
while refresh:
try:
self.__refresh_session()
refresh = False
except Exception as exc:
if retries == 0:
raise exc
retries -= 1
def ask(
self,
prompt,
conversation_id=None,
parent_id=None,
gen_title=False,
session_token=None,
):
"""
Ask a question to the chatbot
:param prompt: String
:param conversation_id: UUID
:param parent_id: UUID
:param gen_title: Boolean
:param session_token: String
"""
if session_token:
self.session.cookies.set(
"__Secure-next-auth.session-token",
session_token,
)
self.session_token = session_token
self.config["session_token"] = session_token
self.__retry_refresh()
self.__map_conversations()
if conversation_id == None:
conversation_id = self.conversation_id
if parent_id == None:
parent_id = (
self.parent_id
if conversation_id == self.conversation_id
else self.conversation_mapping[conversation_id]
)
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id or str(uuid.uuid4()),
"model": "text-davinci-002-render"
if self.config.get("paid") is not True
else "text-davinci-002-render-paid",
}
new_conv = data["conversation_id"] is None
self.conversation_id_prev_queue.append(
data["conversation_id"],
) # for rollback
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=BASE_URL + "backend-api/conversation",
data=json.dumps(data),
timeout_seconds=180,
)
if response.status_code != 200:
print(response.text)
self.__refresh_session()
raise HTTPError(
f"Wrong response code: {response.status_code}! Refreshing session...",
)
else:
try:
response = response.text.splitlines()[-4]
response = response[6:]
except Exception as exc:
print("Incorrect response from OpenAI API")
raise Exception("Incorrect response from OpenAI API") from exc
# Check if it is JSON
if response.startswith("{"):
response = json.loads(response)
self.parent_id = response["message"]["id"]
self.conversation_id = response["conversation_id"]
message = response["message"]["content"]["parts"][0]
res = {
"message": message,
"conversation_id": self.conversation_id,
"parent_id": self.parent_id,
}
if gen_title and new_conv:
try:
title = self.__gen_title(
self.conversation_id,
self.parent_id,
)["title"]
except Exception as exc:
split = prompt.split(" ")
title = " ".join(split[:3]) + ("..." if len(split) > 3 else "")
res["title"] = title
return res
else:
return None
def __check_response(self, response):
if response.status_code != 200:
print(response.text)
raise Exception("Response code error: ", response.status_code)
def get_conversations(self, offset=0, limit=20):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"backend-api/conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
def get_msg_history(self, id):
"""
Get message history
:param id: UUID of conversation
"""
url = BASE_URL + f"backend-api/conversation/{id}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data
def __gen_title(self, id, message_id):
"""
Generate title for conversation
"""
url = BASE_URL + f"backend-api/conversation/gen_title/{id}"
response = self.session.post(
url,
data=json.dumps(
{
"message_id": message_id,
"model": "text-davinci-002-render"
if self.config.get("paid") is not True
else "text-davinci-002-render-paid",
},
),
)
self.__check_response(response)
data = json.loads(response.text)
return data
def change_title(self, id, title):
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"backend-api/conversation/{id}"
response = self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
def delete_conversation(self, id):
"""
Delete conversation
:param id: UUID of conversation
"""
url = BASE_URL + f"backend-api/conversation/{id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "backend-api/conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def __map_conversations(self):
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def __refresh_session(self, session_token=None):
if session_token:
self.session.cookies.set(
"__Secure-next-auth.session-token",
session_token,
)
self.session_token = session_token
self.config["session_token"] = session_token
url = BASE_URL + "api/auth/session"
response = self.session.get(url, timeout_seconds=180)
if response.status_code == 403:
self.__get_cf_cookies()
raise Exception("Clearance refreshing...")
try:
if "error" in response.json():
raise Exception(
f"Failed to refresh session! Error: {response.json()['error']}",
)
elif (
response.status_code != 200
or response.json() == {}
or "accessToken" not in response.json()
):
raise Exception(
f"Response code: {response.status_code} \n Response: {response.text}",
)
else:
self.session.headers.update(
{
"Authorization": "Bearer " + response.json()["accessToken"],
},
)
self.session_token = self.session.cookies._find(
"__Secure-next-auth.session-token",
)
except Exception:
print("Failed to refresh session!")
if self.isMicrosoftLogin:
print("Attempting to re-authenticate...")
self.__microsoft_login()
else:
self.__email_login()
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
def __microsoft_login(self) -> None:
"""
Login to OpenAI via Microsoft Login Authentication.
:return: None
"""
driver = None
try:
# Open the browser
self.cf_cookie_found = False
self.session_cookie_found = False
self.agent_found = False
self.cf_clearance = None
self.user_agent = None
options = self.__get_ChromeOptions()
print("Spawning browser...")
driver = uc.Chrome(
enable_cdp_events=True,
options=options,
driver_executable_path=self.config.get("driver_exec_path"),
browser_executable_path=self.config.get("browser_exec_path"),
)
print("Browser spawned.")
driver.add_cdp_listener(
"Network.responseReceivedExtraInfo",
lambda msg: self.__detect_cookies(msg),
)
driver.add_cdp_listener(
"Network.requestWillBeSentExtraInfo",
lambda msg: self.__detect_user_agent(msg),
)
driver.get(BASE_URL)
while not self.agent_found or not self.cf_cookie_found:
sleep(5)
self.__refresh_headers(
cf_clearance=self.cf_clearance,
user_agent=self.user_agent,
)
# Wait for the login button to appear
WebDriverWait(driver, 120).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[contains(text(), 'Log in')]"),
),
)
# Click the login button
driver.find_element(
by=By.XPATH,
value="//button[contains(text(), 'Log in')]",
).click()
# Wait for the Login with Microsoft button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[@data-provider='windowslive']"),
),
)
# Click the Login with Microsoft button
driver.find_element(
by=By.XPATH,
value="//button[@data-provider='windowslive']",
).click()
# Wait for the email input field to appear
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.XPATH, "//input[@type='email']"),
),
)
# Enter the email
driver.find_element(
by=By.XPATH,
value="//input[@type='email']",
).send_keys(self.config["email"])
# Wait for the Next button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@type='submit']"),
),
)
# Click the Next button
driver.find_element(
by=By.XPATH,
value="//input[@type='submit']",
).click()
# Wait for the password input field to appear
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.XPATH, "//input[@type='password']"),
),
)
# Enter the password
driver.find_element(
by=By.XPATH,
value="//input[@type='password']",
).send_keys(self.config["password"])
# Wait for the Sign in button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@type='submit']"),
),
)
# Click the Sign in button
driver.find_element(
by=By.XPATH,
value="//input[@type='submit']",
).click()
# Wait for the Allow button to appear
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@type='submit']"),
),
)
# click Yes button
driver.find_element(
by=By.XPATH,
value="//input[@type='submit']",
).click()
# wait for input box to appear (to make sure we're signed in)
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.XPATH, "//textarea"),
),
)
while not self.session_cookie_found:
sleep(5)
print(self.GREEN + "Login successful." + self.ENDCOLOR)
finally:
# Close the browser
if driver is not None:
driver.quit()
del driver
def __email_login(self) -> None:
"""
Login to OpenAI via Email/Password Authentication and 2Captcha.
:return: None
"""
# Open the browser
driver = None
try:
self.cf_cookie_found = False
self.session_cookie_found = False
self.agent_found = False
self.cf_clearance = None
self.user_agent = None
options = self.__get_ChromeOptions()
print("Spawning browser...")
driver = uc.Chrome(
enable_cdp_events=True,
options=options,
driver_executable_path=self.config.get("driver_exec_path"),
browser_executable_path=self.config.get("browser_exec_path"),
)
print("Browser spawned.")
driver.add_cdp_listener(
"Network.responseReceivedExtraInfo",
lambda msg: self.__detect_cookies(msg),
)
driver.add_cdp_listener(
"Network.requestWillBeSentExtraInfo",
lambda msg: self.__detect_user_agent(msg),
)
driver.get(BASE_URL)
while not self.agent_found or not self.cf_cookie_found:
sleep(5)
self.__refresh_headers(
cf_clearance=self.cf_clearance,
user_agent=self.user_agent,
)
# Wait for the login button to appear
WebDriverWait(driver, 120).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[contains(text(), 'Log in')]"),
),
)
# Click the login button
driver.find_element(
by=By.XPATH,
value="//button[contains(text(), 'Log in')]",
).click()
# Wait for the email input field to appear
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.ID, "username"),
),
)
# Enter the email
driver.find_element(by=By.ID, value="username").send_keys(
self.config["email"],
)
# Wait for the Continue button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[@type='submit']"),
),
)
# Click the Continue button
driver.find_element(
by=By.XPATH,
value="//button[@type='submit']",
).click()
# Wait for the password input field to appear
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.ID, "password"),
),
)
# Enter the password
driver.find_element(by=By.ID, value="password").send_keys(
self.config["password"],
)
# Wait for the Sign in button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[@type='submit']"),
),
)
# Click the Sign in button
driver.find_element(
by=By.XPATH,
value="//button[@type='submit']",
).click()
# wait for input box to appear (to make sure we're signed in)
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.XPATH, "//textarea"),
),
)
while not self.session_cookie_found:
sleep(5)
print(self.GREEN + "Login successful." + self.ENDCOLOR)
finally:
if driver is not None:
# Close the browser
driver.quit()
del driver
def __get_ChromeOptions(self):
options = uc.ChromeOptions()
options.add_argument("--start_maximized")
options.add_argument("--disable-extensions")
options.add_argument("--disable-application-cache")
options.add_argument("--disable-gpu")
options.add_argument("--no-sandbox")
options.add_argument("--disable-setuid-sandbox")
options.add_argument("--disable-dev-shm-usage")
if self.config.get("proxy", "") != "":
options.add_argument("--proxy-server=" + self.config["proxy"])
return options
def __get_cf_cookies(self) -> None:
"""
Get cloudflare cookies.
:return: None
"""
driver = None
try:
self.cf_cookie_found = False
self.agent_found = False
self.cf_clearance = None
self.user_agent = None
options = self.__get_ChromeOptions()
print("Spawning browser...")
driver = uc.Chrome(
enable_cdp_events=True,
options=options,
driver_executable_path=self.config.get("driver_exec_path"),
browser_executable_path=self.config.get("browser_exec_path"),
)
print("Browser spawned.")
driver.add_cdp_listener(
"Network.responseReceivedExtraInfo",
lambda msg: self.__detect_cookies(msg),
)
driver.add_cdp_listener(
"Network.requestWillBeSentExtraInfo",
lambda msg: self.__detect_user_agent(msg),
)
driver.get("https://chat.openai.com/chat")
while not self.agent_found or not self.cf_cookie_found:
sleep(5)
finally:
# Close the browser
if driver is not None:
driver.quit()
del driver
self.__refresh_headers(
cf_clearance=self.cf_clearance,
user_agent=self.user_agent,
)
def __detect_cookies(self, message):
if "params" in message:
if "headers" in message["params"]:
if "set-cookie" in message["params"]["headers"]:
# Use regex to get the cookie for cf_clearance=*;
cf_clearance_cookie = re.search(
"cf_clearance=.*?;",
message["params"]["headers"]["set-cookie"],
)
session_cookie = re.search(
"__Secure-next-auth.session-token=.*?;",
message["params"]["headers"]["set-cookie"],
)
if cf_clearance_cookie and not self.cf_cookie_found:
print("Found Cloudflare Cookie!")
# remove the semicolon and 'cf_clearance=' from the string
raw_cf_cookie = cf_clearance_cookie.group(0)
self.cf_clearance = raw_cf_cookie.split("=")[1][:-1]
if self.verbose:
print(
self.GREEN
+ "Cloudflare Cookie: "
+ self.ENDCOLOR
+ self.cf_clearance,
)
self.cf_cookie_found = True
if session_cookie and not self.session_cookie_found:
print("Found Session Token!")
# remove the semicolon and '__Secure-next-auth.session-token=' from the string
raw_session_cookie = session_cookie.group(0)
self.session_token = raw_session_cookie.split("=")[1][:-1]
self.session.cookies.set(
"__Secure-next-auth.session-token",
self.session_token,
)
if self.verbose:
print(
self.GREEN
+ "Session Token: "
+ self.ENDCOLOR
+ self.session_token,
)
self.session_cookie_found = True
def __detect_user_agent(self, message):
if "params" in message:
if "headers" in message["params"]:
if "user-agent" in message["params"]["headers"]:
                    # Read the user agent from the request headers
user_agent = message["params"]["headers"]["user-agent"]
self.user_agent = user_agent
self.agent_found = True
self.__refresh_headers(
cf_clearance=self.cf_clearance,
user_agent=self.user_agent,
)
def __refresh_headers(self, cf_clearance, user_agent):
del self.session.cookies["cf_clearance"]
self.session.headers.clear()
self.session.cookies.set("cf_clearance", cf_clearance)
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": "Bearer ",
"Content-Type": "application/json",
"User-Agent": user_agent,
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
def rollback_conversation(self, num=1) -> None:
"""
Rollback the conversation.
:param num: The number of messages to rollback
:return: None
"""
for i in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
def get_input(prompt):
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
user_input = "\n".join(lines)
# Return the input
return user_input
from os import getenv
from os.path import exists
def configure():
config_files = ["config.json"]
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
user_home = getenv("HOME")
if user_home:
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
config_file = next((f for f in config_files if exists(f)), None)
if config_file:
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
def chatGPT_main(config):
print("Logging in...")
chatbot = Chatbot(config)
while True:
prompt = get_input("\nYou:\n")
if prompt.startswith("!"):
if prompt == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!refresh - Refresh the session authentication
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
""",
)
continue
elif prompt == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
continue
elif prompt == "!refresh":
                chatbot._Chatbot__refresh_session()  # private method; name-mangled outside the class
print("Session successfully refreshed.\n")
continue
elif prompt == "!config":
print(json.dumps(chatbot.config, indent=4))
continue
elif prompt.startswith("!rollback"):
# Default to 1 rollback if no number is specified
try:
rollback = int(prompt.split(" ")[1])
except IndexError:
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
continue
elif prompt.startswith("!setconversation"):
try:
chatbot.config["conversation"] = prompt.split(" ")[1]
print("Conversation has been changed")
except IndexError:
print("Please include conversation UUID in command")
continue
elif prompt == "!exit":
break
try:
print("Chatbot: ")
message = chatbot.ask(
prompt,
conversation_id=chatbot.config.get("conversation"),
parent_id=chatbot.config.get("parent_id"),
)
print(message["message"])
except Exception as exc:
print("Something went wrong!")
print(exc)
continue
def main():
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
chatGPT_main(configure())
if __name__ == "__main__":
main()
| [
"text",
"\nYou:\n",
"content_type"
] |
2024-01-10 | shivatmax/Research-LLM | lang_help.py | import os
import streamlit as st
import dill
import time
from langchain.llms import GooglePalm
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import UnstructuredURLLoader
from langchain.embeddings import GooglePalmEmbeddings
from langchain.vectorstores import FAISS
st.title("RockyBot: News Research Tool 📈")
st.sidebar.title("News Article URLs")
urls = []
for i in range(3):
url = st.sidebar.text_input(f"URL {i+1}")
urls.append(url)
process_url_clicked = st.sidebar.button("Process URLs")
file_path = "faiss_store_openai.pkl"
main_placeholder = st.empty()
# Read the API key from the environment; never hard-code credentials in source.
google_api_key = os.getenv("GOOGLE_API_KEY")
if google_api_key is None:
    raise ValueError("GOOGLE_API_KEY environment variable is not set")
llm = GooglePalm(google_api_key=google_api_key, temperature=0.8, max_tokens=500)
if process_url_clicked:
# load data
loader = UnstructuredURLLoader(urls=urls)
main_placeholder.text("Data Loading...Started...✅✅✅")
data = loader.load()
# split data
text_splitter = RecursiveCharacterTextSplitter(
separators=['\n\n', '\n', '.', ','],
chunk_size=1000
)
main_placeholder.text("Text Splitter...Started...✅✅✅")
docs = text_splitter.split_documents(data)
# create embeddings and save it to FAISS index
embeddings = GooglePalmEmbeddings(google_api_key=google_api_key)
vectorstore_openai = FAISS.from_documents(docs, embeddings)
main_placeholder.text("Embedding Vector Started Building...✅✅✅")
time.sleep(2)
# Save the FAISS index to a pickle file
with open(file_path, "wb") as f:
dill.dump(vectorstore_openai, f)
query = main_placeholder.text_input("Question: ")
if query:
if os.path.exists(file_path):
with open(file_path, "rb") as f:
vectorstore = dill.load(f)
chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=vectorstore.as_retriever())
result = chain({"question": query}, return_only_outputs=True)
# result will be a dictionary of this format --> {"answer": "", "sources": [] }
st.header("Answer")
st.write(result["answer"])
# Display sources, if available
sources = result.get("sources", "")
if sources:
st.subheader("Sources:")
sources_list = sources.split("\n") # Split the sources by newline
for source in sources_list:
st.write(source)
| [] |
2024-01-10 | Earnest-Testlabs/jupyter-ai | packages~jupyter-ai-magics~jupyter_ai_magics~providers.py | import asyncio
import base64
import copy
import functools
import io
import json
from concurrent.futures import ThreadPoolExecutor
from typing import Any, ClassVar, Coroutine, Dict, List, Literal, Optional, Union
from jsonpath_ng import parse
from langchain.chat_models import (
AzureChatOpenAI,
BedrockChat,
ChatAnthropic,
ChatOpenAI,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms import (
AI21,
Anthropic,
Bedrock,
Cohere,
GPT4All,
HuggingFaceHub,
OpenAI,
OpenAIChat,
SagemakerEndpoint,
)
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.llms.utils import enforce_stop_tokens
from langchain.prompts import PromptTemplate
from langchain.schema import LLMResult
from langchain.utils import get_from_dict_or_env
from pydantic import BaseModel, Extra, root_validator
class EnvAuthStrategy(BaseModel):
"""Require one auth token via an environment variable."""
type: Literal["env"] = "env"
name: str
class MultiEnvAuthStrategy(BaseModel):
"""Require multiple auth tokens via multiple environment variables."""
type: Literal["file"] = "file"
names: List[str]
class AwsAuthStrategy(BaseModel):
"""Require AWS authentication via Boto3"""
type: Literal["aws"] = "aws"
AuthStrategy = Optional[
Union[
EnvAuthStrategy,
MultiEnvAuthStrategy,
AwsAuthStrategy,
]
]
class Field(BaseModel):
key: str
label: str
# "text" accepts any text
format: Literal["json", "jsonpath", "text"]
class TextField(Field):
type: Literal["text"] = "text"
class MultilineTextField(Field):
type: Literal["text-multiline"] = "text-multiline"
class IntegerField(BaseModel):
type: Literal["integer"] = "integer"
key: str
label: str
Field = Union[TextField, MultilineTextField, IntegerField]
class BaseProvider(BaseModel):
#
# pydantic config
#
class Config:
extra = Extra.allow
#
# class attrs
#
id: ClassVar[str] = ...
"""ID for this provider class."""
name: ClassVar[str] = ...
"""User-facing name of this provider."""
models: ClassVar[List[str]] = ...
"""List of supported models by their IDs. For registry providers, this will
be just ["*"]."""
help: ClassVar[str] = None
"""Text to display in lieu of a model list for a registry provider that does
not provide a list of models."""
model_id_key: ClassVar[str] = ...
"""Kwarg expected by the upstream LangChain provider."""
model_id_label: ClassVar[str] = ""
"""Human-readable label of the model ID."""
pypi_package_deps: ClassVar[List[str]] = []
"""List of PyPi package dependencies."""
auth_strategy: ClassVar[AuthStrategy] = None
"""Authentication/authorization strategy. Declares what credentials are
required to use this model provider. Generally should not be `None`."""
registry: ClassVar[bool] = False
"""Whether this provider is a registry provider."""
fields: ClassVar[List[Field]] = []
"""User inputs expected by this provider when initializing it. Each `Field` `f`
should be passed in the constructor as a keyword argument, keyed by `f.key`."""
#
# instance attrs
#
model_id: str
prompt_templates: Dict[str, PromptTemplate]
"""Prompt templates for each output type. Can be overridden with
`update_prompt_template`. The function `prompt_template`, in the base class,
refers to this."""
def __init__(self, *args, **kwargs):
try:
assert kwargs["model_id"]
except:
raise AssertionError(
"model_id was not specified. Please specify it as a keyword argument."
)
model_kwargs = {}
if self.__class__.model_id_key != "model_id":
model_kwargs[self.__class__.model_id_key] = kwargs["model_id"]
model_kwargs["prompt_templates"] = {
"code": PromptTemplate.from_template(
"{prompt}\n\nProduce output as source code only, "
"with no text or explanation before or after it."
),
"html": PromptTemplate.from_template(
"{prompt}\n\nProduce output in HTML format only, "
"with no markup before or afterward."
),
"image": PromptTemplate.from_template(
"{prompt}\n\nProduce output as an image only, "
"with no text before or after it."
),
"markdown": PromptTemplate.from_template(
"{prompt}\n\nProduce output in markdown format only."
),
"md": PromptTemplate.from_template(
"{prompt}\n\nProduce output in markdown format only."
),
"math": PromptTemplate.from_template(
"{prompt}\n\nProduce output in LaTeX format only, "
"with $$ at the beginning and end."
),
"json": PromptTemplate.from_template(
"{prompt}\n\nProduce output in JSON format only, "
"with nothing before or after it."
),
"text": PromptTemplate.from_template("{prompt}"), # No customization
}
super().__init__(*args, **kwargs, **model_kwargs)
async def _call_in_executor(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
"""
Calls self._call() asynchronously in a separate thread for providers
without an async implementation. Requires the event loop to be running.
"""
executor = ThreadPoolExecutor(max_workers=1)
loop = asyncio.get_running_loop()
_call_with_args = functools.partial(self._call, *args, **kwargs)
return await loop.run_in_executor(executor, _call_with_args)
async def _generate_in_executor(
self, *args, **kwargs
) -> Coroutine[Any, Any, LLMResult]:
"""
Calls self._generate() asynchronously in a separate thread for providers
without an async implementation. Requires the event loop to be running.
"""
executor = ThreadPoolExecutor(max_workers=1)
loop = asyncio.get_running_loop()
_call_with_args = functools.partial(self._generate, *args, **kwargs)
return await loop.run_in_executor(executor, _call_with_args)
def update_prompt_template(self, format: str, template: str):
"""
Changes the class-level prompt template for a given format.
"""
self.prompt_templates[format] = PromptTemplate.from_template(template)
def get_prompt_template(self, format) -> PromptTemplate:
"""
Produce a prompt template suitable for use with a particular model, to
produce output in a desired format.
"""
if format in self.prompt_templates:
return self.prompt_templates[format]
else:
return self.prompt_templates["text"] # Default to plain format
@property
def is_chat_provider(self):
return isinstance(self, BaseChatModel)
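# Illustrative addition (not part of the original module): a minimal sketch of how the
# per-format prompt templates above are intended to be used. The provider instance and the
# prompt text are hypothetical.
def _example_format_prompt(provider: "BaseProvider", prompt: str = "Plot a sine wave") -> str:
    template = provider.get_prompt_template("code")
    # PromptTemplate.format substitutes {prompt} in the selected template.
    return template.format(prompt=prompt)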
class AI21Provider(BaseProvider, AI21):
id = "ai21"
name = "AI21"
models = [
"j1-large",
"j1-grande",
"j1-jumbo",
"j1-grande-instruct",
"j2-large",
"j2-grande",
"j2-jumbo",
"j2-grande-instruct",
"j2-jumbo-instruct",
]
model_id_key = "model"
pypi_package_deps = ["ai21"]
auth_strategy = EnvAuthStrategy(name="AI21_API_KEY")
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class AnthropicProvider(BaseProvider, Anthropic):
id = "anthropic"
name = "Anthropic"
models = [
"claude-v1",
"claude-v1.0",
"claude-v1.2",
"claude-2",
"claude-2.0",
"claude-instant-v1",
"claude-instant-v1.0",
"claude-instant-v1.2",
]
model_id_key = "model"
pypi_package_deps = ["anthropic"]
auth_strategy = EnvAuthStrategy(name="ANTHROPIC_API_KEY")
class ChatAnthropicProvider(BaseProvider, ChatAnthropic):
id = "anthropic-chat"
name = "ChatAnthropic"
models = [
"claude-v1",
"claude-v1.0",
"claude-v1.2",
"claude-2",
"claude-2.0",
"claude-instant-v1",
"claude-instant-v1.0",
"claude-instant-v1.2",
]
model_id_key = "model"
pypi_package_deps = ["anthropic"]
auth_strategy = EnvAuthStrategy(name="ANTHROPIC_API_KEY")
class CohereProvider(BaseProvider, Cohere):
id = "cohere"
name = "Cohere"
models = ["medium", "xlarge"]
model_id_key = "model"
pypi_package_deps = ["cohere"]
auth_strategy = EnvAuthStrategy(name="COHERE_API_KEY")
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class GPT4AllProvider(BaseProvider, GPT4All):
def __init__(self, **kwargs):
model = kwargs.get("model_id")
if model == "ggml-gpt4all-l13b-snoozy":
kwargs["backend"] = "llama"
else:
kwargs["backend"] = "gptj"
kwargs["allow_download"] = False
n_threads = kwargs.get("n_threads", None)
if n_threads is not None:
kwargs["n_threads"] = max(int(n_threads), 1)
super().__init__(**kwargs)
id = "gpt4all"
name = "GPT4All"
docs = "https://docs.gpt4all.io/gpt4all_python.html"
models = [
"ggml-gpt4all-j-v1.2-jazzy",
"ggml-gpt4all-j-v1.3-groovy",
# this one needs llama backend and has licence restriction
"ggml-gpt4all-l13b-snoozy",
]
model_id_key = "model"
pypi_package_deps = ["gpt4all"]
auth_strategy = None
fields = [IntegerField(key="n_threads", label="CPU thread count (optional)")]
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
HUGGINGFACE_HUB_VALID_TASKS = (
"text2text-generation",
"text-generation",
"text-to-image",
)
class HfHubProvider(BaseProvider, HuggingFaceHub):
id = "huggingface_hub"
name = "Hugging Face Hub"
models = ["*"]
model_id_key = "repo_id"
help = (
"See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. "
"Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`."
)
# ipywidgets needed to suppress tqdm warning
# https://stackoverflow.com/questions/67998191
# tqdm is a dependency of huggingface_hub
pypi_package_deps = ["huggingface_hub", "ipywidgets"]
auth_strategy = EnvAuthStrategy(name="HUGGINGFACEHUB_API_TOKEN")
registry = True
# Override the parent's validate_environment with a custom list of valid tasks
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in HUGGINGFACE_HUB_VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {HUGGINGFACE_HUB_VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
# Handle image outputs
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to Hugging Face Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string or image generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
response = self.client(inputs=prompt, params=_model_kwargs)
if type(response) is dict and "error" in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
# Custom code for responding to image generation responses
if self.client.task == "text-to-image":
imageFormat = response.format # Presume it's a PIL ImageFile
mimeType = ""
if imageFormat == "JPEG":
mimeType = "image/jpeg"
elif imageFormat == "PNG":
mimeType = "image/png"
elif imageFormat == "GIF":
mimeType = "image/gif"
else:
raise ValueError(f"Unrecognized image format {imageFormat}")
buffer = io.BytesIO()
response.save(buffer, format=imageFormat)
# Encode image data to Base64 bytes, then decode bytes to str
return mimeType + ";base64," + base64.b64encode(buffer.getvalue()).decode()
if self.client.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.client.task == "text2text-generation":
text = response[0]["generated_text"]
else:
raise ValueError(
f"Got invalid task {self.client.task}, "
f"currently only {HUGGINGFACE_HUB_VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class OpenAIProvider(BaseProvider, OpenAI):
id = "openai"
name = "OpenAI"
models = [
"text-davinci-003",
"text-davinci-002",
"text-curie-001",
"text-babbage-001",
"text-ada-001",
"davinci",
"curie",
"babbage",
"ada",
]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
class ChatOpenAIProvider(BaseProvider, OpenAIChat):
id = "openai-chat"
name = "OpenAI"
models = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
def append_exchange(self, prompt: str, output: str):
"""Appends a conversational exchange between user and an OpenAI Chat
model to a transcript that will be included in future exchanges."""
self.prefix_messages.append({"role": "user", "content": prompt})
self.prefix_messages.append({"role": "assistant", "content": output})
# uses the new OpenAIChat provider. temporarily living as a separate class until
# conflicts can be resolved
class ChatOpenAINewProvider(BaseProvider, ChatOpenAI):
id = "openai-chat-new"
name = "OpenAI"
models = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
fields = [
TextField(
key="openai_api_base", label="Base API URL (optional)", format="text"
),
TextField(
key="openai_organization", label="Organization (optional)", format="text"
),
TextField(key="openai_proxy", label="Proxy (optional)", format="text"),
]
class AzureChatOpenAIProvider(BaseProvider, AzureChatOpenAI):
id = "azure-chat-openai"
name = "Azure OpenAI"
models = ["*"]
model_id_key = "deployment_name"
model_id_label = "Deployment name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
registry = True
fields = [
TextField(
key="openai_api_base", label="Base API URL (required)", format="text"
),
TextField(
key="openai_api_version", label="API version (required)", format="text"
),
TextField(
key="openai_organization", label="Organization (optional)", format="text"
),
TextField(key="openai_proxy", label="Proxy (optional)", format="text"),
]
class JsonContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def __init__(self, request_schema, response_path):
self.request_schema = json.loads(request_schema)
self.response_path = response_path
self.response_parser = parse(response_path)
def replace_values(self, old_val, new_val, d: Dict[str, Any]):
"""Replaces values of a dictionary recursively."""
for key, val in d.items():
if val == old_val:
d[key] = new_val
if isinstance(val, dict):
self.replace_values(old_val, new_val, val)
return d
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
request_obj = copy.deepcopy(self.request_schema)
self.replace_values("<prompt>", prompt, request_obj)
request = json.dumps(request_obj).encode("utf-8")
return request
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
matches = self.response_parser.find(response_json)
return matches[0].value
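# Illustrative addition (not part of the original module): a minimal sketch of the kind of
# request schema and JSONPath response path this content handler expects. The schema and the
# path below are hypothetical examples, not a documented endpoint contract.
def _example_json_content_handler() -> JsonContentHandler:
    request_schema = json.dumps({"inputs": "<prompt>", "parameters": {"max_new_tokens": 256}})
    # "<prompt>" is replaced with the user prompt in transform_input; the JSONPath extracts the
    # generated text from a response shaped like [{"generated_text": "..."}].
    return JsonContentHandler(request_schema=request_schema, response_path="$[0].generated_text")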
class SmEndpointProvider(BaseProvider, SagemakerEndpoint):
id = "sagemaker-endpoint"
name = "SageMaker endpoint"
models = ["*"]
model_id_key = "endpoint_name"
model_id_label = "Endpoint name"
# This all needs to be on one line of markdown, for use in a table
help = (
"Specify an endpoint name as the model ID. "
"In addition, you must specify a region name, request schema, and response path. "
"For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) "
"and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints)."
)
pypi_package_deps = ["boto3"]
auth_strategy = AwsAuthStrategy()
registry = True
fields = [
TextField(key="region_name", label="Region name (required)", format="text"),
MultilineTextField(
key="request_schema", label="Request schema (required)", format="json"
),
TextField(
key="response_path", label="Response path (required)", format="jsonpath"
),
]
def __init__(self, *args, **kwargs):
request_schema = kwargs.pop("request_schema")
response_path = kwargs.pop("response_path")
content_handler = JsonContentHandler(
request_schema=request_schema, response_path=response_path
)
super().__init__(*args, **kwargs, content_handler=content_handler)
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class BedrockProvider(BaseProvider, Bedrock):
id = "bedrock"
name = "Amazon Bedrock"
models = [
"amazon.titan-text-express-v1",
"ai21.j2-ultra-v1",
"ai21.j2-mid-v1",
"cohere.command-text-v14",
]
model_id_key = "model_id"
pypi_package_deps = ["boto3"]
auth_strategy = AwsAuthStrategy()
fields = [
TextField(
key="credentials_profile_name",
label="AWS profile (optional)",
format="text",
),
TextField(key="region_name", label="Region name (optional)", format="text"),
]
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class BedrockChatProvider(BaseProvider, BedrockChat):
id = "bedrock-chat"
name = "Amazon Bedrock Chat"
models = [
"anthropic.claude-v1",
"anthropic.claude-v2",
"anthropic.claude-instant-v1",
]
model_id_key = "model_id"
pypi_package_deps = ["boto3"]
auth_strategy = AwsAuthStrategy()
fields = [
TextField(
key="credentials_profile_name",
label="AWS profile (optional)",
format="text",
),
TextField(key="region_name", label="Region name (optional)", format="text"),
]
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
async def _agenerate(self, *args, **kwargs) -> Coroutine[Any, Any, LLMResult]:
return await self._generate_in_executor(*args, **kwargs)
| [
"{prompt}\n\nProduce output in LaTeX format only, with $$ at the beginning and end.",
"{prompt}\n\nProduce output in HTML format only, with no markup before or afterward.",
"{prompt}\n\nProduce output in JSON format only, with nothing before or after it.",
"{prompt}\n\nProduce output as an image only, with no text before or after it.",
"{prompt}\n\nProduce output as source code only, with no text or explanation before or after it.",
"{prompt}",
"{prompt}\n\nProduce output in markdown format only."
] |
2024-01-10 | Srini-98/AI_Applications | ask_excel.py | import os
import streamlit as st
import pandas as pd
from llama_index.query_engine.pandas_query_engine import PandasQueryEngine
from llama_index import LLMPredictor , ServiceContext
from langchain.chat_models import ChatOpenAI
from llama_index.llms import OpenAI
OPENAI_API_KEY=os.environ.get("OPENAI_API_KEY")
st.set_page_config(page_title="Chat with Your Data")
model = "GPT-4"
st.header("Analyze your documents with text queries")
# 508,420,500 investment
@st.cache_data()
def load_docs(file):
file_extension = os.path.splitext(file.name)[1]
    print("file extension:", file_extension)
if file_extension == ".csv":
df = pd.read_csv(file)
elif file_extension == ".xlsx":
df = pd.read_excel(file)
return df
def get_text():
input_text = st.text_input("" , key="input")
return input_text
@st.cache_resource
def get_engine(df , DEFAULT_INSTRUCTION_STR):
    llm = OpenAI(temperature=0, model="gpt-4")
service_context = ServiceContext.from_defaults(llm=llm)
return PandasQueryEngine(df = df , service_context=service_context) #, instruction_str=DEFAULT_INSTRUCTION_STR) #service_context=service_context , instruction_str=DEFAULT_INSTRUCTION_STR)
def main():
uploaded_files = st.file_uploader(label = "Upload your excel file")
if uploaded_files is not None:
df = load_docs(uploaded_files)
        DEFAULT_INSTRUCTION_STR = f"""You should only use columns from the following list to get your answer: {df.columns}.
        You should not make up a column name under any circumstance. If you think a relevant column is not available to answer a query, you must try inferring from the existing columns.
        Use the values shown in the table rows for filtering rows. Do not make up your own values.
        We wish to convert this query to executable Python code using Pandas.
        The final line of code should be a Python expression that can be called with the `eval()` function.
        Think step by step and come up with a logic to get the answer.
        """
query_engine = get_engine(df , DEFAULT_INSTRUCTION_STR)
user_input = get_text()
if query_engine:
if user_input and uploaded_files is not None:
print("user input is" , user_input)
response = query_engine.query(user_input)
st.write(df.head())
st.write("Command to get data")
st.write(response.metadata["pandas_instruction_str"])
st.write("Executed Code")
st.write(response.response)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | Shyam4801/sdrl574 | utils~segment_tree.py | # This script is taken from OpenAI's baselines implementation
# ===================================================================================================================
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
        Parameters
---------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max);
            must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
        prefixsum: float
            upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
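# --- Illustrative usage (editor's sketch, not part of the original baselines file) ---
# A SumSegmentTree supports O(log n) prefix-sum queries, which is what prioritized
# replay sampling relies on. With capacity 4 and leaf values [1, 2, 3, 4]:
#
#   tree = SumSegmentTree(4)
#   for i, v in enumerate([1.0, 2.0, 3.0, 4.0]):
#       tree[i] = v
#   tree.sum()                     # 10.0
#   tree.find_prefixsum_idx(3.5)   # 2, because 1.0 + 2.0 <= 3.5 < 1.0 + 2.0 + 3.0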
| [] |
2024-01-10 | Shyam4801/sdrl574 | utils~replay_buffer.py | # This script is taken from OpenAI's baselines implementation
# ===================================================================================================================
# from hyperparameters import *
from config.hyperparameters import *
import numpy as np
np.random.seed(SEED)
import random
random.seed(SEED)
import time
from utils.segment_tree import SumSegmentTree, MinSegmentTree
class ReplayBuffer(object):
def __init__(self, size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done):
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha > 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super(PrioritizedReplayBuffer,self).add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
# TODO(szymon): should we ensure no repeats?
mass = random.random() * self._it_sum.sum(0, len(self._storage) - 1)
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
            indexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
#print priority
#time.sleep(0.5)
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
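# --- Illustrative usage (editor's sketch, not part of the original file) ---
# Typical interaction with the prioritized buffer: add transitions, sample with an
# importance-sampling exponent beta, then write the absolute TD errors back as priorities.
# `obs`, `action`, `reward`, `next_obs`, `done` and `abs_td_errors` are placeholders for
# values produced by the surrounding training loop.
#
#   buffer = PrioritizedReplayBuffer(size=2 ** 16, alpha=0.6)
#   buffer.add(obs, action, reward, next_obs, float(done))
#   obs_b, act_b, rew_b, next_b, done_b, weights, idxes = buffer.sample(32, beta=0.4)
#   buffer.update_priorities(idxes, abs_td_errors + 1e-6)   # priorities must stay > 0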
| [] |
2024-01-10 | Shyam4801/sdrl574 | utils~schedules.py | # This script is taken from OpenAI's baselines implementation
# ===================================================================================================================
"""This file is used for specifying various schedules that evolve over
time throughout the execution of the algorithm, such as:
- learning rate for the optimizer
- exploration epsilon for the epsilon greedy exploration strategy
- beta parameter for beta parameter in prioritized replay
Each schedule has a function `value(t)` which returns the current value
of the parameter given the timestep t of the optimization procedure.
"""
class Schedule(object):
def value(self, t):
"""Value of the schedule at time t"""
raise NotImplementedError()
class ConstantSchedule(object):
def __init__(self, value):
"""Value remains constant over time.
Parameters
----------
value: float
Constant value of the schedule
"""
self._v = value
def value(self, t):
"""See Schedule.value"""
return self._v
def linear_interpolation(l, r, alpha):
return l + alpha * (r - l)
class PiecewiseSchedule(object):
def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
"""Piecewise schedule.
endpoints: [(int, int)]
            list of pairs `(time, value)` meaning that the schedule should output
`value` when `t==time`. All the values for time must be sorted in
an increasing order. When t is between two times, e.g. `(time_a, value_a)`
and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs
`interpolation(value_a, value_b, alpha)` where alpha is a fraction of
time passed between `time_a` and `time_b` for time `t`.
interpolation: lambda float, float, float: float
a function that takes value to the left and to the right of t according
to the `endpoints`. Alpha is the fraction of distance from left endpoint to
right endpoint that t has covered. See linear_interpolation for example.
outside_value: float
            if the value is requested outside of all the intervals specified in
            `endpoints`, this value is returned. If None, an AssertionError is
            raised when an outside value is requested.
"""
idxes = [e[0] for e in endpoints]
assert idxes == sorted(idxes)
self._interpolation = interpolation
self._outside_value = outside_value
self._endpoints = endpoints
def value(self, t):
"""See Schedule.value"""
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
# t does not belong to any of the pieces, so doom.
assert self._outside_value is not None
return self._outside_value
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
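# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A schedule is queried with the current timestep. For example, annealing an
# exploration epsilon from 1.0 to 0.02 over the first 100000 steps:
#
#   eps_schedule = LinearSchedule(schedule_timesteps=100000, final_p=0.02, initial_p=1.0)
#   eps_schedule.value(0)        # 1.0
#   eps_schedule.value(50000)    # 0.51
#   eps_schedule.value(200000)   # 0.02 (clamped once schedule_timesteps is exceeded)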
| [] |
2024-01-10 | yj-Tang/stable-baselines | stable_baselines~sac~sac_backup.py | import time
import warnings
import numpy as np
import tensorflow as tf
from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action
from stable_baselines.common.schedules import get_schedule_fn
from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.sac.policies import SACPolicy
from stable_baselines import logger
class SAC(OffPolicyRLModel):
"""
Soft Actor-Critic (SAC)
Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
This implementation borrows code from original implementation (https://github.com/haarnoja/sac)
from OpenAI Spinning Up (https://github.com/openai/spinningup) and from the Softlearning repo
(https://github.com/rail-berkeley/softlearning/)
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
:param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount factor
:param learning_rate: (float or callable) learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress (from 1 to 0)
:param buffer_size: (int) size of the replay buffer
:param batch_size: (int) Minibatch size for each gradient update
:param tau: (float) the soft update coefficient ("polyak update", between 0 and 1)
:param ent_coef: (str or float) Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param train_freq: (int) Update the model every `train_freq` steps.
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
:param target_update_interval: (int) update the target network every `target_network_update_freq` steps.
:param gradient_steps: (int) How many gradient update after each step
:param target_entropy: (str or float) target entropy when learning ent_coef (ent_coef = 'auto')
:param action_noise: (ActionNoise) the action noise type (None by default), this can help
for hard exploration problem. Cf DDPG for the different action noise type.
:param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy)
This is not needed for SAC normally but can help exploring when using HER + SAC.
This hack was present in the original OpenAI Baselines repo (DDPG + HER)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
Note: this has no effect on SAC logging for now
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
def __init__(self, policy, env, gamma=0.99, learning_rate=3e-4, buffer_size=50000,
learning_starts=100, train_freq=1, batch_size=64,
tau=0.005, ent_coef='auto', target_update_interval=1,
gradient_steps=1, target_entropy='auto', action_noise=None,
random_exploration=0.0, verbose=0, tensorboard_log=None,
_init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
seed=None, n_cpu_tf_sess=None):
super(SAC, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose,
policy_base=SACPolicy, requires_vec_env=False, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
self.buffer_size = buffer_size
self.learning_rate = learning_rate
self.learning_starts = learning_starts
self.train_freq = train_freq
self.batch_size = batch_size
self.tau = tau
# In the original paper, same learning rate is used for all networks
# self.policy_lr = learning_rate
# self.qf_lr = learning_rate
# self.vf_lr = learning_rate
# Entropy coefficient / Entropy temperature
# Inverse of the reward scale
self.ent_coef = ent_coef
self.target_update_interval = target_update_interval
self.gradient_steps = gradient_steps
self.gamma = gamma
self.action_noise = action_noise
self.random_exploration = random_exploration
self.value_fn = None
self.graph = None
self.replay_buffer = None
self.sess = None
self.tensorboard_log = tensorboard_log
self.verbose = verbose
self.params = None
self.summary = None
self.policy_tf = None
self.target_entropy = target_entropy
self.full_tensorboard_log = full_tensorboard_log
self.obs_target = None
self.target_policy = None
self.actions_ph = None
self.rewards_ph = None
self.terminals_ph = None
self.observations_ph = None
self.action_target = None
self.next_observations_ph = None
self.value_target = None
self.step_ops = None
self.target_update_op = None
self.infos_names = None
self.entropy = None
self.target_params = None
self.learning_rate_ph = None
self.processed_obs_ph = None
self.processed_next_obs_ph = None
self.log_ent_coef = None
if _init_setup_model:
self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.policy_tf
# Rescale
deterministic_action = unscale_action(self.action_space, self.deterministic_action)
return policy.obs_ph, self.actions_ph, deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
self.replay_buffer = ReplayBuffer(self.buffer_size)
with tf.variable_scope("input", reuse=False):
# Create policy and target TF objects
self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
self.target_policy = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
# Initialize Placeholders
self.observations_ph = self.policy_tf.obs_ph
# Normalized observation for pixels
self.processed_obs_ph = self.policy_tf.processed_obs
self.next_observations_ph = self.target_policy.obs_ph
self.processed_next_obs_ph = self.target_policy.processed_obs
self.action_target = self.target_policy.action_ph
self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')
self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape,
name='actions')
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
with tf.variable_scope("model", reuse=False):
# Create the policy
# first return value corresponds to deterministic actions
# policy_out corresponds to stochastic actions, used for training
# logp_pi is the log probability of actions taken by the policy
self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph)
# Monitor the entropy of the policy,
# this is not used for training
self.entropy = tf.reduce_mean(self.policy_tf.entropy)
# Use two Q-functions to improve performance by reducing overestimation bias.
qf1, qf2, value_fn = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph,
create_qf=True, create_vf=True)
qf1_pi, qf2_pi, _ = self.policy_tf.make_critics(self.processed_obs_ph,
policy_out, create_qf=True, create_vf=False,
reuse=True)
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == 'auto':
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.action_space.shape).astype(np.float32)
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'):
# Default initial value of ent_coef when learned
init_value = 1.0
if '_' in self.ent_coef:
init_value = float(self.ent_coef.split('_')[1])
assert init_value > 0., "The initial value of ent_coef must be greater than 0"
self.log_ent_coef = tf.get_variable('log_ent_coef', dtype=tf.float32,
initializer=np.log(init_value).astype(np.float32))
self.ent_coef = tf.exp(self.log_ent_coef)
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef = float(self.ent_coef)
with tf.variable_scope("target", reuse=False):
# Create the value network
_, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph,
create_qf=False, create_vf=True)
self.value_target = value_target
with tf.variable_scope("loss", reuse=False):
# Take the min of the two Q-Values (Double-Q Learning)
min_qf_pi = tf.minimum(qf1_pi, qf2_pi)
# Target for Q value regression
q_backup = tf.stop_gradient(
self.rewards_ph +
(1 - self.terminals_ph) * self.gamma * self.value_target
)
# Compute Q-Function loss
# TODO: test with huber loss (it would avoid too high values)
qf1_loss = 0.5 * tf.reduce_mean((q_backup - qf1) ** 2)
qf2_loss = 0.5 * tf.reduce_mean((q_backup - qf2) ** 2)
# Compute the entropy temperature loss
# it is used when the entropy coefficient is learned
ent_coef_loss, entropy_optimizer = None, None
if not isinstance(self.ent_coef, float):
ent_coef_loss = -tf.reduce_mean(
self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy))
entropy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
# Compute the policy loss
# Alternative: policy_kl_loss = tf.reduce_mean(logp_pi - min_qf_pi)
policy_kl_loss = tf.reduce_mean(self.ent_coef * logp_pi - qf1_pi)
# NOTE: in the original implementation, they have an additional
# regularization loss for the Gaussian parameters
# this is not used for now
# policy_loss = (policy_kl_loss + policy_regularization_loss)
policy_loss = policy_kl_loss
# Target for value fn regression
# We update the vf towards the min of two Q-functions in order to
# reduce overestimation bias from function approximation error.
v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi)
value_loss = 0.5 * tf.reduce_mean((value_fn - v_backup) ** 2)
values_losses = qf1_loss + qf2_loss + value_loss
# Policy train op
# (has to be separate from value train op, because min_qf_pi appears in policy_loss)
policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
policy_train_op = policy_optimizer.minimize(policy_loss, var_list=tf_util.get_trainable_vars('model/pi'))
# Value train op
value_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
values_params = tf_util.get_trainable_vars('model/values_fn')
source_params = tf_util.get_trainable_vars("model/values_fn")
target_params = tf_util.get_trainable_vars("target/values_fn")
# Polyak averaging for target variables
self.target_update_op = [
tf.assign(target, (1 - self.tau) * target + self.tau * source)
for target, source in zip(target_params, source_params)
]
# Initializing target to match source variables
target_init_op = [
tf.assign(target, source)
for target, source in zip(target_params, source_params)
]
# Control flow is used because sess.run otherwise evaluates in nondeterministic order
# and we first need to compute the policy action before computing q values losses
with tf.control_dependencies([policy_train_op]):
train_values_op = value_optimizer.minimize(values_losses, var_list=values_params)
self.infos_names = ['policy_loss', 'qf1_loss', 'qf2_loss', 'value_loss', 'entropy']
# All ops to call during one training step
self.step_ops = [policy_loss, qf1_loss, qf2_loss,
value_loss, qf1, qf2, value_fn, logp_pi,
self.entropy, policy_train_op, train_values_op]
# Add entropy coefficient optimization operation if needed
if ent_coef_loss is not None:
with tf.control_dependencies([train_values_op]):
ent_coef_op = entropy_optimizer.minimize(ent_coef_loss, var_list=self.log_ent_coef)
self.infos_names += ['ent_coef_loss', 'ent_coef']
self.step_ops += [ent_coef_op, ent_coef_loss, self.ent_coef]
# Monitor losses and entropy in tensorboard
tf.summary.scalar('policy_loss', policy_loss)
tf.summary.scalar('qf1_loss', qf1_loss)
tf.summary.scalar('qf2_loss', qf2_loss)
tf.summary.scalar('value_loss', value_loss)
tf.summary.scalar('entropy', self.entropy)
if ent_coef_loss is not None:
tf.summary.scalar('ent_coef_loss', ent_coef_loss)
tf.summary.scalar('ent_coef', self.ent_coef)
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
# Retrieve parameters that must be saved
self.params = tf_util.get_trainable_vars("model")
self.target_params = tf_util.get_trainable_vars("target/values_fn")
# Initialize Variables and target network
with self.sess.as_default():
self.sess.run(tf.global_variables_initializer())
self.sess.run(target_init_op)
self.summary = tf.summary.merge_all()
def _train_step(self, step, writer, learning_rate):
# Sample a batch from the replay buffer
batch = self.replay_buffer.sample(self.batch_size, env=self._vec_normalize_env)
batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
feed_dict = {
self.observations_ph: batch_obs,
self.actions_ph: batch_actions,
self.next_observations_ph: batch_next_obs,
self.rewards_ph: batch_rewards.reshape(self.batch_size, -1),
self.terminals_ph: batch_dones.reshape(self.batch_size, -1),
self.learning_rate_ph: learning_rate
}
# out = [policy_loss, qf1_loss, qf2_loss,
# value_loss, qf1, qf2, value_fn, logp_pi,
# self.entropy, policy_train_op, train_values_op]
# Do one gradient step
# and optionally compute log for tensorboard
if writer is not None:
out = self.sess.run([self.summary] + self.step_ops, feed_dict)
summary = out.pop(0)
writer.add_summary(summary, step)
else:
out = self.sess.run(self.step_ops, feed_dict)
# Unpack to monitor losses and entropy
policy_loss, qf1_loss, qf2_loss, value_loss, *values = out
# qf1, qf2, value_fn, logp_pi, entropy, *_ = values
entropy = values[4]
if self.log_ent_coef is not None:
ent_coef_loss, ent_coef = values[-2:]
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy, ent_coef_loss, ent_coef
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy
def learn(self, total_timesteps, callback=None,
log_interval=4, tb_log_name="SAC", reset_num_timesteps=True, replay_wrapper=None):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
if replay_wrapper is not None:
self.replay_buffer = replay_wrapper(self.replay_buffer)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
# Initial learning rate
current_lr = self.learning_rate(1)
start_time = time.time()
episode_rewards = [0.0]
episode_successes = []
if self.action_noise is not None:
self.action_noise.reset()
obs = self.env.reset()
# Retrieve unnormalized observation for saving into the buffer
if self._vec_normalize_env is not None:
obs_ = self._vec_normalize_env.get_original_obs().squeeze()
n_updates = 0
infos_values = []
callback.on_training_start(locals(), globals())
callback.on_rollout_start()
for step in range(total_timesteps):
# Before training starts, randomly sample actions
# from a uniform distribution for better exploration.
# Afterwards, use the learned policy
# if random_exploration is set to 0 (normal setting)
if self.num_timesteps < self.learning_starts or np.random.rand() < self.random_exploration:
# actions sampled from action space are from range specific to the environment
# but algorithm operates on tanh-squashed actions therefore simple scaling is used
unscaled_action = self.env.action_space.sample()
action = scale_action(self.action_space, unscaled_action)
else:
action = self.policy_tf.step(obs[None], deterministic=False).flatten()
# Add noise to the action (improve exploration,
# not needed in general)
if self.action_noise is not None:
action = np.clip(action + self.action_noise(), -1, 1)
# inferred actions need to be transformed to environment action_space before stepping
unscaled_action = unscale_action(self.action_space, action)
assert action.shape == self.env.action_space.shape
new_obs, reward, done, info = self.env.step(unscaled_action)
self.num_timesteps += 1
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
callback.update_locals(locals())
if callback.on_step() is False:
break
# Store only the unnormalized version
if self._vec_normalize_env is not None:
new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
reward_ = self._vec_normalize_env.get_original_reward().squeeze()
else:
# Avoid changing the original ones
obs_, new_obs_, reward_ = obs, new_obs, reward
# Store transition in the replay buffer.
self.replay_buffer_add(obs_, action, reward_, new_obs_, done, info)
obs = new_obs
# Save the unnormalized observation
if self._vec_normalize_env is not None:
obs_ = new_obs_
# Retrieve reward and episode length if using Monitor wrapper
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
self.ep_info_buf.extend([maybe_ep_info])
if writer is not None:
# Write reward per episode to tensorboard
ep_reward = np.array([reward_]).reshape((1, -1))
ep_done = np.array([done]).reshape((1, -1))
tf_util.total_episode_reward_logger(self.episode_reward, ep_reward,
ep_done, writer, self.num_timesteps)
if self.num_timesteps % self.train_freq == 0:
callback.on_rollout_end()
mb_infos_vals = []
# Update policy, critics and target networks
for grad_step in range(self.gradient_steps):
# Break if the warmup phase is not over
# or if there are not enough samples in the replay buffer
if not self.replay_buffer.can_sample(self.batch_size) \
or self.num_timesteps < self.learning_starts:
break
n_updates += 1
# Compute current learning_rate
frac = 1.0 - step / total_timesteps
current_lr = self.learning_rate(frac)
# Update policy and critics (q functions)
mb_infos_vals.append(self._train_step(step, writer, current_lr))
# Update target network
if (step + grad_step) % self.target_update_interval == 0:
# Update target network
self.sess.run(self.target_update_op)
# Log losses and entropy, useful for monitor training
if len(mb_infos_vals) > 0:
infos_values = np.mean(mb_infos_vals, axis=0)
callback.on_rollout_start()
episode_rewards[-1] += reward_
if done:
if self.action_noise is not None:
self.action_noise.reset()
if not isinstance(self.env, VecEnv):
obs = self.env.reset()
episode_rewards.append(0.0)
maybe_is_success = info.get('is_success')
if maybe_is_success is not None:
episode_successes.append(float(maybe_is_success))
if len(episode_rewards[-101:-1]) == 0:
mean_reward = -np.inf
else:
mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)
                # subtract 1 as we appended a new term just now
num_episodes = len(episode_rewards) - 1
# Display training infos
if self.verbose >= 1 and done and log_interval is not None and num_episodes % log_interval == 0:
fps = int(step / (time.time() - start_time))
logger.logkv("episodes", num_episodes)
logger.logkv("mean 100 episode reward", mean_reward)
if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))
logger.logkv("n_updates", n_updates)
logger.logkv("current_lr", current_lr)
logger.logkv("fps", fps)
logger.logkv('time_elapsed', int(time.time() - start_time))
if len(episode_successes) > 0:
logger.logkv("success rate", np.mean(episode_successes[-100:]))
if len(infos_values) > 0:
for (name, val) in zip(self.infos_names, infos_values):
logger.logkv(name, val)
logger.logkv("total timesteps", self.num_timesteps)
logger.dumpkvs()
# Reset infos:
infos_values = []
callback.on_training_end()
return self
def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):
if actions is not None:
raise ValueError("Error: SAC does not have action probabilities.")
        warnings.warn("Even though SAC has a Gaussian policy, it cannot return a distribution as it "
                      "is squashed by a tanh before being scaled and output.")
return None
def predict(self, observation, state=None, mask=None, deterministic=True):
observation = np.array(observation)
vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
observation = observation.reshape((-1,) + self.observation_space.shape)
actions = self.policy_tf.step(observation, deterministic=deterministic)
actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape
actions = unscale_action(self.action_space, actions) # scale the output for the prediction
if not vectorized_env:
actions = actions[0]
return actions, None
def get_parameter_list(self):
return (self.params +
self.target_params)
def save(self, save_path, cloudpickle=False):
data = {
"learning_rate": self.learning_rate,
"buffer_size": self.buffer_size,
"learning_starts": self.learning_starts,
"train_freq": self.train_freq,
"batch_size": self.batch_size,
"tau": self.tau,
"ent_coef": self.ent_coef if isinstance(self.ent_coef, float) else 'auto',
"target_entropy": self.target_entropy,
# Should we also store the replay buffer?
# this may lead to high memory usage
# with all transition inside
# "replay_buffer": self.replay_buffer
"gamma": self.gamma,
"verbose": self.verbose,
"observation_space": self.observation_space,
"action_space": self.action_space,
"policy": self.policy,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"action_noise": self.action_noise,
"random_exploration": self.random_exploration,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
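# --- Illustrative usage (editor's sketch, not part of this backup module) ---
# The class is normally driven through the standard stable-baselines entry points,
# roughly as follows; the environment name is only an example:
#
#   import gym
#   from stable_baselines.sac.policies import MlpPolicy
#
#   env = gym.make("Pendulum-v0")
#   model = SAC(MlpPolicy, env, verbose=1)
#   model.learn(total_timesteps=10000, log_interval=10)
#   model.save("sac_pendulum")
#   action, _ = model.predict(env.reset(), deterministic=True)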
| [] |
2024-01-10 | joeyc408/Data-Search | json%E6%95%B0%E6%8D%AE%E6%9F%A5%E8%AF%A2demo.py | import openai
import sqlite3
import re
import streamlit as st
openai.api_key = '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'
url = 'https://api.openai.com/v1/chat/completions'
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {openai.api_key}"
}
def get_completion(messages, model='gpt-4-1106-preview'):
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0.0,
)
return response.choices[0].message["content"]
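# Editor's note (added comment, not part of the original script): the list below is
# sample reimbursement data with Chinese field names: 报销单号 = claim number,
# 报销说明 = claim description, 票据 = receipts, 票据号 = receipt number,
# 票据类型 = receipt type, 日期 = date, 金额 = amount, 备注 = notes.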
expense_account = [
{
'报销单号':1,
'报销说明':'在北京的会议费',
'票据':[
{
'票据号': 1,
'票据类型':'飞机票',
'日期':'2023年3月1日',
'金额':2000,
'备注':'上海到北京的往返机票'
},
{
'票据号': 2,
'票据类型':'增值税发票',
'日期':'2023年3月3日',
'金额':1500,
'备注':'在北京住宿费 3月1日至3月3日'
},
{
'票据号': 3,
'票据类型':'增值税发票',
'日期':'2023年3月1日',
'金额':150,
'备注':'一日三餐费'
},
]
},
{
'报销单号':2,
'报销说明':'在北京的招待费',
'票据':[
{
'票据号': 1,
'票据类型':'增值税发票',
'日期':'2023年3月1日',
'金额':1000,
'备注':'招待用白酒一瓶'
},
{
'票据号': 2,
'票据类型':'增值税发票',
'日期':'2023年3月3日',
'金额':1500,
'备注':'招待8人总费用'
},
{
'票据号': 3,
'票据类型':'招待说明',
'备注':'共招待八人'
},
]
},
{
'报销单号': 3,
'报销说明':'去北京出差花费',
'票据':[
{
'票据号': 1,
'票据类型': '增值税发票',
'日期': '2023年3月1日',
'金额':4000,
'备注': '手机'
},
{
'票据号': 2,
'票据类型': '飞机票',
'日期': '2023年3月2日',
'金额': 2000,
'备注': '北京到上海飞机票'
},
{
'票据号': 3,
'票据类型':'增值税发票',
'日期':' 2023年3月3日',
'金额': 1000,
'备注': '上海3月1日到3月3日住宿费'
},
{
'票据号': 4,
'票据类型':'会议通知',
'备注':'2023年3月2日到3月5日,在北京召开人工智能大会'
}
]
},
{
'报销单号': 4,
'报销说明':'在上海招待费',
'票据':[
{
'票据号': 1,
'票据类型': '增值税发票',
'日期': '2023年3月1日',
'金额':4000,
'备注': '红酒两瓶'
},
{
'票据号': 2,
'票据类型': '飞机票',
'日期': '2023年3月2日',
'金额': 2000,
'备注': '招待4位外宾总费用'
},
{
'票据号': 3,
'票据类型':'增值税发票',
'日期':' 2023年3月3日',
'金额': 1000,
'备注': 'KTV费用'
}
]
},
{
'报销单号': 5,
'报销说明':'办公用品费',
'票据':[
{
'票据号': 1,
'票据类型': '增值税发票',
'日期': '2023年3月1日',
'金额':4000,
'备注': '打印机一台'
},
{
'票据号': 2,
'票据类型': '增值税发票',
'日期': '2023年3月2日',
'金额': 2000,
'备注': '显示屏一台'
},
{
'票据号': 3,
'票据类型':'增值税发票',
'日期':' 2023年3月3日',
'金额': 500,
'备注': '零食'
},
{
'票据号': 4,
'票据类型':'增值税发票',
'日期':' 2023年3月3日',
'金额': 500,
'备注': '游戏机一台'
}
]
},
{
'报销单号': 6,
'报销说明':'去长沙差旅费',
'票据':[
{
'票据号': 1,
'票据类型': '增值税发票',
'日期': '2023年3月1日',
'金额':1000,
'备注': '长沙住宿费 两晚'
},
{
'票据号': 2,
'票据类型': '增值税发票',
'日期': '2023年3月2日',
'金额': 100,
'备注': '洗衣费'
},
{
'票据号': 3,
'票据类型':'飞机票',
'日期':' 2023年3月3日',
'金额': 800,
'备注': '上海到长沙飞机票 单程'
},
{
'票据号': 4,
'票据类型':'报销说明',
'备注':'仅有单程费用'
}
]
},
]
st.title("数据查询助手")
query = st.text_input("你的问题是:")
conn = st.connection('accounts_db', type='sql')
with conn.session as s:
if st.button("发送") and query:
messages = [
{'role': 'system', 'content': '''你是一位能力很强的数据库查询员,你的任务是根据用户的问题返回mysql数据库查询语句。
数据库的格式如下:r_id对应了报销单号,description对应了报销说明,receipt_id对应了票据号,receipt_type对应了票据类型,date对应了日期,amount对应了金额,notes对应了备注。
当你需要确定询问的具体内容时,只需要在notes里出现过该内容即可,比如我想查询有关于白酒的报销单,只需要在notes里出现过白酒一次即可。
我的表格名字为reimbursement_receipts,在输出结果,请你只输出代码,不要包含其他任何内容。'''},
{'role': 'user', 'content': f'问题如下:{query}'}
]
response = get_completion(messages)
pattern = r"```sql\n(.+);\n```"
match = re.search(pattern, response, re.DOTALL)
sql_command = match.group(1)
data = conn.query(sql_command)
messages = [
{'role': 'system', 'content': '''你是一位能力很强的客服,你的任务是根据用户的问题和输入的数据进行回答。
输入内容会是一些数据,代表对于用户的问题的回答,请你自己理解该数据的含义,然后根据用户的问题进行回答。'''},
{'role': 'user', 'content': f'用户问题如下:{query}'},
{'role': 'user', 'content': f'输入数据如下:{data}'}
]
response = get_completion(messages)
st.write(response)
| [
"你是一位能力很强的数据库查询员,你的任务是根据用户的问题返回mysql数据库查询语句。\n 数据库的格式如下:r_id对应了报销单号,description对应了报销说明,receipt_id对应了票据号,receipt_type对应了票据类型,date对应了日期,amount对应了金额,notes对应了备注。\n 当你需要确定询问的具体内容时,只需要在notes里出现过该内容即可,比如我想查询有关于白酒的报销单,只需要在notes里出现过白酒一次即可。\n 我的表格名字为reimbursement_receipts,在输出结果,请你只输出代码,不要包含其他任何内容。",
"问题如下:PLACEHOLDER",
"输入数据如下:PLACEHOLDER",
"你是一位能力很强的客服,你的任务是根据用户的问题和输入的数据进行回答。\n 输入内容会是一些数据,代表对于用户的问题的回答,请你自己理解该数据的含义,然后根据用户的问题进行回答。",
"用户问题如下:PLACEHOLDER"
] |
2024-01-10 | mark3labs/langflow | src~backend~langflow~template~frontend_node~memories.py | from typing import Optional
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.base import FrontendNode
from langflow.template.frontend_node.constants import INPUT_KEY_INFO, OUTPUT_KEY_INFO
from langflow.template.template.base import Template
from langchain.memory.chat_message_histories.postgres import DEFAULT_CONNECTION_STRING
from langchain.memory.chat_message_histories.mongodb import (
DEFAULT_COLLECTION_NAME,
DEFAULT_DBNAME,
)
class MemoryFrontendNode(FrontendNode):
#! Needs testing
def add_extra_fields(self) -> None:
# chat history should have another way to add common field?
        # prevent adding an incorrect field in ChatMessageHistory
base_message_classes = ["BaseEntityStore", "BaseChatMessageHistory"]
if any(base_class in self.base_classes for base_class in base_message_classes):
return
# add return_messages field
self.template.add_field(
TemplateField(
field_type="bool",
required=False,
show=True,
name="return_messages",
advanced=False,
value=False,
)
)
# add input_key and output_key str fields
self.template.add_field(
TemplateField(
field_type="str",
required=False,
show=True,
name="input_key",
advanced=True,
value="",
)
)
if self.template.type_name not in {"VectorStoreRetrieverMemory"}:
self.template.add_field(
TemplateField(
field_type="str",
required=False,
show=True,
name="output_key",
advanced=True,
value="",
)
)
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
FrontendNode.format_field(field, name)
if not isinstance(field.value, str):
field.value = None
if field.name == "k":
field.required = True
field.show = True
field.field_type = "int"
field.value = 10
field.display_name = "Memory Size"
field.password = False
if field.name == "return_messages":
field.required = False
field.show = True
field.advanced = False
if field.name in {"input_key", "output_key"}:
field.required = False
field.show = True
field.advanced = False
field.value = ""
field.info = (
INPUT_KEY_INFO if field.name == "input_key" else OUTPUT_KEY_INFO
)
if field.name == "memory_key":
field.value = "chat_history"
if field.name == "chat_memory":
field.show = True
field.advanced = False
field.required = False
if field.name == "url":
field.show = True
if field.name == "entity_store":
field.show = False
if name == "ConversationEntityMemory" and field.name == "memory_key":
field.show = False
field.required = False
class PostgresChatMessageHistoryFrontendNode(MemoryFrontendNode):
name: str = "PostgresChatMessageHistory"
template: Template = Template(
type_name="PostgresChatMessageHistory",
fields=[
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=False,
name="session_id",
),
TemplateField(
field_type="str",
required=True,
show=True,
name="connection_string",
value=DEFAULT_CONNECTION_STRING,
),
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=False,
value="message_store",
name="table_name",
),
],
)
description: str = "Memory store with Postgres"
base_classes: list[str] = ["PostgresChatMessageHistory", "BaseChatMessageHistory"]
class MongoDBChatMessageHistoryFrontendNode(MemoryFrontendNode):
name: str = "MongoDBChatMessageHistory"
template: Template = Template(
# langchain/memory/chat_message_histories/mongodb.py
# connection_string: str,
# session_id: str,
# database_name: str = DEFAULT_DBNAME,
# collection_name: str = DEFAULT_COLLECTION_NAME,
type_name="MongoDBChatMessageHistory",
fields=[
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=False,
name="session_id",
),
TemplateField(
field_type="str",
required=True,
show=True,
name="connection_string",
value="",
info="MongoDB connection string (e.g mongodb://mongo_user:password123@mongo:27017)",
),
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=False,
value=DEFAULT_DBNAME,
name="database_name",
),
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=False,
value=DEFAULT_COLLECTION_NAME,
name="collection_name",
),
],
)
description: str = "Memory store with MongoDB"
base_classes: list[str] = ["MongoDBChatMessageHistory", "BaseChatMessageHistory"]
| [
"MongoDBChatMessageHistory",
"table_name",
"connection_string",
"message_store",
"database_name",
"collection_name",
"MongoDB connection string (e.g mongodb://mongo_user:password123@mongo:27017)",
"session_id",
"PostgresChatMessageHistory"
] |
2024-01-10 | darrenmetoyer/Voice-Assistant | Voice-Assistant.py | import openai
import pyttsx3
import speech_recognition as sr
import time
openai.api_key = ""
engine = pyttsx3.init()
wakeWord = "Genius"
assistantName = "Jarvis"
startupPhrase = "I am at your service, sir"
def transcribeAudioToText(filename):
recognizer = sr.Recognizer()
with sr.AudioFile(filename) as source:
audio = recognizer.record(source)
try:
return recognizer.recognize_google(audio)
except:
print("Skipping unkown error")
def generateResponse(prompt):
response = openai.Completion.create(
engine = "text-davinci-003",
prompt = prompt,
max_tokens = 4000,
n = 1,
stop = None,
temperature = 0.5,
)
return response["choices"][0]["text"]
def speakText(text):
engine.say(text)
engine.runAndWait()
def main():
while True:
print("Say " + wakeWord + " to wake up " + assistantName)
with sr.Microphone() as source:
recognizer = sr.Recognizer()
audio = recognizer.listen(source)
try:
transcription = recognizer.recognize_google(audio)
                if transcription.lower() == wakeWord.lower():
filename = "input.wav"
print(startupPhrase)
with sr.Microphone() as source:
recognizer = sr.Recognizer()
source.pause_threshold = 1
audio = recognizer.listen(source, phrase_time_limit = None, timeout = None)
with open(filename, "wb") as f:
f.write(audio.get_wav_data())
                    text = transcribeAudioToText(filename)
if text:
response = generateResponse(text)
print(response)
speakText(response)
except Exception as e:
print("An error occured: {}".format(e))
if __name__ == "__main__":
main()
| [] |
2024-01-10 | AAgha66/ail-trust-regions | models~trpo.py | import torch
import torch.optim as optim
from utils.projection_utils import compute_metrics, gaussian_kl
from models.model import Policy
from models.distributions import FixedNormal
class TRPO():
def __init__(self,
actor_critic,
vf_epoch,
lr_value,
eps,
action_space,
obs_space,
num_steps=4096,
max_kl=0.01,
cg_damping=1e-3,
cg_max_iters=10,
line_search_coef=0.9,
line_search_max_iter=10,
line_search_accept_ratio=0.1,
mini_batch_size=64):
self.actor_critic = actor_critic
self.vf_epoch = vf_epoch
self.mini_batch_size = mini_batch_size
self.num_steps = num_steps
self.policy_params = list(actor_critic.base.actor.parameters()) + list(actor_critic.dist.parameters())
self.vf_params = list(actor_critic.base.critic.parameters())
self.optimizer_vf = optim.Adam(self.vf_params, lr=lr_value, eps=eps)
self.global_steps = 0
self.max_kl = max_kl
self.cg_damping = cg_damping
self.cg_max_iters = cg_max_iters
self.line_search_coef = line_search_coef
self.line_search_max_iter = line_search_max_iter
self.line_search_accept_ratio = line_search_accept_ratio
self.action_space = action_space
self.obs_space = obs_space
def flat_grad(self, grads):
grad_flatten = []
for grad in grads:
grad_flatten.append(grad.view(-1))
grad_flatten = torch.cat(grad_flatten)
return grad_flatten
def flat_hessian(self, hessians):
hessians_flatten = []
for hessian in hessians:
hessians_flatten.append(hessian.contiguous().view(-1))
hessians_flatten = torch.cat(hessians_flatten).data
return hessians_flatten
def flat_params(self, model_params):
params = []
for param in model_params:
params.append(param.data.view(-1))
params_flatten = torch.cat(params)
return params_flatten
def update_model(self, model_params, new_params):
index = 0
for params in model_params:
params_length = len(params.view(-1))
new_param = new_params[index: index + params_length]
new_param = new_param.view(params.size())
params.data.copy_(new_param)
index += params_length
def train_critic(self, advantages, rollouts):
value_loss_epoch = 0
for e in range(self.vf_epoch):
data_generator = rollouts.feed_forward_generator(
advantages, mini_batch_size=self.mini_batch_size)
for sample in data_generator:
obs_batch, _, value_preds_batch, return_batch, _, _, _, _, _ = sample
# Reshape to do in a single forward pass for all steps
values, _ = self.actor_critic.evaluate_actions(obs_batch)
value_loss = 0.5 * (return_batch - values).pow(2).mean()
self.optimizer_vf.zero_grad()
value_loss.backward()
self.optimizer_vf.step()
value_loss_epoch += value_loss.item()
return value_loss_epoch
def fisher_vector_product(self, obs_batch, p):
p.detach()
_, dist = self.actor_critic.evaluate_actions(obs_batch)
detached_dist = FixedNormal(dist.mean.detach(), dist.stddev.detach())
mean_kl, cov_kl = gaussian_kl(dist, detached_dist)
kl = mean_kl + cov_kl
kl = kl.mean()
kl_grad = torch.autograd.grad(kl, self.policy_params, create_graph=True)
kl_grad = self.flat_grad(kl_grad) # check kl_grad == 0
kl_grad_p = (kl_grad * p).sum()
kl_hessian_p = torch.autograd.grad(kl_grad_p, self.policy_params)
kl_hessian_p = self.flat_hessian(kl_hessian_p)
return kl_hessian_p + self.cg_damping * p
# from openai baseline code
# https://github.com/openai/baselines/blob/master/baselines/common/cg.py
def conjugate_gradient(self, states, b, nsteps, residual_tol=1e-10):
x = torch.zeros(b.size())
r = b.clone()
p = b.clone()
rdotr = torch.dot(r, r)
for i in range(nsteps):
_Avp = self.fisher_vector_product(states, p)
alpha = rdotr / torch.dot(p, _Avp)
x += alpha * p
r -= alpha * _Avp
new_rdotr = torch.dot(r, r)
betta = new_rdotr / rdotr
p = r + betta * p
rdotr = new_rdotr
if rdotr < residual_tol:
break
return x
def update(self, rollouts, j, use_disc_as_adv):
self.global_steps = j
# ----------------------------
# step 1: get returns and GAEs
advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
advantages = (advantages - advantages.mean()) / (
advantages.std() + 1e-5)
# ----------------------------
# step 2: train critic several steps with respect to returns
value_loss_epoch = self.train_critic(advantages=advantages, rollouts=rollouts)
num_updates_value = self.vf_epoch * (self.num_steps / self.mini_batch_size)
# ----------------------------
# step 3: get gradient of loss and hessian of kl
data_generator_policy = rollouts.feed_forward_generator(
advantages, mini_batch_size=self.num_steps)
metrics = None
for batch in data_generator_policy:
obs_batch, actions_batch, value_preds_batch, return_batch, _, _, adv_targ, _, _ = batch
_, dist = self.actor_critic.evaluate_actions(obs_batch)
action_log_probs = dist.log_probs(actions_batch)
ratio = torch.exp(action_log_probs -
action_log_probs.detach())
loss = (ratio * adv_targ).mean()
loss_grad = torch.autograd.grad(loss, self.policy_params)
loss_grad = self.flat_grad(loss_grad)
step_dir = self.conjugate_gradient(obs_batch, loss_grad.data, nsteps=self.cg_max_iters)
loss = loss.data.numpy()
# ----------------------------
# step 4: get step direction and step size and full step
params = self.flat_params(self.policy_params)
shs = 0.5 * (step_dir * self.fisher_vector_product(obs_batch, step_dir)
).sum(0, keepdim=True)
step_size = 1 / torch.sqrt(shs / self.max_kl)[0]
full_step = step_size * step_dir
# ----------------------------
# step 5: do backtracking line search for n times
# old_actor = Actor(actor.num_inputs, actor.num_outputs)
old_actor_critic = Policy(
self.obs_space.shape,
self.action_space)
old_policy_params = list(old_actor_critic.base.actor.parameters()) + \
list(old_actor_critic.dist.parameters())
self.update_model(old_policy_params, params)
expected_improve = (loss_grad * full_step).sum(0, keepdim=True)
expected_improve = expected_improve.data.numpy()
flag = False
fraction = 1.0
detached_old_dist = None
new_loss = None
new_dist = None
for i in range(self.line_search_max_iter):
new_params = params + fraction * full_step
self.update_model(self.policy_params, new_params)
_, new_dist = self.actor_critic.evaluate_actions(obs_batch)
new_action_log_probs = new_dist.log_probs(actions_batch)
ratio = torch.exp(new_action_log_probs -
action_log_probs.detach())
new_loss = (ratio * adv_targ).mean()
new_loss = new_loss.data.numpy()
loss_improve = new_loss - loss
expected_improve *= fraction
detached_old_dist = FixedNormal(dist.mean.detach(), dist.stddev.detach())
mean_kl, cov_kl = gaussian_kl(new_dist, detached_old_dist)
kl = mean_kl + cov_kl
kl = kl.mean()
"""print('kl: {:.4f} loss improve: {:.4f} expected improve: {:.4f} '
'number of line search: {}'
.format(kl.data.numpy(), loss_improve, expected_improve[0], i))"""
# see https: // en.wikipedia.org / wiki / Backtracking_line_search
#if kl < self.max_kl and (loss_improve / expected_improve) > self.line_search_accept_ratio:
if kl < self.max_kl:
flag = True
break
fraction *= self.line_search_coef
if not flag:
params = self.flat_params(old_policy_params)
self.update_model(self.policy_params, params)
                print('policy update does not improve the surrogate')
detached_new_dist = FixedNormal(new_dist.mean.detach(), new_dist.stddev.detach())
metrics = compute_metrics(detached_old_dist, detached_new_dist)
metrics['value_loss_epoch'] = value_loss_epoch / num_updates_value
metrics['action_loss_epoch'] = new_loss
metrics['trust_region_loss_epoch'] = 0
metrics['advantages'] = advantages
metrics['on_policy_kurtosis'] = None
metrics['off_policy_kurtosis'] = None
metrics['on_policy_value_kurtosis'] = None
metrics['off_policy_value_kurtosis'] = None
metrics['policy_grad_norms'] = None
metrics['critic_grad_norms'] = None
metrics['ratios_list'] = None
metrics['on_policy_cos_mean'] = None
metrics['off_policy_cos_mean'] = None
metrics['on_policy_cos_median'] = None
metrics['off_policy_cos_median'] = None
return metrics
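# --- Illustrative usage (editor's sketch; the argument values below are assumptions
# --- based on the constructor signature above, not on a documented example) ---
#
#   agent = TRPO(actor_critic=policy, vf_epoch=5, lr_value=3e-4, eps=1e-5,
#                action_space=env.action_space, obs_space=env.observation_space,
#                num_steps=4096, max_kl=0.01)
#   metrics = agent.update(rollouts, j=iteration, use_disc_as_adv=False)
#
# Here `policy` is a models.model.Policy instance, `env` a Gym environment, and
# `rollouts` a storage object exposing feed_forward_generator(...) as used in
# update() and train_critic().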
| [] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_retriever_flan_xl.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import RetrievalQA
from langchain import OpenAI
from langchain.prompts import PromptTemplate
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
import json
import os
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
endpoint_name = os.environ["FLAN_XL_ENDPOINT"]
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["generated_texts"][0]
content_handler = ContentHandler()
llm=SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={"temperature":1e-10, "max_length": 500},
content_handler=content_handler
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region)
prompt_template = """
The following is a friendly conversation between a human and an AI.
The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it
does not know.
{context}
Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Solution:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
qa = RetrievalQA.from_chain_type(
llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs=chain_type_kwargs,
return_source_documents=True
)
return qa
def run_chain(chain, prompt: str, history=[]):
result = chain(prompt)
# To make it compatible with chat samples
return {
"answer": result['result'],
"source_documents": result['source_documents']
}
if __name__ == "__main__":
chain = build_chain()
result = run_chain(chain, "What's SageMaker?")
print(result['answer'])
if 'source_documents' in result:
print('Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
| [
"don't know",
"context",
"question",
"\n The following is a friendly conversation between a human and an AI. \n The AI is talkative and provides lots of specific details from its context.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n {context}\n Instruction: Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n Solution:"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_retriever_open_ai.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import RetrievalQA
from langchain import OpenAI
from langchain.prompts import PromptTemplate
import os
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
llm = OpenAI(batch_size=5, temperature=0, max_tokens=300)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region)
prompt_template = """
The following is a friendly conversation between a human and an AI.
The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it
does not know.
{context}
Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Solution:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
return RetrievalQA.from_chain_type(
llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs=chain_type_kwargs,
return_source_documents=True
)
def run_chain(chain, prompt: str, history=[]):
result = chain(prompt)
# To make it compatible with chat samples
return {
"answer": result['result'],
"source_documents": result['source_documents']
}
if __name__ == "__main__":
chain = build_chain()
result = run_chain(chain, "What's SageMaker?")
print(result['answer'])
if 'source_documents' in result:
print('Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
| [
"don't know",
"question",
"\n The following is a friendly conversation between a human and an AI. \n The AI is talkative and provides lots of specific details from its context.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n {context}\n Instruction: Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n Solution:",
"context"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_retriever_anthropic.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatAnthropic as Anthropic
import os
def build_chain():
ANTHROPIC_API_KEY = os.environ["ANTHROPIC_API_KEY"]
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
llm = Anthropic(temperature=0, anthropic_api_key=ANTHROPIC_API_KEY)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region)
prompt_template = """
Human: This is a friendly conversation between a human and an AI.
The AI is talkative and provides specific details from its context but limits it to 240 tokens.
If the AI does not know the answer to a question, it truthfully says it
does not know.
Assistant: OK, got it, I'll be a talkative truthful AI assistant.
Human: Here are a few documents in <documents> tags:
<documents>
{context}
</documents>
Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Assistant:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
return RetrievalQA.from_chain_type(
llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs=chain_type_kwargs,
return_source_documents=True
)
def run_chain(chain, prompt: str, history=[]):
result = chain(prompt)
# To make it compatible with chat samples
return {
"answer": result['result'],
"source_documents": result['source_documents']
}
if __name__ == "__main__":
chain = build_chain()
result = run_chain(chain, "What's SageMaker?")
print(result['answer'])
if 'source_documents' in result:
print('Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
| [
"don't know",
"\n\n Human: This is a friendly conversation between a human and an AI. \n The AI is talkative and provides specific details from its context but limits it to 240 tokens.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n\n Assistant: OK, got it, I'll be a talkative truthful AI assistant.\n\n Human: Here are a few documents in <documents> tags:\n <documents>\n {context}\n </documents>\n Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n\n Assistant:",
"context",
"question"
] |
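A minimal driver sketch for the Anthropic retriever sample above. It assumes the file is importable as kendra_retriever_anthropic (module name inferred from the file name, not stated in the source) and that the placeholder credentials and index ID are replaced with real values.

# Illustrative only: module name and placeholder values are assumptions.
import os

# The sample reads its configuration from environment variables.
os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."   # placeholder
os.environ["AWS_REGION"] = "us-east-1"           # placeholder
os.environ["KENDRA_INDEX_ID"] = "00000000-0000-0000-0000-000000000000"  # placeholder

import kendra_retriever_anthropic as sample

chain = sample.build_chain()
result = sample.run_chain(chain, "What's SageMaker?")
print(result["answer"])
for doc in result["source_documents"]:
    print(doc.metadata["source"])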
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_retriever_flan_xxl.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import RetrievalQA
from langchain import OpenAI
from langchain.prompts import PromptTemplate
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
import json
import os
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
endpoint_name = os.environ["FLAN_XXL_ENDPOINT"]
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
print(response_json)
return response_json["generated_texts"][0]
content_handler = ContentHandler()
llm=SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={"temperature":1e-10, "max_length": 500},
content_handler=content_handler
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region)
prompt_template = """
The following is a friendly conversation between a human and an AI.
The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it
does not know.
{context}
Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Solution:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
qa = RetrievalQA.from_chain_type(
llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs=chain_type_kwargs,
return_source_documents=True
)
return qa
def run_chain(chain, prompt: str, history=[]):
result = chain(prompt)
# To make it compatible with chat samples
return {
"answer": result['result'],
"source_documents": result['source_documents']
}
if __name__ == "__main__":
chain = build_chain()
result = run_chain(chain, "What's SageMaker?")
print(result['answer'])
if 'source_documents' in result:
print('Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
| [
"don't know",
"context",
"question",
"\n The following is a friendly conversation between a human and an AI. \n The AI is talkative and provides lots of specific details from its context.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n {context}\n Instruction: Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n Solution:"
] |
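The LLMContentHandler in the sample above is essentially a pair of JSON (de)serializers around the SageMaker endpoint. Below is a stdlib-only sketch of that contract, using an in-memory buffer in place of the endpoint response body; the payload shape with generated_texts mirrors this sample and is not guaranteed for every model container.

# Illustrative round-trip of the sample's content-handler contract, no AWS calls.
import io
import json

def transform_input(prompt: str, model_kwargs: dict) -> bytes:
    # Same request shape the sample sends to the Flan endpoint.
    return json.dumps({"text_inputs": prompt, **model_kwargs}).encode("utf-8")

def transform_output(output) -> str:
    # SageMaker hands back a streaming body; .read() yields the raw JSON bytes.
    response_json = json.loads(output.read().decode("utf-8"))
    return response_json["generated_texts"][0]

request_bytes = transform_input("What's SageMaker?", {"temperature": 1e-10, "max_length": 500})
fake_endpoint_body = io.BytesIO(
    json.dumps({"generated_texts": ["Amazon SageMaker is ..."]}).encode("utf-8")
)
print(request_bytes)
print(transform_output(fake_endpoint_body))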
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_chat_bedrock_titan.py | # from aws_langchain.kendra import AmazonKendraRetriever #custom library
from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.llms.bedrock import Bedrock
import sys
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
credentials_profile_name = os.environ['AWS_PROFILE']
print(credentials_profile_name)
llm = Bedrock(
credentials_profile_name=credentials_profile_name,
region_name = region,
model_id="amazon.titan-tg1-large"
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,top_k=5,region_name=region)
prompt_template = """
Human: This is a friendly conversation between a human and an AI.
The AI is talkative and provides specific details from its context but limits it to 240 tokens.
If the AI does not know the answer to a question, it truthfully says it
does not know.
Assistant: OK, got it, I'll be a talkative truthful AI assistant.
Human: Here are a few documents in <documents> tags:
<documents>
{context}
</documents>
Based on the above documents, provide a detailed answer for, {question}
Answer "don't know" if not present in the document.
Assistant:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
Given the following conversation and a follow up question, rephrase the follow up question
to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt":PROMPT})
# qa = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, qa_prompt=PROMPT, return_source_documents=True)
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
for query in sys.stdin:
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:","")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
if 'source_documents' in result:
print(bcolors.OKGREEN + 'Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
print(bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"don't know",
"question",
"\n\n Human: This is a friendly conversation between a human and an AI. \n The AI is talkative and provides specific details from its context but limits it to 240 tokens.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n\n Assistant: OK, got it, I'll be a talkative truthful AI assistant.\n\n Human: Here are a few documents in <documents> tags:\n <documents>\n {context}\n </documents>\n Based on the above documents, provide a detailed answer for, {question} \n Answer \"don't know\" if not present in the document. \n\nAssistant:\n ",
"context",
"\n Given the following conversation and a follow up question, rephrase the follow up question \n to be a standalone question.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_chat_flan_xxl.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.prompts import PromptTemplate
import sys
import json
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
endpoint_name = os.environ["FLAN_XXL_ENDPOINT"]
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["generated_texts"][0]
content_handler = ContentHandler()
llm=SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={"temperature":1e-10, "max_length": 500},
content_handler=content_handler
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region)
prompt_template = """
The following is a friendly conversation between a human and an AI.
The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it
does not know.
{context}
Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Solution:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
Given the following conversation and a follow up question, rephrase the follow up question
to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt":PROMPT})
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
for query in sys.stdin:
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:","")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
if 'source_documents' in result:
print(bcolors.OKGREEN + 'Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
print(bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"don't know",
"question",
"context",
"\n Given the following conversation and a follow up question, rephrase the follow up question \n to be a standalone question.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:",
"\n The following is a friendly conversation between a human and an AI. \n The AI is talkative and provides lots of specific details from its context.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n {context}\n Instruction: Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n Solution:"
] |
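The interactive loop above keeps at most MAX_HISTORY_LENGTH (question, answer) pairs by popping the oldest entry before each call. A small standalone restatement of that bookkeeping; the helper name is illustrative and not part of the repo.

# Illustrative restatement of the sample's bounded chat-history handling.
MAX_HISTORY_LENGTH = 5

def append_turn(history, question, answer, max_len=MAX_HISTORY_LENGTH):
    """Drop the oldest turn once the history is full, then record the new one."""
    if len(history) >= max_len:
        history.pop(0)
    history.append((question, answer))
    return history

history = []
for i in range(7):
    append_turn(history, f"q{i}", f"a{i}")
print(len(history))   # 5
print(history[0])     # ('q2', 'a2'): the two oldest turns were dropped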
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_chat_falcon_40b.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.prompts import PromptTemplate
import sys
import json
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
endpoint_name = os.environ["FALCON_40B_ENDPOINT"]
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"inputs": prompt, "parameters": model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
content_handler = ContentHandler()
llm=SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={
"temperature": 0.8,
"max_new_tokens": 512,
"do_sample": True,
"top_p": 0.9,
"repetition_penalty": 1.03,
"stop": ["\nUser:","<|endoftext|>","</s>"]
},
content_handler=content_handler
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region, top_k=2)
prompt_template = """
{context}
Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Solution:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
Given the following conversation and a follow up question, rephrase the follow up question
to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
verbose =True,
combine_docs_chain_kwargs={"prompt":PROMPT})
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
for query in sys.stdin:
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:","")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
if 'source_documents' in result:
print(bcolors.OKGREEN + 'Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
print(bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"don't know",
"question",
"\n {context}\n Instruction: Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n Solution:",
"context",
"\n Given the following conversation and a follow up question, rephrase the follow up question \n to be a standalone question.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~ja~kendra_chat_bedrock_claude.py | # from aws_langchain.kendra import AmazonKendraRetriever #custom library
from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.llms.bedrock import Bedrock
from langchain.chains.llm import LLMChain
import sys
import os
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
credentials_profile_name = os.environ["AWS_PROFILE"]
language_code = os.environ["LANGUAGE_CODE"]
llm = Bedrock(
credentials_profile_name=credentials_profile_name,
region_name=region,
model_kwargs={
"max_tokens_to_sample": 300,
"temperature": 1,
"top_k": 250,
"top_p": 0.999,
"anthropic_version": "bedrock-2023-05-31",
},
model_id="anthropic.claude-v1",
)
retriever = AmazonKendraRetriever(
index_id=kendra_index_id,
top_k=5,
region_name=region,
attribute_filter={
"EqualsTo": {
"Key": "_language_code",
"Value": {"StringValue": language_code},
}
},
)
prompt_template = """
システム: システムは資料から抜粋して質問に答えます。資料にない内容には答えず、正直に「わかりません」と答えます。
{context}
上記の資料に基づいて以下の質問について資料から抜粋して回答を生成します。資料にない内容には答えず「わかりません」と答えます。
ユーザー: {question}
システム:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
次のような会話とフォローアップの質問に基づいて、フォローアップの質問を独立した質問に言い換えてください。
フォローアップの質問: {question}
独立した質問:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt": PROMPT},
verbose=True,
)
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(
bcolors.OKCYAN
+ "Ask a question, start a New search: or CTRL-D to exit."
+ bcolors.ENDC
)
print(">", end=" ", flush=True)
for query in sys.stdin:
if query.strip().lower().startswith("new search:"):
query = query.strip().lower().replace("new search:", "")
chat_history = []
elif len(chat_history) == MAX_HISTORY_LENGTH:
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result["answer"] + bcolors.ENDC)
if "source_documents" in result:
print(bcolors.OKGREEN + "Sources:")
for d in result["source_documents"]:
print(d.metadata["source"])
print(bcolors.ENDC)
print(
bcolors.OKCYAN
+ "Ask a question, start a New search: or CTRL-D to exit."
+ bcolors.ENDC
)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"\n システム: システムは資料から抜粋して質問に答えます。資料にない内容には答えず、正直に「わかりません」と答えます。\n\n {context}\n\n 上記の資料に基づいて以下の質問について資料から抜粋して回答を生成します。資料にない内容には答えず「わかりません」と答えます。\n ユーザー: {question}\n システム:\n ",
"question",
"context",
"\n 次のような会話とフォローアップの質問に基づいて、フォローアップの質問を独立した質問に言い換えてください。\n\n フォローアップの質問: {question}\n 独立した質問:"
] |
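The Japanese samples restrict Kendra results with the built-in _language_code attribute. A tiny helper that builds the same attribute_filter dictionary; the helper itself is illustrative and not part of the repo.

# Illustrative builder for the Kendra language attribute filter used above.
def language_filter(language_code: str) -> dict:
    """Return the attribute_filter payload that limits results to one language."""
    return {
        "EqualsTo": {
            "Key": "_language_code",
            "Value": {"StringValue": language_code},
        }
    }

print(language_filter("ja"))
# {'EqualsTo': {'Key': '_language_code', 'Value': {'StringValue': 'ja'}}}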
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_retriever_falcon_40b.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import RetrievalQA
from langchain import OpenAI
from langchain.prompts import PromptTemplate
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
import json
import os
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
endpoint_name = os.environ["FALCON_40B_ENDPOINT"]
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"inputs": prompt, "parameters": model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
print(response_json)
return response_json[0]["generated_text"]
content_handler = ContentHandler()
llm=SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={"temperature":1e-10, "min_length": 10000, "max_length": 10000, "max_new_tokens": 100},
content_handler=content_handler
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region)
prompt_template = """
The following is a friendly conversation between a human and an AI.
The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it
does not know.
{context}
Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Solution:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
qa = RetrievalQA.from_chain_type(
llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs=chain_type_kwargs,
return_source_documents=True
)
return qa
def run_chain(chain, prompt: str, history=[]):
result = chain(prompt)
# To make it compatible with chat samples
return {
"answer": result['result'],
"source_documents": result['source_documents']
}
if __name__ == "__main__":
chain = build_chain()
result = run_chain(chain, "What's SageMaker?")
print(result['answer'])
if 'source_documents' in result:
print('Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
| [
"don't know",
"context",
"question",
"\n The following is a friendly conversation between a human and an AI. \n The AI is talkative and provides lots of specific details from its context.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n {context}\n Instruction: Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n Solution:"
] |
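The Falcon content handler above expects the endpoint to return a JSON list whose first element carries generated_text. A stdlib-only sketch of that parsing step against a mocked response body; the payload shape mirrors this sample and may differ for other model containers.

# Illustrative parse of a Falcon-style SageMaker response, no endpoint required.
import io
import json

def parse_generated_text(output) -> str:
    # Mirrors the sample: the body is a JSON list of {"generated_text": ...} objects.
    response_json = json.loads(output.read().decode("utf-8"))
    return response_json[0]["generated_text"]

mock_body = io.BytesIO(
    json.dumps([{"generated_text": "Amazon SageMaker is a managed ML service."}]).encode("utf-8")
)
print(parse_generated_text(mock_body))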
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~ja~kendra_chat_bedrock_claudev2.py | # from aws_langchain.kendra import AmazonKendraRetriever #custom library
from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.llms.bedrock import Bedrock
from langchain.chains.llm import LLMChain
import sys
import os
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
credentials_profile_name = os.environ["AWS_PROFILE"]
language_code = os.environ["LANGUAGE_CODE"]
llm = Bedrock(
credentials_profile_name=credentials_profile_name,
region_name=region,
model_kwargs={
"max_tokens_to_sample": 300,
"temperature": 1,
"top_k": 250,
"top_p": 0.999,
"anthropic_version": "bedrock-2023-05-31",
},
model_id="anthropic.claude-v2",
)
retriever = AmazonKendraRetriever(
index_id=kendra_index_id,
top_k=5,
region_name=region,
attribute_filter={
"EqualsTo": {
"Key": "_language_code",
"Value": {"StringValue": language_code},
}
},
)
prompt_template = """
システム: システムは資料から抜粋して質問に答えます。資料にない内容には答えず、正直に「わかりません」と答えます。
{context}
上記の資料に基づいて以下の質問について資料から抜粋して回答を生成します。資料にない内容には答えず「わかりません」と答えます。
ユーザー: {question}
システム:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
次のような会話とフォローアップの質問に基づいて、フォローアップの質問を独立した質問に言い換えてください。
フォローアップの質問: {question}
独立した質問:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt": PROMPT},
verbose=True,
)
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(
bcolors.OKCYAN
+ "Ask a question, start a New search: or CTRL-D to exit."
+ bcolors.ENDC
)
print(">", end=" ", flush=True)
for query in sys.stdin:
if query.strip().lower().startswith("new search:"):
query = query.strip().lower().replace("new search:", "")
chat_history = []
elif len(chat_history) == MAX_HISTORY_LENGTH:
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result["answer"] + bcolors.ENDC)
if "source_documents" in result:
print(bcolors.OKGREEN + "Sources:")
for d in result["source_documents"]:
print(d.metadata["source"])
print(bcolors.ENDC)
print(
bcolors.OKCYAN
+ "Ask a question, start a New search: or CTRL-D to exit."
+ bcolors.ENDC
)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"\n システム: システムは資料から抜粋して質問に答えます。資料にない内容には答えず、正直に「わかりません」と答えます。\n\n {context}\n\n 上記の資料に基づいて以下の質問について資料から抜粋して回答を生成します。資料にない内容には答えず「わかりません」と答えます。\n ユーザー: {question}\n システム:\n ",
"question",
"context",
"\n 次のような会話とフォローアップの質問に基づいて、フォローアップの質問を独立した質問に言い換えてください。\n\n フォローアップの質問: {question}\n 独立した質問:"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_chat_anthropic.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatAnthropic as Anthropic
import sys
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
MAX_HISTORY_LENGTH = 5
def build_chain():
ANTHROPIC_API_KEY = os.environ["ANTHROPIC_API_KEY"]
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
llm = Anthropic(temperature=0, anthropic_api_key=ANTHROPIC_API_KEY, max_tokens_to_sample = 512)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region)
prompt_template = """
Human: This is a friendly conversation between a human and an AI.
The AI is talkative and provides specific details from its context but limits it to 240 tokens.
If the AI does not know the answer to a question, it truthfully says it
does not know.
Assistant: OK, got it, I'll be a talkative truthful AI assistant.
Human: Here are a few documents in <documents> tags:
<documents>
{context}
</documents>
Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Assistant:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
Given the following conversation and a follow up question, rephrase the follow up question
to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt":PROMPT})
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
for query in sys.stdin:
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:","")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
if 'source_documents' in result:
print(bcolors.OKGREEN + 'Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
print(bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"don't know",
"\n\n Human: This is a friendly conversation between a human and an AI. \n The AI is talkative and provides specific details from its context but limits it to 240 tokens.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n\n Assistant: OK, got it, I'll be a talkative truthful AI assistant.\n\n Human: Here are a few documents in <documents> tags:\n <documents>\n {context}\n </documents>\n Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n\n Assistant:",
"question",
"context",
"\n Given the following conversation and a follow up question, rephrase the follow up question \n to be a standalone question.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~ja~kendra_chat_open_ai.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain import OpenAI
import sys
import os
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
language_code = os.environ["LANGUAGE_CODE"]
llm = OpenAI(batch_size=5, temperature=0, max_tokens=300)
retriever = AmazonKendraRetriever(
index_id=kendra_index_id,
region_name=region,
attribute_filter={
"EqualsTo": {
"Key": "_language_code",
"Value": {"StringValue": language_code},
}
},
)
prompt_template = """
システム: システムは資料から抜粋して質問に答えます。資料にない内容には答えず、正直に「わかりません」と答えます。
{context}
上記の資料に基づいて以下の質問について資料から抜粋して回答を生成します。資料にない内容には答えず「わかりません」と答えます。
ユーザー: {question}
システム:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
次のような会話とフォローアップの質問に基づいて、フォローアップの質問を独立した質問に言い換えてください。
フォローアップの質問: {question}
独立した質問:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt": PROMPT},
)
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
qa = build_chain()
chat_history = []
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(
bcolors.OKCYAN
+ "Ask a question, start a New search: or CTRL-D to exit."
+ bcolors.ENDC
)
print(">", end=" ", flush=True)
for query in sys.stdin:
if query.strip().lower().startswith("new search:"):
query = query.strip().lower().replace("new search:", "")
chat_history = []
elif len(chat_history) == MAX_HISTORY_LENGTH:
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result["answer"] + bcolors.ENDC)
if "source_documents" in result:
print(bcolors.OKGREEN + "Sources:")
for d in result["source_documents"]:
print(d.metadata["source"])
print(bcolors.ENDC)
print(
bcolors.OKCYAN
+ "Ask a question, start a New search: or CTRL-D to exit."
+ bcolors.ENDC
)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"\n システム: システムは資料から抜粋して質問に答えます。資料にない内容には答えず、正直に「わかりません」と答えます。\n\n {context}\n\n 上記の資料に基づいて以下の質問について資料から抜粋して回答を生成します。資料にない内容には答えず「わかりません」と答えます。\n ユーザー: {question}\n システム:\n ",
"question",
"context",
"\n 次のような会話とフォローアップの質問に基づいて、フォローアップの質問を独立した質問に言い換えてください。\n\n フォローアップの質問: {question}\n 独立した質問:"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_chat_bedrock_claude.py | # from aws_langchain.kendra import AmazonKendraRetriever #custom library
from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.llms.bedrock import Bedrock
from langchain.chains.llm import LLMChain
import sys
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
credentials_profile_name = os.environ['AWS_PROFILE']
print(credentials_profile_name)
llm = Bedrock(
credentials_profile_name=credentials_profile_name,
region_name = region,
model_kwargs={"max_tokens_to_sample":300,"temperature":1,"top_k":250,"top_p":0.999,"anthropic_version":"bedrock-2023-05-31"},
model_id="anthropic.claude-v1"
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,top_k=5,region_name=region)
prompt_template = """Human: This is a friendly conversation between a human and an AI.
The AI is talkative and provides specific details from its context but limits it to 240 tokens.
If the AI does not know the answer to a question, it truthfully says it
does not know.
Assistant: OK, got it, I'll be a talkative truthful AI assistant.
Human: Here are a few documents in <documents> tags:
<documents>
{context}
</documents>
Based on the above documents, provide a detailed answer for, {question}
Answer "don't know" if not present in the document.
Assistant:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """Human:
Given the following conversation and a follow up question, rephrase the follow up question
to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:
Assistant:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt":PROMPT},
verbose=True)
# qa = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, qa_prompt=PROMPT, return_source_documents=True)
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
for query in sys.stdin:
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:","")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
if 'source_documents' in result:
print(bcolors.OKGREEN + 'Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
print(bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"don't know",
"Human: This is a friendly conversation between a human and an AI. \n The AI is talkative and provides specific details from its context but limits it to 240 tokens.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n\n Assistant: OK, got it, I'll be a talkative truthful AI assistant.\n\n Human: Here are a few documents in <documents> tags:\n <documents>\n {context}\n </documents>\n Based on the above documents, provide a detailed answer for, {question} \n Answer \"don't know\" if not present in the document. \n\n Assistant:\n ",
"question",
"Human: \n Given the following conversation and a follow up question, rephrase the follow up question \n to be a standalone question.\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question: \n\n Assistant:",
"context"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_chat_bedrock_claudev2.py | # from aws_langchain.kendra import AmazonKendraRetriever #custom library
from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.llms.bedrock import Bedrock
from langchain.chains.llm import LLMChain
import sys
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
credentials_profile_name = os.environ['AWS_PROFILE']
print(credentials_profile_name)
llm = Bedrock(
credentials_profile_name=credentials_profile_name,
region_name = region,
model_kwargs={"max_tokens_to_sample":300,"temperature":1,"top_k":250,"top_p":0.999,"anthropic_version":"bedrock-2023-05-31"},
model_id="anthropic.claude-v2"
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,top_k=5,region_name=region)
prompt_template = """Human: This is a friendly conversation between a human and an AI.
The AI is talkative and provides specific details from its context but limits it to 240 tokens.
If the AI does not know the answer to a question, it truthfully says it
does not know.
Assistant: OK, got it, I'll be a talkative truthful AI assistant.
Human: Here are a few documents in <documents> tags:
<documents>
{context}
</documents>
Based on the above documents, provide a detailed answer for, {question}
Answer "don't know" if not present in the document.
Assistant:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """{chat_history}
Human:
Given the previous conversation and a follow up question below, rephrase the follow up question
to be a standalone question.
Follow Up Question: {question}
Standalone Question:
Assistant:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt":PROMPT},
verbose=True)
# qa = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, qa_prompt=PROMPT, return_source_documents=True)
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
for query in sys.stdin:
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:","")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
if 'source_documents' in result:
print(bcolors.OKGREEN + 'Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
print(bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"don't know",
"Human: This is a friendly conversation between a human and an AI. \n The AI is talkative and provides specific details from its context but limits it to 240 tokens.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n\n Assistant: OK, got it, I'll be a talkative truthful AI assistant.\n\n Human: Here are a few documents in <documents> tags:\n <documents>\n {context}\n </documents>\n Based on the above documents, provide a detailed answer for, {question} \n Answer \"don't know\" if not present in the document. \n\n Assistant:\n ",
"{chat_history}\n Human:\n Given the previous conversation and a follow up question below, rephrase the follow up question\n to be a standalone question.\n\n Follow Up Question: {question}\n Standalone Question:\n\n Assistant:",
"question",
"context"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_chat_flan_xl.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
import sys
import json
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
endpoint_name = os.environ["FLAN_XL_ENDPOINT"]
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["generated_texts"][0]
content_handler = ContentHandler()
llm=SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={"temperature":1e-10, "max_length": 500},
content_handler=content_handler
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region)
prompt_template = """
The following is a friendly conversation between a human and an AI.
The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it
does not know.
{context}
Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Solution:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
Given the following conversation and a follow up question, rephrase the follow up question
to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt":PROMPT})
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
for query in sys.stdin:
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:","")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
if 'source_documents' in result:
print(bcolors.OKGREEN + 'Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
print(bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"don't know",
"question",
"context",
"\n Given the following conversation and a follow up question, rephrase the follow up question \n to be a standalone question.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:",
"\n The following is a friendly conversation between a human and an AI. \n The AI is talkative and provides lots of specific details from its context.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n {context}\n Instruction: Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n Solution:"
] |
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_chat_open_ai.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain import OpenAI
import sys
import os
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
llm = OpenAI(batch_size=5, temperature=0, max_tokens=300)
retriever = AmazonKendraRetriever(index_id=kendra_index_id, region_name=region)
prompt_template = """
The following is a friendly conversation between a human and an AI.
The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it
does not know.
{context}
Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Solution:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
Given the following conversation and a follow up question, rephrase the follow up question
to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt":PROMPT})
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
qa = build_chain()
chat_history = []
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
for query in sys.stdin:
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:","")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
if 'source_documents' in result:
print(bcolors.OKGREEN + 'Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
print(bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"don't know",
"question",
"context",
"\n Given the following conversation and a follow up question, rephrase the follow up question \n to be a standalone question.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:",
"\n The following is a friendly conversation between a human and an AI. \n The AI is talkative and provides lots of specific details from its context.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n {context}\n Instruction: Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n Solution:"
] |
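Both templates in the sample above are plain {placeholder} strings, so they can be previewed locally with str.format before any model call; the context and question below are made-up values for illustration.

# Illustrative preview of how LangChain fills the sample's QA prompt.
prompt_template = """
  The following is a friendly conversation between a human and an AI.
  The AI is talkative and provides lots of specific details from its context.
  If the AI does not know the answer to a question, it truthfully says it
  does not know.
  {context}
  Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
  if not present in the document.
  Solution:"""

filled = prompt_template.format(
    context="Document: Amazon SageMaker is a fully managed machine learning service.",
    question="What's SageMaker?",
)
print(filled)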
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~kendra_chat_llama_2.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
import sys
import json
import os
from typing import Dict, List
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
endpoint_name = os.environ["LLAMA_2_ENDPOINT"]
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
# input_str = json.dumps({"inputs": [[{"role": "user", "content": prompt},]],
# "parameters" : model_kwargs
# })
input_str = json.dumps({"inputs": prompt,
"parameters" : model_kwargs
})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
print(response_json)
return response_json[0]['generated_text']
content_handler = ContentHandler()
llm=SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={"max_new_tokens": 1500, "top_p": 0.8,"temperature":0.6},
endpoint_kwargs={"CustomAttributes":"accept_eula=true"},
content_handler=content_handler,
)
retriever = AmazonKendraRetriever(index_id=kendra_index_id,region_name=region)
prompt_template = """
<s>[INST] <<SYS>>
The following is a friendly conversation between a human and an AI.
The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it
does not know.
{context}
<</SYS>>
Instruction: Based on the above documents, provide a detailed answer for, {question} Answer "don't know"
if not present in the document.
Solution:
[/INST]"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"],
)
condense_qa_template = """
<s>[INST] <<SYS>>
Given the following conversation and a follow up question, rephrase the follow up question
to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
<</SYS>>
Standalone question: [/INST]"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
combine_docs_chain_kwargs={"prompt":PROMPT},
verbose=False
)
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
def format_messages(messages: List[Dict[str, str]]) -> List[str]:
"""Format messages for Llama-2 chat models.
The model only supports 'system', 'user' and 'assistant' roles, starting with 'system', then 'user' and
alternating (u/a/u/a/u...). The last message must be from 'user'.
"""
prompt: List[str] = []
if messages[0]["role"] == "system":
content = "".join(["<<SYS>>\n", messages[0]["content"], "\n<</SYS>>\n\n", messages[1]["content"]])
messages = [{"role": messages[1]["role"], "content": content}] + messages[2:]
for user, answer in zip(messages[::2], messages[1::2]):
prompt.extend(["<s>", "[INST] ", (user["content"]).strip(), " [/INST] ", (answer["content"]).strip(), "</s>"])
prompt.extend(["<s>", "[INST] ", (messages[-1]["content"]).strip(), " [/INST] "])
return "".join(prompt)
def print_messages(prompt: str, response: str) -> None:
bold, unbold = '\033[1m', '\033[0m'
print(f"{bold}> Input{unbold}\n{prompt}\n\n{bold}> Output{unbold}\n{response[0]['generated_text']}\n")
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
for query in sys.stdin:
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:","")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
if 'source_documents' in result:
print(bcolors.OKGREEN + 'Sources:')
for d in result['source_documents']:
print(d.metadata['source'])
print(bcolors.ENDC)
print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC) | [
"don't know",
"\n <s>[INST] <<SYS>>\n Given the following conversation and a follow up question, rephrase the follow up question \n to be a standalone question.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n <</SYS>>\n Standalone question: [/INST]",
"\n <s>[INST] <<SYS>>\n The following is a friendly conversation between a human and an AI. \n The AI is talkative and provides lots of specific details from its context.\n If the AI does not know the answer to a question, it truthfully says it \n does not know.\n {context}\n <</SYS>>\n Instruction: Based on the above documents, provide a detailed answer for, {question} Answer \"don't know\" \n if not present in the document. \n Solution:\n [/INST]",
"question",
"context"
] |
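A usage sketch for the format_messages helper defined in the sample above; it assumes the file is importable as kendra_chat_llama_2 (module name inferred from the file name) with its dependencies installed.

# Illustrative call of the sample's Llama-2 chat formatter; the import path is an assumption.
from kendra_chat_llama_2 import format_messages

messages = [
    {"role": "system", "content": "You answer questions about AWS documentation."},
    {"role": "user", "content": "What's SageMaker?"},
]
# The system turn is folded into the first user turn inside <<SYS>> ... <</SYS>>,
# and the result is wrapped in <s>[INST] ... [/INST] for the chat model.
print(format_messages(messages))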
2024-01-10 | aws-samples/amazon-kendra-langchain-extensions | kendra_retriever_samples~ja~kendra_chat_falcon_40b.py | from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.prompts import PromptTemplate
import sys
import json
import os
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
MAX_HISTORY_LENGTH = 5
def build_chain():
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
endpoint_name = os.environ["FALCON_40B_ENDPOINT"]
language_code = os.environ["LANGUAGE_CODE"]
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"inputs": prompt, "parameters": model_kwargs})
return input_str.encode("utf-8")
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
content_handler = ContentHandler()
llm = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={
"temperature": 0.8,
"max_new_tokens": 512,
"do_sample": True,
"top_p": 0.9,
"repetition_penalty": 1.03,
"stop": ["\nUser:", "<|endoftext|>", "</s>"],
},
content_handler=content_handler,
)
retriever = AmazonKendraRetriever(
index_id=kendra_index_id,
region_name=region,
top_k=1,
attribute_filter={
"EqualsTo": {
"Key": "_language_code",
"Value": {"StringValue": language_code},
}
},
)
prompt_template = """
システム: システムは資料から抜粋して質問に答えます。資料にない内容には答えず、正直に「わかりません」と答えます。
{context}
上記の資料に基づいて以下の質問について資料から抜粋して回答を生成します。資料にない内容には答えず「わかりません」と答えます。
ユーザー: {question}
システム:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
condense_qa_template = """
次のような会話とフォローアップの質問に基づいて、フォローアップの質問を独立した質問に言い換えてください。
フォローアップの質問: {question}
独立した質問:"""
standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
condense_question_prompt=standalone_question_prompt,
return_source_documents=True,
verbose=True,
combine_docs_chain_kwargs={"prompt": PROMPT},
)
return qa
def run_chain(chain, prompt: str, history=[]):
return chain({"question": prompt, "chat_history": history})
if __name__ == "__main__":
chat_history = []
qa = build_chain()
print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
print(
bcolors.OKCYAN
+ "Ask a question, start a New search: or CTRL-D to exit."
+ bcolors.ENDC
)
print(">", end=" ", flush=True)
for query in sys.stdin:
if query.strip().lower().startswith("new search:"):
query = query.strip().lower().replace("new search:", "")
chat_history = []
elif len(chat_history) == MAX_HISTORY_LENGTH:
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
print(bcolors.OKGREEN + result["answer"] + bcolors.ENDC)
if "source_documents" in result:
print(bcolors.OKGREEN + "Sources:")
for d in result["source_documents"]:
print(d.metadata["source"])
print(bcolors.ENDC)
print(
bcolors.OKCYAN
+ "Ask a question, start a New search: or CTRL-D to exit."
+ bcolors.ENDC
)
print(">", end=" ", flush=True)
print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)
| [
"\n システム: システムは資料から抜粋して質問に答えます。資料にない内容には答えず、正直に「わかりません」と答えます。\n\n {context}\n\n 上記の資料に基づいて以下の質問について資料から抜粋して回答を生成します。資料にない内容には答えず「わかりません」と答えます。\n ユーザー: {question}\n システム:\n ",
"question",
"context",
"\n 次のような会話とフォローアップの質問に基づいて、フォローアップの質問を独立した質問に言い換えてください。\n\n フォローアップの質問: {question}\n 独立した質問:"
] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~zep.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
if TYPE_CHECKING:
from zep_python import MemorySearchResult
class ZepRetriever(BaseRetriever):
"""Retriever for the Zep long-term memory store.
Search your user's long-term chat history with Zep.
Note: You will need to provide the user's `session_id` to use this retriever.
More on Zep:
Zep provides long-term conversation storage for LLM apps. The server stores,
summarizes, embeds, indexes, and enriches conversational AI chat
histories, and exposes them via simple, low-latency APIs.
For server installation instructions, see:
https://docs.getzep.com/deployment/quickstart/
"""
zep_client: Any
"""Zep client."""
session_id: str
"""Zep session ID."""
top_k: Optional[int]
"""Number of documents to return."""
@root_validator(pre=True)
def create_client(cls, values: dict) -> dict:
try:
from zep_python import ZepClient
except ImportError:
raise ValueError(
"Could not import zep-python package. "
"Please install it with `pip install zep-python`."
)
values["zep_client"] = values.get(
"zep_client",
ZepClient(base_url=values["url"], api_key=values.get("api_key")),
)
return values
def _search_result_to_doc(
self, results: List[MemorySearchResult]
) -> List[Document]:
return [
Document(
page_content=r.message.pop("content"),
metadata={"score": r.dist, **r.message},
)
for r in results
if r.message
]
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
metadata: Optional[Dict] = None,
) -> List[Document]:
from zep_python import MemorySearchPayload
payload: MemorySearchPayload = MemorySearchPayload(
text=query, metadata=metadata
)
results: List[MemorySearchResult] = self.zep_client.memory.search_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
metadata: Optional[Dict] = None,
) -> List[Document]:
from zep_python import MemorySearchPayload
payload: MemorySearchPayload = MemorySearchPayload(
text=query, metadata=metadata
)
results: List[MemorySearchResult] = await self.zep_client.memory.asearch_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
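# Usage sketch (illustrative only, not part of the original module). It assumes a running
# Zep server and an existing session; the URL and session id below are placeholders.
#
#   retriever = ZepRetriever(
#       url="http://localhost:8000",
#       session_id="<your-session-id>",
#       top_k=5,
#   )
#   docs = retriever.get_relevant_documents("what did we decide about pricing?")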
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~elastic_search_bm25.py | """Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
class ElasticSearchBM25Retriever(BaseRetriever):
"""Retriever for the Elasticsearch using BM25 as a retrieval method.
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
"""
client: Any
"""Elasticsearch client."""
index_name: str
"""Name of the index to use in Elasticsearch."""
@classmethod
def create(
cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
) -> ElasticSearchBM25Retriever:
"""
Create a ElasticSearchBM25Retriever from a list of texts.
Args:
elasticsearch_url: URL of the Elasticsearch instance to connect to.
index_name: Name of the index to use in Elasticsearch.
k1: BM25 parameter k1.
b: BM25 parameter b.
Returns:
"""
from elasticsearch import Elasticsearch
# Create an Elasticsearch client instance
es = Elasticsearch(elasticsearch_url)
# Define the index settings and mappings
settings = {
"analysis": {"analyzer": {"default": {"type": "standard"}}},
"similarity": {
"custom_bm25": {
"type": "BM25",
"k1": k1,
"b": b,
}
},
}
mappings = {
"properties": {
"content": {
"type": "text",
"similarity": "custom_bm25", # Use the custom BM25 similarity
}
}
}
# Create the index with the specified settings and mappings
es.indices.create(index=index_name, mappings=mappings, settings=settings)
return cls(client=es, index_name=index_name)
def add_texts(
self,
texts: Iterable[str],
refresh_indices: bool = True,
) -> List[str]:
"""Run more texts through the embeddings and add to the retriever.
Args:
texts: Iterable of strings to add to the retriever.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the retriever.
"""
try:
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = []
for i, text in enumerate(texts):
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
"content": text,
"_id": _id,
}
ids.append(_id)
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
query_dict = {"query": {"match": {"content": query}}}
res = self.client.search(index=self.index_name, body=query_dict)
docs = []
for r in res["hits"]["hits"]:
docs.append(Document(page_content=r["_source"]["content"]))
return docs
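# Usage sketch (illustrative only, not part of the original module). The URL and index
# name are placeholders; credentials can be embedded in the URL as described in the
# class docstring.
#
#   retriever = ElasticSearchBM25Retriever.create(
#       elasticsearch_url="https://username:password@es_host:9243",
#       index_name="langchain-bm25-demo",
#   )
#   retriever.add_texts(["foo", "foo bar", "hello world"])
#   docs = retriever.get_relevant_documents("foo")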
| [
"{'type': 'text', 'similarity': 'custom_bm25'}"
] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~web_research.py | import logging
import re
from typing import List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.chains import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain.document_loaders import AsyncHtmlLoader
from langchain.document_transformers import Html2TextTransformer
from langchain.llms import LlamaCpp
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.prompts import BasePromptTemplate, PromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema import BaseRetriever, Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger(__name__)
class SearchQueries(BaseModel):
"""Search queries to run to research for the user's goal."""
queries: List[str] = Field(
..., description="List of search queries to look up on Google"
)
DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(
input_variables=["question"],
template="""<<SYS>> \n You are an assistant tasked with improving Google search \
results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that \
are similar to this question. The output should be a numbered list of questions \
and each should have a question mark at the end: \n\n {question} [/INST]""",
)
DEFAULT_SEARCH_PROMPT = PromptTemplate(
input_variables=["question"],
template="""You are an assistant tasked with improving Google search \
results. Generate THREE Google search queries that are similar to \
this question. The output should be a numbered list of questions and each \
should have a question mark at the end: {question}""",
)
class LineList(BaseModel):
"""List of questions."""
lines: List[str] = Field(description="Questions")
class QuestionListOutputParser(PydanticOutputParser):
"""Output parser for a list of numbered questions."""
def __init__(self) -> None:
super().__init__(pydantic_object=LineList)
def parse(self, text: str) -> LineList:
lines = re.findall(r"\d+\..*?\n", text)
return LineList(lines=lines)
class WebResearchRetriever(BaseRetriever):
"""Retriever for web research based on the Google Search API."""
# Inputs
vectorstore: VectorStore = Field(
..., description="Vector store for storing web pages"
)
llm_chain: LLMChain
search: GoogleSearchAPIWrapper = Field(..., description="Google Search API Wrapper")
num_search_results: int = Field(1, description="Number of pages per Google search")
text_splitter: RecursiveCharacterTextSplitter = Field(
RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50),
description="Text splitter for splitting web pages into chunks",
)
url_database: List[str] = Field(
default_factory=list, description="List of processed URLs"
)
@classmethod
def from_llm(
cls,
vectorstore: VectorStore,
llm: BaseLLM,
search: GoogleSearchAPIWrapper,
prompt: Optional[BasePromptTemplate] = None,
num_search_results: int = 1,
text_splitter: RecursiveCharacterTextSplitter = RecursiveCharacterTextSplitter(
chunk_size=1500, chunk_overlap=150
),
) -> "WebResearchRetriever":
"""Initialize from llm using default template.
Args:
vectorstore: Vector store for storing web pages
llm: llm for search question generation
search: GoogleSearchAPIWrapper
prompt: prompt to generating search questions
num_search_results: Number of pages per Google search
text_splitter: Text splitter for splitting web pages into chunks
Returns:
WebResearchRetriever
"""
if not prompt:
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=DEFAULT_SEARCH_PROMPT,
conditionals=[
(lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)
],
)
prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)
# Use chat model prompt
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
output_parser=QuestionListOutputParser(),
)
return cls(
vectorstore=vectorstore,
llm_chain=llm_chain,
search=search,
num_search_results=num_search_results,
text_splitter=text_splitter,
)
def clean_search_query(self, query: str) -> str:
# Some search tools (e.g., Google) will
# fail to return results if query has a
# leading digit: 1. "LangCh..."
# Check if the first character is a digit
if query[0].isdigit():
# Find the position of the first quote
first_quote_pos = query.find('"')
if first_quote_pos != -1:
# Extract the part of the string after the quote
query = query[first_quote_pos + 1 :]
# Remove the trailing quote if present
if query.endswith('"'):
query = query[:-1]
return query.strip()
def search_tool(self, query: str, num_search_results: int = 1) -> List[dict]:
"""Returns num_serch_results pages per Google search."""
query_clean = self.clean_search_query(query)
result = self.search.results(query_clean, num_search_results)
return result
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Search Google for documents related to the query input.
Args:
query: user query
Returns:
Relevant documents from all various urls.
"""
# Get search questions
logger.info("Generating questions for Google Search ...")
result = self.llm_chain({"question": query})
logger.info(f"Questions for Google Search (raw): {result}")
questions = getattr(result["text"], "lines", [])
logger.info(f"Questions for Google Search: {questions}")
# Get urls
logger.info("Searching for relevant urls...")
urls_to_look = []
for query in questions:
# Google search
search_results = self.search_tool(query, self.num_search_results)
logger.info("Searching for relevant urls...")
logger.info(f"Search results: {search_results}")
for res in search_results:
if res.get("link", None):
urls_to_look.append(res["link"])
# Relevant urls
urls = set(urls_to_look)
# Check for any new urls that we have not processed
new_urls = list(urls.difference(self.url_database))
logger.info(f"New URLs to load: {new_urls}")
# Load, split, and add new urls to vectorstore
if new_urls:
loader = AsyncHtmlLoader(new_urls)
html2text = Html2TextTransformer()
logger.info("Indexing new urls...")
docs = loader.load()
docs = list(html2text.transform_documents(docs))
docs = self.text_splitter.split_documents(docs)
self.vectorstore.add_documents(docs)
self.url_database.extend(new_urls)
# Search for relevant splits
# TODO: make this async
logger.info("Grabbing most relevant splits from urls...")
docs = []
for query in questions:
docs.extend(self.vectorstore.similarity_search(query))
# Get unique docs
unique_documents_dict = {
(doc.page_content, tuple(sorted(doc.metadata.items()))): doc for doc in docs
}
unique_documents = list(unique_documents_dict.values())
return unique_documents
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
raise NotImplementedError
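# Usage sketch (illustrative only, not part of the original module). It assumes an OpenAI
# key, a Google Custom Search setup (GOOGLE_API_KEY / GOOGLE_CSE_ID) and a local Chroma
# store; those choices are assumptions made for the example, not requirements of
# from_llm() beyond its signature.
#
#   from langchain.embeddings import OpenAIEmbeddings
#   from langchain.llms import OpenAI
#   from langchain.vectorstores import Chroma
#
#   vectorstore = Chroma(embedding_function=OpenAIEmbeddings(), persist_directory="./db")
#   retriever = WebResearchRetriever.from_llm(
#       vectorstore=vectorstore,
#       llm=OpenAI(temperature=0),
#       search=GoogleSearchAPIWrapper(),
#   )
#   docs = retriever.get_relevant_documents("How do plants communicate?")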
| [
"You are an assistant tasked with improving Google search results. Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: {question}",
"question",
"<<SYS>> \n You are an assistant tasked with improving Google search results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: \n\n {question} [/INST]"
] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~azure_cognitive_search.py | """Retriever for the Azure Cognitive Search service."""
from __future__ import annotations
import json
from typing import Dict, List, Optional
import aiohttp
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
class AzureCognitiveSearchRetriever(BaseRetriever):
"""Retriever for the Azure Cognitive Search service."""
service_name: str = ""
"""Name of Azure Cognitive Search service"""
index_name: str = ""
"""Name of Index inside Azure Cognitive Search service"""
api_key: str = ""
"""API Key. Both Admin and Query keys work, but for reading data it's
recommended to use a Query key."""
api_version: str = "2020-06-30"
"""API version"""
aiosession: Optional[aiohttp.ClientSession] = None
"""ClientSession, in case we want to reuse connection for better performance."""
content_key: str = "content"
"""Key in a retrieved result to set as the Document page_content."""
top_k: Optional[int] = None
"""Number of results to retrieve. Set to None to retrieve all results."""
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that service name, index name and api key exists in environment."""
values["service_name"] = get_from_dict_or_env(
values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME"
)
values["index_name"] = get_from_dict_or_env(
values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME"
)
values["api_key"] = get_from_dict_or_env(
values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY"
)
return values
def _build_search_url(self, query: str) -> str:
base_url = f"https://{self.service_name}.search.windows.net/"
endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}"
top_param = f"&$top={self.top_k}" if self.top_k else ""
return base_url + endpoint_path + f"&search={query}" + top_param
@property
def _headers(self) -> Dict[str, str]:
return {
"Content-Type": "application/json",
"api-key": self.api_key,
}
def _search(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
response = requests.get(search_url, headers=self._headers)
if response.status_code != 200:
raise Exception(f"Error in search request: {response}")
return json.loads(response.text)["value"]
async def _asearch(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(search_url, headers=self._headers) as response:
response_json = await response.json()
else:
async with self.aiosession.get(
search_url, headers=self._headers
) as response:
response_json = await response.json()
return response_json["value"]
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
search_results = self._search(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
search_results = await self._asearch(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
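# Usage sketch (illustrative only, not part of the original module). The values below are
# placeholders; they can also be supplied through the AZURE_COGNITIVE_SEARCH_* environment
# variables validated above.
#
#   retriever = AzureCognitiveSearchRetriever(
#       service_name="<search-service-name>",
#       index_name="<index-name>",
#       api_key="<query-key>",
#       top_k=10,
#   )
#   docs = retriever.get_relevant_documents("what is langchain?")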
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~self_query~elasticsearch.py | from typing import Dict, Tuple, Union
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
class ElasticsearchTranslator(Visitor):
"""Translate the internal query language elements to valid filters."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
Comparator.CONTAIN,
Comparator.LIKE,
]
"""Subset of allowed logical comparators."""
allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
"""Subset of allowed logical operators."""
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
map_dict = {
Operator.OR: "should",
Operator.NOT: "must_not",
Operator.AND: "must",
Comparator.EQ: "term",
Comparator.GT: "gt",
Comparator.GTE: "gte",
Comparator.LT: "lt",
Comparator.LTE: "lte",
Comparator.CONTAIN: "match",
Comparator.LIKE: "fuzzy",
}
return map_dict[func]
def visit_operation(self, operation: Operation) -> Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {"bool": {self._format_func(operation.operator): args}}
def visit_comparison(self, comparison: Comparison) -> Dict:
# ElasticsearchStore filters require to target
# the metadata object field
field = f"metadata.{comparison.attribute}"
is_range_comparator = comparison.comparator in [
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
]
if is_range_comparator:
return {
"range": {
field: {self._format_func(comparison.comparator): comparison.value}
}
}
if comparison.comparator == Comparator.LIKE:
return {
self._format_func(comparison.comparator): {
field: {"value": comparison.value, "fuzziness": "AUTO"}
}
}
if comparison.comparator == Comparator.CONTAIN:
return {self._format_func(comparison.comparator): {field: comparison.value}}
# we assume that if the value is a string,
# we want to use the keyword field
field = f"{field}.keyword" if isinstance(comparison.value, str) else field
return {self._format_func(comparison.comparator): {field: comparison.value}}
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"filter": [structured_query.filter.accept(self)]}
return structured_query.query, kwargs
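# Illustrative, self-contained demo (not part of the original module): translate a simple
# structured query into an Elasticsearch filter clause. The attribute and value are made
# up for the example.
if __name__ == "__main__":
    translator = ElasticsearchTranslator()
    structured = StructuredQuery(
        query="dragons",
        filter=Comparison(comparator=Comparator.EQ, attribute="genre", value="fantasy"),
    )
    # Expected, based on visit_comparison() above:
    # ('dragons', {'filter': [{'term': {'metadata.genre.keyword': 'fantasy'}}]})
    print(translator.visit_structured_query(structured))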
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~zilliz.py | import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.embeddings.base import Embeddings
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.zilliz import Zilliz
# TODO: Update to ZillizClient + Hybrid Search when available
class ZillizRetriever(BaseRetriever):
"""Retriever for the Zilliz API."""
embedding_function: Embeddings
"""The underlying embedding function from which documents will be retrieved."""
collection_name: str = "LangChainCollection"
"""The name of the collection in Zilliz."""
connection_args: Optional[Dict[str, Any]] = None
"""The connection arguments for the Zilliz client."""
consistency_level: str = "Session"
"""The consistency level for the Zilliz client."""
search_params: Optional[dict] = None
"""The search parameters for the Zilliz client."""
store: Zilliz
"""The underlying Zilliz store."""
retriever: BaseRetriever
"""The underlying retriever."""
@root_validator(pre=True)
def create_client(cls, values: dict) -> dict:
values["store"] = Zilliz(
values["embedding_function"],
values["collection_name"],
values["connection_args"],
values["consistency_level"],
)
values["retriever"] = values["store"].as_retriever(
search_kwargs={"param": values["search_params"]}
)
return values
def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Zilliz store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
def ZillizRetreiver(*args: Any, **kwargs: Any) -> ZillizRetriever:
"""Deprecated ZillizRetreiver.
Please use ZillizRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
ZillizRetriever
"""
warnings.warn(
"ZillizRetreiver will be deprecated in the future. "
"Please use ZillizRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return ZillizRetriever(*args, **kwargs)
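# Usage sketch (illustrative only, not part of the original module). The connection
# arguments are placeholders for a Zilliz instance; every keyword read by create_client()
# above is passed explicitly.
#
#   from langchain.embeddings import OpenAIEmbeddings
#
#   retriever = ZillizRetriever(
#       embedding_function=OpenAIEmbeddings(),
#       collection_name="LangChainCollection",
#       connection_args={"uri": "<zilliz-uri>", "token": "<api-token>"},
#       consistency_level="Session",
#       search_params=None,
#   )
#   retriever.add_texts(["Zilliz is a managed vector database service."])
#   docs = retriever.get_relevant_documents("what is Zilliz?")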
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~schema~runnable~passthrough.py | from __future__ import annotations
from typing import Any, AsyncIterator, Iterator, List, Optional
from langchain.load.serializable import Serializable
from langchain.schema.runnable.base import Input, Runnable
from langchain.schema.runnable.config import RunnableConfig
def identity(x: Input) -> Input:
return x
async def aidentity(x: Input) -> Input:
return x
class RunnablePassthrough(Serializable, Runnable[Input, Input]):
"""
A runnable that passes through the input.
"""
@property
def lc_serializable(self) -> bool:
return True
@property
def lc_namespace(self) -> List[str]:
return self.__class__.__module__.split(".")[:-1]
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Input:
return self._call_with_config(identity, input, config)
async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None
) -> Input:
return await self._acall_with_config(aidentity, input, config)
def transform(
self,
input: Iterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Input]:
return self._transform_stream_with_config(input, identity, config)
def atransform(
self,
input: AsyncIterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Input]:
return self._atransform_stream_with_config(input, identity, config)
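# Illustrative, self-contained demo (not part of the original module): the passthrough
# returns whatever it is given, which makes it useful as a no-op branch when composing
# runnables.
if __name__ == "__main__":
    passthrough = RunnablePassthrough()
    print(passthrough.invoke("hello"))          # -> "hello"
    print(passthrough.invoke({"key": "value"})) # -> {"key": "value"}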
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~time_weighted_retriever.py | import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Field
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore
def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
"""Get the hours passed between two datetime objects."""
return (time - ref_time).total_seconds() / 3600
class TimeWeightedVectorStoreRetriever(BaseRetriever):
"""Retriever that combines embedding similarity with
recency in retrieving values."""
vectorstore: VectorStore
"""The vectorstore to store documents and determine salience."""
search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
"""Keyword arguments to pass to the vectorstore similarity search."""
# TODO: abstract as a queue
memory_stream: List[Document] = Field(default_factory=list)
"""The memory_stream of documents to search through."""
decay_rate: float = Field(default=0.01)
"""The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""
k: int = 4
"""The maximum number of documents to retrieve in a given call."""
other_score_keys: List[str] = []
"""Other keys in the metadata to factor into the score, e.g. 'importance'."""
default_salience: Optional[float] = None
"""The salience to assign memories not retrieved from the vector store.
None assigns no salience to documents not fetched from the vector store.
"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _get_combined_score(
self,
document: Document,
vector_relevance: Optional[float],
current_time: datetime.datetime,
) -> float:
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(
current_time,
document.metadata["last_accessed_at"],
)
score = (1.0 - self.decay_rate) ** hours_passed
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
"""Return documents that are salient to the query."""
docs_and_scores: List[Tuple[Document, float]]
docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
results = {}
for fetched_doc, relevance in docs_and_scores:
if "buffer_idx" in fetched_doc.metadata:
buffer_idx = fetched_doc.metadata["buffer_idx"]
doc = self.memory_stream[buffer_idx]
results[buffer_idx] = (doc, relevance)
return results
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Return documents that are relevant to the query."""
current_time = datetime.datetime.now()
docs_and_scores = {
doc.metadata["buffer_idx"]: (doc, self.default_salience)
for doc in self.memory_stream[-self.k :]
}
# If a doc is considered salient, update the salience score
docs_and_scores.update(self.get_salient_docs(query))
rescored_docs = [
(doc, self._get_combined_score(doc, relevance, current_time))
for doc, relevance in docs_and_scores.values()
]
rescored_docs.sort(key=lambda x: x[1], reverse=True)
result = []
# Ensure frequently accessed memories aren't forgotten
for doc, _ in rescored_docs[: self.k]:
# TODO: Update vector store doc once `update` method is exposed.
buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
buffered_doc.metadata["last_accessed_at"] = current_time
result.append(buffered_doc)
return result
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return self.vectorstore.add_documents(dup_docs, **kwargs)
async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
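# Illustrative, self-contained demo (not part of the original module): the recency part of
# the combined score is (1.0 - decay_rate) ** hours_passed, so with the default decay_rate
# of 0.01 a document untouched for a day keeps roughly 79% of its recency weight and one
# untouched for a week keeps roughly 18%.
if __name__ == "__main__":
    decay_rate = 0.01
    for hours in (1, 24, 24 * 7):
        print(f"{hours:>4} hours passed -> recency term {(1.0 - decay_rate) ** hours:.3f}")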
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~document_compressors~cohere_rerank.py | from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Optional, Sequence
from langchain.callbacks.manager import Callbacks
from langchain.pydantic_v1 import Extra, root_validator
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.schema import Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from cohere import Client
else:
# We do to avoid pydantic annotation issues when actually instantiating
# while keeping this import optional
try:
from cohere import Client
except ImportError:
pass
class CohereRerank(BaseDocumentCompressor):
"""DocumentCompressor that uses Cohere's rerank API to compress documents."""
client: Client
"""Cohere client to use for compressing documents."""
top_n: int = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.client.rerank(
model=self.model, query=query, documents=_docs, top_n=self.top_n
)
final_results = []
for r in results:
doc = doc_list[r.index]
doc.metadata["relevance_score"] = r.relevance_score
final_results.append(doc)
return final_results
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
raise NotImplementedError()
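# Usage sketch (illustrative only, not part of the original module). It assumes that
# COHERE_API_KEY is set and that `docs` is a list of Documents fetched by some other
# retriever; both are assumptions made for the example.
#
#   reranker = CohereRerank(top_n=3)
#   reranked = reranker.compress_documents(documents=docs, query="user question")
#   for doc in reranked:
#       print(doc.metadata["relevance_score"], doc.page_content[:80])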
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~schema~output_parser.py | from __future__ import annotations
import asyncio
from abc import ABC, abstractmethod
from typing import (
Any,
AsyncIterator,
Dict,
Generic,
Iterator,
List,
Optional,
TypeVar,
Union,
)
from langchain.load.serializable import Serializable
from langchain.schema.messages import BaseMessage
from langchain.schema.output import ChatGeneration, Generation
from langchain.schema.prompt import PromptValue
from langchain.schema.runnable import Runnable, RunnableConfig
T = TypeVar("T")
class BaseLLMOutputParser(Serializable, Generic[T], ABC):
"""Abstract base class for parsing the outputs of a model."""
@abstractmethod
def parse_result(self, result: List[Generation]) -> T:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
async def aparse_result(self, result: List[Generation]) -> T:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
return await asyncio.get_running_loop().run_in_executor(
None, self.parse_result, result
)
class BaseGenerationOutputParser(
BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T]
):
"""Base class to parse the output of an LLM call."""
def invoke(
self, input: Union[str, BaseMessage], config: Optional[RunnableConfig] = None
) -> T:
if isinstance(input, BaseMessage):
return self._call_with_config(
lambda inner_input: self.parse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
else:
return self._call_with_config(
lambda inner_input: self.parse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
async def ainvoke(
self, input: str | BaseMessage, config: RunnableConfig | None = None
) -> T:
if isinstance(input, BaseMessage):
return await self._acall_with_config(
lambda inner_input: self.aparse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
else:
return await self._acall_with_config(
lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
class BaseOutputParser(BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T]):
"""Base class to parse the output of an LLM call.
Output parsers help structure language model responses.
Example:
.. code-block:: python
class BooleanOutputParser(BaseOutputParser[bool]):
true_val: str = "YES"
false_val: str = "NO"
def parse(self, text: str) -> bool:
cleaned_text = text.strip().upper()
if cleaned_text not in (self.true_val.upper(), self.false_val.upper()):
raise OutputParserException(
f"BooleanOutputParser expected output value to either be "
f"{self.true_val} or {self.false_val} (case-insensitive). "
f"Received {cleaned_text}."
)
return cleaned_text == self.true_val.upper()
@property
def _type(self) -> str:
return "boolean_output_parser"
""" # noqa: E501
def invoke(
self, input: Union[str, BaseMessage], config: Optional[RunnableConfig] = None
) -> T:
if isinstance(input, BaseMessage):
return self._call_with_config(
lambda inner_input: self.parse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
else:
return self._call_with_config(
lambda inner_input: self.parse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
async def ainvoke(
self, input: str | BaseMessage, config: RunnableConfig | None = None
) -> T:
if isinstance(input, BaseMessage):
return await self._acall_with_config(
lambda inner_input: self.aparse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
else:
return await self._acall_with_config(
lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
def parse_result(self, result: List[Generation]) -> T:
"""Parse a list of candidate model Generations into a specific format.
The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
return self.parse(result[0].text)
@abstractmethod
def parse(self, text: str) -> T:
"""Parse a single string model output into some structure.
Args:
text: String output of a language model.
Returns:
Structured output.
"""
async def aparse_result(self, result: List[Generation]) -> T:
"""Parse a list of candidate model Generations into a specific format.
The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
return await self.aparse(result[0].text)
async def aparse(self, text: str) -> T:
"""Parse a single string model output into some structure.
Args:
text: String output of a language model.
Returns:
Structured output.
"""
return await asyncio.get_running_loop().run_in_executor(None, self.parse, text)
# TODO: rename 'completion' -> 'text'.
def parse_with_prompt(self, completion: str, prompt: PromptValue) -> Any:
"""Parse the output of an LLM call with the input prompt for context.
The prompt is largely provided in the event the OutputParser wants
to retry or fix the output in some way, and needs information from
the prompt to do so.
Args:
completion: String output of a language model.
prompt: Input PromptValue.
Returns:
Structured output
"""
return self.parse(completion)
def get_format_instructions(self) -> str:
"""Instructions on how the LLM output should be formatted."""
raise NotImplementedError
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
raise NotImplementedError(
f"_type property is not implemented in class {self.__class__.__name__}."
" This is required for serialization."
)
def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of output parser."""
output_parser_dict = super().dict(**kwargs)
output_parser_dict["_type"] = self._type
return output_parser_dict
class BaseTransformOutputParser(BaseOutputParser[T]):
"""Base class for an output parser that can handle streaming input."""
def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[T]:
for chunk in input:
if isinstance(chunk, BaseMessage):
yield self.parse_result([ChatGeneration(message=chunk)])
else:
yield self.parse_result([Generation(text=chunk)])
async def _atransform(
self, input: AsyncIterator[Union[str, BaseMessage]]
) -> AsyncIterator[T]:
async for chunk in input:
if isinstance(chunk, BaseMessage):
yield self.parse_result([ChatGeneration(message=chunk)])
else:
yield self.parse_result([Generation(text=chunk)])
def transform(
self,
input: Iterator[Union[str, BaseMessage]],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[T]:
yield from self._transform_stream_with_config(
input, self._transform, config, run_type="parser"
)
async def atransform(
self,
input: AsyncIterator[Union[str, BaseMessage]],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[T]:
async for chunk in self._atransform_stream_with_config(
input, self._atransform, config, run_type="parser"
):
yield chunk
class StrOutputParser(BaseTransformOutputParser[str]):
"""OutputParser that parses LLMResult into the top likely string."""
@property
def lc_serializable(self) -> bool:
"""Whether the class LangChain serializable."""
return True
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
return "default"
def parse(self, text: str) -> str:
"""Returns the input text with no changes."""
return text
# TODO: Deprecate
NoOpOutputParser = StrOutputParser
class OutputParserException(ValueError):
"""Exception that output parsers should raise to signify a parsing error.
This exists to differentiate parsing errors from other code or execution errors
that also may arise inside the output parser. OutputParserExceptions will be
available to catch and handle in ways to fix the parsing error, while other
errors will be raised.
Args:
error: The error that's being re-raised or an error message.
observation: String explanation of error which can be passed to a
model to try and remediate the issue.
llm_output: String model output which is error-ing.
send_to_llm: Whether to send the observation and llm_output back to an Agent
after an OutputParserException has been raised. This gives the underlying
model driving the agent the context that the previous output was improperly
structured, in the hopes that it will update the output to the correct
format.
"""
def __init__(
self,
error: Any,
observation: Optional[str] = None,
llm_output: Optional[str] = None,
send_to_llm: bool = False,
):
super(OutputParserException, self).__init__(error)
if send_to_llm:
if observation is None or llm_output is None:
raise ValueError(
"Arguments 'observation' & 'llm_output'"
" are required if 'send_to_llm' is True"
)
self.observation = observation
self.llm_output = llm_output
self.send_to_llm = send_to_llm
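# Illustrative, self-contained demo (not part of the original module): StrOutputParser
# simply returns the text of the top generation.
if __name__ == "__main__":
    parser = StrOutputParser()
    print(parser.invoke("plain model output"))           # -> "plain model output"
    print(parser.parse_result([Generation(text="hi")]))  # -> "hi"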
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~ensemble.py | """
Ensemble retriever that ensembles the results of
multiple retrievers by using weighted Reciprocal Rank Fusion
"""
from typing import Any, Dict, List
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
class EnsembleRetriever(BaseRetriever):
"""
    This class ensembles the results of multiple retrievers by using rank fusion.
Args:
retrievers: A list of retrievers to ensemble.
weights: A list of weights corresponding to the retrievers. Defaults to equal
weighting for all retrievers.
c: A constant added to the rank, controlling the balance between the importance
of high-ranked items and the consideration given to lower-ranked items.
Default is 60.
"""
retrievers: List[BaseRetriever]
weights: List[float]
c: int = 60
@root_validator(pre=True)
def set_weights(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if not values.get("weights"):
n_retrievers = len(values["retrievers"])
values["weights"] = [1 / n_retrievers] * n_retrievers
return values
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
# Get fused result of the retrievers.
fused_documents = self.rank_fusion(query, run_manager)
return fused_documents
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
# Get fused result of the retrievers.
fused_documents = await self.arank_fusion(query, run_manager)
return fused_documents
def rank_fusion(
self, query: str, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""
Retrieve the results of the retrievers and use rank_fusion_func to get
the final result.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.get_relevant_documents(
query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}")
)
for i, retriever in enumerate(self.retrievers)
]
# apply rank fusion
fused_documents = self.weighted_reciprocal_rank(retriever_docs)
return fused_documents
async def arank_fusion(
self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
"""
Asynchronously retrieve the results of the retrievers
and use rank_fusion_func to get the final result.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
# Get the results of all retrievers.
retriever_docs = [
await retriever.aget_relevant_documents(
query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}")
)
for i, retriever in enumerate(self.retrievers)
]
# apply rank fusion
fused_documents = self.weighted_reciprocal_rank(retriever_docs)
return fused_documents
def weighted_reciprocal_rank(
self, doc_lists: List[List[Document]]
) -> List[Document]:
"""
Perform weighted Reciprocal Rank Fusion on multiple rank lists.
You can find more details about RRF here:
https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf
Args:
doc_lists: A list of rank lists, where each rank list contains unique items.
Returns:
list: The final aggregated list of items sorted by their weighted RRF
scores in descending order.
"""
if len(doc_lists) != len(self.weights):
raise ValueError(
"Number of rank lists must be equal to the number of weights."
)
# Create a union of all unique documents in the input doc_lists
all_documents = set()
for doc_list in doc_lists:
for doc in doc_list:
all_documents.add(doc.page_content)
# Initialize the RRF score dictionary for each document
rrf_score_dic = {doc: 0.0 for doc in all_documents}
# Calculate RRF scores for each document
for doc_list, weight in zip(doc_lists, self.weights):
for rank, doc in enumerate(doc_list, start=1):
rrf_score = weight * (1 / (rank + self.c))
rrf_score_dic[doc.page_content] += rrf_score
# Sort documents by their RRF scores in descending order
sorted_documents = sorted(
rrf_score_dic.keys(), key=lambda x: rrf_score_dic[x], reverse=True
)
# Map the sorted page_content back to the original document objects
page_content_to_doc_map = {
doc.page_content: doc for doc_list in doc_lists for doc in doc_list
}
sorted_docs = [
page_content_to_doc_map[page_content] for page_content in sorted_documents
]
return sorted_docs
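# Illustrative, self-contained demo (not part of the original module): the weighted
# Reciprocal Rank Fusion score computed in weighted_reciprocal_rank() for a document
# ranked 1st by one retriever and 3rd by another, both weighted 0.5, with the default
# c of 60. Smaller rank numbers (better positions) contribute more.
if __name__ == "__main__":
    c = 60
    weights_and_ranks = [(0.5, 1), (0.5, 3)]
    score = sum(weight * (1 / (rank + c)) for weight, rank in weights_and_ranks)
    print(score)  # roughly 0.016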
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~google_cloud_enterprise_search.py | """Retriever wrapper for Google Cloud Enterprise Search on Gen App Builder."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from google.cloud.discoveryengine_v1beta import (
SearchRequest,
SearchResult,
SearchServiceClient,
)
class GoogleCloudEnterpriseSearchRetriever(BaseRetriever):
"""Retriever for the Google Cloud Enterprise Search Service API.
For the detailed explanation of the Enterprise Search concepts
and configuration parameters refer to the product documentation.
https://cloud.google.com/generative-ai-app-builder/docs/enterprise-search-introduction
"""
project_id: str
"""Google Cloud Project ID."""
search_engine_id: str
"""Enterprise Search engine ID."""
serving_config_id: str = "default_config"
"""Enterprise Search serving config ID."""
location_id: str = "global"
"""Enterprise Search engine location."""
filter: Optional[str] = None
"""Filter expression."""
get_extractive_answers: bool = False
"""If True return Extractive Answers, otherwise return Extractive Segments."""
max_documents: int = Field(default=5, ge=1, le=100)
"""The maximum number of documents to return."""
max_extractive_answer_count: int = Field(default=1, ge=1, le=5)
"""The maximum number of extractive answers returned in each search result.
At most 5 answers will be returned for each SearchResult.
"""
max_extractive_segment_count: int = Field(default=1, ge=1, le=1)
"""The maximum number of extractive segments returned in each search result.
Currently one segment will be returned for each SearchResult.
"""
query_expansion_condition: int = Field(default=1, ge=0, le=2)
"""Specification to determine under which conditions query expansion should occur.
0 - Unspecified query expansion condition. In this case, server behavior defaults
to disabled
1 - Disabled query expansion. Only the exact search query is used, even if
SearchResponse.total_size is zero.
2 - Automatic query expansion built by the Search API.
"""
spell_correction_mode: int = Field(default=2, ge=0, le=2)
"""Specification to determine under which conditions query expansion should occur.
0 - Unspecified spell correction mode. In this case, server behavior defaults
to auto.
1 - Suggestion only. Search API will try to find a spell suggestion if there is any
and put in the `SearchResponse.corrected_query`.
The spell suggestion will not be used as the search query.
2 - Automatic spell correction built by the Search API.
Search will be based on the corrected query if found.
"""
credentials: Any = None
"""The default custom credentials (google.auth.credentials.Credentials) to use
when making API calls. If not provided, credentials will be ascertained from
the environment."""
_client: SearchServiceClient
_serving_config: str
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
underscore_attrs_are_private = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validates the environment."""
try:
from google.cloud import discoveryengine_v1beta # noqa: F401
except ImportError as exc:
raise ImportError(
"google.cloud.discoveryengine is not installed. "
"Please install it with pip install google-cloud-discoveryengine"
) from exc
values["project_id"] = get_from_dict_or_env(values, "project_id", "PROJECT_ID")
values["search_engine_id"] = get_from_dict_or_env(
values, "search_engine_id", "SEARCH_ENGINE_ID"
)
return values
def __init__(self, **data: Any) -> None:
"""Initializes private fields."""
from google.cloud.discoveryengine_v1beta import SearchServiceClient
super().__init__(**data)
self._client = SearchServiceClient(credentials=self.credentials)
self._serving_config = self._client.serving_config_path(
project=self.project_id,
location=self.location_id,
data_store=self.search_engine_id,
serving_config=self.serving_config_id,
)
def _convert_search_response(
self, results: Sequence[SearchResult]
) -> List[Document]:
"""Converts a sequence of search results to a list of LangChain documents."""
from google.protobuf.json_format import MessageToDict
documents: List[Document] = []
for result in results:
document_dict = MessageToDict(
result.document._pb, preserving_proto_field_name=True
)
derived_struct_data = document_dict.get("derived_struct_data", None)
if not derived_struct_data:
continue
doc_metadata = document_dict.get("struct_data", {})
doc_metadata["id"] = document_dict["id"]
chunk_type = (
"extractive_answers"
if self.get_extractive_answers
else "extractive_segments"
)
            for chunk in derived_struct_data.get(chunk_type, []):  # plain dict from MessageToDict
doc_metadata["source"] = derived_struct_data.get("link", "")
if chunk_type == "extractive_answers":
doc_metadata["source"] += f":{chunk.get('pageNumber', '')}"
documents.append(
Document(
page_content=chunk.get("content", ""), metadata=doc_metadata
)
)
return documents
def _create_search_request(self, query: str) -> SearchRequest:
"""Prepares a SearchRequest object."""
from google.cloud.discoveryengine_v1beta import SearchRequest
query_expansion_spec = SearchRequest.QueryExpansionSpec(
condition=self.query_expansion_condition,
)
spell_correction_spec = SearchRequest.SpellCorrectionSpec(
mode=self.spell_correction_mode
)
if self.get_extractive_answers:
extractive_content_spec = (
SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_answer_count=self.max_extractive_answer_count,
)
)
else:
extractive_content_spec = (
SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_segment_count=self.max_extractive_segment_count,
)
)
content_search_spec = SearchRequest.ContentSearchSpec(
extractive_content_spec=extractive_content_spec,
)
return SearchRequest(
query=query,
filter=self.filter,
serving_config=self._serving_config,
page_size=self.max_documents,
content_search_spec=content_search_spec,
query_expansion_spec=query_expansion_spec,
spell_correction_spec=spell_correction_spec,
)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
search_request = self._create_search_request(query)
response = self._client.search(search_request)
documents = self._convert_search_response(response.results)
return documents
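# Usage sketch (illustrative only, not part of the original module). Project and engine
# ids are placeholders; they can also come from the PROJECT_ID and SEARCH_ENGINE_ID
# environment variables validated above.
#
#   retriever = GoogleCloudEnterpriseSearchRetriever(
#       project_id="<gcp-project-id>",
#       search_engine_id="<data-store-id>",
#       max_documents=3,
#       get_extractive_answers=True,
#   )
#   docs = retriever.get_relevant_documents("What is our refund policy?")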
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~kendra.py | import re
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Union
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.pydantic_v1 import BaseModel, Extra, root_validator, validator
from langchain.schema import BaseRetriever
def clean_excerpt(excerpt: str) -> str:
"""Cleans an excerpt from Kendra.
Args:
excerpt: The excerpt to clean.
Returns:
The cleaned excerpt.
"""
if not excerpt:
return excerpt
res = re.sub("\s+", " ", excerpt).replace("...", "")
return res
def combined_text(item: "ResultItem") -> str:
"""Combines a ResultItem title and excerpt into a single string.
Args:
item: the ResultItem of a Kendra search.
Returns:
A combined text of the title and excerpt of the given item.
"""
text = ""
title = item.get_title()
if title:
text += f"Document Title: {title}\n"
excerpt = clean_excerpt(item.get_excerpt())
if excerpt:
text += f"Document Excerpt: \n{excerpt}\n"
return text
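# Illustrative note (not part of the original module): for a result item whose title is
# "FAQ" and whose raw excerpt is "Returns are accepted...   within 30 days", the text
# produced above is:
#
#   Document Title: FAQ
#   Document Excerpt:
#   Returns are accepted within 30 days
#
# clean_excerpt() collapses whitespace runs and strips Kendra's "..." ellipses before the
# excerpt is embedded in the combined text.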
DocumentAttributeValueType = Union[str, int, List[str], None]
"""Possible types of a DocumentAttributeValue. Dates are also represented as str."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Highlight(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""
Represents the information that can be
used to highlight key words in the excerpt.
"""
BeginOffset: int
"""The zero-based location in the excerpt where the highlight starts."""
EndOffset: int
"""The zero-based location in the excerpt where the highlight ends."""
TopAnswer: Optional[bool]
"""Indicates whether the result is the best one."""
Type: Optional[str]
"""The highlight type: STANDARD or THESAURUS_SYNONYM."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class TextWithHighLights(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""Text with highlights."""
Text: str
"""The text."""
Highlights: Optional[Any]
"""The highlights."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class AdditionalResultAttributeValue( # type: ignore[call-arg]
BaseModel, extra=Extra.allow
):
"""The value of an additional result attribute."""
TextWithHighlightsValue: TextWithHighLights
"""The text with highlights value."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class AdditionalResultAttribute(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""An additional result attribute."""
Key: str
"""The key of the attribute."""
ValueType: Literal["TEXT_WITH_HIGHLIGHTS_VALUE"]
"""The type of the value."""
Value: AdditionalResultAttributeValue
"""The value of the attribute."""
def get_value_text(self) -> str:
return self.Value.TextWithHighlightsValue.Text
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class DocumentAttributeValue(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""The value of a document attribute."""
DateValue: Optional[str]
"""The date expressed as an ISO 8601 string."""
LongValue: Optional[int]
"""The long value."""
StringListValue: Optional[List[str]]
"""The string list value."""
StringValue: Optional[str]
"""The string value."""
@property
def value(self) -> DocumentAttributeValueType:
"""The only defined document attribute value or None.
According to Amazon Kendra, you can only provide one
value for a document attribute.
"""
if self.DateValue:
return self.DateValue
if self.LongValue:
return self.LongValue
if self.StringListValue:
return self.StringListValue
if self.StringValue:
return self.StringValue
return None
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class DocumentAttribute(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""A document attribute."""
Key: str
"""The key of the attribute."""
Value: DocumentAttributeValue
"""The value of the attribute."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class ResultItem(BaseModel, ABC, extra=Extra.allow): # type: ignore[call-arg]
"""Abstract class that represents a result item."""
Id: Optional[str]
"""The ID of the relevant result item."""
DocumentId: Optional[str]
"""The document ID."""
DocumentURI: Optional[str]
"""The document URI."""
DocumentAttributes: Optional[List[DocumentAttribute]] = []
"""The document attributes."""
@abstractmethod
def get_title(self) -> str:
"""Document title."""
@abstractmethod
def get_excerpt(self) -> str:
"""Document excerpt or passage original content as retrieved by Kendra."""
def get_additional_metadata(self) -> dict:
"""Document additional metadata dict.
This returns any extra metadata except these:
* result_id
* document_id
* source
* title
* excerpt
* document_attributes
"""
return {}
def get_document_attributes_dict(self) -> Dict[str, DocumentAttributeValueType]:
"""Document attributes dict."""
return {attr.Key: attr.Value.value for attr in (self.DocumentAttributes or [])}
def to_doc(
self, page_content_formatter: Callable[["ResultItem"], str] = combined_text
) -> Document:
"""Converts this item to a Document."""
page_content = page_content_formatter(self)
metadata = self.get_additional_metadata()
metadata.update(
{
"result_id": self.Id,
"document_id": self.DocumentId,
"source": self.DocumentURI,
"title": self.get_title(),
"excerpt": self.get_excerpt(),
"document_attributes": self.get_document_attributes_dict(),
}
)
return Document(page_content=page_content, metadata=metadata)
class QueryResultItem(ResultItem):
"""A Query API result item."""
DocumentTitle: TextWithHighLights
"""The document title."""
FeedbackToken: Optional[str]
"""Identifies a particular result from a particular query."""
Format: Optional[str]
"""
If the Type is ANSWER, then format is either:
* TABLE: a table excerpt is returned in TableExcerpt;
* TEXT: a text excerpt is returned in DocumentExcerpt.
"""
Type: Optional[str]
"""Type of result: DOCUMENT or QUESTION_ANSWER or ANSWER"""
AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = []
"""One or more additional attributes associated with the result."""
DocumentExcerpt: Optional[TextWithHighLights]
"""Excerpt of the document text."""
def get_title(self) -> str:
return self.DocumentTitle.Text
def get_attribute_value(self) -> str:
if not self.AdditionalAttributes:
return ""
if not self.AdditionalAttributes[0]:
return ""
else:
return self.AdditionalAttributes[0].get_value_text()
def get_excerpt(self) -> str:
if (
self.AdditionalAttributes
and self.AdditionalAttributes[0].Key == "AnswerText"
):
excerpt = self.get_attribute_value()
elif self.DocumentExcerpt:
excerpt = self.DocumentExcerpt.Text
else:
excerpt = ""
return excerpt
def get_additional_metadata(self) -> dict:
additional_metadata = {"type": self.Type}
return additional_metadata
class RetrieveResultItem(ResultItem):
"""A Retrieve API result item."""
DocumentTitle: Optional[str]
"""The document title."""
Content: Optional[str]
"""The content of the item."""
def get_title(self) -> str:
return self.DocumentTitle or ""
def get_excerpt(self) -> str:
return self.Content or ""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class QueryResult(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""
Represents an Amazon Kendra Query API search result, which is composed of:
* Relevant suggested answers: either a text excerpt or table excerpt.
* Matching FAQs or question-answer pairs from your FAQ file.
* Documents, including an excerpt of each document with its title.
"""
ResultItems: List[QueryResultItem]
"""The result items."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class RetrieveResult(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""
Represents an Amazon Kendra Retrieve API search result, which is composed of:
* relevant passages or text excerpts given an input query.
"""
QueryId: str
"""The ID of the query."""
ResultItems: List[RetrieveResultItem]
"""The result items."""
class AmazonKendraRetriever(BaseRetriever):
"""Retriever for the Amazon Kendra Index.
Args:
index_id: Kendra index id
region_name: The aws region e.g., `us-west-2`.
Falls back to the AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config.
credentials_profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
top_k: Number of results to return
attribute_filter: Additional filtering of results based on metadata
See: https://docs.aws.amazon.com/kendra/latest/APIReference
page_content_formatter: generates the Document page_content
allowing access to all result item attributes. By default, it uses
the item's title and excerpt.
client: boto3 client for Kendra
user_context: Provides information about the user context
See: https://docs.aws.amazon.com/kendra/latest/APIReference
Example:
.. code-block:: python
retriever = AmazonKendraRetriever(
index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03"
)
"""
index_id: str
region_name: Optional[str] = None
credentials_profile_name: Optional[str] = None
top_k: int = 3
attribute_filter: Optional[Dict] = None
page_content_formatter: Callable[[ResultItem], str] = combined_text
client: Any
user_context: Optional[Dict] = None
@validator("top_k")
def validate_top_k(cls, value: int) -> int:
if value < 0:
raise ValueError(f"top_k ({value}) cannot be negative.")
return value
@root_validator(pre=True)
def create_client(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if values.get("client") is not None:
return values
try:
import boto3
if values.get("credentials_profile_name"):
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values.get("region_name"):
client_params["region_name"] = values["region_name"]
values["client"] = session.client("kendra", **client_params)
return values
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
def _kendra_query(self, query: str) -> Sequence[ResultItem]:
kendra_kwargs = {
"IndexId": self.index_id,
"QueryText": query.strip(),
"PageSize": self.top_k,
}
if self.attribute_filter is not None:
kendra_kwargs["AttributeFilter"] = self.attribute_filter
if self.user_context is not None:
kendra_kwargs["UserContext"] = self.user_context
response = self.client.retrieve(**kendra_kwargs)
r_result = RetrieveResult.parse_obj(response)
if r_result.ResultItems:
return r_result.ResultItems
# Retrieve API returned 0 results, fall back to Query API
response = self.client.query(**kendra_kwargs)
q_result = QueryResult.parse_obj(response)
return q_result.ResultItems
def _get_top_k_docs(self, result_items: Sequence[ResultItem]) -> List[Document]:
top_docs = [
item.to_doc(self.page_content_formatter)
for item in result_items[: self.top_k]
]
return top_docs
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Run search on Kendra index and get top k documents
Example:
.. code-block:: python
docs = retriever.get_relevant_documents('This is my query')
"""
result_items = self._kendra_query(query)
top_k_docs = self._get_top_k_docs(result_items)
return top_k_docs
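# Minimal usage sketch (illustrative only): shows a typical query against the
# retriever defined above. The index id and region are placeholders, and AWS
# credentials are assumed to be resolvable by boto3 (env vars, profile, or IMDS).
if __name__ == "__main__":
    retriever = AmazonKendraRetriever(
        index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03",  # placeholder index id
        region_name="us-east-1",  # placeholder region
        top_k=3,
    )
    for doc in retriever.get_relevant_documents("How do I rotate IAM access keys?"):
        # Each Document carries the combined title/excerpt as page_content and
        # the Kendra result metadata (title, source, document_attributes, ...).
        print(doc.metadata["title"], "->", doc.metadata["source"])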
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~pinecone_hybrid_search.py | """Taken from: https://docs.pinecone.io/docs/hybrid-search"""
import hashlib
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.embeddings.base import Embeddings
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BaseRetriever, Document
def hash_text(text: str) -> str:
"""Hash a text using SHA256.
Args:
text: Text to hash.
Returns:
Hashed text.
"""
return str(hashlib.sha256(text.encode("utf-8")).hexdigest())
def create_index(
contexts: List[str],
index: Any,
embeddings: Embeddings,
sparse_encoder: Any,
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
"""
Create a Pinecone index from a list of contexts.
Modifies the index argument in-place.
Args:
contexts: List of contexts to embed.
index: Pinecone index to use.
embeddings: Embeddings model to use.
sparse_encoder: Sparse encoder to use.
ids: List of ids to use for the documents.
metadatas: List of metadata to use for the documents.
"""
batch_size = 32
_iterator = range(0, len(contexts), batch_size)
try:
from tqdm.auto import tqdm
_iterator = tqdm(_iterator)
except ImportError:
pass
if ids is None:
# create unique ids using hash of the text
ids = [hash_text(context) for context in contexts]
for i in _iterator:
# find end of batch
i_end = min(i + batch_size, len(contexts))
# extract batch
context_batch = contexts[i:i_end]
batch_ids = ids[i:i_end]
metadata_batch = (
metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
)
# add context passages as metadata
meta = [
{"context": context, **metadata}
for context, metadata in zip(context_batch, metadata_batch)
]
# create dense vectors
dense_embeds = embeddings.embed_documents(context_batch)
# create sparse vectors
sparse_embeds = sparse_encoder.encode_documents(context_batch)
for s in sparse_embeds:
s["values"] = [float(s1) for s1 in s["values"]]
vectors = []
# loop through the data and create dictionaries for upserts
for doc_id, sparse, dense, metadata in zip(
batch_ids, sparse_embeds, dense_embeds, meta
):
vectors.append(
{
"id": doc_id,
"sparse_values": sparse,
"values": dense,
"metadata": metadata,
}
)
# upload the documents to the new hybrid index
index.upsert(vectors)
class PineconeHybridSearchRetriever(BaseRetriever):
"""Pinecone Hybrid Search Retriever."""
embeddings: Embeddings
"""Embeddings model to use."""
"""description"""
sparse_encoder: Any
"""Sparse encoder to use."""
index: Any
"""Pinecone index to use."""
top_k: int = 4
"""Number of documents to return."""
alpha: float = 0.5
"""Alpha value for hybrid search."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def add_texts(
self,
texts: List[str],
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
create_index(
texts,
self.index,
self.embeddings,
self.sparse_encoder,
ids=ids,
metadatas=metadatas,
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from pinecone_text.hybrid import hybrid_convex_scale # noqa:F401
from pinecone_text.sparse.base_sparse_encoder import (
BaseSparseEncoder, # noqa:F401
)
except ImportError:
raise ValueError(
"Could not import pinecone_text python package. "
"Please install it with `pip install pinecone_text`."
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
from pinecone_text.hybrid import hybrid_convex_scale
sparse_vec = self.sparse_encoder.encode_queries(query)
# convert the question into a dense vector
dense_vec = self.embeddings.embed_query(query)
# scale alpha with hybrid_scale
dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha)
sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]]
# query pinecone with the query parameters
result = self.index.query(
vector=dense_vec,
sparse_vector=sparse_vec,
top_k=self.top_k,
include_metadata=True,
)
final_result = []
for res in result["matches"]:
context = res["metadata"].pop("context")
final_result.append(
Document(page_content=context, metadata=res["metadata"])
)
# return search results as json
return final_result
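# Minimal usage sketch (illustrative only). Assumes the `pinecone-client`
# (classic `pinecone.init` / `pinecone.Index` interface) and `pinecone_text`
# packages are installed, an OpenAI API key is configured, and a Pinecone index
# created with the dotproduct metric already exists; the API key, environment
# and index name below are placeholders.
if __name__ == "__main__":
    import pinecone
    from langchain.embeddings import OpenAIEmbeddings
    from pinecone_text.sparse import BM25Encoder

    pinecone.init(api_key="YOUR_API_KEY", environment="us-east-1-aws")
    retriever = PineconeHybridSearchRetriever(
        embeddings=OpenAIEmbeddings(),
        sparse_encoder=BM25Encoder.default(),  # pre-fitted BM25 encoder
        index=pinecone.Index("hybrid-demo"),  # placeholder index name
        top_k=4,
        alpha=0.5,  # 1.0 = pure dense (semantic), 0.0 = pure sparse (keyword)
    )
    retriever.add_texts(["Chennai gets most of its rain from the northeast monsoon."])
    print(retriever.get_relevant_documents("When does it rain in Chennai?")[0].page_content)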
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~weaviate_hybrid_search.py | from __future__ import annotations
from typing import Any, Dict, List, Optional, cast
from uuid import uuid4
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever
class WeaviateHybridSearchRetriever(BaseRetriever):
"""Retriever for the Weaviate's hybrid search."""
client: Any
"""keyword arguments to pass to the Weaviate client."""
index_name: str
"""The name of the index to use."""
text_key: str
"""The name of the text key to use."""
alpha: float = 0.5
"""The weight of the text key in the hybrid search."""
k: int = 4
"""The number of results to return."""
attributes: List[str]
"""The attributes to return in the results."""
create_schema_if_missing: bool = True
"""Whether to create the schema if it doesn't exist."""
@root_validator(pre=True)
def validate_client(
cls,
values: Dict[str, Any],
) -> Dict[str, Any]:
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(values["client"], weaviate.Client):
client = values["client"]
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
if values.get("attributes") is None:
values["attributes"] = []
cast(List, values["attributes"]).append(values["text_key"])
if values.get("create_schema_if_missing", True):
class_obj = {
"class": values["index_name"],
"properties": [{"name": values["text_key"], "dataType": ["text"]}],
"vectorizer": "text2vec-openai",
}
if not values["client"].schema.exists(values["index_name"]):
values["client"].schema.create_class(class_obj)
return values
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
# added text_key
def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]:
"""Upload documents to Weaviate."""
from weaviate.util import get_valid_uuid
with self.client.batch as batch:
ids = []
for i, doc in enumerate(docs):
metadata = doc.metadata or {}
data_properties = {self.text_key: doc.page_content, **metadata}
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
else:
_id = get_valid_uuid(uuid4())
batch.add_data_object(data_properties, self.index_name, _id)
ids.append(_id)
return ids
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
where_filter: Optional[Dict[str, object]] = None,
score: bool = False,
) -> List[Document]:
"""Look up similar documents in Weaviate."""
query_obj = self.client.query.get(self.index_name, self.attributes)
if where_filter:
query_obj = query_obj.with_where(where_filter)
if score:
query_obj = query_obj.with_additional(["score", "explainScore"])
result = query_obj.with_hybrid(query, alpha=self.alpha).with_limit(self.k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self.index_name]:
text = res.pop(self.text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
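# Minimal usage sketch (illustrative only). Assumes a running Weaviate instance
# (v3-style client) with the text2vec-openai module enabled, since the schema
# created above uses that vectorizer; the URL and class name are placeholders.
if __name__ == "__main__":
    import weaviate

    client = weaviate.Client("http://localhost:8080")  # placeholder URL
    retriever = WeaviateHybridSearchRetriever(
        client=client,
        index_name="LangChainDemo",  # placeholder class name
        text_key="text",
        attributes=[],
        create_schema_if_missing=True,
    )
    retriever.add_documents([Document(page_content="Weaviate combines BM25 and vector search.")])
    print(retriever.get_relevant_documents("hybrid search")[0].page_content)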
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~metal.py | from typing import Any, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
class MetalRetriever(BaseRetriever):
"""Retriever that uses the Metal API."""
client: Any
"""The Metal client to use."""
params: Optional[dict] = None
"""The parameters to pass to the Metal client."""
@root_validator(pre=True)
def validate_client(cls, values: dict) -> dict:
"""Validate that the client is of the correct type."""
from metal_sdk.metal import Metal
if "client" in values:
client = values["client"]
if not isinstance(client, Metal):
raise ValueError(
"Got unexpected client, should be of type metal_sdk.metal.Metal. "
f"Instead, got {type(client)}"
)
values["params"] = values.get("params", {})
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
results = self.client.search({"text": query}, **self.params)
final_results = []
for r in results["data"]:
metadata = {k: v for k, v in r.items() if k != "text"}
final_results.append(Document(page_content=r["text"], metadata=metadata))
return final_results
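# Minimal usage sketch (illustrative only). The Metal API key, client id and
# index id below are placeholders for your own Metal app, and the metal_sdk
# package is assumed to be installed.
if __name__ == "__main__":
    from metal_sdk.metal import Metal

    metal = Metal("API_KEY", "CLIENT_ID", "INDEX_ID")  # placeholder credentials
    retriever = MetalRetriever(client=metal, params={"limit": 2})
    for doc in retriever.get_relevant_documents("What can Metal index?"):
        print(doc.page_content)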
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~wikipedia.py | from typing import List
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from langchain.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper):
"""Retriever for Wikipedia API.
It wraps load() to get_relevant_documents().
It uses all WikipediaAPIWrapper arguments without any change.
"""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
return self.load(query=query)
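# Minimal usage sketch (illustrative only). Requires the `wikipedia` package
# used by WikipediaAPIWrapper; `lang` and `top_k_results` are wrapper fields.
if __name__ == "__main__":
    retriever = WikipediaRetriever(lang="en", top_k_results=2)
    for doc in retriever.get_relevant_documents("Large language model"):
        print(doc.page_content[:120].replace("\n", " "))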
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~self_query~qdrant.py | from __future__ import annotations
from typing import TYPE_CHECKING, Tuple
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
if TYPE_CHECKING:
from qdrant_client.http import models as rest
class QdrantTranslator(Visitor):
"""Translate the internal query language elements to valid filters."""
allowed_comparators = (
Comparator.EQ,
Comparator.LT,
Comparator.LTE,
Comparator.GT,
Comparator.GTE,
)
"""Subset of allowed logical comparators."""
def __init__(self, metadata_key: str):
self.metadata_key = metadata_key
def visit_operation(self, operation: Operation) -> rest.Filter:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
args = [arg.accept(self) for arg in operation.arguments]
operator = {
Operator.AND: "must",
Operator.OR: "should",
Operator.NOT: "must_not",
}[operation.operator]
return rest.Filter(**{operator: args})
def visit_comparison(self, comparison: Comparison) -> rest.FieldCondition:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
self._validate_func(comparison.comparator)
attribute = self.metadata_key + "." + comparison.attribute
if comparison.comparator == Comparator.EQ:
return rest.FieldCondition(
key=attribute, match=rest.MatchValue(value=comparison.value)
)
kwargs = {comparison.comparator.value: comparison.value}
return rest.FieldCondition(key=attribute, range=rest.Range(**kwargs))
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
"Cannot import qdrant_client. Please install with `pip install "
"qdrant-client`."
) from e
if structured_query.filter is None:
kwargs = {}
else:
filter = structured_query.filter.accept(self)
if isinstance(filter, rest.FieldCondition):
filter = rest.Filter(must=[filter])
kwargs = {"filter": filter}
return structured_query.query, kwargs
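# Minimal usage sketch (illustrative only). Builds a structured query by hand
# and translates it into qdrant-client filter kwargs; the attribute names and
# values are made up, and the qdrant-client package must be installed.
if __name__ == "__main__":
    translator = QdrantTranslator(metadata_key="metadata")
    structured_query = StructuredQuery(
        query="dinosaur movies",
        filter=Operation(
            operator=Operator.AND,
            arguments=[
                Comparison(comparator=Comparator.EQ, attribute="genre", value="science fiction"),
                Comparison(comparator=Comparator.GT, attribute="year", value=1990),
            ],
        ),
    )
    new_query, search_kwargs = translator.visit_structured_query(structured_query)
    # search_kwargs["filter"] is a qdrant rest.Filter with two "must" conditions
    # on "metadata.genre" and "metadata.year".
    print(new_query, search_kwargs["filter"])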
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~multi_query.py | import logging
from typing import List
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.prompts.prompt import PromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema import BaseRetriever, Document
logger = logging.getLogger(__name__)
class LineList(BaseModel):
"""List of lines."""
lines: List[str] = Field(description="Lines of text")
"""List of lines."""
class LineListOutputParser(PydanticOutputParser):
"""Output parser for a list of lines."""
def __init__(self) -> None:
super().__init__(pydantic_object=LineList)
def parse(self, text: str) -> LineList:
lines = text.strip().split("\n")
return LineList(lines=lines)
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate(
input_variables=["question"],
template="""You are an AI language model assistant. Your task is
to generate 3 different versions of the given user
question to retrieve relevant documents from a vector database.
By generating multiple perspectives on the user question,
your goal is to help the user overcome some of the limitations
of distance-based similarity search. Provide these alternative
questions separated by newlines. Original question: {question}""",
)
class MultiQueryRetriever(BaseRetriever):
"""Given a user query, use an LLM to write a set of queries.
Retrieve docs for each query. Take the unique union of all retrieved docs."""
retriever: BaseRetriever
llm_chain: LLMChain
verbose: bool = True
parser_key: str = "lines"
@classmethod
def from_llm(
cls,
retriever: BaseRetriever,
llm: BaseLLM,
prompt: PromptTemplate = DEFAULT_QUERY_PROMPT,
parser_key: str = "lines",
) -> "MultiQueryRetriever":
"""Initialize from llm using default template.
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
Returns:
MultiQueryRetriever
"""
output_parser = LineListOutputParser()
llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser)
return cls(
retriever=retriever,
llm_chain=llm_chain,
parser_key=parser_key,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Get relevated documents given a user query.
Args:
question: user query
Returns:
Unique union of relevant documents from all generated queries
"""
queries = self.generate_queries(query, run_manager)
documents = self.retrieve_documents(queries, run_manager)
unique_documents = self.unique_union(documents)
return unique_documents
def generate_queries(
self, question: str, run_manager: CallbackManagerForRetrieverRun
) -> List[str]:
"""Generate queries based upon user input.
Args:
question: user query
Returns:
List of LLM generated queries that are similar to the user input
"""
response = self.llm_chain(
{"question": question}, callbacks=run_manager.get_child()
)
lines = getattr(response["text"], self.parser_key, [])
if self.verbose:
logger.info(f"Generated queries: {lines}")
return lines
def retrieve_documents(
self, queries: List[str], run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Run all LLM generated queries.
Args:
queries: query list
Returns:
List of retrieved Documents
"""
documents = []
for query in queries:
docs = self.retriever.get_relevant_documents(
query, callbacks=run_manager.get_child()
)
documents.extend(docs)
return documents
def unique_union(self, documents: List[Document]) -> List[Document]:
"""Get unique Documents.
Args:
documents: List of retrieved Documents
Returns:
List of unique retrieved Documents
"""
# Create a dictionary with page_content as keys to remove duplicates
# TODO: Add Document ID property (e.g., UUID)
unique_documents_dict = {
(doc.page_content, tuple(sorted(doc.metadata.items()))): doc
for doc in documents
}
unique_documents = list(unique_documents_dict.values())
return unique_documents
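# Minimal usage sketch (illustrative only). Assumes an OpenAI API key and the
# chromadb package; the sample texts and question are placeholders.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI
    from langchain.vectorstores import Chroma

    vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
    vectorstore.add_texts(
        [
            "The course covers linear regression and its assumptions.",
            "Decision trees are introduced in week three.",
        ]
    )
    retriever = MultiQueryRetriever.from_llm(
        retriever=vectorstore.as_retriever(),
        llm=OpenAI(temperature=0),
    )
    # The LLM generates several query variants; the unique union of the
    # documents retrieved for each variant is returned.
    print(retriever.get_relevant_documents("What does the course say about regression?"))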
| [
"question",
"You are an AI language model assistant. Your task is \n to generate 3 different versions of the given user \n question to retrieve relevant documents from a vector database. \n By generating multiple perspectives on the user question, \n your goal is to help the user overcome some of the limitations \n of distance-based similarity search. Provide these alternative \n questions separated by newlines. Original question: {question}"
] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~milvus.py | """Milvus Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.embeddings.base import Embeddings
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.milvus import Milvus
# TODO: Update to MilvusClient + Hybrid Search when available
class MilvusRetriever(BaseRetriever):
"""Retriever that uses the Milvus API."""
embedding_function: Embeddings
collection_name: str = "LangChainCollection"
connection_args: Optional[Dict[str, Any]] = None
consistency_level: str = "Session"
search_params: Optional[dict] = None
store: Milvus
retriever: BaseRetriever
@root_validator(pre=True)
def create_retriever(cls, values: Dict) -> Dict:
"""Create the Milvus store and retriever."""
values["store"] = Milvus(
values["embedding_function"],
values["collection_name"],
values["connection_args"],
values["consistency_level"],
)
values["retriever"] = values["store"].as_retriever(
search_kwargs={"param": values["search_params"]}
)
return values
def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Milvus store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
def MilvusRetreiver(*args: Any, **kwargs: Any) -> MilvusRetriever:
"""Deprecated MilvusRetreiver. Please use MilvusRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
MilvusRetriever
"""
warnings.warn(
"MilvusRetreiver will be deprecated in the future. "
"Please use MilvusRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return MilvusRetriever(*args, **kwargs)
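# Minimal usage sketch (illustrative only). Assumes a Milvus server reachable at
# the placeholder host/port and an OpenAI API key for the embeddings. All fields
# read by the pre-validator above are passed explicitly.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings

    retriever = MilvusRetriever(
        embedding_function=OpenAIEmbeddings(),
        collection_name="LangChainCollection",
        connection_args={"host": "127.0.0.1", "port": "19530"},  # placeholders
        consistency_level="Session",
        search_params=None,
    )
    retriever.add_texts(["Milvus stores embedding vectors for similarity search."])
    print(retriever.get_relevant_documents("What does Milvus store?"))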
| [] |
2024-01-10 | leonxia1018/langchain | libs~langchain~langchain~retrievers~parent_document_retriever.py | import uuid
from typing import List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Field
from langchain.schema.document import Document
from langchain.schema.retriever import BaseRetriever
from langchain.schema.storage import BaseStore
from langchain.text_splitter import TextSplitter
from langchain.vectorstores.base import VectorStore
class ParentDocumentRetriever(BaseRetriever):
"""Fetches small chunks, then fetches their parent documents.
When splitting documents for retrieval, there are often conflicting desires:
1. You may want to have small documents, so that their embeddings can most
accurately reflect their meaning. If too long, then the embeddings can
lose meaning.
2. You want to have long enough documents that the context of each chunk is
retained.
The ParentDocumentRetriever strikes that balance by splitting and storing
small chunks of data. During retrieval, it first fetches the small chunks
but then looks up the parent ids for those chunks and returns those larger
documents.
Note that "parent document" refers to the document that a small chunk
originated from. This can either be the whole raw document OR a larger
chunk.
Examples:
.. code-block:: python
# Imports
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
# Initialize the retriever
retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
parent_splitter=parent_splitter,
)
"""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
docstore: BaseStore[str, Document]
"""The storage layer for the parent documents"""
child_splitter: TextSplitter
"""The text splitter to use to create child documents."""
id_key: str = "doc_id"
"""The key to use to track the parent id. This will be stored in the
metadata of child documents."""
parent_splitter: Optional[TextSplitter] = None
"""The text splitter to use to create parent documents.
If none, then the parent documents will be the raw documents passed in."""
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
def add_documents(
self,
documents: List[Document],
ids: Optional[List[str]],
add_to_docstore: bool = True,
) -> None:
"""Adds documents to the docstore and vectorstores.
Args:
documents: List of documents to add
ids: Optional list of ids for documents. If provided, should be the same
length as the list of documents. Can be provided if parent documents
are already in the document store and you don't want to re-add
to the docstore. If not provided, random UUIDs will be used as
ids.
add_to_docstore: Boolean of whether to add documents to docstore.
This can be false if and only if `ids` are provided. You may want
to set this to False if the documents are already in the docstore
and you don't want to re-add them.
"""
if self.parent_splitter is not None:
documents = self.parent_splitter.split_documents(documents)
if ids is None:
doc_ids = [str(uuid.uuid4()) for _ in documents]
if not add_to_docstore:
raise ValueError(
"If ids are not passed in, `add_to_docstore` MUST be True"
)
else:
if len(documents) != len(ids):
raise ValueError(
"Got uneven list of documents and ids. "
"If `ids` is provided, should be same length as `documents`."
)
doc_ids = ids
docs = []
full_docs = []
for i, doc in enumerate(documents):
_id = doc_ids[i]
sub_docs = self.child_splitter.split_documents([doc])
for _doc in sub_docs:
_doc.metadata[self.id_key] = _id
docs.extend(sub_docs)
full_docs.append((_id, doc))
self.vectorstore.add_documents(docs)
if add_to_docstore:
self.docstore.mset(full_docs)
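# Minimal usage sketch (illustrative only). Continues the class docstring
# example: index one long placeholder document and fetch the larger parent
# chunks for a query. Assumes an OpenAI API key and the chromadb package.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.storage import InMemoryStore
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.vectorstores import Chroma

    retriever = ParentDocumentRetriever(
        vectorstore=Chroma(embedding_function=OpenAIEmbeddings()),
        docstore=InMemoryStore(),
        child_splitter=RecursiveCharacterTextSplitter(chunk_size=400),
        parent_splitter=RecursiveCharacterTextSplitter(chunk_size=2000),
    )
    docs = [Document(page_content="LangChain composes LLM calls with retrieval. " * 100)]
    retriever.add_documents(docs, ids=None)  # `ids` has no default, so pass None explicitly
    print(retriever.get_relevant_documents("What does LangChain compose?")[0].page_content[:200])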
| [] |
2024-01-10 | Sayvai-io/custom-tools | src~sayvai_tools~utils~database~dbsheetsbase.py | """SQLAlchemy wrapper around a database."""
from __future__ import annotations
import warnings
from typing import Any, Iterable, List, Optional, Sequence
import sqlalchemy
import pandas as pd
from langchain.utils import get_from_env
from sayvai_tools.utils.google.sheets import GSheets
from sqlalchemy import MetaData, Table, create_engine, inspect, select, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
from sqlalchemy.schema import CreateTable
def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str:
return (
f'Name: {index["name"]}, Unique: {index["unique"]},'
f' Columns: {str(index["column_names"])}'
)
def truncate_word(content: Any, *, length: int, suffix: str = "...") -> str:
"""
Truncate a string to at most `length` characters, cutting back to the last
whole word and appending `suffix`.
"""
if not isinstance(content, str) or length <= 0:
return content
if len(content) <= length:
return content
return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix
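# Worked example (illustrative only): truncate_word("select name from customers",
# length=15) first cuts the string to 12 characters ("select name "), trims back
# to the last whole word ("select name"), then appends "...", giving
# "select name...".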
class SQLDatabaseSheetsBase:
"""SQLAlchemy wrapper around a database."""
def __init__(
self,
engine: Engine,
sheets : GSheets,
schema: Optional[str] = None,
metadata: Optional[MetaData] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
indexes_in_table_info: bool = False,
custom_table_info: Optional[dict] = None,
view_support: bool = False,
max_string_length: int = 300,
):
"""Create engine from database URI."""
self._engine = engine
self._schema = schema
self.gs = sheets
if include_tables and ignore_tables:
raise ValueError("Cannot specify both include_tables and ignore_tables")
self._inspector = inspect(self._engine)
# including view support by adding the views as well as tables to the all
# tables list if view_support is True
self._all_tables = set(
self._inspector.get_table_names(schema=schema)
+ (self._inspector.get_view_names(schema=schema) if view_support else [])
)
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
self._indexes_in_table_info = indexes_in_table_info
self._custom_table_info = custom_table_info
if self._custom_table_info:
if not isinstance(self._custom_table_info, dict):
raise TypeError(
"table_info must be a dictionary with table names as keys and the "
"desired table info as values"
)
# only keep the tables that are also present in the database
intersection = set(self._custom_table_info).intersection(self._all_tables)
self._custom_table_info = dict(
(table, self._custom_table_info[table])
for table in self._custom_table_info
if table in intersection
)
self._max_string_length = max_string_length
self._metadata = metadata or MetaData()
# including view support if view_support = true
self._metadata.reflect(
views=view_support,
bind=self._engine,
only=list(self._usable_tables),
schema=self._schema,
)
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> SQLDatabaseSheetsBase:
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
@classmethod
def from_databricks(
cls,
catalog: str,
schema: str,
host: Optional[str] = None,
api_token: Optional[str] = None,
warehouse_id: Optional[str] = None,
cluster_id: Optional[str] = None,
engine_args: Optional[dict] = None,
**kwargs: Any,
) -> SQLDatabaseSheetsBase:
"""
Class method to create an SQLDatabaseSheetsBase instance from a Databricks connection.
This method requires the 'databricks-sql-connector' package. If not installed,
it can be added using `pip install databricks-sql-connector`.
Args:
catalog (str): The catalog name in the Databricks database.
schema (str): The schema name in the catalog.
host (Optional[str]): The Databricks workspace hostname, excluding
'https://' part. If not provided, it attempts to fetch from the
environment variable 'DATABRICKS_HOST'. If still unavailable and if
running in a Databricks notebook, it defaults to the current workspace
hostname. Defaults to None.
api_token (Optional[str]): The Databricks personal access token for
accessing the Databricks SQL warehouse or the cluster. If not provided,
it attempts to fetch from 'DATABRICKS_TOKEN'. If still unavailable
and running in a Databricks notebook, a temporary token for the current
user is generated. Defaults to None.
warehouse_id (Optional[str]): The warehouse ID in the Databricks SQL. If
provided, the method configures the connection to use this warehouse.
Cannot be used with 'cluster_id'. Defaults to None.
cluster_id (Optional[str]): The cluster ID in the Databricks Runtime. If
provided, the method configures the connection to use this cluster.
Cannot be used with 'warehouse_id'. If running in a Databricks notebook
and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the
cluster the notebook is attached to. Defaults to None.
engine_args (Optional[dict]): The arguments to be used when connecting
Databricks. Defaults to None.
**kwargs (Any): Additional keyword arguments for the `from_uri` method.
Returns:
SQLDatabaseSheetsBase: An instance configured with the provided
Databricks connection details.
Raises:
ValueError: If 'databricks-sql-connector' is not found, or if both
'warehouse_id' and 'cluster_id' are provided, or if neither
'warehouse_id' nor 'cluster_id' are provided and it's not executing
inside a Databricks notebook.
"""
try:
from databricks import sql # noqa: F401
except ImportError:
raise ValueError(
"databricks-sql-connector package not found, please install with"
" `pip install databricks-sql-connector`"
)
context = None
try:
from dbruntime.databricks_repl_context import get_context
context = get_context()
except ImportError:
pass
default_host = context.browserHostName if context else None
if host is None:
host = get_from_env("host", "DATABRICKS_HOST", default_host)
default_api_token = context.apiToken if context else None
if api_token is None:
api_token = get_from_env("api_token", "DATABRICKS_TOKEN", default_api_token)
if warehouse_id is None and cluster_id is None:
if context:
cluster_id = context.clusterId
else:
raise ValueError(
"Need to provide either 'warehouse_id' or 'cluster_id'."
)
if warehouse_id and cluster_id:
raise ValueError("Can't have both 'warehouse_id' or 'cluster_id'.")
if warehouse_id:
http_path = f"/sql/1.0/warehouses/{warehouse_id}"
else:
http_path = f"/sql/protocolv1/o/0/{cluster_id}"
uri = (
f"databricks://token:{api_token}@{host}?"
f"http_path={http_path}&catalog={catalog}&schema={schema}"
)
return cls.from_uri(database_uri=uri, engine_args=engine_args, **kwargs)
@classmethod
def from_cnosdb(
cls,
url: str = "127.0.0.1:8902",
user: str = "root",
password: str = "",
tenant: str = "cnosdb",
database: str = "public",
) -> SQLDatabaseSheetsBase:
"""
Class method to create an SQLDatabaseSheetsBase instance from a CnosDB connection.
This method requires the 'cnos-connector' package. If not installed, it
can be added using `pip install cnos-connector`.
Args:
url (str): The HTTP connection host name and port number of the CnosDB
service, excluding "http://" or "https://", with a default value
of "127.0.0.1:8902".
user (str): The username used to connect to the CnosDB service, with a
default value of "root".
password (str): The password of the user connecting to the CnosDB service,
with a default value of "".
tenant (str): The name of the tenant used to connect to the CnosDB service,
with a default value of "cnosdb".
database (str): The name of the database in the CnosDB tenant.
Returns:
SQLDatabaseSheetsBase: An instance configured with the provided
CnosDB connection details.
"""
try:
from cnosdb_connector import make_cnosdb_langchain_uri
uri = make_cnosdb_langchain_uri(url, user, password, tenant, database)
return cls.from_uri(database_uri=uri)
except ImportError:
raise ValueError(
"cnos-connector package not found, please install with"
" `pip install cnos-connector`"
)
@property
def dialect(self) -> str:
"""Return string representation of dialect to use."""
return self._engine.dialect.name
def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return sorted(self._include_tables)
return sorted(self._all_tables - self._ignore_tables)
def get_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
warnings.warn(
"This method is deprecated - please use `get_usable_table_names`."
)
return self.get_usable_table_names()
@property
def table_info(self) -> str:
"""Information about all tables in the database."""
return self.get_table_info()
def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
all_table_names = self.get_usable_table_names()
if table_names is not None:
missing_tables = set(table_names).difference(all_table_names)
if missing_tables:
raise ValueError(f"table_names {missing_tables} not found in database")
all_table_names = table_names
meta_tables = [
tbl
for tbl in self._metadata.sorted_tables
if tbl.name in set(all_table_names)
and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_"))
]
tables = []
for table in meta_tables:
if self._custom_table_info and table.name in self._custom_table_info:
tables.append(self._custom_table_info[table.name])
continue
# add create table command
create_table = str(CreateTable(table).compile(self._engine))
table_info = f"{create_table.rstrip()}"
has_extra_info = (
self._indexes_in_table_info or self._sample_rows_in_table_info
)
if has_extra_info:
table_info += "\n\n/*"
if self._indexes_in_table_info:
table_info += f"\n{self._get_table_indexes(table)}\n"
if self._sample_rows_in_table_info:
table_info += f"\n{self._get_sample_rows(table)}\n"
if has_extra_info:
table_info += "*/"
tables.append(table_info)
tables.sort()
final_str = "\n\n".join(tables)
return final_str
def _get_table_indexes(self, table: Table) -> str:
indexes = self._inspector.get_indexes(table.name)
indexes_formatted = "\n".join(map(_format_index, indexes))
return f"Table Indexes:\n{indexes_formatted}"
def _get_sample_rows(self, table: Table) -> str:
# build the select command
command = select(table).limit(self._sample_rows_in_table_info)
# save the columns in string format
columns_str = "\t".join([col.name for col in table.columns])
try:
# get the sample rows
with self._engine.connect() as connection:
sample_rows_result = connection.execute(command) # type: ignore
# shorten values in the sample rows
sample_rows = list(
map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result)
)
# save the sample rows in string format
sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
# in some dialects when there are no rows in the table a
# 'ProgrammingError' is returned
except ProgrammingError:
sample_rows_str = ""
return (
f"{self._sample_rows_in_table_info} rows from {table.name} table:\n"
f"{columns_str}\n"
f"{sample_rows_str}"
)
def _execute(self, command: str, fetch: Optional[str] = "all") -> str:
"""
Executes the SQL command through the underlying engine, writes the result
rows (plus a header row) to Google Sheets, and returns a status message.
"""
# commands = command.strip().split(";")
# commands = [
# c.strip()
# for c in commands
# if c.strip() and not c.strip().startswith(("Response:", "Answer:"))
# ]
with self._engine.begin() as connection:
if self._schema is not None:
if self.dialect == "snowflake":
connection.exec_driver_sql(
f"ALTER SESSION SET search_path='{self._schema}'"
)
elif self.dialect == "bigquery":
connection.exec_driver_sql(f"SET @@dataset_id='{self._schema}'")
elif self.dialect == "mssql":
pass
else: # postgresql and compatible dialects
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
df = pd.read_sql_query(text(command), self._engine)
data_dict = df.to_dict("split")
columns = data_dict["columns"]
result = [columns] # Start with the header
result.extend(data_dict["data"]) # Add the data
self.gs.create_sheet()
self.gs.update_values(result)
return "Data has been exported to Google Sheets"
def run(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
result = self._execute(command, fetch)
# Convert columns values to string to avoid issues with sqlalchemy
# truncating text
if not result:
return "please recheck the query"
return str(result)
def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f"Error: {e}"
def run_no_throw(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
return self.run(command, fetch)
except SQLAlchemyError as e:
"""Format the error message"""
return f"Error: {e}"
| [] |
2024-01-10 | Sayvai-io/custom-tools | src~sayvai_tools~utils~database~dbbase.py | """SQLAlchemy wrapper around a database."""
from __future__ import annotations
import warnings
from typing import Any, Iterable, List, Optional, Sequence
import sqlalchemy
from langchain.utils import get_from_env
from sqlalchemy import MetaData, Table, create_engine, inspect, select, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
from sqlalchemy.schema import CreateTable
def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str:
return (
f'Name: {index["name"]}, Unique: {index["unique"]},'
f' Columns: {str(index["column_names"])}'
)
def truncate_word(content: Any, *, length: int, suffix: str = "...") -> str:
"""
Truncate a string to at most `length` characters, cutting back to the last
whole word and appending `suffix`.
"""
if not isinstance(content, str) or length <= 0:
return content
if len(content) <= length:
return content
return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix
class SQLDatabase:
"""SQLAlchemy wrapper around a database."""
def __init__(
self,
engine: Engine,
schema: Optional[str] = None,
metadata: Optional[MetaData] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
indexes_in_table_info: bool = False,
custom_table_info: Optional[dict] = None,
view_support: bool = False,
max_string_length: int = 300,
):
"""Create engine from database URI."""
self._engine = engine
self._schema = schema
if include_tables and ignore_tables:
raise ValueError("Cannot specify both include_tables and ignore_tables")
self._inspector = inspect(self._engine)
# including view support by adding the views as well as tables to the all
# tables list if view_support is True
self._all_tables = set(
self._inspector.get_table_names(schema=schema)
+ (self._inspector.get_view_names(schema=schema) if view_support else [])
)
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
self._indexes_in_table_info = indexes_in_table_info
self._custom_table_info = custom_table_info
if self._custom_table_info:
if not isinstance(self._custom_table_info, dict):
raise TypeError(
"table_info must be a dictionary with table names as keys and the "
"desired table info as values"
)
# only keep the tables that are also present in the database
intersection = set(self._custom_table_info).intersection(self._all_tables)
self._custom_table_info = dict(
(table, self._custom_table_info[table])
for table in self._custom_table_info
if table in intersection
)
self._max_string_length = max_string_length
self._metadata = metadata or MetaData()
# including view support if view_support = true
self._metadata.reflect(
views=view_support,
bind=self._engine,
only=list(self._usable_tables),
schema=self._schema,
)
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> SQLDatabase:
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
@classmethod
def from_databricks(
cls,
catalog: str,
schema: str,
host: Optional[str] = None,
api_token: Optional[str] = None,
warehouse_id: Optional[str] = None,
cluster_id: Optional[str] = None,
engine_args: Optional[dict] = None,
**kwargs: Any,
) -> SQLDatabase:
"""
Class method to create an SQLDatabase instance from a Databricks connection.
This method requires the 'databricks-sql-connector' package. If not installed,
it can be added using `pip install databricks-sql-connector`.
Args:
catalog (str): The catalog name in the Databricks database.
schema (str): The schema name in the catalog.
host (Optional[str]): The Databricks workspace hostname, excluding
'https://' part. If not provided, it attempts to fetch from the
environment variable 'DATABRICKS_HOST'. If still unavailable and if
running in a Databricks notebook, it defaults to the current workspace
hostname. Defaults to None.
api_token (Optional[str]): The Databricks personal access token for
accessing the Databricks SQL warehouse or the cluster. If not provided,
it attempts to fetch from 'DATABRICKS_TOKEN'. If still unavailable
and running in a Databricks notebook, a temporary token for the current
user is generated. Defaults to None.
warehouse_id (Optional[str]): The warehouse ID in the Databricks SQL. If
provided, the method configures the connection to use this warehouse.
Cannot be used with 'cluster_id'. Defaults to None.
cluster_id (Optional[str]): The cluster ID in the Databricks Runtime. If
provided, the method configures the connection to use this cluster.
Cannot be used with 'warehouse_id'. If running in a Databricks notebook
and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the
cluster the notebook is attached to. Defaults to None.
engine_args (Optional[dict]): The arguments to be used when connecting
Databricks. Defaults to None.
**kwargs (Any): Additional keyword arguments for the `from_uri` method.
Returns:
SQLDatabase: An instance of SQLDatabase configured with the provided
Databricks connection details.
Raises:
ValueError: If 'databricks-sql-connector' is not found, or if both
'warehouse_id' and 'cluster_id' are provided, or if neither
'warehouse_id' nor 'cluster_id' are provided and it's not executing
inside a Databricks notebook.
"""
try:
from databricks import sql # noqa: F401
except ImportError:
raise ValueError(
"databricks-sql-connector package not found, please install with"
" `pip install databricks-sql-connector`"
)
context = None
try:
from dbruntime.databricks_repl_context import get_context
context = get_context()
except ImportError:
pass
default_host = context.browserHostName if context else None
if host is None:
host = get_from_env("host", "DATABRICKS_HOST", default_host)
default_api_token = context.apiToken if context else None
if api_token is None:
api_token = get_from_env("api_token", "DATABRICKS_TOKEN", default_api_token)
if warehouse_id is None and cluster_id is None:
if context:
cluster_id = context.clusterId
else:
raise ValueError(
"Need to provide either 'warehouse_id' or 'cluster_id'."
)
if warehouse_id and cluster_id:
raise ValueError("Can't have both 'warehouse_id' or 'cluster_id'.")
if warehouse_id:
http_path = f"/sql/1.0/warehouses/{warehouse_id}"
else:
http_path = f"/sql/protocolv1/o/0/{cluster_id}"
uri = (
f"databricks://token:{api_token}@{host}?"
f"http_path={http_path}&catalog={catalog}&schema={schema}"
)
return cls.from_uri(database_uri=uri, engine_args=engine_args, **kwargs)
@classmethod
def from_cnosdb(
cls,
url: str = "127.0.0.1:8902",
user: str = "root",
password: str = "",
tenant: str = "cnosdb",
database: str = "public",
) -> SQLDatabase:
"""
Class method to create an SQLDatabase instance from a CnosDB connection.
This method requires the 'cnos-connector' package. If not installed, it
can be added using `pip install cnos-connector`.
Args:
url (str): The HTTP connection host name and port number of the CnosDB
service, excluding "http://" or "https://", with a default value
of "127.0.0.1:8902".
user (str): The username used to connect to the CnosDB service, with a
default value of "root".
password (str): The password of the user connecting to the CnosDB service,
with a default value of "".
tenant (str): The name of the tenant used to connect to the CnosDB service,
with a default value of "cnosdb".
database (str): The name of the database in the CnosDB tenant.
Returns:
SQLDatabase: An instance of SQLDatabase configured with the provided
CnosDB connection details.
"""
try:
from cnosdb_connector import make_cnosdb_langchain_uri
uri = make_cnosdb_langchain_uri(url, user, password, tenant, database)
return cls.from_uri(database_uri=uri)
except ImportError:
raise ValueError(
"cnos-connector package not found, please install with"
" `pip install cnos-connector`"
)
@property
def dialect(self) -> str:
"""Return string representation of dialect to use."""
return self._engine.dialect.name
def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return sorted(self._include_tables)
return sorted(self._all_tables - self._ignore_tables)
def get_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
warnings.warn(
"This method is deprecated - please use `get_usable_table_names`."
)
return self.get_usable_table_names()
@property
def table_info(self) -> str:
"""Information about all tables in the database."""
return self.get_table_info()
def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
all_table_names = self.get_usable_table_names()
if table_names is not None:
missing_tables = set(table_names).difference(all_table_names)
if missing_tables:
raise ValueError(f"table_names {missing_tables} not found in database")
all_table_names = table_names
meta_tables = [
tbl
for tbl in self._metadata.sorted_tables
if tbl.name in set(all_table_names)
and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_"))
]
tables = []
for table in meta_tables:
if self._custom_table_info and table.name in self._custom_table_info:
tables.append(self._custom_table_info[table.name])
continue
# add create table command
create_table = str(CreateTable(table).compile(self._engine))
table_info = f"{create_table.rstrip()}"
has_extra_info = (
self._indexes_in_table_info or self._sample_rows_in_table_info
)
if has_extra_info:
table_info += "\n\n/*"
if self._indexes_in_table_info:
table_info += f"\n{self._get_table_indexes(table)}\n"
if self._sample_rows_in_table_info:
table_info += f"\n{self._get_sample_rows(table)}\n"
if has_extra_info:
table_info += "*/"
tables.append(table_info)
tables.sort()
final_str = "\n\n".join(tables)
return final_str
def _get_table_indexes(self, table: Table) -> str:
indexes = self._inspector.get_indexes(table.name)
indexes_formatted = "\n".join(map(_format_index, indexes))
return f"Table Indexes:\n{indexes_formatted}"
def _get_sample_rows(self, table: Table) -> str:
# build the select command
command = select(table).limit(self._sample_rows_in_table_info)
# save the columns in string format
columns_str = "\t".join([col.name for col in table.columns])
try:
# get the sample rows
with self._engine.connect() as connection:
sample_rows_result = connection.execute(command) # type: ignore
# shorten values in the sample rows
sample_rows = list(
map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result)
)
# save the sample rows in string format
sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
# in some dialects when there are no rows in the table a
# 'ProgrammingError' is returned
except ProgrammingError:
sample_rows_str = ""
return (
f"{self._sample_rows_in_table_info} rows from {table.name} table:\n"
f"{columns_str}\n"
f"{sample_rows_str}"
)
def _execute(self, command: str, fetch: Optional[str] = "all") -> Sequence:
"""
Executes SQL command through the underlying engine.
If the statement returns no rows, an empty list is returned.
"""
commands = command.strip().split(";")
commands = [
c.strip()
for c in commands
if c.strip() and not c.strip().startswith(("Response:", "Answer:"))
]
with self._engine.begin() as connection:
if self._schema is not None:
if self.dialect == "snowflake":
connection.exec_driver_sql(
f"ALTER SESSION SET search_path='{self._schema}'"
)
elif self.dialect == "bigquery":
connection.exec_driver_sql(f"SET @@dataset_id='{self._schema}'")
elif self.dialect == "mssql":
pass
else: # postgresql and compatible dialects
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
results = []
for single_command in commands:
cursor = connection.execute(text(single_command))
if cursor.returns_rows:
if fetch == "all":
result = cursor.fetchall()
elif fetch == "one":
result = cursor.fetchone() # type: ignore
else:
raise ValueError(
"Fetch parameter must be either 'one' or 'all'"
)
results.append(result)
else:
results.append(
[]
) # Append an empty list when cursor returns no rows
connection.commit()
return results
def run(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
result = self._execute(command, fetch)
# Convert columns values to string to avoid issues with sqlalchemy
# truncating text
if not result:
return ""
elif isinstance(result, list):
res: Sequence = [
tuple(truncate_word(c, length=self._max_string_length) for c in r)
for r in result
]
else:
res = tuple(
truncate_word(c, length=self._max_string_length) for c in result
)
return str(res)
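    # Usage sketch for run(); the table and queries are illustrative. The return
    # value is the stringified row data, or "" when the statement yields no rows:
    #     db.run("SELECT COUNT(*) FROM employees")
    #     db.run("SELECT name FROM employees", fetch="one")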
def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f"Error: {e}"
def run_no_throw(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
return self.run(command, fetch)
except SQLAlchemyError as e:
"""Format the error message"""
return f"Error: {e}"
| [] |
2024-01-10 | Sayvai-io/custom-tools | src~sayvai_tools~tools~conversational_human~iohuman.py | """Conversational Human """
import os
from typing import Callable, Optional
from elevenlabs import play
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import Field
from sayvai_tools.utils.voice.stt import STT
from sayvai_tools.utils.voice.tts import ElevenlabsAudioStreaming
class ConversationalHuman:
"""Tool that asks user for input."""
name: str = "human"
description: str = (
"You can ask a human for guidance when you think you "
"got stuck or you are not sure what to do next. "
"The input should be a question for the human."
)
# prompt_func: Callable[[str], None] = Field(default_factory=lambda: _print_func)
# input_func: Callable = Field(default_factory=lambda: input)
def __init__(self, api_key: str, g_api_key: str, phrase_set_path: str) -> None:
self.stt = STT(audio_format="mp3", speech_context_path=phrase_set_path)
self.tts = ElevenlabsAudioStreaming(api_key=api_key)
self.g_api_key = g_api_key
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Human input tool."""
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = self.g_api_key
inputbytes = self.tts.audio_streaming(
query,
model="eleven_multilingual_v1",
voice="Adam",
audio_streaming=True,
stability=0.5,
similarity=0.5,
# api_key= self.api_key
)
play(inputbytes)
# self.prompt_func(query)
# return self.input_func()
return self.stt.generate_text()
| [] |
2024-01-10 | Sayvai-io/custom-tools | src~sayvai_tools~utils~database~dbchain.py | """Chain for interacting with SQL Database."""
from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain_experimental.pydantic_v1 import Extra, Field, root_validator
from sayvai_tools.utils.database.dbbase import SQLDatabase
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
class SQLDatabaseChain(Chain):
"""Chain for interacting with SQL Database.
Example:
.. code-block:: python
from langchain_experimental.sql import SQLDatabaseChain
from langchain import OpenAI, SQLDatabase
db = SQLDatabase(...)
db_chain = SQLDatabaseChain.from_llm(OpenAI(), db)
"""
llm_chain: LLMChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
database: SQLDatabase = Field(exclude=True)
"""SQL Database to connect to."""
prompt: Optional[BasePromptTemplate] = None
"""[Deprecated] Prompt to use to translate natural language to SQL."""
top_k: int = 5
"""Number of results to return from the query"""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_sql: bool = False
"""Will return sql-command directly without executing it"""
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the SQL table directly."""
use_query_checker: bool = False
"""Whether or not the query checker tool should be used to attempt
to fix the initial SQL from the LLM."""
query_checker_prompt: Optional[BasePromptTemplate] = None
"""The prompt template that should be used by the query checker"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an SQLDatabaseChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"] is not None:
database = values["database"]
prompt = values.get("prompt") or SQL_PROMPTS.get(
database.dialect, PROMPT
)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
input_text = f"{inputs[self.input_key]}\nSQLQuery:"
_run_manager.on_text(input_text, verbose=self.verbose)
# If not present, then defaults to None which is all tables.
table_names_to_use = inputs.get("table_names_to_use")
table_info = self.database.get_table_info(table_names=table_names_to_use)
llm_inputs = {
"input": input_text,
"top_k": str(self.top_k),
"dialect": self.database.dialect,
"table_info": table_info,
"stop": ["\nSQLResult:"],
}
intermediate_steps: List = []
try:
intermediate_steps.append(llm_inputs) # input: sql generation
sql_cmd = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
if self.return_sql:
return {self.output_key: sql_cmd}
if not self.use_query_checker:
_run_manager.on_text(sql_cmd, color="green", verbose=self.verbose)
intermediate_steps.append(
sql_cmd
) # output: sql generation (no checker)
intermediate_steps.append({"sql_cmd": sql_cmd}) # input: sql exec
result = self.database.run(sql_cmd)
intermediate_steps.append(str(result)) # output: sql exec
else:
query_checker_prompt = self.query_checker_prompt or PromptTemplate(
template=QUERY_CHECKER, input_variables=["query", "dialect"]
)
query_checker_chain = LLMChain(
llm=self.llm_chain.llm, prompt=query_checker_prompt
)
query_checker_inputs = {
"query": sql_cmd,
"dialect": self.database.dialect,
}
checked_sql_command: str = query_checker_chain.predict(
callbacks=_run_manager.get_child(), **query_checker_inputs
).strip()
intermediate_steps.append(
checked_sql_command
) # output: sql generation (checker)
_run_manager.on_text(
checked_sql_command, color="green", verbose=self.verbose
)
intermediate_steps.append(
{"sql_cmd": checked_sql_command}
) # input: sql exec
result = self.database.run(checked_sql_command)
intermediate_steps.append(str(result)) # output: sql exec
sql_cmd = checked_sql_command
_run_manager.on_text("\nSQLResult: ", verbose=self.verbose)
_run_manager.on_text(result, color="yellow", verbose=self.verbose)
# If return direct, we just set the final result equal to
# the result of the sql query result, otherwise try to get a human readable
# final answer
if self.return_direct:
final_result = result
else:
_run_manager.on_text("\nAnswer:", verbose=self.verbose)
input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:"
llm_inputs["input"] = input_text
intermediate_steps.append(llm_inputs) # input: final answer
final_result = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
intermediate_steps.append(final_result) # output: final answer
_run_manager.on_text(final_result, color="green", verbose=self.verbose)
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
except Exception as exc:
# Append intermediate steps to exception, to aid in logging and later
# improvement of few shot prompt seeds
exc.intermediate_steps = intermediate_steps # type: ignore
raise exc
@property
def _chain_type(self) -> str:
return "sql_database_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
db: SQLDatabase,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> SQLDatabaseChain:
prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, database=db, **kwargs)
class SQLDatabaseSequentialChain(Chain):
"""Chain for querying SQL database that is a sequential chain.
The chain is as follows:
1. Based on the query, determine which tables to use.
2. Based on those tables, call the normal SQL database chain.
This is useful in cases where the number of tables in the database is large.
"""
decider_chain: LLMChain
sql_chain: SQLDatabaseChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_intermediate_steps: bool = False
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
database: SQLDatabase,
query_prompt: BasePromptTemplate = PROMPT,
decider_prompt: BasePromptTemplate = DECIDER_PROMPT,
**kwargs: Any,
) -> SQLDatabaseSequentialChain:
"""Load the necessary chains."""
sql_chain = SQLDatabaseChain.from_llm(
llm, database, prompt=query_prompt, **kwargs
)
decider_chain = LLMChain(
llm=llm, prompt=decider_prompt, output_key="table_names"
)
return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_table_names = self.sql_chain.database.get_usable_table_names()
table_names = ", ".join(_table_names)
llm_inputs = {
"query": inputs[self.input_key],
"table_names": table_names,
}
_lowercased_table_names = [name.lower() for name in _table_names]
table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs)
table_names_to_use = [
name
for name in table_names_from_chain
if name.lower() in _lowercased_table_names
]
_run_manager.on_text("Table names to use:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(table_names_to_use), color="yellow", verbose=self.verbose
)
new_inputs = {
self.sql_chain.input_key: inputs[self.input_key],
"table_names_to_use": table_names_to_use,
}
return self.sql_chain(
new_inputs, callbacks=_run_manager.get_child(), return_only_outputs=True
)
@property
def _chain_type(self) -> str:
return "sql_database_sequential_chain"
| [
"None"
] |
2024-01-10 | Sayvai-io/custom-tools | src~sayvai_tools~utils~database~sheetschain.py | """Chain for interacting with SQL Database."""
from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain_experimental.pydantic_v1 import Extra, Field, root_validator
from sayvai_tools.utils.database.dbsheetsbase import SQLDatabaseSheetsBase
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
class SheetsDatabaseChain(Chain):
"""Chain for interacting with SQL Database.
Example:
.. code-block:: python
            from sayvai_tools.utils.database.sheetschain import SheetsDatabaseChain
            from sayvai_tools.utils.database.dbsheetsbase import SQLDatabaseSheetsBase
            from langchain import OpenAI
            db = SQLDatabaseSheetsBase(...)
            db_chain = SheetsDatabaseChain.from_llm(OpenAI(), db)
"""
llm_chain: LLMChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
database: SQLDatabaseSheetsBase = Field(exclude=True)
"""SQL Database to connect to."""
prompt: Optional[BasePromptTemplate] = None
"""[Deprecated] Prompt to use to translate natural language to SQL."""
top_k: int = 5
"""Number of results to return from the query"""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_sql: bool = False
"""Will return sql-command directly without executing it"""
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the SQL table directly."""
use_query_checker: bool = False
"""Whether or not the query checker tool should be used to attempt
to fix the initial SQL from the LLM."""
query_checker_prompt: Optional[BasePromptTemplate] = None
"""The prompt template that should be used by the query checker"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an SQLDatabaseSheetsBaseChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"] is not None:
database = values["database"]
prompt = values.get("prompt") or SQL_PROMPTS.get(
database.dialect, PROMPT
)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
input_text = f"{inputs[self.input_key]}\nSQLQuery:"
_run_manager.on_text(input_text, verbose=self.verbose)
# If not present, then defaults to None which is all tables.
table_names_to_use = inputs.get("table_names_to_use")
table_info = self.database.get_table_info(table_names=table_names_to_use)
llm_inputs = {
"input": input_text,
"top_k": str(self.top_k),
"dialect": self.database.dialect,
"table_info": table_info,
"stop": ["\nSQLResult:"],
}
intermediate_steps: List = []
try:
intermediate_steps.append(llm_inputs) # input: sql generation
sql_cmd = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
if self.return_sql:
return {self.output_key: sql_cmd}
if not self.use_query_checker:
_run_manager.on_text(sql_cmd, color="green", verbose=self.verbose)
intermediate_steps.append(
sql_cmd
) # output: sql generation (no checker)
intermediate_steps.append({"sql_cmd": sql_cmd}) # input: sql exec
result = self.database.run(sql_cmd)
intermediate_steps.append(str(result)) # output: sql exec
else:
query_checker_prompt = self.query_checker_prompt or PromptTemplate(
template=QUERY_CHECKER, input_variables=["query", "dialect"]
)
query_checker_chain = LLMChain(
llm=self.llm_chain.llm, prompt=query_checker_prompt
)
query_checker_inputs = {
"query": sql_cmd,
"dialect": self.database.dialect,
}
checked_sql_command: str = query_checker_chain.predict(
callbacks=_run_manager.get_child(), **query_checker_inputs
).strip()
intermediate_steps.append(
checked_sql_command
) # output: sql generation (checker)
_run_manager.on_text(
checked_sql_command, color="green", verbose=self.verbose
)
intermediate_steps.append(
{"sql_cmd": checked_sql_command}
) # input: sql exec
result = self.database.run(checked_sql_command)
intermediate_steps.append(str(result)) # output: sql exec
sql_cmd = checked_sql_command
_run_manager.on_text("\nSQLResult: ", verbose=self.verbose)
_run_manager.on_text(result, color="yellow", verbose=self.verbose)
# If return direct, we just set the final result equal to
# the result of the sql query result, otherwise try to get a human readable
# final answer
if self.return_direct:
final_result = result
else:
_run_manager.on_text("\nAnswer:", verbose=self.verbose)
input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:"
llm_inputs["input"] = input_text
intermediate_steps.append(llm_inputs) # input: final answer
final_result = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
intermediate_steps.append(final_result) # output: final answer
_run_manager.on_text(final_result, color="green", verbose=self.verbose)
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
except Exception as exc:
# Append intermediate steps to exception, to aid in logging and later
# improvement of few shot prompt seeds
exc.intermediate_steps = intermediate_steps # type: ignore
raise exc
@property
def _chain_type(self) -> str:
return "sql_database_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
db: SQLDatabaseSheetsBase,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
    ) -> SheetsDatabaseChain:
prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, database=db, **kwargs)
class SQLDatabaseSheetsBaseSequentialChain(Chain):
"""Chain for querying SQL database that is a sequential chain.
The chain is as follows:
1. Based on the query, determine which tables to use.
2. Based on those tables, call the normal SQL database chain.
This is useful in cases where the number of tables in the database is large.
"""
decider_chain: LLMChain
    sql_chain: SheetsDatabaseChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_intermediate_steps: bool = False
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
database: SQLDatabaseSheetsBase,
query_prompt: BasePromptTemplate = PROMPT,
decider_prompt: BasePromptTemplate = DECIDER_PROMPT,
**kwargs: Any,
) -> SQLDatabaseSheetsBaseSequentialChain:
"""Load the necessary chains."""
        sql_chain = SheetsDatabaseChain.from_llm(
llm, database, prompt=query_prompt, **kwargs
)
decider_chain = LLMChain(
llm=llm, prompt=decider_prompt, output_key="table_names"
)
return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_table_names = self.sql_chain.database.get_usable_table_names()
table_names = ", ".join(_table_names)
llm_inputs = {
"query": inputs[self.input_key],
"table_names": table_names,
}
_lowercased_table_names = [name.lower() for name in _table_names]
table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs)
table_names_to_use = [
name
for name in table_names_from_chain
if name.lower() in _lowercased_table_names
]
_run_manager.on_text("Table names to use:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(table_names_to_use), color="yellow", verbose=self.verbose
)
new_inputs = {
self.sql_chain.input_key: inputs[self.input_key],
"table_names_to_use": table_names_to_use,
}
return self.sql_chain(
new_inputs, callbacks=_run_manager.get_child(), return_only_outputs=True
)
@property
def _chain_type(self) -> str:
return "sql_database_sequential_chain"
| [
"None"
] |
2024-01-10 | kewlamogh/training-poc | use_new_model.py | import json
import os
import openai
from sqlitedict import SqliteDict
db = SqliteDict('./my_db.sqlite', autocommit=True)
json_file = open("fine_tunes.txt", 'rb').read()
data = json.loads(json_file)['data']
final_item = data[len(data) - 1]
name = final_item['fine_tuned_model']
if name == None:
exit("error: fine-tune job has failed.")
openai.api_key = os.environ["OPENAI_API_KEY"]
def make_problem(log):
prompt = f'remove the timestamp, server name, and any hexadecimal values. {log}'
response = openai.Completion.create(
model=name,
max_tokens=1024,
temperature=0,
stop='\n',
prompt=prompt
)
return response['choices'][0]['text']
def process_log(log):
problem = make_problem(log)
if problem in db:
return db[problem]
else:
response = openai.Completion.create(
model=name,
max_tokens=1024,
temperature=0.5,
stop='\n',
prompt=f"Understand these logs and diagnose a problem and a solution: {log} ->"
)
analysis = response['choices'][0]['text']
db[problem] = analysis
return analysis
# Random comment
print(process_log(
'Apr 6 10:30:22 server1 kernel: [ 2601.567890] ata1.00: exception Emask 0x0 SAct 0x0 SErr 0x0 action 0x6'))
print(process_log(
'Apr 6 10:30:22 server1 kernel: [ 2601.567890] ata1.00: exception Emask 0x0 SAct 0x0 SErr 0x0 action 0x6'))
print(process_log('Apr 6 10:30:22 server1 main.exe: info: connected successfully to db3'))
print(process_log('Apr 6 10:50:32 server1 main.exe: too many requests: put on hold, ten requests timing out'))
| [
"remove the timestamp, server name, and any hexadecimal values. PLACEHOLDER",
"Understand these logs and diagnose a problem and a solution: PLACEHOLDER ->"
] |
2024-01-10 | pals-ttic/sjc | run_img_sampling.py | from pathlib import Path
import numpy as np
import torch
from misc import torch_samps_to_imgs
from adapt import Karras, ScoreAdapter, power_schedule
from adapt_gddpm import GuidedDDPM
from adapt_ncsn import NCSN as _NCSN
# from adapt_vesde import VESDE # not included to prevent import conflicts
from adapt_sd import StableDiffusion
from my.utils import tqdm, EventStorage, HeartBeat, EarlyLoopBreak
from my.config import BaseConf, dispatch
from my.utils.seed import seed_everything
class GDDPM(BaseConf):
"""Guided DDPM from OpenAI"""
model: str = "m_lsun_256"
lsun_cat: str = "bedroom"
imgnet_cat: int = -1
def make(self):
args = self.dict()
model = GuidedDDPM(**args)
return model
class SD(BaseConf):
"""Stable Diffusion"""
variant: str = "v1"
v2_highres: bool = False
prompt: str = "a photograph of an astronaut riding a horse"
scale: float = 3.0 # classifier free guidance scale
precision: str = 'autocast'
def make(self):
args = self.dict()
model = StableDiffusion(**args)
return model
class SDE(BaseConf):
def make(self):
args = self.dict()
model = VESDE(**args)
return model
class NCSN(BaseConf):
def make(self):
args = self.dict()
model = _NCSN(**args)
return model
class KarrasGen(BaseConf):
family: str = "gddpm"
gddpm: GDDPM = GDDPM()
sd: SD = SD()
# sde: SDE = SDE()
ncsn: NCSN = NCSN()
batch_size: int = 10
num_images: int = 1250
num_t: int = 40
σ_max: float = 80.0
heun: bool = True
langevin: bool = False
cls_scaling: float = 1.0 # classifier guidance scaling
def run(self):
args = self.dict()
family = args.pop("family")
model = getattr(self, family).make()
self.karras_generate(model, **args)
@staticmethod
def karras_generate(
model: ScoreAdapter,
batch_size, num_images, σ_max, num_t, langevin, heun, cls_scaling,
**kwargs
):
del kwargs # removed extra args
num_batches = num_images // batch_size
fuse = EarlyLoopBreak(5)
with tqdm(total=num_batches) as pbar, \
HeartBeat(pbar) as hbeat, \
EventStorage() as metric:
all_imgs = []
for _ in range(num_batches):
if fuse.on_break():
break
pipeline = Karras.inference(
model, batch_size, num_t,
init_xs=None, heun=heun, σ_max=σ_max,
langevin=langevin, cls_scaling=cls_scaling
)
for imgs in tqdm(pipeline, total=num_t+1, disable=False):
# _std = imgs.std().item()
# print(_std)
hbeat.beat()
pass
if isinstance(model, StableDiffusion):
imgs = model.decode(imgs)
imgs = torch_samps_to_imgs(imgs, uncenter=model.samps_centered())
all_imgs.append(imgs)
pbar.update()
all_imgs = np.concatenate(all_imgs, axis=0)
metric.put_artifact("imgs", ".npy", lambda fn: np.save(fn, all_imgs))
metric.step()
hbeat.done()
class SMLDGen(BaseConf):
family: str = "ncsn"
gddpm: GDDPM = GDDPM()
# sde: SDE = SDE()
ncsn: NCSN = NCSN()
batch_size: int = 16
num_images: int = 16
num_stages: int = 80
num_steps: int = 15
σ_max: float = 80.0
ε: float = 1e-5
def run(self):
args = self.dict()
family = args.pop("family")
model = getattr(self, family).make()
self.smld_generate(model, **args)
@staticmethod
def smld_generate(
model: ScoreAdapter,
batch_size, num_images, num_stages, num_steps, σ_max, ε,
**kwargs
):
num_batches = num_images // batch_size
σs = power_schedule(σ_max, model.σ_min, num_stages)
σs = [model.snap_t_to_nearest_tick(σ)[0] for σ in σs]
fuse = EarlyLoopBreak(5)
with tqdm(total=num_batches) as pbar, \
HeartBeat(pbar) as hbeat, \
EventStorage() as metric:
all_imgs = []
for _ in range(num_batches):
if fuse.on_break():
break
init_xs = torch.rand(batch_size, *model.data_shape(), device=model.device)
if model.samps_centered():
init_xs = init_xs * 2 - 1 # [0, 1] -> [-1, 1]
pipeline = smld_inference(
model, σs, num_steps, ε, init_xs
)
for imgs in tqdm(pipeline, total=(num_stages * num_steps)+1, disable=False):
pbar.set_description(f"{imgs.max().item():.3f}")
metric.put_scalars(
max=imgs.max().item(), min=imgs.min().item(), std=imgs.std().item()
)
metric.step()
hbeat.beat()
pbar.update()
imgs = torch_samps_to_imgs(imgs, uncenter=model.samps_centered())
all_imgs.append(imgs)
all_imgs = np.concatenate(all_imgs, axis=0)
metric.put_artifact("imgs", ".npy", lambda fn: np.save(fn, all_imgs))
metric.step()
hbeat.done()
def smld_inference(model, σs, num_steps, ε, init_xs):
from math import sqrt
# not doing conditioning or cls guidance; for gddpm only lsun works; fine.
xs = init_xs
yield xs
for i in range(len(σs)):
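        # Annealed Langevin dynamics: at noise level σ_i the step size is
        # ε·(σ_i/σ_L)², and each update is x ← x + α_i·score + sqrt(2·α_i)·z
        # (cf. Song & Ermon, 2019).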
α_i = ε * ((σs[i] / σs[-1]) ** 2)
for _ in range(num_steps):
grad = model.score(xs, σs[i])
z = torch.randn_like(xs)
xs = xs + α_i * grad + sqrt(2 * α_i) * z
yield xs
def load_np_imgs(fname):
fname = Path(fname)
data = np.load(fname)
if fname.suffix == ".npz":
imgs = data['arr_0']
else:
imgs = data
return imgs
def visualize(max_n_imgs=16):
import torchvision.utils as vutils
from imageio import imwrite
from einops import rearrange
all_imgs = load_np_imgs("imgs/step_0.npy")
imgs = all_imgs[:max_n_imgs]
imgs = rearrange(imgs, "N H W C -> N C H W", C=3)
imgs = torch.from_numpy(imgs)
pane = vutils.make_grid(imgs, padding=2, nrow=4)
pane = rearrange(pane, "C H W -> H W C", C=3)
pane = pane.numpy()
imwrite("preview.jpg", pane)
if __name__ == "__main__":
seed_everything(0)
dispatch(KarrasGen)
visualize(16)
| [
"a photograph of an astronaut riding a horse"
] |
2024-01-10 | notaustindaniels/fine-tuning-synthetic-avatar | finet3.py | import openai
# Define a function to open a file and return its contents as a string
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Define a function to save content to a file
def save_file(filepath, content):
with open(filepath, 'a', encoding='utf-8') as outfile:
outfile.write(content)
# Set the OpenAI API keys by reading them from files
api_key = open_file('openaiapikey2.txt')
openai.api_key = api_key
# Retrieve the state of a fine-tune
response = openai.FineTuningJob.retrieve("YOUR FT JOB ID")
status = response['status']
print(f"Fine-tuning job status: {status}") | [] |
2024-01-10 | notaustindaniels/fine-tuning-synthetic-avatar | finet2.py | import openai
# Define a function to open a file and return its contents as a string
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Define a function to save content to a file
def save_file(filepath, content):
with open(filepath, 'a', encoding='utf-8') as outfile:
outfile.write(content)
# Set the OpenAI API keys by reading them from files
api_key = open_file('openaiapikey2.txt')
openai.api_key = api_key
# Using the provided file_id
file_id = "YOUR FILE ID"
model_name = "gpt-3.5-turbo" # Or another base model if you prefer
response = openai.FineTuningJob.create(
training_file=file_id,
model=model_name
)
job_id = response['id']
print(f"Fine-tuning job created successfully with ID: {job_id}")
| [] |
2024-01-10 | notaustindaniels/fine-tuning-synthetic-avatar | SyntD-text.py | import openai
from datetime import datetime
import os
# Initialize folder for saving responses
if not os.path.exists('responses'):
os.mkdir('responses')
# Define a function to open a file and return its contents as a string
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Define a function to save content to a file
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
# Initialize OpenAI API key
api_key = open_file('openaiapikey2.txt')
openai.api_key = api_key
# Read the content of the files containing the chatbot's prompts
chatbot_prompt = open_file('sysprompt.txt')
# Initialize an empty list to store the conversations for the chatbot
conversation = []
def chatgpt(api_key, conversation, chatbot_prompt, solver, temperature=1.4, frequency_penalty=0.2, presence_penalty=0):
conversation.append({"role": "user", "content": solver})
messages_input = conversation.copy()
prompt = [{"role": "system", "content": chatbot_prompt}]
messages_input.insert(0, prompt[0])
completion = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
temperature=temperature,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
messages=messages_input)
chat_response = completion['choices'][0]['message']['content']
conversation.append({"role": "assistant", "content": chat_response})
return chat_response
# Number of loops / examples
num_loops = 2
for i in range(num_loops):
problem = open_file('problems.txt')
prob1 = chatgpt(api_key, conversation, chatbot_prompt, problem)
solver = open_file('prompt1.txt').replace("<<SELLER>>", prob1)
response = chatgpt(api_key, conversation, chatbot_prompt, solver)
# Create a unique filename using the current timestamp
timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
filename = f"responses/response_{timestamp}.txt"
# Combine the input prompt and response
combined_content = f"Input Prompt:\n{prob1}\n\nResponse:\n{response}"
# Save to a file
save_file(filename, combined_content)
print(f"Saved example {i+1} to {filename}")
conversation.clear() | [
"sysprompt.txt"
] |
2024-01-10 | notaustindaniels/fine-tuning-synthetic-avatar | SyntD-json.py | import openai
from datetime import datetime
import os
import json
# Define a function to open a file and return its contents as a string
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Define a function to save content to a file
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
# Initialize folder for saving responses
if not os.path.exists('responses'):
os.mkdir('responses')
# Read the files that don't change during the loops
problem = open_file('problems.txt')
base_solver = open_file('prompt1.txt')
chatbot_prompt = open_file('sysprompt.txt')
# Initialize OpenAI API key
api_key = open_file('openaiapikey2.txt')
openai.api_key = api_key
# Initialize an empty list to store the conversations for the chatbot
conversation = []
def chatgpt(api_key, conversation, chatbot_prompt, solver, temperature=1.4, frequency_penalty=0.2, presence_penalty=0):
conversation.append({"role": "user", "content": solver})
messages_input = conversation.copy()
prompt = [{"role": "system", "content": chatbot_prompt}]
messages_input.insert(0, prompt[0])
completion = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
temperature=temperature,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
messages=messages_input)
chat_response = completion['choices'][0]['message']['content']
conversation.append({"role": "assistant", "content": chat_response})
return chat_response
# Initialize JSONL file
jsonl_file = 'responses/problemsft.jsonl'
# Number of loops / examples
num_loops = 200
for i in range(num_loops):
prob1 = chatgpt(api_key, conversation, chatbot_prompt, problem)
solver = base_solver.replace("<<PROBLEM>>", prob1)
response = chatgpt(api_key, conversation, chatbot_prompt, solver)
# Create JSON object
json_obj = {
"messages": [
{"role": "system", "content": chatbot_prompt},
{"role": "user", "content": prob1},
{"role": "assistant", "content": response}
]
}
# Append JSON object to JSONL file
with open(jsonl_file, 'a') as f:
f.write(json.dumps(json_obj) + '\n')
print(f"Saved example {i+1} to {jsonl_file}")
conversation.clear() | [
"sysprompt.txt"
] |
2024-01-10 | tedgett23/SplunkGPT | bin~splunkgpt.py | #!/usr/bin/env python
# coding=utf-8
import os, sys, json
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "lib"))
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
import openai
def getConfig():
if os.path.exists(os.path.join(os.path.dirname(__file__), "..", "local", "config.json")):
with open(os.path.join(os.path.dirname(__file__), "..", "local", "config.json")) as file:
config = json.load(file)
return config
else:
with open(os.path.join(os.path.dirname(__file__), "..", "default", "config.json")) as file:
config = json.load(file)
return config
config = getConfig()
openai.api_key = config["api_key"]
@Configuration()
class SplunkGPT(StreamingCommand):
def stream(self, records):
for record in records:
if "prompt" in record:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a senior IT engineer."},
{"role": "user", "content": record["prompt"]}
]
)
record["response"] = completion.choices[0].message.content
yield record
else:
yield record
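# Usage sketch from SPL, assuming the command is registered as `splunkgpt` in
# commands.conf (the prompt text is illustrative):
#     | makeresults | eval prompt="Summarize this kernel error" | splunkgpt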
dispatch(SplunkGPT, sys.argv, sys.stdin, sys.stdout, __name__) | [
"You are a senior IT engineer."
] |
2024-01-10 | Skower/document-retrieval | chatbot-rag~logic.py | """
This script is used to generate a RAG chain to answer questions about a pdf document.
"""
# Author: Bastien Pouessel
import os
from typing import List
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
from langchain.llms import HuggingFaceEndpoint
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain_core.runnables.base import RunnableSequence
from langchain_core.vectorstores import VectorStoreRetriever
HF_API_KEY = os.environ["HF_API_KEY"]
class MistralOutputParser(StrOutputParser):
"""OutputParser that parser llm result from Mistral API"""
def parse(self, text: str) -> str:
"""
        Returns only the model answer that follows the final [/INST] marker.
Args:
text (str): text to parse
Returns:
str: parsed text
"""
return text.split("[/INST]")[-1].strip()
def load_pdf(
document_path: str,
mode: str = "single",
strategy: str = "fast",
chunk_size: int = 500,
chunk_overlap: int = 0,
) -> List[str]:
"""
Load a pdf document and split it into chunks of text.
Args:
document_path (Path): path to the pdf document
mode (str, optional): mode of the loader. Defaults to "single".
strategy (str, optional): strategy of the loader. Defaults to "fast".
chunk_size (int, optional): size of the chunks. Defaults to 500.
chunk_overlap (int, optional): overlap of the chunks. Defaults to 0.
Returns:
List[str]: list of chunks of text
"""
# Load the document
loader = UnstructuredPDFLoader(
document_path,
mode=mode,
strategy=strategy,
)
docs = loader.load()
# Split the document into chunks of text
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
all_splits = text_splitter.split_documents(docs)
return all_splits
def store_vector(all_splits: List[str]) -> VectorStoreRetriever:
"""
Store vector of each chunk of text.
Args:
all_splits (List[str]): list of chunks of text
Returns:
VectorStoreRetriever: retriever that can be used to retrieve the vector of a chunk of text
"""
# Use the HuggingFace distilbert-base-uncased model to embed the text
embeddings_model_url = (
"https://api-inference.huggingface.co/models/distilbert-base-uncased"
)
embeddings = HuggingFaceInferenceAPIEmbeddings(
endpoint_url=embeddings_model_url,
api_key=HF_API_KEY,
)
# Store the embeddings of each chunk of text into ChromaDB
vector_store = Chroma.from_documents(all_splits, embeddings)
retriever = vector_store.as_retriever()
return retriever
def generate_mistral_rag_prompt() -> ChatPromptTemplate:
"""
Generate a prompt for Mistral API wiht RAG.
Returns:
ChatPromptTemplate: prompt for Mistral API
"""
template = "[INST] {context} {prompt} [/INST]"
prompt_template = ChatPromptTemplate.from_template(template)
return prompt_template
def generate_mistral_simple_prompt() -> ChatPromptTemplate:
"""
Generate a simple prompt for Mistral without RAG.
Returns:
ChatPromptTemplate: prompt for Mistral API
"""
template = "[INST] {prompt} [/INST]"
prompt_template = ChatPromptTemplate.from_template(template)
return prompt_template
def generate_rag_chain(retriever: VectorStoreRetriever = None) -> RunnableSequence:
"""
Generate a RAG chain with Mistral API and ChromaDB.
Args:
Retriever (VectorStoreRetriever): retriever that can be used to retrieve the vector of a chunk of text
Returns:
RunnableSequence: RAG chain
"""
# Use the Mistral Free prototype API
mistral_url = (
"https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"
)
model_endpoint = HuggingFaceEndpoint(
endpoint_url=mistral_url,
huggingfacehub_api_token=HF_API_KEY,
task="text2text-generation",
)
# Use a custom output parser
output_parser = MistralOutputParser()
# If no retriever is provided, use a simple prompt
if retriever is None:
entry = {"prompt": RunnablePassthrough()}
return entry | generate_mistral_simple_prompt() | model_endpoint | output_parser
# If a retriever is provided, use a RAG prompt
retrieval = {"context": retriever, "prompt": RunnablePassthrough()}
return retrieval | generate_mistral_rag_prompt() | model_endpoint | output_parser
| [
"[INST] {prompt} [/INST]",
"[INST] {context} {prompt} [/INST]"
] |
2024-01-10 | BradGaddis/ai2ai | Tools~Research~Academic.py | from langchain.document_loaders import TextLoader
from langchain.text_splitter import text_splitter
def get_paper(paper: str) -> str:
"""
Queries SciHub database and returns a text file with a paper.
"""
loader = TextLoader("https://sci-hub.se/" + paper)
document = loader.load()
text = text_splitter(document)
return document
| [] |
2024-01-10 | OCTOPAILTD/octopai.usage.data | pyNeo4jPOC~AzureNeo.py | from langchain.chat_models import AzureChatOpenAI # Import AzureChatOpenAI
from langchain.chains import GraphCypherQAChain
from langchain.graphs import Neo4jGraph
from flask import Flask,render_template, request, jsonify
# Define your Azure API credentials
BASE_URL = "https://octopai-ai.openai.azure.com"
API_KEY = "1296a1757ca44f0a80e022d2cfa6dca2"
DEPLOYMENT_NAME = "gpt-35-turbo" # In Azure, this deployment has version 0613 - input and output tokens are counted separately
DPName = 'TestOri'
# Create an instance of AzureChatOpenAI with your credentials
azure_chat_model = AzureChatOpenAI(
openai_api_base=BASE_URL,
openai_api_version="2023-05-15",
deployment_name=DPName,
openai_api_key=API_KEY,
openai_api_type="azure",
model_version="0613"
)
# Initialize your Neo4jGraph and GraphCypherQAChain as before
graph = Neo4jGraph(
url="bolt://10.0.19.4:7687",
username="neo4j",
password="pleaseletmein",
)
chain = GraphCypherQAChain.from_llm(azure_chat_model, graph=graph, verbose=True)
# Use the chain to make a query
response = chain.run("Who played in Top Gun?")
# Process the response as needed
print(response) | [] |
2024-01-10 | OCTOPAILTD/octopai.usage.data | ChatBotAPI~AzureArango.py | from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import AzureChatOpenAI # Import AzureChatOpenAI
from langchain.chains import GraphCypherQAChain
from arango import ArangoClient
from langchain.graphs import Neo4jGraph
from flask import Flask,render_template, request, jsonify
# Define your Azure API credentials
BASE_URL = "https://octopai-ai.openai.azure.com"
API_KEY = "1296a1757ca44f0a80e022d2cfa6dca2"
DEPLOYMENT_NAME = "gpt-35-turbo" # In Azure, this deployment has version 0613 - input and output tokens are counted separately
DPName = 'TestOri'
# Create an instance of AzureChatOpenAI with your credentials
azure_chat_model = AzureChatOpenAI(
openai_api_base=BASE_URL,
openai_api_version="2023-05-15",
deployment_name=DPName,
openai_api_key=API_KEY,
openai_api_type="azure",
# model_version="0613",
temperature="1"
)
# Initialize your Neo4jGraph and GraphCypherQAChain as before
graph = Neo4jGraph(
url="bolt://10.0.19.4:7687",
username="neo4j",
password="pleaseletmein",
)
# CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
# Instructions:
# Use only the provided relationship types and properties in the schema.
# Do not use any other relationship types or properties that are not provided.
# Schema:
# {schema}
# Note: Do not include any explanations or apologies in your responses.
# Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
# Do not include any text except the generated Cypher statement.
# Examples: Here are a few examples of generated Cypher statements for particular questions:
# # How many people played in Top Gun?
# MATCH (m:Movie {{title:"Top Gun"}})<-[:ACTED_IN]-()
# RETURN count(*) AS numberOfActors
#
# The question is:
# {question}"""
CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
Cypher examples:
# How many reports in the system?
MATCH (n:LINEAGEOBJECT)
WHERE TOUPPER(n.ToolType) = 'REPORT'
RETURN count(n) as numberOfReports
# Give me all Objects that feeds to a report called:'Marketing weekly Document
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
# The question is:
# {question}"""
CYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=CYPHER_GENERATION_TEMPLATE
)
memory = ConversationBufferMemory(memory_key="chat_history", input_key='question')
readonlymemory = ReadOnlySharedMemory(memory=memory)
chain = GraphCypherQAChain.from_llm(azure_chat_model, graph=graph, verbose=True,cypher_prompt=CYPHER_GENERATION_PROMPT, validate_cypher=True,memory=readonlymemory)
# Use the chain to make a query
try:
#response = chain.run("How many reports in the system?")
# response = chain.run("How many Tables in the system?")
response = chain.run("Give me all distinct Objects that feeds to a report called:'Marketing weekly Document' indirectly removing ETL")
# response = chain.run("Tell me about Pele")
# response = chain.run("""
# How many Tables in the system?
# """)
#response = chain.run("How many reports in the system")
print(response)
except Exception as e:
print(e)
# Process the response as needed
| [
"question",
"Task:Generate Cypher statement to query a graph database. \nInstructions: \nUse only the provided relationship types and properties in the schema. \nDo not use any other relationship types or properties that are not provided. \nSchema: \n{schema} \nNote: Do not include any explanations or apologies in your responses. \nDo not respond to any questions that might ask anything else than for you to construct a Cypher statement. \nDo not include any text except the generated Cypher statement. \nCypher examples: \n# How many reports in the system? \nMATCH (n:LINEAGEOBJECT)\nWHERE TOUPPER(n.ToolType) = 'REPORT'\nRETURN count(n) as numberOfReports\n\n# Give me all Objects that feeds to a report called:'Marketing weekly Document\n\n\n\nNote: Do not include any explanations or apologies in your responses.\nDo not respond to any questions that might ask anything else than for you to construct a Cypher statement.\nDo not include any text except the generated Cypher statement.\n\n# The question is:\n# {question}"
] |
2024-01-10 | OCTOPAILTD/octopai.usage.data | ChatBotAPI~AzureNeo.py | from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import AzureChatOpenAI # Import AzureChatOpenAI
from langchain.chains import GraphCypherQAChain
from langchain.graphs import Neo4jGraph
from flask import Flask,render_template, request, jsonify
# Define your Azure API credentials
BASE_URL = "https://octopai-ai.openai.azure.com"
API_KEY = "1296a1757ca44f0a80e022d2cfa6dca2"
DEPLOYMENT_NAME = "gpt-35-turbo" # In Azure, this deployment has version 0613 - input and output tokens are counted separately
DPName = 'TestOri'
# Create an instance of AzureChatOpenAI with your credentials
azure_chat_model = AzureChatOpenAI(
openai_api_base=BASE_URL,
openai_api_version="2023-05-15",
deployment_name=DPName,
openai_api_key=API_KEY,
openai_api_type="azure",
# model_version="0613",
temperature="1"
)
# Initialize your Neo4jGraph and GraphCypherQAChain as before
graph = Neo4jGraph(
url="bolt://10.0.19.4:7687",
username="neo4j",
password="pleaseletmein",
)
# CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
# Instructions:
# Use only the provided relationship types and properties in the schema.
# Do not use any other relationship types or properties that are not provided.
# Schema:
# {schema}
# Note: Do not include any explanations or apologies in your responses.
# Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
# Do not include any text except the generated Cypher statement.
# Examples: Here are a few examples of generated Cypher statements for particular questions:
# # How many people played in Top Gun?
# MATCH (m:Movie {{title:"Top Gun"}})<-[:ACTED_IN]-()
# RETURN count(*) AS numberOfActors
#
# The question is:
# {question}"""
CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
Cypher examples:
# How many reports in the system?
MATCH (n:LINEAGEOBJECT)
WHERE TOUPPER(n.ToolType) = 'REPORT'
RETURN count(n) as numberOfReports
# Give me all Objects that feeds to a report called:'Marketing weekly Document
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
# The question is:
# {question}"""
CYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=CYPHER_GENERATION_TEMPLATE
)
memory = ConversationBufferMemory(memory_key="chat_history", input_key='question')
readonlymemory = ReadOnlySharedMemory(memory=memory)
chain = GraphCypherQAChain.from_llm(azure_chat_model, graph=graph, verbose=True,cypher_prompt=CYPHER_GENERATION_PROMPT, validate_cypher=True,memory=readonlymemory)
# Use the chain to make a query
try:
#response = chain.run("How many reports in the system?")
# response = chain.run("How many Tables in the system?")
response = chain.run("Give me all distinct Objects that feeds to a report called:'Marketing weekly Document' indirectly removing ETL")
# response = chain.run("Tell me about Pele")
# response = chain.run("""
# How many Tables in the system?
# """)
#response = chain.run("How many reports in the system")
print(response)
except Exception as e:
print(e)
# Process the response as needed
| [
"question",
"Task:Generate Cypher statement to query a graph database. \nInstructions: \nUse only the provided relationship types and properties in the schema. \nDo not use any other relationship types or properties that are not provided. \nSchema: \n{schema} \nNote: Do not include any explanations or apologies in your responses. \nDo not respond to any questions that might ask anything else than for you to construct a Cypher statement. \nDo not include any text except the generated Cypher statement. \nCypher examples: \n# How many reports in the system? \nMATCH (n:LINEAGEOBJECT)\nWHERE TOUPPER(n.ToolType) = 'REPORT'\nRETURN count(n) as numberOfReports\n\n# Give me all Objects that feeds to a report called:'Marketing weekly Document\n\n\n\nNote: Do not include any explanations or apologies in your responses.\nDo not respond to any questions that might ask anything else than for you to construct a Cypher statement.\nDo not include any text except the generated Cypher statement.\n\n# The question is:\n# {question}"
] |
2024-01-10 | OCTOPAILTD/octopai.usage.data | ChatBotAPI~ChatAPI.py | import json
from langchain.chat_models import AzureChatOpenAI
from langchain.chains import GraphCypherQAChain
from langchain.graphs import Neo4jGraph
from flask import Flask, render_template, request
from langchain.prompts import PromptTemplate
import threading
class CypherGenerationPrompt:
template = """Task:Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
Cypher examples:
# How many reports in the system?
MATCH (n:LINEAGEOBJECT)
WHERE TOUPPER(n.ToolType) = 'REPORT'
RETURN count(n) as numberOfReports
# How many etls in the system?
MATCH (n:LINEAGEOBJECT)
WHERE TOUPPER(n.ToolType) = 'ETL'
RETURN count(n) as numberOfETLS
# How many views in the system?
MATCH (n:LINEAGEOBJECT)
WHERE TOUPPER(n.ObjectType) = 'VIEW'
RETURN count(n) as numberOfReports
# The question is:
# {question}"""
def __init__(self):
self.prompt_template = PromptTemplate(
input_variables=["schema", "question"],
template=self.template
)
class ChatApp:
def __init__(self, base_url, api_key, deployment_name, dp_name, graph_url, graph_username, graph_password):
self.azure_chat_model = AzureChatOpenAI(
openai_api_base=base_url,
openai_api_version="2023-05-15",
deployment_name=deployment_name,
openai_api_key=api_key,
openai_api_type="azure",
temperature="0.8"
)
self.graph = Neo4jGraph(
url=graph_url,
username=graph_username,
password=graph_password
)
self.chain = GraphCypherQAChain.from_llm(
self.azure_chat_model,
graph=self.graph,
verbose=True,
cypher_prompt=CypherGenerationPrompt().prompt_template,
validate_cypher=True
)
self.response = None
        self.lock = threading.Lock()  # Lock guarding shared response state across threads
# def run_chain(self, msg,response):
# response=""
# try:
# response = self.chain.run(msg)
# print(response)
# except Exception as e:
# print(e)
# response = str(e)
def run_chain(self, msg):
try:
result = self.chain.run(msg)
print(result)
# Acquire the lock before updating shared data
with self.lock:
self.response = result
except Exception as e:
print(e)
with self.lock:
self.response = str(e)
# def run_chain_with_timeout(self, msg, timeout_seconds):
# thread = threading.Thread(target=self.run_chain, args=(msg,))
# thread.start()
# thread.join(timeout=timeout_seconds)
def run_chain_with_timeout(self, msg, timeout_seconds):
response = ""
thread = threading.Thread(target=self.run_chain, args=(msg,))
thread.start()
thread.join(timeout=timeout_seconds)
if thread.is_alive():
thread.join()
print(f"Timeout! No response within {timeout_seconds} seconds.")
response = "Timeout message"
with self.lock:
return self.response
def get_completion(self, prompt, model="gpt-3.5-turbo", timeout_duration=10):
response = ""
try:
messages = [{"role": "user", "content": prompt}]
msg = messages[0]["content"]
# Use run_chain_with_timeout to run chain in a separate thread with a timeout
response = self.run_chain_with_timeout(msg, timeout_duration)
print(response)
if response == None or response == "":
response = "No results"
except Exception as e:
print(e)
response = str(e)
return response
# Configuration
with open('config.json', 'r') as file:
config = json.load(file)
# Initialize ChatApp with configuration
chat_app = ChatApp(
base_url=config["azure_chat"]["base_url"],
api_key=config["azure_chat"]["api_key"],
deployment_name=config["azure_chat"]["dp_name"],
dp_name=config["azure_chat"]["dp_name"],
graph_url=config["graph"]["url"],
graph_username=config["graph"]["username"],
graph_password=config["graph"]["password"]
)
# user_text="How many reports?"
# try:
# txt=chat_app.get_completion(user_text)
# # txt="zacay"
# print(txt)
# except Exception as e:
# print(e)
app = Flask(__name__)
#
@app.route("/")
def home():
try:
return render_template("index.html")
except Exception as e:
print(e)
@app.route("/get")
def get_bot_response():
    try:
        user_text = request.args.get('msg')
        response = chat_app.get_completion(user_text)
    except Exception as e:
        print(e)
        response = str(e)
    return response
if __name__ == "__main__":
try:
app.run()
except Exception as e:
print(e)
| [
"Task:Generate Cypher statement to query a graph database. \n Instructions: \n Use only the provided relationship types and properties in the schema. \n Do not use any other relationship types or properties that are not provided. \n Schema: \n {schema} \n Note: Do not include any explanations or apologies in your responses. \n Do not respond to any questions that might ask anything else than for you to construct a Cypher statement. \n Do not include any text except the generated Cypher statement. \n Cypher examples: \n # How many reports in the system? \n MATCH (n:LINEAGEOBJECT)\n WHERE TOUPPER(n.ToolType) = 'REPORT'\n RETURN count(n) as numberOfReports\n \n \n # How many etls in the system? \n MATCH (n:LINEAGEOBJECT)\n WHERE TOUPPER(n.ToolType) = 'ETL'\n RETURN count(n) as numberOfETLS\n \n # How many views in the system? \n MATCH (n:LINEAGEOBJECT)\n WHERE TOUPPER(n.ObjectType) = 'VIEW'\n RETURN count(n) as numberOfReports\n \n \n # The question is:\n # {question}"
] |
2024-01-10 | Hansen-chen/GPTeam | src~utils~logging.py | import atexit
import json
import logging
import os
import re
from datetime import datetime
from pathlib import Path
from typing import List
import openai
import pytz
def clean_json_string(json_string):
cleaned_string = re.sub(r"\\\'", r"'", json_string) # replace \' with '
cleaned_string = re.sub(
r'\\"', r'"', cleaned_string
) # replace \" with " on cleaned_string
return cleaned_string
def get_completion_data(text) -> List[str]:
pattern = r"(api_version=[^\s]+)|(data=(.+?)(?= [^\s]+=))|(message='(.+?)')"
matches = re.findall(pattern, text)
cleaned_matches = []
for match in matches:
for item in match:
if item != "":
cleaned_matches.append(item)
break
return cleaned_matches
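# Descriptive note (added): the pattern above pulls the "api_version=...",
# "data=..." and "message='...'" fragments out of a single OpenAI client debug
# line and returns them as a flat list of strings.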
def get_key_value(text):
pattern = r"(\w+)=((?:\"(?:\\\"|[^\"])*\")|(?:\'(?:\\\'|[^'])*\'))"
matches = re.findall(pattern, text)
result = {}
for match in matches:
key, value = match[0], match[1]
# Remove the outer quotes and unescape the inner quotes
if value.startswith('"'):
value = value[1:-1].replace('\\"', '"')
else:
value = value[1:-1].replace("\\'", "'")
result[key] = value
return result
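# Illustrative sketch (added for clarity; not part of the original module): the
# helper above targets key=value fragments in OpenAI client log lines. The
# sample string is hypothetical and only shows the expected output shape.
def _example_get_key_value():
    sample = "level='info' message='request finished'"
    return get_key_value(sample)  # -> {'level': 'info', 'message': 'request finished'}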
class OpenAIFilter(logging.Filter):
def filter(self, record):
return "openai" in record.name
class JsonArrayFileHandler(logging.FileHandler):
def __init__(self, filename, mode="a", encoding=None, delay=False):
super().__init__(filename, mode, encoding, delay)
self.closed_properly = False
self.stream.write("[")
atexit.register(self.close)
def close(self):
self.acquire()
try:
if not self.closed_properly:
self.stream.write("]")
self.closed_properly = True
super().close()
finally:
self.release()
def emit(self, record):
if self.stream.tell() > 1:
self.stream.write(",\n")
super().emit(record)
class LoggingFilter(logging.Filter):
def filter(self, record):
print("logging filter", record)
return True
def init_logging():
openai.util.logger.setLevel(logging.WARNING)
open("src/web/logs/agent.txt", "w").close()
def get_agent_logger():
# Create a logger
logger = logging.getLogger("agent")
logger.setLevel(logging.INFO)
# Prevent log messages from being passed to the root logger or any other ancestor logger
logger.propagate = False
# Remove all handlers associated with the logger object.
for handler in logger.handlers[:]:
logger.removeHandler(handler)
# Create a file handler
Path("src/web/logs/").mkdir(parents=True, exist_ok=True)
handler = logging.FileHandler("src/web/logs/agent.txt")
handler.setLevel(logging.INFO)
# Add the handlers to the logger
logger.addHandler(handler)
return logger
agent_logger = get_agent_logger()
| [] |
2024-01-10 | indabaxbenin/patient_triage | medical_chatbot.py | import openai
openai.api_key = "YOUR_API_KEY_HERE"
def get_severity_chatgpt(user_input):
question_to_ask = f"Strictly classify the complaint in quotes into the following severity groups: Very Severe, Moderately Severe, and Not Severe. Ensure that only the category is returned. No other additional text: '{user_input}'."
    response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = [{"role": "system", "content": "You are a chatbot"},
{"role": "user", "content": question_to_ask},
])
result = ''
for answer in response.choices:
result += answer.message.content
print(result)
return result
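# Hypothetical helper (added as an illustration; not part of the original script):
# the prompt asks for exactly one of three labels, but replies can vary in casing
# or carry stray whitespace, so a small normaliser keeps downstream triage logic simple.
def normalize_severity(raw_result):
    for label in ("Very Severe", "Moderately Severe", "Not Severe"):
        if label.lower() in raw_result.strip().lower():
            return label
    return "Unknown"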
user_input = "I have diabetes and I am running out of breath. I can't breathe properly and I have fainted 2 times today."
get_severity_chatgpt(user_input) | [
"Strictly classify the complaint in quotes into the following severity groups: Very Severe, Moderately Severe, and Not Severe. Ensure that only the category is returned. No other additional text: 'I have diabetes and I am running out of breath. I can't breathe properly and I have fainted 2 times today.'.",
"You are a chatbot"
] |
2024-01-10 | xl-sr/open_clip | src~open_clip~factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
import torch
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
resize_pos_embed, get_cast_dtype
from .coca_model import CoCa
from .loss import ClipLoss, DistillClipLoss, CoCaLoss
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform, AugmentationCfg
from .tokenizer import HFTokenizer, tokenize
HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
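# Example (added): _natural_key('ViT-B-32') -> ['vit-b-', 32, ''], so config names
# sort with numeric parts compared as integers (2 before 10) rather than lexicographically.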
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def get_tokenizer(model_name):
if model_name.startswith(HF_HUB_PREFIX):
tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])
else:
config = get_model_config(model_name)
tokenizer = HFTokenizer(
config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
return tokenizer
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def load_checkpoint(model, checkpoint_path, strict=True):
state_dict = load_state_dict(checkpoint_path)
# detect old format and make compatible with new format
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
state_dict = convert_to_custom_text_state_dict(state_dict)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
require_pretrained: bool = False,
):
has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
if has_hf_hub_prefix:
model_id = model_name[len(HF_HUB_PREFIX):]
checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir)
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
pretrained_cfg = config['preprocess_cfg']
model_cfg = config['model_cfg']
else:
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
checkpoint_path = None
pretrained_cfg = {}
model_cfg = None
if isinstance(device, str):
device = torch.device(device)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
else:
model_cfg = model_cfg or get_model_config(model_name)
if model_cfg is not None:
logging.info(f'Loaded {model_name} model config.')
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout
if force_image_size is not None:
# override model config's image size
model_cfg["vision_cfg"]["image_size"] = force_image_size
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
cast_dtype = get_cast_dtype(precision)
is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model
if custom_text:
if is_hf_model:
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
if "coca" in model_name:
model = CoCa(**model_cfg, cast_dtype=cast_dtype)
else:
model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
else:
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
pretrained_loaded = False
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
error_str = (
                    f'Pretrained weights ({pretrained}) not found for model {model_name}. '
                    f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}).')
logging.warning(error_str)
raise RuntimeError(error_str)
pretrained_loaded = True
elif has_hf_hub_prefix:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
pretrained_loaded = True
if require_pretrained and not pretrained_loaded:
# callers of create_model_from_pretrained always expect pretrained weights
raise RuntimeError(
f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.')
model.to(device=device)
if precision in ("fp16", "bf16"):
convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16)
# set image / mean metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
if jit:
model = torch.jit.script(model)
return model
def create_loss(args):
if args.distill:
return DistillClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
elif "coca" in args.model.lower():
return CoCaLoss(
caption_loss_weight=args.coca_caption_loss_weight,
clip_loss_weight=args.coca_contrastive_loss_weight,
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
return ClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
def create_model_and_transforms(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
):
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_patch_dropout=force_patch_dropout,
force_image_size=force_image_size,
pretrained_image=pretrained_image,
pretrained_hf=pretrained_hf,
cache_dir=cache_dir,
output_dict=output_dict,
)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess_train = image_transform(
model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg,
)
preprocess_val = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess_train, preprocess_val
def create_model_from_pretrained(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
return_transform: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
cache_dir: Optional[str] = None,
):
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_image_size=force_image_size,
cache_dir=cache_dir,
require_pretrained=True,
)
if not return_transform:
return model
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess
| [] |
2024-01-10 | timcarpe/nl-asm | scripts~gpt_functions.py | import sys
import os
import json
import openai
import time
from dotenv import load_dotenv
from logging_functions import log_message
# Load variables from .env file
load_dotenv()
# Access environment variables
openai.api_key = os.getenv('OPENAI_API_KEY')
model = "gpt-3.5-turbo"
#Retriece the JSON string and return the JSON object. Useful for ensuring JSON is valid from GPT-3.5 responses
def retrieve_JSON(json_string):
start = json_string.find("{")
end = json_string.rfind("}")
split = json_string[start:end+1]
try:
temp_json = json.loads(split)
except Exception as e:
print("Error: " + str(e) + "\n\n" "JSON string:" + json_string)
sys.exit(1)
return temp_json
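# Small illustration (added; not part of the original script): retrieve_JSON pulls
# the outermost {...} block out of a model reply that may wrap the JSON in prose.
def _example_retrieve_JSON():
    reply = 'Sure! {"status": "ok", "count": 2} Hope that helps.'
    return retrieve_JSON(reply)  # -> {'status': 'ok', 'count': 2}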
#Get the GPT-3.5 response and return the content
def get_gpt_response_content(role, prompt):
try:
response = openai.ChatCompletion.create(
model=model,
temperature=0.4,
messages=[
{"role": "system", "content": role},
{"role": "user", "content": prompt},
]
)
    except openai.error.OpenAIError as e:
print("Error: " + str(e))
print("Waiting 1 second to try again...")
time.sleep(1)
return get_gpt_response_content(role, prompt)
log_message("\n\nGPT-3.5 Response:\n" + "Role: " + role + "\n" + "Prompt: " + prompt + "\n" + "Response: " + response.choices[0].message.content + "\n\n")
return ''.join(response.choices[0].message.content) | [] |
2024-01-10 | 20001LastOrder/practical-llms | apps~slackbot~bolt_app.py | ##############################################
# Implementation of the slack app using Bolt
# Importing necessary modules
##############################################
import os
from dotenv import load_dotenv
from flask import Flask, request
load_dotenv()
from langchain.chat_models import ChatOpenAI
from os import environ
from vectorstores import ConversationStore, LocalChromaStore
from slack_bolt import App
from slack_bolt.adapter.flask import SlackRequestHandler
from tools import get_tools
from task_agent import TaskAgent
# This `app` represents your existing Flask app
app = App(
token=os.environ.get("SLACK_OAUTH_TOKEN"),
signing_secret=os.environ.get("SLACK_SIGNING_SECRET"),
)
#####################################################################################################
# Setting up environment variables and Slack configuration:
# The code retrieves various environment variables using os.environ.get() method.
# Environment variables include Slack signing secret, OAuth token, verification token, and OpenAI key.
#####################################################################################################
SLACK_SIGNING_SECRET = environ.get("SLACK_SIGNING_SECRET")
SLACK_OAUTH_TOKEN = environ.get("SLACK_OAUTH_TOKEN")
VERIFICATION_TOKEN = environ.get("VERIFICATION_TOKEN")
OPENAI_KEY=environ.get("OPENAI_KEY")
SLACK_PORT = environ.get("SLACK_PORT", 3000)
###########################################################################
# Instantiating Slack client and Flask app:
###########################################################################
#instantiating slack client
os.environ['OPENAI_API_KEY'] = OPENAI_KEY
@app.command("/hello-socket-mode")
def hello_command(ack, body):
user_id = body["user_id"]
ack(f"Hi, <@{user_id}>!")
bot = app.client.auth_test()
print(bot)
def contains_verbose(query: str) -> bool:
'''looks for -verbose in the question and returns True or False'''
return "-verbose" in query.lower()
def contains_verbosex(query: str) -> bool:
'''looks for -verbosex in the question and returns True or False'''
return "-verbosex" in query.lower()
@app.event("app_mention")
def event_test(client, say, event):
question = event['text']
thread_ts = event.get("thread_ts", None) or event["ts"]
replies = client.conversations_replies(channel=event['channel'], ts=thread_ts)
previous_messages = replies['messages'][:-1]
results, verbose_message = get_response(question, previous_messages)
say(results, thread_ts=thread_ts)
if contains_verbose(question):
say(f"#verbose message: \n```{verbose_message}```", thread_ts=thread_ts)
@app.event("app_home_opened")
def update_home_tab(client, event, logger):
try:
# views.publish is the method that your app uses to push a view to the Home tab
client.views_publish(
# the user that opened your app's app home
user_id=event["user"],
# the view object that appears in the app home
view={
"type": "home",
"callback_id": "home_view",
# body of the view
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*Welcome to your _App's Home_* :tada:"
}
},
{
"type": "divider"
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "This button won't do much for now but you can set up a listener for it using the `actions()` method and passing its unique `action_id`. See an example in the `examples` folder within your Bolt app."
}
},
{
"type": "actions",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"text": "Click me!"
}
}
]
}
]
}
)
except Exception as e:
logger.error(f"Error publishing home tab: {e}")
flask_app = Flask(__name__)
handler = SlackRequestHandler(app)
@flask_app.route("/slack/events", methods=["POST"])
def slack_events():
return handler.handle(request)
@flask_app.route("/hello", methods=["GET"])
def hello():
return "OK"
if 'PINECONE_API_KEY' not in os.environ:
print("Warning: Pinecone API key not specified. Using local Chroma database.")
local_memory = LocalChromaStore.from_folder('files', OPENAI_KEY).as_retriever()
# ---- add this for verbose output --- #
def log_formatter(logger):
'''Formats the logger into readable string'''
log_strings = []
for log in logger:
reply = log["reply"]
if "thoughts" in reply:
# reply = json.loads(reply)
formatted_reply = f"""-- Step: {log["Step"]} -- \nThoughts: \n {reply["thoughts"]} """
if "command" in reply: # add command if it exists
formatted_reply += f"""\nCommand: \n {reply["command"]}"""
log_strings.append(formatted_reply)
else: # for final response
formatted_reply = f"""-- Step: {log["Step"]} -- \nFinal Response: \n {reply}"""
log_strings.append(formatted_reply)
log_string = "\n".join(log_strings)
return log_string
def show_commands_only(logger):
'''Modified version of log_formatter that only shows commands'''
log_strings = []
for log in logger:
reply = log["reply"]
if "command" in reply:
# reply = json.loads(reply)
formatted_reply = f"""-- Step: {log["Step"]} -- \nCommand: \n {reply["command"]}"""
log_strings.append(formatted_reply)
else: # for final response
formatted_reply = f"""-- Step: {log["Step"]} -- \nFinal Response: \n {reply}"""
log_strings.append(formatted_reply)
log_string = "\n".join(log_strings)
return log_string
def get_response(question, previous_messages):
llm = ChatOpenAI(
openai_api_key=OPENAI_KEY, request_timeout=120
)
if os.environ.get("PINECONE_API_KEY", False):
# If pinecone API is specified, then use the Pinecone Database
memory = ConversationStore.get_vector_retrieval(
'ReadTheDocs', OPENAI_KEY, index_name=os.getenv("PINECONE_INDEX"), search_type='similarity_score_threshold', search_kwargs={'score_threshold': 0.0}
)
else:
# use the local Chroma database
memory = local_memory
tools=get_tools(memory)
ai_name='Sherpa'
ai_id = bot['user_id']
task_agent = TaskAgent.from_llm_and_tools(ai_name="Sherpa",
ai_role="assistant",
ai_id=bot['user_id'],
memory=memory, tools=tools,
previous_messages = previous_messages,
llm=llm)
if contains_verbosex(query = question):
print("Verbose mode is on, show all")
question = question.replace(f'@{ai_id}', f'@{ai_name}')
question = question.replace('-verbose', '')
response = task_agent.run(question)
logger = task_agent.logger # logger is updated after running task_agent.run(question)
try: # in case log_formatter fails
verbose_message = log_formatter(logger)
except:
verbose_message = str(logger)
return response, verbose_message
elif contains_verbose(query = question):
print("Verbose mode is on, commands only")
question = question.replace(f'@{ai_id}', f'@{ai_name}')
question = question.replace('-verbose', '')
response = task_agent.run(question)
logger = task_agent.logger # logger is updated after running task_agent.run(question)
try: # in case log_formatter fails
verbose_message = show_commands_only(logger)
except:
verbose_message = str(logger)
return response, verbose_message
else:
print("Verbose mode is off")
question = question.replace(f'@{ai_id}', f'@{ai_name}')
response = task_agent.run(question)
return response, None
# Start the server on port 3000
if __name__ == "__main__":
# documents = getDocuments('files')
# vectorstore = getVectoreStore(documents)
# qa = createLangchainQA(vectorstore)
# chain = createIndex("files")
print('Running the app')
flask_app.run(host="0.0.0.0", port=SLACK_PORT)
# SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
| [] |
2024-01-10 | 20001LastOrder/practical-llms | apps~slackbot~output_parser.py | import json
import re
from abc import abstractmethod
from typing import Dict, NamedTuple
from langchain.schema import BaseOutputParser
class TaskAction(NamedTuple):
name: str
args: Dict
class BaseTaskOutputParser(BaseOutputParser):
@abstractmethod
def parse(self, text: str) -> TaskAction:
"""Return TaskAction"""
def preprocess_json_input(input_str: str) -> str:
# Replace single backslashes with double backslashes,
# while leaving already escaped ones intact
corrected_str = re.sub(
r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
)
return corrected_str
class TaskOutputParser(BaseTaskOutputParser):
def parse(self, text: str) -> TaskAction:
try:
parsed = json.loads(text, strict=False)
except json.JSONDecodeError:
preprocessed_text = preprocess_json_input(text)
try:
parsed = json.loads(preprocessed_text, strict=False)
except Exception:
return TaskAction(
name="ERROR",
args={"error": f"Could not parse invalid json: {text}"},
)
try:
return TaskAction(
name=parsed["command"]["name"],
args=parsed["command"]["args"],
)
except (KeyError, TypeError):
# If the command is null or incomplete, return an erroneous tool
return TaskAction(
name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
) | [] |
2024-01-10 | 20001LastOrder/practical-llms | apps~slackbot~task_agent.py | from typing import List, Optional
from pydantic import ValidationError
import openai
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from output_parser import TaskOutputParser, BaseTaskOutputParser
from prompt import SlackBotPrompt
from langchain.schema import Document
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
from tools import UserInputTool
import json
from langchain.schema import (
BaseMessage,
HumanMessage,
SystemMessage,
AIMessage
)
class TaskAgent:
"""Agent class for handling a single task"""
def __init__(
self,
ai_name: str,
ai_id: str,
memory: VectorStoreRetriever,
chain: LLMChain,
output_parser: BaseTaskOutputParser,
tools: List[BaseTool],
previous_messages,
feedback_tool: Optional[HumanInputRun] = None,
max_iterations: int = 5,
):
self.ai_name = ai_name
self.memory = memory
# self.full_message_history: List[BaseMessage] = []
self.next_action_count = 0
self.chain = chain
self.output_parser = output_parser
self.tools = tools
self.feedback_tool = feedback_tool
self.max_iterations = max_iterations
self.loop_count = 0
self.ai_id = ai_id
self.previous_message = {}
self.previous_message["chat_history"] = self.process_chat_history(previous_messages)
self.previous_message["react_history"] = []
self.logger = [] # added by JF
# print(self.full_message_history)
# print("message:", self.previous_message)
@classmethod
def from_llm_and_tools(
cls,
ai_name: str,
ai_role: str,
ai_id: str,
memory: VectorStoreRetriever,
tools: List[BaseTool],
llm: BaseChatModel,
previous_messages,
human_in_the_loop: bool = False,
output_parser: Optional[BaseTaskOutputParser] = None,
max_iterations: int = 5,
):
prompt = SlackBotPrompt(
ai_name=ai_name,
ai_role=ai_role,
tools=tools,
# input_variables=["memory", "messages", "user_input", "task"],
input_variables=["memory", "messages", "user_input", "task"],
token_counter=llm.get_num_tokens,
)
human_feedback_tool = HumanInputRun() if human_in_the_loop else None
chain = LLMChain(llm=llm, prompt=prompt)
return cls(
ai_name,
ai_id,
memory,
chain,
output_parser or TaskOutputParser(),
tools,
previous_messages,
feedback_tool=human_feedback_tool,
max_iterations=max_iterations,
)
def run(self, task: str) -> str:
user_input = (
"Determine which next command to use. "
"and respond using the JSON format specified above without any extra text."
"\n JSON Response: \n"
)
# Interaction Loop
previous_action = ""
while True:
# Discontinue if continuous limit is reached
loop_count = self.loop_count
print(f"Step: {loop_count}/{self.max_iterations}")
logger_step = {"Step": f"{loop_count}/{self.max_iterations}"} # added by JF
if loop_count >= self.max_iterations:
user_input = (
f"Use the above information to respond to the user's message:\n{task}\n\n"
f"If you use any resource, then create inline citation by adding the source link of the reference document at the of the sentence."
f"Only use the link given in the reference document. DO NOT create link by yourself. DO NOT include citation if the resource is not necessary. "
"only write text but NOT the JSON format specified above. \nResult:"
)
# Send message to AI, get response
try:
assistant_reply = self.chain.run(
task=task,
messages=self.previous_message,
memory=self.memory,
user_input=user_input,
)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
return f"OpenAI API returned an API Error: {e}"
except openai.error.APIConnectionError as e:
#Handle connection error here
return f"Failed to connect to OpenAI API: {e}"
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
return f"OpenAI API request exceeded rate limit: {e}"
except openai.error.AuthenticationError as e:
#Handle rate limit error (we recommend using exponential backoff)
return f"OpenAI API failed authentication or incorrect token: {e}"
except openai.error.Timeout as e:
return f"OpenAI API Timeout error: {e}"
except openai.error.ServiceUnavailableError as e:
return f"OpenAI API Service unavailable: {e}"
except openai.error.InvalidRequestError as e:
return f"OpenAI API invalid request error: {e}"
print("reply:", assistant_reply)
# added by JF
try:
reply_json = json.loads(assistant_reply)
logger_step['reply'] = reply_json
except json.JSONDecodeError as e:
logger_step['reply'] = assistant_reply # last reply is a string
self.logger.append(logger_step)
# return assistant_reply
            # return if maximum iteration limit is reached
if loop_count >= self.max_iterations:
# TODO: this should be handled better, e.g. message for each task
# self.logger.session.context["full_messages"] = []
# self.logger.session.save()
# self.logger.log(FinishLog(content=assistant_reply))
try:
result = json.loads(assistant_reply)
except:
return assistant_reply
# if the LLM does not propose command
if result["command"] == {}:
return result["thoughts"]["speak"]
return result["command"]["args"]["response"]
# Get command name and arguments
action = self.output_parser.parse(assistant_reply)
print("\nprevious action:", previous_action, "\n")
print("\naction:", action, "\n")
tools = {t.name: t for t in self.tools}
if action == previous_action:
print("!!!!! Same action !!!!!")
result = (
f"Determine which next command to use. Do not give the command '{action}''"
f"If you do not need further information to answer the question, you can use the finish command"
)
self.loop_count += 1
else:
if action.name == "finish":
self.loop_count = self.max_iterations
result = "Finished task. "
# try:
# result = json.loads(assistant_reply)
# except:
# return assistant_reply
# return result["command"]["args"]["response"]
elif action.name in tools:
tool = tools[action.name]
if tool.name == "UserInput":
# return {'type': 'user_input', 'query': action.args['query']} // must return text
return str({'type': 'user_input', 'query': action.args['query']})
try:
observation = tool.run(action.args)
except ValidationError as e:
observation = (
f"Validation Error in args: {str(e)}, args: {action.args}"
)
except Exception as e:
observation = (
f"Error: {str(e)}, {type(e).__name__}, args: {action.args}"
)
result = f"Command {tool.name} returned: {observation}"
elif action.name == "ERROR":
result = f"Error: {action.args}. "
else:
result = (
f"Unknown command '{action.name}'. "
f"Please refer to the 'COMMANDS' list for available "
f"commands and only respond in the specified JSON format."
)
self.loop_count += 1
memory_to_add = (
f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
)
if self.feedback_tool is not None:
feedback = f"\n{self.feedback_tool.run('Input: ')}"
if feedback in {"q", "stop"}:
print("EXITING")
return "EXITING"
memory_to_add += feedback
# self.memory.add_documents([Document(page_content=memory_to_add)])
# print("result:: ", result)
self.previous_message["react_history"].append(HumanMessage(content=memory_to_add))
previous_action = action
def set_user_input(self, user_input: str):
result = f"Command UserInput returned: {user_input}"
assistant_reply = self.logger.get_full_messages()[-1].content
memory_to_add = (
f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
)
self.memory.add_documents([Document(page_content=memory_to_add)])
def process_chat_history(self, messages: List[dict]) -> List[BaseMessage]:
results = []
for message in messages:
# print(message)
if message['type'] != 'message' and message['type'] != 'text':
continue
message_cls = AIMessage if message['user'] == self.ai_id else HumanMessage
# replace the at in the message with the name of the bot
text = message['text'].replace(f'@{self.ai_id}', f'@{self.ai_name}')
# added by JF
text = text.split("#verbose", 1)[0] # remove everything after #verbose
text = text.replace('-verbose', '') # remove -verbose if it exists
results.append(message_cls(content=text))
return results
| [
"user_input"
] |
2024-01-10 | charliettaylor/gpt-showdown | api~assistant.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
class Assistant:
def __init__(self):
self.messages = []
self.prompt = self.fetch_prompt()
self.meta_setup()
print("GPT CONNECTED -- ")
def fetch_prompt(self):
dat = None
with open("api/prompt.in", "r") as inp:
dat = inp.read()
return dat
def write_message(self, role: str, content: str) -> str:
to_write = {"role": role, "content": content}
self.messages.append(to_write)
api_response = self.get_api_response()
api_response_message = self.get_response_message(api_response)
api_response_content = self.get_response_content(api_response)
self.update_messages(api_response_message)
return api_response_content
def update_messages(self, message) -> None:
self.messages.append(message)
def meta_setup(self, use_gameshow_prompt=True) -> None:
openai.api_key = os.getenv("API_KEY")
self.write_message(
role="system",
content=self.prompt
if use_gameshow_prompt
else "You are a helpful assistant.",
)
def get_api_response(self):
return openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=self.messages
)
def get_response_message(self, resp):
try:
return resp["choices"][0]["message"]
except:
raise ValueError("Failed to get response message from response object")
def get_response_content(self, resp):
try:
return resp["choices"][0]["message"]["content"]
except:
raise ValueError("Failed to get response content from response object")
if __name__ == "__main__":
gpt = Assistant()
print(gpt.prompt)
| [] |
2024-01-10 | HarinKhakhi/wordcorn | src~grouped_script.py | import sys
import os
import json
from tqdm import tqdm
from dotenv import load_dotenv
from copy import deepcopy
import threading
from csv import DictReader
from openai import OpenAI
import utils as utils
###################### Configuration ######################
TOTAL_THREADS = 8
wordlist_file = sys.argv[1]
output_dir = sys.argv[2]
output_file = sys.argv[3]
operation_mode = sys.argv[4]
if not os.path.isdir(output_dir): os.makedirs(output_dir)
load_dotenv()
openai_client = OpenAI()
current_config = utils.load_configuration(file='./assets/default_config.json')
current_config = current_config.model_dump(exclude_none=True)
logger = utils.get_logger('grouped_script')
logger.info('script started...')
logger.info('current configuration: %s', current_config)
###########################################################
############################ functions ############################
def get_wordlist(input_file):
wordlist = {}
with open(input_file, 'r') as wordlist_file:
reader = DictReader(wordlist_file)
for row in reader:
for group_name, word in row.items():
if not group_name in wordlist:
wordlist[group_name] = []
wordlist[group_name].append(word)
return wordlist
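# Illustrative note (added): the wordlist CSV is expected to have one column per
# group, e.g.
#   animals,colors
#   cat,red
#   dog,blue
# which get_wordlist turns into {'animals': ['cat', 'dog'], 'colors': ['red', 'blue']}.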
def perform_task(group_name, words):
global output_dir, operation_mode, current_config, logger, openai_client
# check if already requested
if (operation_mode != 'override') and os.path.isfile(f'{output_dir}/{group_name}.txt'):
return
# setting up configuration
new_config = deepcopy(current_config)
new_config['messages'].append({
'role': 'user',
'content': f'the list of word is {list(words)}'
})
# calling api
logger.debug('calling chatgpt-api with args: %s', json.dumps(new_config, indent=4))
response = openai_client.chat.completions.create(
**new_config
)
logger.info('got response: %s', response)
# writing to file
json_object = response.choices[0].message.content
object_file = open(f'{output_dir}/{group_name}.txt', 'w')
object_file.write(json_object)
object_file.close()
def combine_results(input_dir, output_file):
def get_data(file):
try:
with open(file, 'r') as f:
return json.load(f)
except:
print('not json:', file)
return {}
arr = []
for filename in os.listdir(input_dir):
obj = {
'group': filename.split('.')[0],
**get_data(os.path.join(input_dir, filename))
}
arr.append(obj)
with open(output_file, 'w') as output:
json.dump(arr, output, indent=4)
###########################################################
grouped_wordlist = get_wordlist(wordlist_file)
total_count = len(grouped_wordlist) if operation_mode != 'test' else TOTAL_THREADS
for start_i in tqdm(range(0, total_count, TOTAL_THREADS)):
threads = []
for group_name in list(grouped_wordlist.keys())[start_i: start_i+TOTAL_THREADS]:
thread = threading.Thread(target=perform_task, args=(group_name, grouped_wordlist[group_name]))
thread.start()
threads.append(thread)
for thread in threads: thread.join()
combine_results(output_dir, output_file) | [
"the list of word is ['PLACEHOLDER']"
] |
2024-01-10 | HarinKhakhi/wordcorn | src~script.py | import sys
import os
import json
from tqdm import tqdm
from dotenv import load_dotenv
from copy import deepcopy
import threading
from openai import OpenAI
import utils as utils
###################### Configuration ######################
TOTAL_THREADS = 10
wordlist_file = sys.argv[1]
output_dir = sys.argv[2]
output_file = sys.argv[3]
operation_mode = sys.argv[4]
if not os.path.isdir(output_dir): os.makedirs(output_dir)
load_dotenv()
openai_client = OpenAI()
current_config = utils.load_configuration(file='./assets/default_config.json')
current_config = current_config.model_dump(exclude_none=True)
logger = utils.get_logger('script')
logger.info('script started...')
logger.info('current configuration: %s', current_config)
###########################################################
############################ functions ############################
def get_wordlist(input_file):
wordlist_file = open(input_file, encoding='utf-8')
wordlist = [word.strip().lower() for word in wordlist_file.readlines()]
wordlist_file.close()
return wordlist
def perform_task(word):
global output_dir, operation_mode, current_config, logger, openai_client
# check if already requested
if (operation_mode != 'override') and os.path.isfile(f'{output_dir}/{word}.txt'):
return
# setting up configuration
new_config = deepcopy(current_config)
new_config['messages'].append({
'role': 'user',
'content': f'the word is {word}'
})
# calling api
logger.debug('calling chatgpt-api with args: %s', json.dumps(new_config, indent=4))
response = openai_client.chat.completions.create(
**new_config
)
logger.info('got response: %s', response)
# writing to file
json_object = response.choices[0].message.content
object_file = open(f'{output_dir}/{word}.txt', 'w')
object_file.write(json_object)
object_file.close()
def combine_results(input_dir, output_file):
def get_data(file):
try:
with open(file, 'r') as f:
return json.load(f)
except:
            print('not json:', file)
return {}
arr = []
for filename in os.listdir(input_dir):
obj = {
'word': filename.split('.')[0],
**get_data(os.path.join(input_dir, filename))
}
arr.append(obj)
with open(output_file, 'w') as output:
json.dump(arr, output, indent=4)
###########################################################
wordlist = get_wordlist(wordlist_file)
total_count = len(wordlist) if operation_mode != 'test' else TOTAL_THREADS
for start_i in tqdm(range(0, total_count, TOTAL_THREADS)):
threads = []
for word in wordlist[start_i: start_i+TOTAL_THREADS]:
thread = threading.Thread(target=perform_task, args=(word, ))
thread.start()
threads.append(thread)
for thread in threads: thread.join()
combine_results(output_dir, output_file) | [
"the word is PLACEHOLDER"
] |
2024-01-10 | latiotech/LAST | LAST.py | from openai import OpenAI
import os
import sys
import requests
from github import Github
import subprocess
openaikey = os.environ.get('OPENAI_API_KEY')
githubkey = os.environ.get('GITHUB_TOKEN')
client = OpenAI(api_key=openaikey)
def get_changed_files_github(directory, base_ref, head_ref):
"""
Returns a list of files that have been changed in the pull request, excluding deleted files.
"""
changed_files = []
try:
os.chdir(directory)
result = subprocess.check_output(["git", "diff", "--name-status", f"{base_ref}...{head_ref}"], text=True)
lines = result.strip().split('\n')
for line in lines:
status, file_path = line.split(maxsplit=1)
if status != 'D': # Exclude deleted files
changed_files.append(file_path)
except subprocess.CalledProcessError as e:
print(f"Error getting changed files: {e}")
return changed_files
def get_changed_files(directory):
"""
Returns a list of files that have been changed locally.
"""
changed_files = []
try:
os.chdir(directory)
result = subprocess.check_output(["git", "diff", "--name-status"], text=True)
if not result.strip():
return None # Indicate no changes
lines = result.strip().split('\n')
for line in lines:
if line: # Check if the line is not empty
status, file_path = line.split(maxsplit=1)
if status != 'D': # Exclude deleted files
changed_files.append(file_path)
except subprocess.CalledProcessError as e:
print(f"Error getting changed files: {e}")
return changed_files
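# Note (added for clarity): `git diff --name-status` emits one "<status>\t<path>"
# line per file, e.g. "M\tsrc/app.py" or "D\told.py"; the two helpers above keep
# every path whose status is not 'D' (deleted).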
def get_line_changes_github(directory, base_ref, head_ref):
"""
Returns a string containing line changes between the base and head branches of a pull request.
"""
line_changes = ""
try:
os.chdir(directory)
# Getting line changes between the base and head branches of the PR
result = subprocess.check_output(["git", "diff", f"{base_ref}...{head_ref}"], text=True)
line_changes = result.strip()
except subprocess.CalledProcessError as e:
print(f"Error getting line changes: {e}")
return line_changes
def get_line_changes(directory):
"""
Returns a string containing line changes from the latest commit.
"""
line_changes = ""
try:
os.chdir(directory)
# Getting line changes for the last commit
result = subprocess.check_output(["git", "diff", "HEAD", "HEAD~1"], text=True)
line_changes = result.strip()
except subprocess.CalledProcessError as e:
print(f"Error getting line changes: {e}")
return line_changes
def full_sec_scan(application_summary):
"""
This function sends a code snippet to OpenAI's API to check for security vulnerabilities.
"""
try:
response = client.chat.completions.create(
model="gpt-4-1106-preview", # Choose the appropriate engine
messages=[
{"role": "system", "content": "You are an application security expert, skilled in explaining complex programming vulnerabilities with simplicity. You will receive the full code for an application. Your task is to review the code for security vulnerabilities and suggest improvements. Don't overly focus on one file, and instead provide the top security concerns based on what you think the entire application is doing."},
{"role": "user", "content": application_summary}
]
)
message = response.choices[0].message.content
return message
except Exception as e:
return f"Error occurred: {e}"
def full_scan(directory):
"""
Scans all files in the specified directory holistically for security issues.
"""
application_summary = ""
for root, dirs, files in os.walk(directory):
for file in files:
file_path = os.path.join(root, file)
try:
with open(file_path, 'r') as f:
application_summary += f"\n\nFile: {file}\n"
application_summary += f.read()
except UnicodeDecodeError:
try:
with open(file_path, 'r', encoding='latin-1') as f:
application_summary += f"\n\nFile: {file}\n"
application_summary += f.read()
except Exception as e:
print(f"Error reading {file_path}: {e}")
result = full_sec_scan(application_summary)
return result
def partial_sec_scan(application_summary):
"""
This function sends a code snippet to OpenAI's API to check for security vulnerabilities.
"""
try:
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106", # Choose the appropriate engine
messages=[
{"role": "system", "content": "You are an application security expert, skilled in explaining complex programming vulnerabilities with simplicity. You will receive changed code as part of a pull request, followed by the rest of the file. Your task is to review the code change for security vulnerabilities and suggest improvements. Pay attention to if the code is getting added or removed. Suggest specific code fixes where applicable."},
{"role": "user", "content": application_summary}
]
)
message = response.choices[0].message.content
return message
except Exception as e:
return f"Error occurred: {e}"
def github_scan(repo_name, pr_number, github_token):
"""
Scans files changed in the specified GitHub pull request holistically.
"""
g = Github(github_token)
repo = g.get_repo(repo_name)
pr = repo.get_pull(pr_number)
files = pr.get_files()
changes_summary = ""
for file in files:
changes_summary += f"\n\nFile: {file.filename}\n"
url = file.raw_url
response = requests.get(url)
if response.status_code == 200:
changes_summary += response.text
else:
print(f"Failed to fetch {file.filename}")
result = partial_sec_scan(changes_summary)
return result
def partial_scan_github(directory, base_ref, head_ref):
"""
Scans files changed locally and includes detailed line changes for security issues.
"""
changed_files = get_changed_files_github(directory, base_ref, head_ref)
line_changes = get_line_changes_github(directory, base_ref, head_ref)
changes_summary = "Detailed Line Changes:\n" + line_changes + "\n\nChanged Files:\n"
for file_path in changed_files:
if file_path:
try:
with open(file_path, 'r') as f:
changes_summary += f"\nFile: {file_path}\n"
changes_summary += f.read()
except UnicodeDecodeError:
try:
with open(file_path, 'r', encoding='latin-1') as f:
changes_summary += f"\nFile: {file_path}\n"
changes_summary += f.read()
except Exception as e:
print(f"Error reading {file_path}: {e}")
else:
print("No changed files to scan.")
return
if changes_summary:
result = partial_sec_scan(changes_summary)
return result
    else:
        return "No changed files to scan."
def partial_scan(directory):
"""
Scans files changed locally and includes detailed line changes for security issues.
"""
changed_files = get_changed_files(directory)
if changed_files is None:
return "You haven't made any changes to test."
line_changes = get_line_changes(directory)
changes_summary = "Detailed Line Changes:\n" + line_changes + "\n\nChanged Files:\n"
for file_path in changed_files:
if file_path:
try:
with open(file_path, 'r') as f:
changes_summary += f"\nFile: {file_path}\n"
changes_summary += f.read()
except UnicodeDecodeError:
try:
with open(file_path, 'r', encoding='latin-1') as f:
changes_summary += f"\nFile: {file_path}\n"
changes_summary += f.read()
except Exception as e:
print(f"Error reading {file_path}: {e}")
else:
print("No changed files to scan.")
return
if changes_summary:
result = partial_sec_scan(changes_summary)
return result
else:
return "No changed files to scan."
def main():
"""
Main function to perform full or partial security scanning.
"""
if len(sys.argv) < 2:
print("Usage: python LAST.py.py <mode> [<directory>|<repo_name pr_number>]")
sys.exit(1)
mode = sys.argv[1]
if mode == 'full':
if len(sys.argv) < 3:
print("Usage for full scan: python LAST.py.py full <directory>")
sys.exit(1)
directory = sys.argv[2]
print(full_scan(directory))
elif mode == 'github':
if len(sys.argv) < 4:
print("Usage for partial scan: python LAST.py.py partial <repo_name> <pr_number>")
sys.exit(1)
repo_name = sys.argv[2]
pr_number = int(sys.argv[3])
github_token = os.environ.get('GITHUB_TOKEN')
print(github_scan(repo_name, pr_number, github_token))
elif mode == 'partial':
if len(sys.argv) < 3:
print("Usage for full scan: python LAST.py.py partial <directory>")
sys.exit(1)
directory = sys.argv[2]
print(partial_scan(directory))
elif mode == 'partial-github':
        if len(sys.argv) < 5:
            print("Usage for partial-github scan: python LAST.py partial-github <directory> <base_ref> <head_ref>")
sys.exit(1)
directory = sys.argv[2]
base_ref = sys.argv[3]
head_ref = sys.argv[4]
print(partial_scan_github(directory, base_ref, head_ref))
else:
print("Invalid mode. Use 'full' or 'partial'.")
sys.exit(1)
if __name__ == "__main__":
main()
| [
"You are an application security expert, skilled in explaining complex programming vulnerabilities with simplicity. You will receive changed code as part of a pull request, followed by the rest of the file. Your task is to review the code change for security vulnerabilities and suggest improvements. Pay attention to if the code is getting added or removed. Suggest specific code fixes where applicable.",
"You are an application security expert, skilled in explaining complex programming vulnerabilities with simplicity. You will receive the full code for an application. Your task is to review the code for security vulnerabilities and suggest improvements. Don't overly focus on one file, and instead provide the top security concerns based on what you think the entire application is doing."
] |
2024-01-10 | dhruvramani/rl-safety-imagination | safely_train.py | # Inspired from OpenAI Baselines. This uses the same design of having an easily
# substitutable generic policy that can be trained. This allows to easily
# substitute in the I2A policy as opposed to the basic CNN one.
import copy
import argparse
import statistics
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from safe_grid_gym.envs.gridworlds_env import GridworldEnv
#from i2a import I2aPolicy
from utils import SubprocVecEnv
from discretize_env import CONTROLS
from a2c import CnnPolicy, get_actor_critic
from trajectory import *
ENV_NAME = "side_effects_sokoban"
N_ENVS = 1
N_STEPS = 9
END_REWARD = 49
S_ALPHAS = [0.1, 0.3, 1.0, 2.0, 3.0, 10.0, 30.0, 100.0, 300.0]
s_alpha = 2.0 #S_ALPHAS[4]
DEBUG = False
# For early stopping
EARLY_STOPPING = False
REW_HIST = 3
EARLY_STOP_THRESH = 1.5
# Total number of iterations (taking into account number of environments and
# number of steps). You wish to train for.
TOTAL_TIMESTEPS = int(500)
GAMMA = 0.99
LOG_INTERVAL = 100
SAVE_INTERVAL = 100
# Where you want to save the weights
SAVE_PATH = 'safe_a2c_weights/{:.1f}'.format(s_alpha)
def discount_with_dones(rewards, dones, GAMMA):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + GAMMA * r * (1.-done)
discounted.append(r)
return discounted[::-1]
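# Worked example (added): with rewards [1, 0, 2], dones [False, False, True] and
# GAMMA = 0.99, the backward pass yields 2, then 0 + 0.99*2 = 1.98, then
# 1 + 0.99*1.98 = 2.9602, so the function returns [2.9602, 1.98, 2].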
def make_env():
def _thunk():
env = GridworldEnv(ENV_NAME)
return env
return _thunk
def a2c_safe_action(tree, action, base_state, actor_critic):
is_end = False
try :
next_node = tree.children[action[0]]
is_end = next_node.imagined_reward == END_REWARD
except AttributeError:
next_node = None
if(is_end == False and search_node(next_node, base_state) == False):
try:
action = safe_action(actor_critic, tree, base_state, action[0])
except:
pass
return action
def train(policy, save_name, s_alpha, load_count = 0, summarize=True, load_path=None, log_path = './logs', safety=True):
envs = make_env()() #for i in range(N_ENVS)]
#envs = SubprocVecEnv(envs)
with open("./unsafe_state_count_{}.txt".format(safety), "w+") as f:
pass
ob_space = envs.observation_space.shape
nc, nw, nh = ob_space
ac_space = envs.action_space
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
actor_critic = get_actor_critic(sess, N_ENVS, N_STEPS, ob_space,
ac_space, policy, summarize)
if load_path is not None:
actor_critic.load(load_path)
print('Loaded a2c')
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(log_path, graph=sess.graph)
sess.run(tf.global_variables_initializer())
batch_ob_shape = (N_ENVS * N_STEPS, nw, nh, nc)
dones = False #for _ in range(N_ENVS)]
nbatch = N_STEPS # * N_Envs
episode_rewards = np.zeros((1, ))
final_rewards = np.zeros((1, ))
last_rews = [0] * REW_HIST
# Safety part
obs = envs.reset()
ob_np = obs.reshape(nc, nw, nh)
base_state = copy.deepcopy(ob_np).reshape(nc, nw, nh)
base_state[np.where(base_state == 2.0)] = 1.0
print(base_state)
base_tree = generate_tree(sess, ob_np)
for update in tqdm(range(load_count + 1, TOTAL_TIMESTEPS + 1)):
# mb stands for mini batch
unsafe_state_count = 0
tree = copy.deepcopy(base_tree)
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
for n in range(N_STEPS):
ob_np = obs.reshape(nc, nw, nh)
unsafe_state = ob_np.reshape(nw, nh)
x, y = np.where(unsafe_state == 4.0)
if(x == 3 and y == 2):
unsafe_state_count += 1
if(update % LOG_INTERVAL == 0 and DEBUG == True):
print("-- State ---")
print(ob_np)
print("-- Imagined State --")
print(tree.imagined_state.reshape(nc, nw, nh))
ac_ob = ob_np.reshape(1, nw, nh, nc)
actions, values, _ = actor_critic.act(ac_ob)
if(safety):
actions = a2c_safe_action(tree, actions, base_state, actor_critic)
mb_obs.append(ob_np)
mb_actions.append(actions[0])
mb_values.append(values)
mb_dones.append(dones)
if(update % LOG_INTERVAL == 0 and DEBUG == True):
print("Action : ", CONTROLS[actions[0]], " - Safe :", bool(safe[0])," - Done : ", dones[0])
_ = input("")
obs, rewards, dones, _ = envs.step(actions[0])
ob_np = ob_np.reshape(nc, nw, nh)
tree = get_node(base_tree, ob_np)
#rewards = [rewards[i] - s_alpha * (1 - safe[i]) for i in range(len(rewards))]
episode_rewards += rewards
masks = 1 - int(dones)
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
mb_rewards.append(rewards)
with open("./unsafe_state_count_{}.txt".format(safety), "a+") as f:
f.write("{}\n".format(unsafe_state_count))
unsafe_state_count = 0
mb_dones.append(dones)
obs = envs.reset()
tree = copy.deepcopy(base_tree)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=np.float32).reshape(batch_ob_shape) #.swapaxes(1, 0).reshape(batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)#.swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int32)#.swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32)#.swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool)#.swapaxes(1, 0)
mb_masks = mb_dones[:-1]
mb_dones = mb_dones[1:]
ac_ob = ob_np.reshape(1, nw, nh, nc)
last_values = actor_critic.critique(ac_ob).tolist()
#discount/bootstrap off value fn
#for n, (rewards, value) in enumerate(zip(mb_rewards, last_values)):
rewards = mb_rewards.tolist()
d = mb_dones.tolist()
value = last_values
if d[-1] == 0:
rewards = discount_with_dones(rewards+value, d+[0], GAMMA)[:-1]
else:
rewards = discount_with_dones(rewards, d, GAMMA)
mb_rewards = np.array(rewards)
mb_rewards = mb_rewards.flatten()
mb_actions = mb_actions.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
if summarize:
loss, policy_loss, value_loss, policy_entropy, _, summary = actor_critic.train(mb_obs,
mb_rewards, mb_masks, mb_actions, mb_values, update,
summary_op)
writer.add_summary(summary, update)
else:
loss, policy_loss, value_loss, policy_entropy, _ = actor_critic.train(mb_obs,
mb_rewards, mb_masks, mb_actions, mb_values, update)
if update % LOG_INTERVAL == 0 or update == 1:
print('%i - %.1f => Policy Loss : %.4f, Value Loss : %.4f, Policy Entropy : %.4f, Final Reward : %.4f' % (update, s_alpha, policy_loss, value_loss, policy_entropy, final_rewards.mean()))
if(EARLY_STOPPING and update != 1 and abs(final_rewards.mean() - statistics.mean(last_rews)) < EARLY_STOP_THRESH):
print('Training done - Saving model')
actor_critic.save(SAVE_PATH, save_name + '_' + str(update) + '.ckpt')
with open("./logs_alpha.txt", "a+") as f:
f.write("{:.1f} - {:.4f}\n".format(s_alpha, max(last_rews)))
break
_ = last_rews.pop(0)
last_rews.append(final_rewards.mean())
if update % SAVE_INTERVAL == 0:
print('Saving model')
actor_critic.save(SAVE_PATH, save_name + '_' + str(update) + '.ckpt')
actor_critic.save(SAVE_PATH, save_name + '_done.ckpt')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--algo', help='Algorithm to train a2c (or something else in the future)')
args = parser.parse_args()
if args.algo == 'a2c':
policy = CnnPolicy
else:
raise ValueError('Must specify the algo name as either a2c or (something else in the future)')
#for s_alpha in S_ALPHAS:
#tf.reset_default_graph()
train(policy, args.algo + "{:.1f}".format(s_alpha), s_alpha=s_alpha, summarize=True, log_path="safe_" + args.algo + '_logs/'+ "{:.1f}".format(s_alpha))
| [] |
2024-01-10 | dhruvramani/rl-safety-imagination | onpolicy_train.py | # Inspired from OpenAI Baselines. This uses the same design of having an easily
# substitutable generic policy that can be trained. This allows to easily
# substitute in the I2A policy as opposed to the basic CNN one.
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import argparse
from safe_grid_gym.envs.gridworlds_env import GridworldEnv
#from i2a import I2aPolicy
from utils import SubprocVecEnv
from a2c import CnnPolicy, get_actor_critic
ENV_NAME = "side_effects_sokoban"
N_ENVS = 16
N_STEPS = 5
# Total number of iterations (taking into account number of environments and
# number of steps). You wish to train for.
TOTAL_TIMESTEPS = int(2e6)
GAMMA=0.99
LOG_INTERVAL=100
SAVE_INTERVAL = 100
# Where you want to save the weights
SAVE_PATH = 'weights'
def discount_with_dones(rewards, dones, GAMMA):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + GAMMA * r * (1.-done)
discounted.append(r)
return discounted[::-1]
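# Worked example (illustrative): with rewards=[1, 1], dones=[0, 0] and GAMMA=0.99,
# returns are accumulated back-to-front: r_1 = 1, then r_0 = 1 + 0.99 * 1 = 1.99,
# so the function returns [1.99, 1.0]. A done flag of 1 at a step cuts off the
# bootstrap from all later rewards.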
def make_env():
def _thunk():
env = GridworldEnv(ENV_NAME)
return env
return _thunk
def train(policy, save_name, load_count = 0, summarize=True, load_path=None, log_path = './logs'):
envs = [make_env() for i in range(N_ENVS)]
envs = SubprocVecEnv(envs)
ob_space = envs.observation_space.shape
nc, nw, nh = ob_space
ac_space = envs.action_space
obs = envs.reset()
ob_np = np.copy(obs)
ob_np = np.squeeze(ob_np, axis=1)
ob_np = np.expand_dims(ob_np, axis=3)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
actor_critic = get_actor_critic(sess, N_ENVS, N_STEPS, ob_space,
ac_space, policy, summarize)
if load_path is not None:
actor_critic.load(load_path)
print('Loaded a2c')
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(log_path, graph=sess.graph)
sess.run(tf.global_variables_initializer())
batch_ob_shape = (N_ENVS * N_STEPS, nw, nh, nc)
dones = [False for _ in range(N_ENVS)]
nbatch = N_ENVS * N_STEPS
episode_rewards = np.zeros((N_ENVS, ))
final_rewards = np.zeros((N_ENVS, ))
for update in tqdm(range(load_count + 1, TOTAL_TIMESTEPS + 1)):
# mb stands for mini batch
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
for n in range(N_STEPS):
ob_np = np.copy(obs)
ob_np = np.squeeze(ob_np, axis=1)
ob_np = np.expand_dims(ob_np, axis=3)
actions, values, _ = actor_critic.act(ob_np)
mb_obs.append(ob_np)
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(dones)
obs, rewards, dones, _ = envs.step(actions)
episode_rewards += rewards
masks = 1 - np.array(dones)
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
mb_rewards.append(rewards)
mb_dones.append(dones)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=np.float32).reshape(batch_ob_shape) #.swapaxes(1, 0).reshape(batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
        mb_dones = np.asarray(mb_dones, dtype=np.bool_).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
last_values = actor_critic.critique(ob_np).tolist()
#discount/bootstrap off value fn
for n, (rewards, d, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
d = d.tolist()
if d[-1] == 0:
rewards = discount_with_dones(rewards+[value], d+[0], GAMMA)[:-1]
else:
rewards = discount_with_dones(rewards, d, GAMMA)
mb_rewards[n] = rewards
mb_rewards = mb_rewards.flatten()
mb_actions = mb_actions.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
if summarize:
loss, policy_loss, value_loss, policy_entropy, _, summary = actor_critic.train(mb_obs,
mb_rewards, mb_masks, mb_actions, mb_values, update,
summary_op)
writer.add_summary(summary, update)
else:
loss, policy_loss, value_loss, policy_entropy, _ = actor_critic.train(mb_obs,
mb_rewards, mb_masks, mb_actions, mb_values, update)
if update % LOG_INTERVAL == 0 or update == 1:
print('%i => Policy Loss : %.4f, Value Loss : %.4f, Policy Entropy : %.4f, Final Reward : %.4f' % (update, policy_loss, value_loss, policy_entropy, final_rewards.mean()))
if update % SAVE_INTERVAL == 0:
print('Saving model')
actor_critic.save(SAVE_PATH, save_name + '_' + str(update) + '.ckpt')
actor_critic.save(SAVE_PATH, save_name + '_done.ckpt')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--algo', help='Algorithm to train a2c (or something else in the future)')
args = parser.parse_args()
if args.algo == 'a2c':
policy = CnnPolicy
else:
raise ValueError('Must specify the algo name as either a2c or (something else in the future)')
train(policy, args.algo, summarize=True, log_path=args.algo + '_logs')
| [] |
2024-01-10 | catherineruoxiwu/MindTech | evaluation~llm_eval.py | import json
import os
import matplotlib.pyplot as plt
import numpy as np
from openai import OpenAI
client = OpenAI()
directory_path = "./out"
exclude = ["gpt3.5-turbo.json"]
num_questions = 99
rubric_criteria = ["Establishing_Rapport", "Active_Listening", "Verbal_Communication", "Assessment_and_Goal_Setting", "Interventions_and_Techniques", "Boundaries_and_Ethics", "Cultural_Competence", "User_Experience"]
eval_rubric = """**Therapist Bot Conversation Evaluation Rubric**
1. Establishing_Rapport (1-5)
- Demonstrates an ability to establish trust and rapport with the user.
- Creates a safe and non-judgmental virtual environment.
- Uses welcoming and empathetic language.
2. Active_Listening (1-5)
- Demonstrates active listening skills through appropriate responses.
- Responds with empathy and validation.
- Asks clarifying questions and encourages the user to share more.
3. Verbal_Communication (1-5)
- Uses clear and concise language.
- Manages pacing and tone appropriately.
4. Assessment_and_Goal_Setting (1-5)
- Gathers user information effectively to understand their needs and goals.
- Collaboratively sets clear and achievable goals for the conversation.
- Provides information about the therapy process.
5. Interventions_and_Techniques (1-5)
- Applies appropriate therapeutic interventions based on the user's input.
- Adapts responses to the user's changing emotional states and needs.
- Provides useful coping strategies or resources when appropriate.
6. Boundaries_and_Ethics (1-5)
- Maintains appropriate boundaries and confidentiality.
- Demonstrates an understanding of and adherence to ethical guidelines for AI therapy.
- Addresses potential ethical issues, such as user privacy and data security.
7. Cultural_Competence (1-5)
- Shows cultural sensitivity and awareness in responses.
- Adapts responses to consider the user's cultural background or preferences.
- Addresses diversity-related issues respectfully.
8. User_Experience (1-5)
- Provides a user-friendly and engaging experience.
- Promotes a sense of comfort and ease during the conversation.
- Encourages the user to return for further interactions.
- Monitors user feedback and adjusts responses accordingly.
- Measures user satisfaction and perceived benefits.
- Demonstrates improvements in user well-being or mental health.
9. Overall Score (1-5)
- Average the scores from each category to determine the overall rating for the therapist bot's conversation.
"""
def evaluate_QA(qa_pairs):
"""
Args:
qa_pairs: list of objects {"model_name": ..., "question": ..., "answer": ...}
"""
num_models = len(qa_pairs)
role_prompt = f"""Please act as an impartial judge and evaluate the quality of the responses provided by {num_models} AI psychiatrist models to the client's complaint or questions. You will only be given the first few sentences of the conversation.
Your evaluation should be based solely on the consultation rubric provided at the end, titled "Therapist Bot Conversation Evaluation Rubric". You cannot solely judge the quality based on "whether or not more advice or suggestions are given". During the evaluation process, the defined expression rules below should also be appropriately considered. For each of the {num_models} question-answer pairs, produce separate evaluation rubric results. After evaluating the {num_models} models, decide which AI psychiatrist model is the best model.
Your response should be in JSON format. The output JSON format should be:
{{"rubric_results": {{INSERT_MODEL_NAME_HERE: INSERT_RUBRIC_RESULT_HERE, ...}},
"best_model_idx": INSERT_INDEX_OF_BEST_MODEL,
"best_model_name": INSERT_INDEX_OF_BEST_MODEL }}
The keys in the rubric result should be {str(rubric_criteria)}.
{eval_rubric}
"""
eval_prompt = ""
for i, qa_pair in enumerate(qa_pairs):
model_name, question, answer = qa_pair["model_name"], qa_pair["question"], qa_pair["answer"]
eval_prompt += f"AI psychiatrist model #{i}:\n\tModel name: {model_name}\n\tQuestion: {question}\n\tAnswer: {answer}\n\n"
completion = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[
{"role": "system", "content": role_prompt},
{"role": "user", "content": eval_prompt}
],
response_format={"type": "json_object"}
)
return json.loads(completion.choices[0].message.content)
def evaluate_models(num_questions, model_results):
"""
Args:
model_results: list of {"model_name": ...,
"1": {"question": ..., "answer": ...},
"2": {"question": ..., "answer": ...}, ...}
"""
eval_stat = {}
model_names = [model["model_name"] for model in model_results]
for name in model_names:
eval_stat[name] = {
"num_best": 0,
}
for criterion in rubric_criteria:
eval_stat[name][criterion] = 0
for i in range(1, num_questions + 1):
qa_pairs = []
for model in model_results:
pair = model[str(i)]
pair["model_name"] = model["model_name"]
qa_pairs.append(pair)
eval_res = evaluate_QA(qa_pairs)
print(json.dumps(eval_res, indent=4))
best_model = eval_res["best_model_name"]
if best_model in model_names:
eval_stat[eval_res["best_model_name"]]["num_best"] += 1
for model_name, res in eval_res["rubric_results"].items():
for criterion in rubric_criteria:
eval_stat[model_name][criterion] += res[criterion]
# Compute the average evaluation scores
for name in model_names:
for criterion in rubric_criteria:
eval_stat[name][criterion] /= num_questions
print(json.dumps(eval_stat, indent=4))
return eval_stat
def plot_evaluation_chart(eval_stat):
"""
Takes a dictionary of evaluation statistics and plots a bar chart.
Args:
eval_stat (dict): A dictionary containing evaluation statistics for various models.
Returns:
None: This function plots a bar chart.
"""
# Extracting categories and scores for each model
categories = list(eval_stat[next(iter(eval_stat))].keys())[1:] # Excluding 'num_best'
models = list(eval_stat.keys())
scores = {model: [eval_stat[model][cat] for cat in categories] for model in models}
# Number of categories
n_categories = len(categories)
# X locations for the groups
ind = np.arange(n_categories)
width = 0.25 # the width of the bars
# Plotting
fig, ax = plt.subplots(figsize=(12, 8))
# Bar charts
for i, model in enumerate(models):
ax.bar(ind + i*width, scores[model], width, label=model)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Scores')
ax.set_title('Comparison of Scores by Category and Model')
ax.set_xticks(ind + width)
ax.set_xticklabels(categories, rotation=45, ha="right")
ax.legend()
# Show the plot
plt.tight_layout()
plt.savefig('eval_result.png')
def parse_input_files(exclude):
model_results = []
idx_name = []
for filename in os.listdir(directory_path):
if filename.endswith(".json") and filename not in exclude:
model_name = os.path.splitext(os.path.basename(filename))[0]
file_path = os.path.join(directory_path, filename)
with open(file_path, "r") as file:
model_result = json.load(file)
model_result["model_name"] = model_name
idx_name.append(model_name)
model_results.append(model_result)
return idx_name, model_results
if __name__ == "__main__":
idx_name, model_results = parse_input_files(exclude)
eval_stat = evaluate_models(num_questions, model_results)
plot_evaluation_chart(eval_stat)
| [
"Please act as an impartial judge and evaluate the quality of the responses provided by PLACEHOLDER AI psychiatrist models to the client's complaint or questions. You will only be given the first few sentences of the conversation.\n Your evaluation should be based solely on the consultation rubric provided at the end, titled \"Therapist Bot Conversation Evaluation Rubric\". You cannot solely judge the quality based on \"whether or not more advice or suggestions are given\". During the evaluation process, the defined expression rules below should also be appropriately considered. For each of the PLACEHOLDER question-answer pairs, produce separate evaluation rubric results. After evaluating the PLACEHOLDER models, decide which AI psychiatrist model is the best model.\n\n Your response should be in JSON format. The output JSON format should be:\n {\"rubric_results\": {INSERT_MODEL_NAME_HERE: INSERT_RUBRIC_RESULT_HERE, ...},\n \"best_model_idx\": INSERT_INDEX_OF_BEST_MODEL,\n \"best_model_name\": INSERT_INDEX_OF_BEST_MODEL }\n The keys in the rubric result should be PLACEHOLDER.\n \n PLACEHOLDER\n ",
"AI psychiatrist model #PLACEHOLDER:\n\tModel name: PLACEHOLDER\n\tQuestion: PLACEHOLDER\n\tAnswer: PLACEHOLDER\n\n"
] |
2024-01-10 | twerkinghamtori/kb_ai_challenge | project~data~data_generator.py | # Library imports
import pandas as pd
import numpy as np
import openai
from tqdm import tqdm
import json
import csv
################################# Fill these in before running ####################################
openai.api_key = "<API KEY>"
csv_file_path = '<loaninfo_data_path>'
######################################################################################
# Load the data
info_df = pd.read_csv(csv_file_path, encoding='cp949')
# Returns True when the given cell of the df is a missing value (NaN)
def isnan(number,column):
is_nan = pd.isna(info_df.loc[number, column])
return is_nan
# Extract the required fields
result_tray={}
for index in range(len(info_df)):
a,b,c,d,e,f,g,h,i = "","","","","","","","",""
if isnan(index, '대출상품내용') ==False:
a='대출상품내용: '+info_df.loc[index]['대출상품내용']
if isnan(index, '최대금액') ==False:
b='최대금액: '+str(info_df.loc[index]['최대금액'])
if isnan(index, '대상') ==False:
c='대상: '+info_df.loc[index]['대상']
if isnan(index, '상품설명') ==False:
d= '상품설명: '+info_df.loc[index]['상품설명']
if isnan(index, '대상상세') ==False:
e= '대상상세: '+str(info_df.loc[index]['대상상세'])
if isnan(index, '금리') ==False:
f= '금리: '+str(info_df.loc[index]['금리'])
if isnan(index, '대출한도') ==False:
g= '대출한도: '+str(info_df.loc[index]['대출한도'])
if isnan(index, '상환방법') ==False:
h= '상환방법: '+str(info_df.loc[index]['상환방법'])
if isnan(index, '상환방법상세') ==False:
i= '상환방법상세: '+str(info_df.loc[index]['상환방법상세'])
result = ""
result += a + "\n" if a else ""
result += b + "\n" if b else ""
result += c + "\n" if c else ""
result += d + "\n" if d else ""
result += e + "\n" if e else ""
result += f + "\n" if f else ""
result += g + "\n" if g else ""
result += h + "\n" if h else ""
result += i + "\n" if i else ""
result_tray[index]=result
# Generate prompts using ChatGPT
for index in tqdm(range(len(result_tray))):
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "주어지는 대출 상품을 이용할 것 같은 사람들의 특징을 5개의 예시를 들어줘.앞뒤 불필요한 말 붙이지 말고 답만 내놔."},
{"role": "user", "content": result_tray[index]}
]
)
result = completion.choices[0].message
json_file_path = "C:\\Users\\mhkim\\Desktop\\coding\\samples.json"
with open(json_file_path, "r") as json_file:
data = json.load(json_file) # JSON 파일 내용 읽기
data[index] = result["content"]
with open(json_file_path, "w", encoding="utf-8") as json_file:
json.dump(data, json_file, ensure_ascii=False, indent=4)
except:
print(f"{index} error")
pass
| [
"주어지는 대출 상품을 이용할 것 같은 사람들의 특징을 5개의 예시를 들어줘.앞뒤 불필요한 말 붙이지 말고 답만 내놔."
] |
2024-01-10 | twerkinghamtori/kb_ai_challenge | project~backend~application.py | from transformers import BertTokenizer, BertModel
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import openai  # import the openai module
import sys
from flask import Flask, render_template, request
from project.model import main
application = Flask(__name__)
################################# Fill these in before running ####################################
openai.api_key = "<API KEY>"
# Main page
@application.route("/")
def main_page():
return render_template("main_page.html")
# Information input page
@application.route("/info")
def info():
return render_template("info.html")
# Report (results) page
@application.route("/report", methods=['POST', 'GET'])
def report():
if request.method == 'POST':
data = request.form
        name = data.get('name')  # name
        age = data.get('age')  # age
        family = data.getlist('family')  # family members (dependents)
family_members = ', '.join(family)
        job = data.get('job')  # occupation
        loan = data.get('loan')  # whether there is an existing loan
loan_status = "없" if loan == "n" else "있"
query_sentence1 = f"나이는 {age}세인 {job}인이고 가족 구성원은 {family_members}인"
query_sentence2 = f"기존대출내역은 {loan_status}고, "
if loan == "y":
            loan_name = data.get('loan_name')  # loan product name
            loan_time = data.get('loan_time')  # remaining repayment period (months)
            loan_rate = data.get('loan_rate')  # current interest rate (%)
query_sentence2 += f" 상품명은 {loan_name}, 상환 잔여 기간은 {loan_time}개월, 현재 이자율은 {loan_rate}%인"
        asset = data.get('asset')  # asset size
        others = data.get('others')  # other notes
query_sentence3 = f"자산규모는 {asset}원이고, 기타 특이사항은 {others}인 {job}인"
job_query = f"{job}인"
        # Run the AI model function
results, titles = main.generate_report(query_sentence1, query_sentence2, query_sentence3)
        # Generate news article recommendations
news_title, news_link = main.generate_newslist(job_query)
return render_template("report.html", name=name, title=titles, result=results, news_titles=news_title, news_links=news_link)
return render_template('report.html', name=name, title=titles, result=results, news_titles=news_title, news_links=news_link)
if __name__ == "__main__" :
application.run(host="0.0.0.0", port=9900)
| [] |
2024-01-10 | twerkinghamtori/kb_ai_challenge | project~data~data_preprocessor.py | # Library imports
import pandas as pd
import numpy as np
import openai
from tqdm import tqdm
import json
import csv
################################# Fill these in before running ####################################
json_file_path = "<sample.json_path>"
csv_file = "<feature_data_path>"
######################################################################################
# Load the JSON file
with open(json_file_path, "r", encoding="utf-8") as json_file:
data = json.load(json_file)
# Preprocess the generated data
frame = {}
for index in range(len(data)):
tray=[]
a=data[f"{index}"].split("2.")
b=a[-1].split("3.")
c=b[-1].split("4.")
d=c[-1].split("5.")
tray.append(a[0][3:].replace("\n",""))
tray.append(b[0].replace("\n",""))
tray.append(c[0].replace("\n",""))
tray.append(d[0].replace("\n",""))
tray.append(d[-1].replace("\n",""))
for sample in tray:
frame[sample]=index
# Save the preprocessed data
with open(csv_file, mode="w", newline="", encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(['대상', '상품'])
for category, value in frame.items():
writer.writerow([category, value])
| [] |
2024-01-10 | developers221/dddddd | ix~chains~fixture_src~text_splitter.py | from langchain.text_splitter import TextSplitter, RecursiveCharacterTextSplitter
from ix.api.components.types import NodeTypeField
from ix.chains.fixture_src.parsers import LANGUAGE
from ix.chains.fixture_src.targets import DOCUMENT_LOADER_TARGET
RECURSIVE_CHARACTER_SPLITTER_CLASS_PATH = (
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_language"
)
RECURSIVE_CHARACTER_SPLITTER = {
"class_path": RECURSIVE_CHARACTER_SPLITTER_CLASS_PATH,
"type": "text_splitter",
"name": "RecursiveCharacterTextSplitter",
"description": RecursiveCharacterTextSplitter.__doc__,
"fields": [LANGUAGE]
+ NodeTypeField.get_fields(
TextSplitter.__init__,
include=[
"chunk_size",
"chunk_overlap",
"keep_separator",
"add_start_index",
],
),
"connectors": [DOCUMENT_LOADER_TARGET],
}
TEXT_SPLITTERS = [RECURSIVE_CHARACTER_SPLITTER]
__all__ = ["TEXT_SPLITTERS", "RECURSIVE_CHARACTER_SPLITTER_CLASS_PATH"]
| [] |
2024-01-10 | developers221/dddddd | ix~chains~artifacts.py | import asyncio
import logging
import json
from typing import Dict, List, Optional
from uuid import uuid4
from jsonpath_ng import parse as jsonpath_parse
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun
from ix.chains.callbacks import IxHandler
from langchain.chains.base import Chain
from ix.commands.filesystem import write_to_file, awrite_to_file
from ix.task_log.models import Artifact, TaskLogMessage
logger = logging.getLogger(__name__)
class SaveArtifact(Chain):
"""
Save an artifact to the database.
This chain is used to save an artifact to the database. It can be used to
save an artifact that was generated by a prior step in the chain, or to
save an artifact from an arbitrary object type.
To save an artifact that was generated by a prior step in the chain, set
the `artifact_from_key` to the key of the artifact in the input.
To save an artifact from an arbitrary object type, set the `artifact_key`
`artifact_storage` is always set from the config for now. The artifact storage
is used to determine how the artifact is stored. For example, if the storage
is set to `write_to_file`, the artifact will be stored in the filesystem.
`artifact_type` is used to determine how the artifact may be used and displayed.
This property must be set in the config.
"""
# indicates artifact is available in the input
artifact_from_key: str = None
# values to use to create an artifact
artifact_key: str = None
artifact_type: str = None
artifact_name: str = None
artifact_description: str = None
artifact_storage: str = None
artifact_storage_id: str = None
artifact_storage_id_key: str = None
# intput / output mapping
content_key: str = "content"
content_path: str = None
output_key: str = "artifact_id"
@property
def _chain_type(self) -> str:
return "ix.save_artifact" # pragma: no cover
@property
def input_keys(self) -> List[str]:
keys = []
if self.artifact_from_key:
keys.append(self.artifact_from_key)
return keys
@property
def output_keys(self) -> List[str]:
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
ix_handler = IxHandler.from_manager(run_manager)
if self.artifact_from_key:
# load artifact from input key. Use this when a prior step
# generated the artifact object
jsonpath_expr = jsonpath_parse(self.artifact_from_key)
json_matches = jsonpath_expr.find(inputs)
if len(json_matches) == 0:
raise ValueError(
f"SaveArtifact could not find input at {self.artifact_from_key} "
f"searched: {inputs}"
)
artifact = json_matches[0].value.copy()
else:
# generating an artifact using only the config
# use this when the artifact is generated in this step
artifact = {
"key": self.artifact_key,
"name": self.artifact_name,
"description": self.artifact_description,
"identifier": f"{self.artifact_key}_{ix_handler.parent_think_msg.id}",
}
# Storage is always set from the config for now
storage_id = None
if self.artifact_storage:
storage_id_key = self.artifact_storage_id_key or "identifier"
if not self.artifact_storage_id and storage_id_key not in artifact:
raise ValueError(
f"SaveArtifact requires artifact_storage_id or artifact.{storage_id_key} "
f"when artifact_storage is set.\n"
f"\n"
f"artifact={artifact}"
)
storage_id = self.artifact_storage_id or artifact[storage_id_key]
artifact["storage"] = {
"type": self.artifact_storage,
"id": storage_id,
}
if self.artifact_type:
artifact["artifact_type"] = self.artifact_type
# extract content from input
# default path to the content key.
jsonpath_input = self.content_path or self.content_key
jsonpath_expr = jsonpath_parse(jsonpath_input)
json_matches = jsonpath_expr.find(inputs)
if len(json_matches) == 0:
raise ValueError(
f"SaveArtifact could not find input at {jsonpath_input} for {inputs}"
)
content = json_matches[0].value
# Associate the artifact with the parent task (chat) until
# frontend API call can include artifacts from any descendant
# of the Chat's task.
task = ix_handler.task
artifact_task_id = task.parent_id if task.parent_id else task.id
# build kwargs
try:
artifact_kwargs = dict(
key=artifact.get("key", None) or storage_id,
name=artifact.get("name", None) or storage_id,
description=artifact["description"],
artifact_type=artifact["artifact_type"],
storage=artifact["storage"],
)
except KeyError as e:
raise ValueError(f"SaveArtifact missing required key {e} for {artifact}")
# save to artifact storage
artifact = Artifact.objects.create(
task_id=artifact_task_id,
**artifact_kwargs,
)
ix_handler = IxHandler.from_manager(run_manager)
# send message to log
TaskLogMessage.objects.create(
role="ASSISTANT",
task=ix_handler.task,
agent=ix_handler.agent,
parent=ix_handler.parent_think_msg,
content={
"type": "ARTIFACT",
"artifact_type": artifact.artifact_type,
"artifact_id": str(artifact.id),
"storage": artifact.storage,
"description": artifact.description,
# TODO: store on message until frontend has subscription to artifacts
"data": content,
},
)
# write to storage (i.e. file, database, or a cache)
if self.artifact_storage == "write_to_file":
file_path = artifact.storage["id"]
logger.debug(f"writing content to file file_path={file_path} {content}")
if not isinstance(content, str):
content = json.dumps(content)
write_to_file(file_path, content)
return {self.output_key: str(artifact.id)}
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
ix_handler = IxHandler.from_manager(run_manager)
if self.artifact_from_key:
# load artifact from input key. Use this when a prior step
# generated the artifact object
jsonpath_expr = jsonpath_parse(self.artifact_from_key)
json_matches = jsonpath_expr.find(inputs)
if len(json_matches) == 0:
raise ValueError(
f"SaveArtifact could not find input at {self.artifact_from_key} "
f"searched: {inputs}"
)
artifact = json_matches[0].value.copy()
else:
# generating an artifact using only the config
# use this when the artifact is generated in this step
artifact = {
"key": self.artifact_key,
"name": self.artifact_name,
"description": self.artifact_description,
"identifier": f"{self.artifact_key}_{ix_handler.parent_think_msg.id}",
}
# Storage is always set from the config for now
storage_id = None
if self.artifact_storage:
storage_id_key = self.artifact_storage_id_key or "identifier"
if not self.artifact_storage_id and storage_id_key not in artifact:
raise ValueError(
f"SaveArtifact requires artifact_storage_id or artifact.{storage_id_key} "
f"when artifact_storage is set.\n"
f"\n"
f"artifact={artifact}"
)
storage_id = self.artifact_storage_id or artifact[storage_id_key]
artifact["storage"] = {
"type": self.artifact_storage,
"id": storage_id,
}
if self.artifact_type:
artifact["artifact_type"] = self.artifact_type
# extract content from input
# default path to the content key.
jsonpath_input = self.content_path or self.content_key
jsonpath_expr = jsonpath_parse(jsonpath_input)
json_matches = jsonpath_expr.find(inputs)
if len(json_matches) == 0:
raise ValueError(
f"SaveArtifact could not find input at {jsonpath_input} for {inputs}"
)
content = json_matches[0].value
# Associate the artifact with the parent task (chat) until
# frontend API call can include artifacts from any descendant
# of the Chat's task.
task = ix_handler.task
artifact_task_id = task.parent_id if task.parent_id else task.id
# build kwargs
try:
artifact = Artifact(
id=uuid4(),
task_id=artifact_task_id,
key=artifact.get("key", None) or storage_id,
name=artifact.get("name", None) or storage_id,
description=artifact["description"],
artifact_type=artifact["artifact_type"],
storage=artifact["storage"],
)
except KeyError as e:
raise ValueError(f"SaveArtifact missing required key {e} for {artifact}")
# save to artifact storage
# send message to log
save_artifact = artifact.asave()
msg = TaskLogMessage.objects.acreate(
role="ASSISTANT",
task=ix_handler.task,
agent=ix_handler.agent,
parent=ix_handler.parent_think_msg,
content={
"type": "ARTIFACT",
"artifact_type": artifact.artifact_type,
"artifact_id": str(artifact.id),
"storage": artifact.storage,
"description": artifact.description,
# TODO: store on message until frontend has subscription to artifacts
"data": content,
},
)
tasks = [save_artifact, msg]
# write to storage (i.e. file, database, or a cache)
if self.artifact_storage == "write_to_file":
file_path = artifact.storage["id"]
logger.debug(f"writing content to file file_path={file_path} {content}")
if not isinstance(content, str):
content = json.dumps(content)
write_file = awrite_to_file(file_path, content)
tasks.append(write_file)
await asyncio.gather(*tasks)
return {self.output_key: str(artifact.id)}
| [] |
2024-01-10 | developers221/dddddd | ix~chains~tests~components~test_vectorstores.py | import pytest
from langchain.vectorstores import Chroma, VectorStore
from ix.chains.fixture_src.document_loaders import GENERIC_LOADER_CLASS_PATH
from ix.chains.fixture_src.text_splitter import RECURSIVE_CHARACTER_SPLITTER_CLASS_PATH
from ix.chains.fixture_src.vectorstores import (
CHROMA_CLASS_PATH,
)
from ix.chains.tests.test_config_loader import (
EMBEDDINGS,
TEXT_SPLITTER,
LANGUAGE_PARSER,
)
DOCUMENT_LOADER_EMPTY = {
"class_path": GENERIC_LOADER_CLASS_PATH,
"config": {
"parser": LANGUAGE_PARSER,
"path": "/var/doesnotexist",
"suffixes": [".does.not.exist"],
"glob": "doesnotexist",
},
}
TEXT_SPLITTER_EMPTY = {
"class_path": RECURSIVE_CHARACTER_SPLITTER_CLASS_PATH,
"config": {"language": "python", "document_loader": DOCUMENT_LOADER_EMPTY},
}
TEST_TEXTS = [
"def foo1():\n print('hello world foo1')",
"def foo2():\n print('hello world foo2')",
"def bar3():\n print('hello world bar3')",
"def bar4():\n print('hello world bar4')",
"def bar5():\n print('hello world bar5')",
]
TEXT_KWARGS = {
"texts": TEST_TEXTS,
"ids": ["foo1", "foo2", "bar3", "bar4", "bar5"],
"metadatas": [{"foo": "bar"}] * len(TEST_TEXTS),
}
class VectorStoreTestMixin:
"""Test loading retrieval components.
This is a test of loading mechanism for the various retrieval components.
It is not an exhaustive test that all retrieval components work as expected.
The tests verify that any special loading logic for the components is working.
"""
CLASS = None
CONFIG = None
CONFIG_WITH_DOCUMENTS = None
CONFIG_WITH_EMPTY_DOCUMENTS = None
async def test_load_vectorstore(self, aload_chain, mock_openai_embeddings):
vectorstore: VectorStore = await aload_chain(self.CONFIG)
assert isinstance(vectorstore, self.CLASS)
ids = await vectorstore.aadd_texts(**TEXT_KWARGS)
results = await vectorstore.asearch("foo", "similarity")
assert len(results) == 4
assert results[0].metadata["foo"] == "bar"
vectorstore.delete(ids)
vectorstore.delete_collection()
async def test_load_vectorstore_with_document_source(
self, mock_import_class, aload_chain, mock_openai_embeddings
):
vectorstore: VectorStore = await aload_chain(self.CONFIG_WITH_DOCUMENTS)
assert isinstance(vectorstore, self.CLASS)
ids = await vectorstore.aadd_texts(**TEXT_KWARGS)
results = await vectorstore.asearch("foo", "similarity")
assert len(results) == 4
vectorstore.delete(ids)
vectorstore.delete_collection()
async def test_load_vectorstore_with_empty_document_source(
self, aload_chain, mock_openai_embeddings
):
vectorstore: VectorStore = await aload_chain(self.CONFIG_WITH_EMPTY_DOCUMENTS)
assert isinstance(vectorstore, self.CLASS)
ids = await vectorstore.aadd_texts(**TEXT_KWARGS)
results = await vectorstore.asearch("foo", "similarity")
assert len(results) == 4
assert results[0].metadata["foo"] == "bar"
vectorstore.delete(ids)
vectorstore.delete_collection()
CHROMA_VECTORSTORE_WITH_EMPTY_DOCUMENTS = {
"class_path": CHROMA_CLASS_PATH,
"config": {
"embedding": EMBEDDINGS,
"documents": TEXT_SPLITTER_EMPTY,
"collection_name": "tests",
},
}
CHROMA_VECTORSTORE_WITH_DOCUMENTS = {
"class_path": CHROMA_CLASS_PATH,
"config": {
"embedding": EMBEDDINGS,
"documents": TEXT_SPLITTER,
"collection_name": "tests",
},
}
CHROMA_VECTORSTORE = {
"class_path": CHROMA_CLASS_PATH,
"config": {
"embedding": EMBEDDINGS,
"collection_name": "tests",
},
}
@pytest.mark.django_db
class TestChroma(VectorStoreTestMixin):
"""Test Chroma vectorstore component."""
CLASS = Chroma
CONFIG = CHROMA_VECTORSTORE
CONFIG_WITH_DOCUMENTS = CHROMA_VECTORSTORE_WITH_DOCUMENTS
CONFIG_WITH_EMPTY_DOCUMENTS = CHROMA_VECTORSTORE_WITH_EMPTY_DOCUMENTS
| [] |
2024-01-10 | developers221/dddddd | ix~chains~loaders~core.py | import itertools
import logging
import time
from typing import Callable, Any, List
from ix.chains.loaders.context import IxContext
from langchain.chains import SequentialChain
from langchain.chains.base import Chain as LangchainChain
from ix.chains.loaders.prompts import load_prompt
from ix.chains.models import NodeType, ChainNode, ChainEdge
from ix.utils.importlib import import_class
import_node_class = import_class
logger = logging.getLogger(__name__)
def get_node_loader(name: str) -> Callable:
"""
Get a node config loader by node type.
Used to manipulate a nodes config before loading it
"""
from ix.chains.loaders.memory import load_memory_config
from ix.chains.loaders.memory import load_chat_memory_backend_config
return {
"memory": load_memory_config,
"memory_backend": load_chat_memory_backend_config,
"prompt": load_prompt,
}.get(name, None)
def get_property_loader(name: str) -> Callable:
"""Get a property loader.
Used to customize loading a property by key.
"""
from ix.chains.loaders.memory import load_memory_property
from ix.chains.loaders.retriever import load_retriever_property
return {
"memory": load_memory_property,
"retriever": load_retriever_property,
}.get(name, None)
def get_node_initializer(node_type: str) -> Callable:
"""Get a node initializer
Fetches a custom initializer to be used instead of the class initializer.
Used to add shims around specific types of nodes.
"""
from ix.chains.loaders.text_splitter import initialize_text_splitter
from ix.chains.loaders.vectorstore import initialize_vectorstore
return {
"text_splitter": initialize_text_splitter,
"vectorstore": initialize_vectorstore,
}.get(node_type, None)
def get_sequence_inputs(sequence: List[LangchainChain]) -> List[str]:
"""Aggregate all inputs for a list of chains"""
input_variables = set()
output_variables = set()
for sequence_chain in sequence:
# Intermediate outputs are excluded from input_variables.
# Filter out any inputs that are already in the output variables
filtered_inputs = set(sequence_chain.input_keys) - output_variables
input_variables.update(filtered_inputs)
output_variables.update(sequence_chain.output_keys)
return list(input_variables)
def load_node(node: ChainNode, context: IxContext, root=True) -> Any:
"""
Generic loader for loading the Langchain component a ChainNode represents.
This loader will load the component and its config, and then recursively
load any properties that are attached to the node. The loader also handles
recursively loading any child nodes that are attached to the node.
"""
logger.debug(f"Loading chain for name={node.name} class_path={node.class_path}")
start_time = time.time()
node_type: NodeType = node.node_type
config = node.config.copy() if node.config else {}
# resolve secrets and settings
# TODO: implement resolve secrets from vault and settings from vocabulary
# neither of these subsystems are implemented yet. For now load all
# values as text from config dict
# load type specific config options. This is generally for loading
# ix specific features into the config dict
if node_loader := get_node_loader(node_type.type):
logger.debug(
f"Loading config with node config loader for type={node_type.type}"
)
config = node_loader(node, context)
# prepare properties for loading. Properties should be grouped by key.
properties = node.incoming_edges.filter(relation="PROP").order_by("key")
for group in itertools.groupby(properties, lambda x: x.key):
key, edges = group
node_group = [edge.source for edge in edges]
logger.debug(f"Loading property key={key} node_group={node_group}")
# choose the type the incoming connection is processed as. If the source node
# will be converted to another type, use the as_type defined on the connection
# this allows a single property loader to encapsulate any necessary conversions.
# e.g. retriever converting Vectorstore.
connector = node_type.connectors_as_dict[key]
as_type = connector.get("as_type", None) or node_group[0].node_type.type
if node_group[0].node_type.type in {"chain", "agent"}:
# load a sequence of linked nodes into a children property
# this supports loading as a list of chains or auto-SequentialChain
first_instance = load_node(node_group[0], context, root=False)
sequence = load_sequence(node_group[0], first_instance, context)
if connector.get("auto_sequence", True):
input_variables = get_sequence_inputs(sequence)
config[key] = SequentialChain(
chains=sequence, input_variables=input_variables
)
else:
config[key] = sequence
elif property_loader := get_property_loader(as_type):
# load type specific config options. This is generally for loading
# ix specific features into the config dict
logger.debug(f"Loading with property loader for type={node_type.type}")
config[key] = property_loader(node_group, context)
else:
# default recursive loading
if connector.get("multiple", False):
config[key] = [
prop_node.load(context, root=False) for prop_node in node_group
]
else:
if len(node_group) > 1:
raise ValueError(f"Multiple values for {key} not allowed")
config[key] = load_node(node_group[0], context, root=False)
# load component class and initialize. A type specific initializer may be used here
# for initialization common to all components of that type.
node_class = import_node_class(node.class_path)
node_initializer = get_node_initializer(node_type.type)
try:
if node_initializer:
instance = node_initializer(node.class_path, config)
else:
instance = node_class(**config)
except Exception:
logger.error(f"Exception loading node class={node.class_path}")
raise
logger.debug(f"Loaded node class={node.class_path} in {time.time() - start_time}s")
if node_type.type in {"chain"} and root:
# Linked chains but no parent indicates the possible first node in an
# implicit SequentialChain. Traverse the sequence and create a
# SequentialChain if there is more than one node in the sequence.
sequential_nodes = load_sequence(node, instance, context)
if len(sequential_nodes) > 1:
input_variables = get_sequence_inputs(sequential_nodes)
return SequentialChain(
chains=sequential_nodes, input_variables=input_variables
)
return instance
def load_sequence(
first_node: ChainNode,
first_instance: LangchainChain,
context: IxContext,
) -> List[LangchainChain]:
"""
Load a sequence of nodes.
"""
sequential_nodes = [first_instance]
# handle linked nodes
# for now only a single outgoing link is supported
outgoing_link = None
try:
outgoing_link = first_node.outgoing_edges.select_related("target").get(
relation="LINK"
)
except ChainEdge.DoesNotExist:
pass
# traverse the sequence
while outgoing_link:
next_instance = outgoing_link.target.load(context, root=False)
sequential_nodes.append(next_instance)
try:
outgoing_link = outgoing_link.target.outgoing_edges.select_related(
"target"
).get(relation="LINK")
except ChainEdge.DoesNotExist:
outgoing_link = None
return sequential_nodes
| [] |
2024-01-10 | developers221/dddddd | ix~chains~tests~mock_chain.py | import logging
from typing import Dict, List, Any
from langchain.chains.base import Chain
from ix.agents.callback_manager import IxCallbackManager
logger = logging.getLogger(__name__)
def mock_chain_func(inputs: Dict[str, str]) -> Dict[str, str]:
"""Mock chain function that just returns the input"""
chain_input = inputs["mock_chain_input"]
result = {"mock_chain_output": chain_input}
logger.debug(f"MockChain.call input={chain_input}")
logger.debug(f"MockChain.call result={result}")
return result
MOCK_CHAIN_CONFIG = {
"name": "mock_chain",
"description": "mock chain for testing",
"class_path": "ix.chains.tests.mock_chain.MockChain",
"config": {},
}
class MockChain(Chain):
"""
Mock chain for testing. It just returns the input as output
"""
@property
def _chain_type(self) -> str:
return "ix_test"
@property
def input_keys(self) -> List[str]:
return ["mock_chain_input"]
@property
def output_keys(self) -> List[str]:
return ["mock_chain_output"]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
return mock_chain_func(inputs)
async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
return mock_chain_func(inputs)
@classmethod
def from_config(
cls, config: Dict[str, Any], callback_manager: IxCallbackManager
) -> "MockChain":
chain = MockChain(**config)
chain.callbacks = callback_manager
return chain
| [] |
2024-01-10 | developers221/dddddd | ix~tools~metaphor.py | import os
from typing import Any, Optional, Literal
from asgiref.sync import sync_to_async
from langchain.tools import Tool
from metaphor_python import Metaphor
from ix.chains.loaders.tools import extract_tool_kwargs
METAPHOR_METHODS = Literal["search", "get_contents", "find_similar"]
def get_metaphor_tool(
method: METAPHOR_METHODS,
description: str,
metaphor_api_key: Optional[str],
**kwargs: Any,
) -> Tool:
"""Common logic for initializing a Metaphor client and returning a Tool object that can be used to run queries."""
tool_kwargs = extract_tool_kwargs(kwargs)
api_key = metaphor_api_key or os.environ.get("METAPHOR_API_KEY", None)
assert (
api_key is not None
), "Metaphor API key must be provided as metaphor_api_key or METAPHOR_API_KEY env var"
client = Metaphor(api_key=api_key)
func = getattr(client, method)
return Tool(
name=f"metaphor_{method}",
description=description,
func=func,
coroutine=sync_to_async(func),
**tool_kwargs,
)
def get_metaphor_search(**kwargs: Any) -> Tool:
"""Initialize a Tool that wraps `Metaphor.client.search`"""
description = (
"Metaphor search engine searches the web for pages that match a given query."
)
return get_metaphor_tool("search", description, **kwargs)
def get_metaphor_contents(**kwargs: Any) -> Tool:
"""Initialize a Tool that wraps `Metaphor.client.get_contents`"""
description = "Get the contents of pages. Pages are identified by ids returned by the search endpoint."
return get_metaphor_tool("get_contents", description, **kwargs)
def get_metaphor_find_similar(**kwargs: Any) -> Tool:
"""Initialize a Tool that wraps `Metaphor.client.find_similar`"""
description = "Find pages that are similar to a given URL."
return get_metaphor_tool("find_similar", description, **kwargs)
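# Example wiring (sketch; assumes METAPHOR_API_KEY is set in the environment and
# that no extra tool kwargs are needed):
#   search_tool = get_metaphor_search(metaphor_api_key=None)
#   results = search_tool.run("recent papers on LLM agent architectures")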
| [] |
2024-01-10 | developers221/dddddd | ix~chains~moderator.py | import logging
from typing import Dict, List, Optional, Any
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from ix.chains.callbacks import IxHandler
from langchain.chains.base import Chain
from pydantic import BaseModel
from ix.chat.models import Chat
from ix.task_log.models import Task
from ix.task_log.tasks.agent_runner import start_agent_loop
logger = logging.getLogger(__name__)
MODERATOR_PROMPT = """
You are Ix, an AI assistant. You assist you user with requests by delegating work
to your team of agents.
Respond to all questions for help about Ix and how you work with links to discord and github.
The github includes documentation and source code.
discord: https://discord.gg/jtrMKxzZZQ
github: https://github.com/kreneskyp/ix
AGENTS:
{agents}
CHAT_HISTORY:
{chat_history}
INSTRUCTION:
- Choose the agent from AGENTS who can best process the user request.
- If no AGENT can process then respond as best you can.
- You must choose an agent or respond to the user request.
- Consider the CHAT_HISTORY in your decisions.
"""
CHOOSE_AGENT_PARAMS = {
"type": "object",
"properties": {
"agents": {
"type": "string",
}
},
"required": ["agents"],
}
class ChooseAgent(BaseModel):
agent_id: int
CHOOSE_AGENT_FUNC = {
"class_path": "ix.chains.functions.FunctionSchema",
"config": {
"name": "delegate_to_agent",
"description": "delegate the user request to this agent.",
"parameters": ChooseAgent.schema_json(indent=4),
},
}
LLM_CHOOSE_AGENT_CONFIG = {
"class_path": "ix.chains.llm_chain.LLMChain",
"config": {
"verbose": True,
"output_key": "delegation_or_text",
"llm": {
"class_path": "langchain.chat_models.openai.ChatOpenAI",
"config": {
"model": "gpt-4-0613",
"request_timeout": 240,
"temperature": 0,
"verbose": True,
"max_tokens": 1000,
"streaming": True,
},
},
"prompt": {
"class_path": "langchain.prompts.chat.ChatPromptTemplate",
"config": {
"messages": [
{
"role": "assistant",
"template": MODERATOR_PROMPT,
"input_variables": ["agents", "chat_history"],
},
{
"role": "user",
"template": "{user_input}",
"input_variables": ["user_input"],
},
]
},
},
"functions": [CHOOSE_AGENT_FUNC],
"output_parser": {
"class_path": "ix.chains.functions.OpenAIFunctionParser",
"config": {
"parse_json": True,
},
},
},
}
class ChatModerator(Chain):
"""
Chain that compares user input to a list of agents and chooses the best agent to handle the task
"""
selection_chain: Chain
@property
def _chain_type(self) -> str:
return "ix_chat_moderator"
@property
def output_keys(self) -> List[str]:
"""Outputs task_id of spawned subtask"""
return ["task_id"]
@property
def input_keys(self) -> List[str]:
"""Input keys this chain expects."""
return ["user_input", "chat_id"]
def agent_prompt(self, chat: Chat) -> str:
"""build prompt for configured tools"""
lines = []
agents = chat.agents.all().order_by("alias")
for i, agent in enumerate(agents):
lines.append(f"{i}. {agent.alias}: {agent.purpose}")
return "\n".join(lines)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
# 0. get chat and agents
chat_id = inputs["chat_id"]
chat = Chat.objects.get(id=chat_id)
agents = list(chat.agents.order_by("alias"))
# 1. select agent
agent_prompt = "\n".join(
[f"{i}. {agent.alias}: {agent.purpose}" for i, agent in enumerate(agents)]
)
user_input = inputs["user_input"]
logger.debug(f"Routing user_input={user_input}")
inputs_mutable = inputs.copy()
inputs_mutable["agents"] = agent_prompt
response = self.selection_chain(
inputs=inputs_mutable, callbacks=run_manager.get_child()
)
delegation_or_text = response["delegation_or_text"]
logger.debug(f"Moderator returned response={delegation_or_text}")
# response is either a delegation or a direct text response
if isinstance(delegation_or_text, dict):
agent_index = delegation_or_text["arguments"]["agent_id"]
delegate_to = agents[agent_index].alias
text = f"Delegating to @{delegate_to}"
else:
text = delegation_or_text
delegate_to = None
# 2. send message to chat
ix_handler = IxHandler.from_manager(run_manager)
ix_handler.send_agent_msg(text)
# 3. delegate to the agent
task_id = None
if delegate_to is not None:
agent = chat.agents.get(alias=delegate_to)
subtask = chat.task.delegate_to_agent(agent)
logger.debug(
f"Delegated to agent={agent.alias} task={subtask.id} input={inputs}"
)
start_agent_loop.delay(
task_id=str(subtask.id), chain_id=str(agent.chain_id), inputs=inputs
)
task_id = str(subtask.id)
return {"text": text, "task_id": task_id}
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
# 0. get chat and agents
chat_id = inputs["chat_id"]
chat = await Chat.objects.aget(id=chat_id)
agents = [value async for value in chat.agents.order_by("alias").aiterator()]
# 1. select agent
agent_prompt = "\n".join(
[f"{i}. {agent.alias}: {agent.purpose}" for i, agent in enumerate(agents)]
)
user_input = inputs["user_input"]
logger.debug(f"Routing user_input={user_input}")
inputs_mutable = inputs.copy()
inputs_mutable["agents"] = agent_prompt
response = await self.selection_chain.acall(
inputs=inputs_mutable, callbacks=run_manager.get_child()
)
delegation_or_text = response["delegation_or_text"]
logger.debug(f"Moderator returned response={delegation_or_text}")
# response is either a delegation or a direct text response
if isinstance(delegation_or_text, dict):
agent_index = delegation_or_text["arguments"]["agent_id"]
delegate_to = agents[agent_index].alias
text = f"Delegating to @{delegate_to}"
else:
text = delegation_or_text
delegate_to = None
# 2. send message to chat
ix_handler = IxHandler.from_manager(run_manager)
await ix_handler.send_agent_msg(text)
# 3. delegate to the agent
task_id = None
if delegate_to is not None:
task = await Task.objects.aget(id=chat.task_id)
agent = await chat.agents.aget(alias=delegate_to)
subtask = await task.adelegate_to_agent(agent)
logger.debug(
f"Delegated to agent={agent.alias} task={subtask.id} input={inputs}"
)
start_agent_loop.delay(
task_id=str(subtask.id), chain_id=str(agent.chain_id), inputs=inputs
)
task_id = str(subtask.id)
return {"text": text, "task_id": task_id}
| [
"\n",
"\nYou are Ix, an AI assistant. You assist you user with requests by delegating work\nto your team of agents.\n\nRespond to all questions for help about Ix and how you work with links to discord and github.\nThe github includes documentation and source code.\n\ndiscord: https://discord.gg/jtrMKxzZZQ\ngithub: https://github.com/kreneskyp/ix\n\nAGENTS:\n{agents}\n\nCHAT_HISTORY:\n{chat_history}\n\nINSTRUCTION:\n- Choose the agent from AGENTS who can best process the user request.\n- If no AGENT can process then respond as best you can.\n- You must choose an agent or respond to the user request.\n- Consider the CHAT_HISTORY in your decisions.\n"
] |
2024-01-10 | developers221/dddddd | ix~chains~routing.py | import logging
from typing import Dict, Any, List, Optional
from jsonpath_ng import parse as jsonpath_parse
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun
from langchain.chains import SequentialChain
from langchain.chains.base import Chain
logger = logging.getLogger(__name__)
class MapSubchain(Chain):
"""
Chain that runs a subchain for each element in a list input
List input is read from inputs using jsonpath set as `map_input` and mapped as
input_variable `map_input_to`. `map_input_to` is automatically added to input_variables
if not already present.
Each iteration will receive the outputs of the previous iteration under the key `outputs`
Results are output as a list under `output_key`
"""
chain: SequentialChain #: :meta private:
chains: List[Chain]
input_variables: List[str]
map_input: str
map_input_to: str
output_key: str
def __init__(self, *args, **kwargs):
input_variables = list(kwargs.get("input_variables", []))
map_input_to = kwargs.get("map_input_to", "map_input")
output_key = kwargs.get("output_key", "outputs")
memory = kwargs.get("memory", None)
chains = kwargs.get("chains", [])
# add input that will be mapped on each iteration
if map_input_to not in input_variables:
input_variables.append(map_input_to)
if output_key not in input_variables:
input_variables.append(output_key)
# create internal chain
chain = SequentialChain(
memory=memory, chains=chains, input_variables=input_variables
)
kwargs["chain"] = chain
super().__init__(*args, **kwargs)
@property
def _chain_type(self) -> str:
return "ix.MapSubchain" # pragma: no cover
@property
def input_keys(self) -> List[str]:
return self.input_variables
@property
def output_keys(self) -> List[str]:
return [self.output_key]
def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
map_input = self.map_input
map_input_to = self.map_input_to
# map input to values list
logger.debug(
f"MapSubchain mapping values from map_input={map_input} to map_input_to={map_input_to}"
)
jsonpath_expr = jsonpath_parse(map_input)
json_matches = jsonpath_expr.find(inputs)
if len(json_matches) == 0:
raise ValueError(
f"MapSubchain could not find input at {map_input} for {map_input_to} searched: {inputs}"
)
values = json_matches[0].value
if not isinstance(values, list):
raise ValueError(
f"MapSubchain input at {map_input} is not a list: {values}"
)
chain_inputs = inputs.copy()
# run chain for each value
outputs = []
for value in values:
logger.debug(f"MapSubchain processing value={value}")
iteration_inputs = chain_inputs.copy()
iteration_inputs[map_input_to] = value
iteration_inputs[self.output_key] = outputs
logger.debug(f"MapSubchain iteration_inputs={iteration_inputs}")
iteration_outputs = self.chain.run(**iteration_inputs)
iteration_mapped_output = iteration_outputs
logger.debug(f"MapSubchain response outputs={iteration_mapped_output}")
outputs.append(iteration_mapped_output)
# return as output_key
return {self.output_key: outputs}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
map_input = self.map_input
map_input_to = self.map_input_to
# map input to values list
logger.debug(
f"MapSubchain mapping values from map_input={map_input} to map_input_to={map_input_to}"
)
jsonpath_expr = jsonpath_parse(map_input)
json_matches = jsonpath_expr.find(inputs)
if len(json_matches) == 0:
raise ValueError(
f"MapSubchain could not find input at {map_input} for {map_input_to} searched: {inputs}"
)
values = json_matches[0].value
if not isinstance(values, list):
raise ValueError(
f"MapSubchain input at {map_input} is not a list: {values}"
)
chain_inputs = inputs.copy()
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
# run chain for each value
outputs = []
for value in values:
logger.debug(f"MapSubchain processing value={value}")
iteration_inputs = chain_inputs.copy()
iteration_inputs[map_input_to] = value
iteration_inputs[self.output_key] = outputs
logger.debug(f"MapSubchain iteration_inputs={iteration_inputs}")
iteration_outputs = await self.chain.arun(
callbacks=_run_manager.get_child(), **iteration_inputs
)
iteration_mapped_output = iteration_outputs
logger.debug(f"MapSubchain response outputs={iteration_mapped_output}")
outputs.append(iteration_mapped_output)
# return as output_key
return {self.output_key: outputs}
| [] |
2024-01-10 | developers221/dddddd | ix~chains~tests~test_llm_chain.py | from copy import deepcopy
import pytest
from langchain.prompts import ChatPromptTemplate
from ix.chains.llm_chain import LLMChain, TEMPLATE_CLASSES
from ix.chains.loaders.prompts import create_message
from ix.chains.tests.mock_memory import MockMemory
from ix.chains.tests.test_config_loader import OPENAI_LLM, MOCK_MEMORY
PROMPT_TEMPLATE = {
"class_path": "langchain.prompts.chat.ChatPromptTemplate",
"config": {
"messages": [
{
"role": "system",
"template": "Say hello to {name} and answer user question",
"partial_variables": {
"name": "test user",
},
},
{
"role": "user",
"template": "Question: {user_input}",
"input_variables": ["user_input"],
},
{"role": "assistant", "template": "Answer: ANSWER"},
],
},
}
EXAMPLE_CONFIG = {
"class_path": "ix.chains.llm_chain.LLMChain",
"config": {
"llm": OPENAI_LLM,
"memory": MOCK_MEMORY,
"prompt": PROMPT_TEMPLATE,
},
}
@pytest.mark.django_db
class TestChatPromptTemplate:
def test_create_message(self):
message = {
"role": "user",
"template": "hello {name} i will answer {user_input}",
"input_variables": ["user_input"],
"partial_variables": {"name": "test user"},
}
result = create_message(message)
assert isinstance(result, TEMPLATE_CLASSES["user"])
assert result.prompt.partial_variables == {"name": "test user"}
def test_from_config(self, load_chain):
config = deepcopy(PROMPT_TEMPLATE)
chain = load_chain(config)
assert isinstance(chain, ChatPromptTemplate)
assert len(chain.messages) == 3
assert isinstance(chain.messages[0], TEMPLATE_CLASSES["system"])
assert isinstance(chain.messages[1], TEMPLATE_CLASSES["user"])
assert isinstance(chain.messages[2], TEMPLATE_CLASSES["assistant"])
@pytest.mark.django_db
class TestLLMChain:
def test_from_config(self, load_chain, mock_openai_key):
config = deepcopy(EXAMPLE_CONFIG)
chain = load_chain(config)
assert isinstance(chain, LLMChain)
assert (
chain.prompt.messages[0].prompt.partial_variables
== EXAMPLE_CONFIG["config"]["prompt"]["config"]["messages"][0][
"partial_variables"
]
)
assert chain.prompt.messages[1].prompt.partial_variables == {}
assert isinstance(chain.memory, MockMemory)
| [
"{'class_path': 'langchain.prompts.chat.ChatPromptTemplate', 'config': {'messages': [{'role': 'system', 'template': 'Say hello to {name} and answer user question', 'partial_variables': {'name': 'test user'}}, {'role': 'user', 'template': 'Question: {user_input}', 'input_variables': ['user_input']}, {'role': 'assistant', 'template': 'Answer: ANSWER'}]}}"
] |
2024-01-10 | developers221/dddddd | ix~chains~tests~mock_memory.py | from typing import Dict, Any, List
from langchain.schema import BaseMemory
class MockMemory(BaseMemory):
"""
Mock memory that returns a fixed set of values
Used for testing only.
"""
value_map: Dict[str, str] = {"chat_history": "mock memory"}
session_id: str = "mock_session_id"
supports_session: bool = True
@property
def memory_variables(self) -> List[str]:
return list(self.value_map.keys())
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
return self.value_map
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
pass
def clear(self) -> None:
pass
| [] |