date_collected (stringclasses: 1 value) | repo_name (stringlengths: 6–116) | file_name (stringlengths: 2–220) | file_contents (stringlengths: 13–357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | Alexanthe/language_project | beeai.py | import requests
import api_key
from datetime import date, timedelta
import openai
def generate_prompt(word, number, style):
return """Write a {num} word {sty} story for and with the word '{wor}'""".format( wor=word.capitalize(), num=number,sty=style)
def generateHive(prompt):
openai.api_key = api_key.openai_api_key
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
max_tokens=200,
temperature=0.6,
)
# print(response.choices[0].text)
return response.choices[0].text
def getwordDict():
today = date.today()
todayWordNik = today - timedelta(days=1)
query = {"date": todayWordNik, "api_key": api_key.wordnik_api_key}
response = requests.get(
"https://api.wordnik.com/v4/words.json/wordOfTheDay", params=query
)
wordDict = response.json()
return wordDict
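# Sketch of the response shape the accessors below rely on (assumed from this file's usage,
# not verified against the Wordnik API):
# {"word": "...", "definitions": [{"source": "...", "text": "..."}],
#  "examples": [...], "pdd": "<published date>", "note": "..."}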
wordDict = getwordDict()
def getWord (wordDict):
word = wordDict["word"]
word = word.capitalize()
return word
def getDefinitions (wordDict):
definitions = []
for each in wordDict["definitions"]:
definitions.append(each)
# For cases that have multiple definitions
source = []
text = []
for each in definitions:
source.append(each["source"])
text.append(each["text"])
return source,text
def getExamples (wordDict):
examples = []
for each in wordDict["examples"]:
examples.append(each)
return examples
def getpublishedDate (wordDict):
publishedDate = wordDict["pdd"]
return publishedDate
def getNote (wordDict):
note = wordDict["note"]
return note | [] |
2024-01-10 | Alexanthe/language_project | testing.py | import requests
from datetime import date, timedelta
import api_key
import os
import openai
import beeai
# from wordDictExample import *
openai.api_key = api_key.openai_api_key
word = "Obreptitious"
beeai.generateHive(word)
def generate_prompt(word, number, style):
return """Write a {sty} word {num} story for and with the word '{wor}'""".format( wor=word.capitalize(), num=number,sty=style)
number=10
style="alex"
print(generate_prompt(word, number, style))
# response = openai.Completion.create(
# model="text-davinci-003",
# prompt=generate_prompt(word),
# max_tokens=200,
# temperature=0.6,
# )
# print(response)
# print(response.choices[0].text)
# today = date.today()
# todayWordNik = today - timedelta(days=1)
# query = {"date": todayWordNik, "api_key": api_key.api_key}
# response = requests.get(
# "https://api.wordnik.com/v4/words.json/wordOfTheDay", params=query
# )
# wordDict = response.json()
# ## Get Word ####
# word = wordDict["word"]
# print(word)
# ## Get Definitions ####
# definitions = []
# for each in wordDict["definitions"]:
# definitions.append(each)
# # Function to get the different sources and texts definitions (Might have multiple definitions)
# def getSpecificDef(definitions):
# source = []
# text = []
# for each in definitions:
# source.append(each["source"])
# text.append(each["text"])
# return source,text
# source,text = getSpecificDef(definitions)
# ## Get Examples ####
# examples = []
# for each in wordDict["examples"]:
# examples.append(each)
# print(len(examples)) # Can create a py function to just get specific dictionary key values such as "text"
# ## Get Published Dates ####
# publishedDate = wordDict["pdd"]
# ## Get Note ####
# note = wordDict["note"]
###########################################################################
## Using a different query api type ####
# query = {
# "hasDictionaryDef": True,
# "api_key": api_key.api_key,
# "includePartOfSpeech": True,
# "excludePartOfSpeech": True,
# "maxCorpusCount": -1,
# "minDictionaryCount": 1,
# "maxDictionaryCount": -1,
# "minLength": 5,
# "maxLength": -1
# }
# response = requests.get(
# "https://api.wordnik.com/v4/words.json/randomWord", params=query
# )
# wordsDict = response.json()
# # wordsDict = {"id": 0, "word": "corner"} # Example
# word = wordsDict["word"]
# print(word)
# query = {"useCanonical": True, "api_key": api_key.api_key}
# response = requests.get(
# "https://api.wordnik.com/v4/word.json/" + word + "/topExample", params=query
# )
# wordExample = response.json()
# print(wordExample)
# query = {'lat':'45', 'lon':'180'}
# response = requests.get('http://api.open-notify.org/iss-pass.json', params=query)
# print(response.json())
| [] |
2024-01-10 | ikraaaaam/hasnik_bot_News-Research-Tool_using_LLM_langchain_OpenAI | rough.py | import os
import streamlit as st
import pickle
import time
import requests
from bs4 import BeautifulSoup
from langchain import OpenAI
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
#from langchain_community.document_loaders import BSHTMLLoader
# Load environment variables from .env file
load_dotenv()
# Set up Streamlit UI
st.title("Hasnik Bot: News Research Tool 📈")
st.sidebar.title("News Article URLs")
# Collect user input for URLs
urls = [st.sidebar.text_input(f"URL {i+1}") for i in range(3)]
# Check if the "Process URLs" button is clicked
process_url_clicked = st.sidebar.button("Process URLs")
# Define the file path for storing the FAISS index
file_path = "faiss_store.pkl"
# Create a placeholder for displaying messages
main_placeholder = st.empty()
# Initialize the OpenAI language model
llm = OpenAI(temperature=0.9, max_tokens=500)
# Process URLs if the button is clicked
if process_url_clicked:
data = ""
for url in urls:
try:
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
paragraphs = soup.find_all("p")
for paragraph in paragraphs:
# Add the source metadata variable for each paragraph
data += paragraph.get_text().strip()
except Exception as e:
st.error(f"Error fetching data from {url}: {e}")
#print(data)
# Split the data into chunks using the CharacterTextSplitter
text_splitter = CharacterTextSplitter( chunk_size=100,chunk_overlap=20)
docs= text_splitter.split_text(data)
#, '\n', '.', ','
#print(type(docs[0]))
#main_placeholder.text("Text Splitter...Started...✅✅✅")
#docs = text_splitter.create_documents(data)
#print(docs[0])
print('docs done')
# Create embeddings and save them to the FAISS index
embeddings = OpenAIEmbeddings()
st.session_state.vectorestore_openai =FAISS.from_texts(texts= docs, embedding= embeddings)
main_placeholder.text("Embedding Vector Building...✅✅✅")
time.sleep(2)
# Save the FAISS index to a pickle file
# with open(file_path, "wb") as f:
# pickle.dump(vectorstore_openai, f)
#print('done')
# Collect user input for the question
query = st.text_input("Question: ")
print(query)
# Process the question and retrieve an answer if the question is provided
if query:
try:
# Load the FAISS index from the pickle file
# with open(file_path, "rb") as f:
# vectorstore = pickle.load(f)
print('query done')
retriever = st.session_state.vectorestore_openai.as_retriever(search_type="similarity", search_kwargs={"k":2})
print('retriever done')
qa = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True)
print('qa done')
result = qa({"query": query})
print('response done')
answer = result['result']
print('answer done')
st.write(answer)
# Use LangChain to find an answer to the question
#chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=vectorstore.as_retriever())
#result = qa ({"question": query}, return_only_outputs=True)
#result= qa.run(query=query)
#answer = result["result"]
#source_documents = result["source_documents"]
# Display the answer
#st.header("Answer")
#st.write(result["answer"])
except Exception as e:
st.error(f"An error occurred while answering the question: {e}")
| [] |
2024-01-10 | LMisaki/Openaibot | App~Event.py | # -*- coding: utf-8 -*-
# @Time : 9/22/22 11:04 PM
# @FileName: Event.py
# @Software: PyCharm
# @Github :sudoskys
import json
import pathlib
import time
from typing import Union
# from App.chatGPT import PrivateChat
from utils.Base import ReadConfig
from utils.Data import DataWorker, DictUpdate, DefaultData
from utils.Detect import DFA, Censor
from utils.Base import Logger
logger = Logger()
# Utility data type
DataUtils = DataWorker(prefix="Open_Ai_bot_")
urlForm = {
"Danger.form": [
"https://raw.githubusercontent.com/fwwdn/sensitive-stop-words/master/%E6%94%BF%E6%B2%BB%E7%B1%BB.txt",
"https://raw.githubusercontent.com/TelechaBot/AntiSpam/main/Danger.txt"
]
}
def InitCensor():
config = ReadConfig().parseFile(str(pathlib.Path.cwd()) + "/Config/app.toml")
if config.proxy.status:
proxies = {
'all://': config.proxy.url,
} # 'http://127.0.0.1:7890' # url
return Censor.InitWords(url=urlForm, home_dir="./Data/", proxy=proxies)
else:
return Censor.InitWords(url=urlForm, home_dir="./Data/")
if not pathlib.Path("./Data/Danger.form").exists():
InitCensor()
# Content filter
ContentDfa = DFA(path="./Data/Danger.form")
global _csonfig
global Group_Msg
def init_Msg():
global Group_Msg
Group_Msg = {}
return Group_Msg
init_Msg()
# IO
def load_csonfig():
global _csonfig
with open("./Config/config.json", encoding="utf-8") as f:
_csonfig = json.load(f)
now_table = DefaultData.defaultConfig()
DictUpdate.dict_update(now_table, _csonfig)
return now_table
def save_csonfig():
with open("./Config/config.json", "w", encoding="utf8") as f:
json.dump(_csonfig, f, indent=4, ensure_ascii=False)
class rqParser(object):
@staticmethod
def get_response_text(response):
REPLY = []
Choice = response.get("choices")
if Choice:
for item in Choice:
_text = item.get("text")
REPLY.append(_text)
if not REPLY:
REPLY = ["Nothing to say:response null~"]
return REPLY
@staticmethod
def get_response_usage(response):
usage = len("机器资源")
if response.get("usage"):
usage = response.get("usage").get("total_tokens")
return usage
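# Illustrative shape of the completion response these parsers expect (a sketch, not a real API reply):
# {"choices": [{"text": "..."}], "usage": {"total_tokens": 42}}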
class Usage(object):
@staticmethod
def isOutUsage(user):
"""
Cumulative usage check.
:param user:
:return: bool
"""
# Time (hourly bucket)
key_time = int(time.strftime("%Y%m%d%H", time.localtime()))
GET = DataUtils.getKey(f"usage_{user}_{key_time}")
if GET:
if GET >= 60000:
return {"status": True, "use": GET, "uid": key_time}
else:
return {"status": False, "use": GET, "uid": key_time}
else:
GET = 0
DataUtils.setKey(f"usage_{user}_{key_time}", GET)
return {"status": False, "use": GET, "uid": key_time}
@staticmethod
def renewUsage(user, usage, father):
GET = father["use"]
key_time = father['uid']
GET = GET + usage
DataUtils.setKey(f"usage_{user}_{key_time}", GET)
# double req in 3 seconds
return True
@staticmethod
def upMsg(user, usage, father):
GET = father["use"]
key_time = father['uid']
GET = GET + usage
DataUtils.setKey(f"usage_{user}_{key_time}", GET)
# double req in 3 seconds
return True
class Utils(object):
@staticmethod
def forget_me(user_id, group_id):
from openai_async.utils.data import MsgFlow
_cid = DefaultData.composing_uid(user_id=user_id, chat_id=group_id)
return MsgFlow(uid=_cid).forget()
@staticmethod
def extract_arg(arg):
return arg.split()[1:]
@staticmethod
def Humanization(strs):
return strs.lstrip('??!!:。')
@staticmethod
def WaitFlood(user, group, usercold_time: int = None, groupcold_time: int = None):
if usercold_time is None:
usercold_time = _csonfig["usercold_time"]
if groupcold_time is None:
groupcold_time = _csonfig["groupcold_time"]
if DataUtils.getKey(f"flood_user_{user}"):
# double req in 3 seconds
return True
else:
if _csonfig["usercold_time"] > 1:
DataUtils.setKey(f"flood_user_{user}", "FAST", exN=usercold_time)
# User
if DataUtils.getKey(f"flood_group_{group}"):
# double req in 3 seconds
return True
else:
if _csonfig["groupcold_time"] > 1:
DataUtils.setKey(f"flood_group_{group}", "FAST", exN=groupcold_time)
return False
@staticmethod
def checkMsg(msg_uid):
global Group_Msg
if Group_Msg is None:
Group_Msg = {}
if len(Group_Msg) > 5000:
Group_Msg = dict(list(Group_Msg.items())[5000:])  # If the dict holds more keys than the maximum, drop some of the oldest ones
# print(Group_Msg)
return Group_Msg.get(str(msg_uid))
@staticmethod
def trackMsg(msg_uid, user_id):
global Group_Msg
if Group_Msg is None:
Group_Msg = {}
if len(Group_Msg) > 5000:
Group_Msg = dict(list(Group_Msg.items())[5000:])  # If the dict holds more keys than the maximum, drop some of the oldest ones
Group_Msg[str(msg_uid)] = user_id
# print(Group_Msg)
class GroupChat(object):
@staticmethod
async def load_group_response(user, group, key: Union[str, list], prompt: str = "Say this is a test",
userlimit: int = None):
if not key:
logger.error("SETTING:API key missing")
raise Exception("API key missing")
# Length limit
if _csonfig["input_limit"] < len(str(prompt)) / 4:
return "TOO LONG"
# Content moderation
if ContentDfa.exists(str(prompt)):
return "I am a robot and not fit to answer dangerous content."
# Flood / spam protection
if Utils.WaitFlood(user=user, group=group, usercold_time=userlimit):
return "TOO FAST"
# Usage check
_Usage = Usage.isOutUsage(user)
if _Usage["status"]:
return "OUT OF USAGE"
# Request
try:
# Openai_python
# import openai
# openai.api_key = key
# response = openai.Completion.create(model="text-davinci-003", prompt=str(prompt), temperature=0,
# max_tokens=int(_csonfig["token_limit"]))
# OPENAI
# await openai_async.Completion(api_key=key).create(model="text-davinci-003", prompt=str(prompt),
# temperature=0.2,
# frequency_penalty=1,
# max_tokens=int(_csonfig["token_limit"]))
# CHAT
import openai_async.Chat
_cid = DefaultData.composing_uid(user_id=user, chat_id=group)
receiver = openai_async.Chat.Chatbot(api_key=key,
conversation_id=_cid)
response = await receiver.get_chat_response(model="text-davinci-003", prompt=str(prompt),
max_tokens=int(_csonfig["token_limit"]))
# print(response)
_deal_rq = rqParser.get_response_text(response)
# print(_deal_rq)
_deal = _deal_rq[0]
_usage = rqParser.get_response_usage(response)
logger.info(f"RUN:{user}:{group} --prompt: {prompt} --req: {_deal} ")
except Exception as e:
logger.error(f"RUN:Api Error:{e}")
_usage = 0
_deal = f"Api Outline \n {prompt}"
# Quota accounting
Usage.renewUsage(user=user, father=_Usage, usage=_usage)
_deal = ContentDfa.filter_all(_deal)
# Humanize the reply
_deal = Utils.Humanization(_deal)
return _deal
async def WhiteUserCheck(bot, message, WHITE):
if _csonfig.get("whiteUserSwitch"):
if not str(abs(message.from_user.id)) in _csonfig.get("whiteUser"):
try:
await bot.send_message(message.chat.id,
f"Check the settings to find that you is not whitelisted!...{WHITE}")
except Exception as e:
logger.error(e)
finally:
return True
else:
if _csonfig.get("whiteUserSwitch") is None:
return True
return False
async def WhiteGroupCheck(bot, message, WHITE):
if _csonfig.get("whiteGroupSwitch"):
if not str(abs(message.chat.id)) in _csonfig.get("whiteGroup"):
try:
await bot.send_message(message.chat.id,
f"The group is not whitelisted!...\n\n{WHITE}")
except Exception as e:
logger.error(e)
finally:
logger.info(f"RUN:non-whitelisted groups:{abs(message.chat.id)}")
return True # await bot.leave_chat(message.chat.id)
else:
if _csonfig.get("whiteUserSwitch") is None:
return True
return False
async def Forget(bot, message, config):
from openai_async.utils.data import MsgFlow
_cid = DefaultData.composing_uid(user_id=message.from_user.id, chat_id=message.chat.id)
return MsgFlow(uid=_cid).forget()
async def Chat_P(bot, message, config):
load_csonfig()
# Handle initialization
_prompt = message.text
if message.text.startswith("/chat"):
await Forget(bot, message, config)
_prompt_r = message.text.split(" ", 1)
if len(_prompt_r) < 2:
return
_prompt = _prompt_r[1]
# Handle the on/off switch
if not _csonfig.get("statu"):
await bot.reply_to(message, "BOT:Under Maintenance")
return
# Whitelist check
if await WhiteUserCheck(bot, message, config.WHITE):
return
try:
if len(_prompt) > 1:
_req = await GroupChat.load_group_response(user=message.from_user.id, group=message.chat.id,
key=config.OPENAI_API_KEY,
prompt=_prompt)
await bot.reply_to(message, f"{_req}\n{config.INTRO}")
except Exception as e:
logger.error(e)
await bot.reply_to(message, f"Error Occur~Maybe Api request rate limit~nya")
async def Text(bot, message, config, reset: bool = False):
load_csonfig()
# Get the prompt
_prompt = message.text
if message.text.startswith("/chat"):
await Forget(bot, message, config)
_prompt_r = message.text.split(" ", 1)
if len(_prompt_r) < 2:
return
_prompt = _prompt_r[1]
# Handle whether to forget the previous context
if reset:
await Forget(bot, message, config)
else:
# Also check that the replied-to message belongs to this user
if not str(Utils.checkMsg(f"{message.chat.id}{message.reply_to_message.id}")) == f"{message.from_user.id}":
return
if not _csonfig.get("statu"):
await bot.reply_to(message, "BOT:Under Maintenance")
return
# Group whitelist check
await WhiteGroupCheck(bot, message, config.WHITE)
try:
_req = await GroupChat.load_group_response(user=message.from_user.id, group=message.chat.id,
key=config.OPENAI_API_KEY,
prompt=_prompt)
msg = await bot.reply_to(message, f"{_req}\n{config.INTRO}")
Utils.trackMsg(f"{message.chat.id}{msg.id}", user_id=message.from_user.id)
except Exception as e:
logger.error(e)
await bot.reply_to(message, f"Error Occur~Maybe Api request rate limit~nya")
async def Master(bot, message, config):
load_csonfig()
if message.from_user.id in config.master:
try:
command = message.text
if command.startswith("/usercold"):
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["usercold_time"] = int(_len_)
await bot.reply_to(message, f"user cooltime:{_len_}")
save_csonfig()
logger.info(f"SETTING:reset user cold time limit to{_len_}")
if command.startswith("/groupcold"):
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["groupcold_time"] = int(_len_)
await bot.reply_to(message, f"group cooltime:{_len_}")
save_csonfig()
logger.info(f"SETTING:reset group cold time limit to{_len_}")
if command.startswith("/tokenlimit"):
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["token_limit"] = int(_len_)
await bot.reply_to(message, f"tokenlimit:{_len_}")
save_csonfig()
logger.info(f"SETTING:reset tokenlimit limit to{_len_}")
if command.startswith("/inputlimit"):
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["input_limit"] = int(_len_)
await bot.reply_to(message, f"inputlimit:{_len_}")
save_csonfig()
logger.info(f"SETTING:reset input limit to{_len_}")
if command == "/onw":
_csonfig["whiteGroupSwitch"] = True
await bot.reply_to(message, "ON:whiteGroup")
save_csonfig()
logger.info("SETTING:whiteGroup ON")
if command == "/offw":
_csonfig["whiteGroupSwitch"] = False
await bot.reply_to(message, "SETTING:whiteGroup OFF")
save_csonfig()
logger.info("SETTING:whiteGroup OFF")
if command == "/open":
_csonfig["statu"] = True
await bot.reply_to(message, "SETTING:BOT ON")
save_csonfig()
logger.info("SETTING:BOT ON")
if command == "/close":
_csonfig["statu"] = False
await bot.reply_to(message, "SETTING:BOT OFF")
save_csonfig()
logger.info("SETTING:BOT OFF")
if command == "/config":
path = str(pathlib.Path().cwd()) + "/" + "Config/config.json"
if pathlib.Path(path).exists():
doc = open(path, 'rb')
await bot.send_document(message.chat.id, doc)
else:
await bot.reply_to(message, "没有找到配置文件")
if "/addw" in command:
for group in Utils.extract_arg(command):
groupId = "".join(list(filter(str.isdigit, group)))
_csonfig["whiteGroup"].append(str(groupId))
await bot.reply_to(message, 'White Group Added' + str(groupId))
logger.info(f"SETTING:White Group Added {group}")
save_csonfig()
if "/delw" in command:
for group in Utils.extract_arg(command):
groupId = "".join(list(filter(str.isdigit, group)))
if int(groupId) in _csonfig["whiteGroup"]:
_csonfig["whiteGroup"].remove(str(groupId))
await bot.reply_to(message, 'White Group Removed ' + str(groupId))
logger.info(f"SETTING:White Group Removed {group}")
if isinstance(_csonfig["whiteGroup"], list):
_csonfig["whiteGroup"] = list(set(_csonfig["whiteGroup"]))
save_csonfig()
if command == "/userwon":
_csonfig["whiteUserSwitch"] = True
await bot.reply_to(message, "SETTING:whiteUserSwitch ON")
save_csonfig()
logger.info("SETTING:whiteUser ON")
if command == "/userwoff":
_csonfig["whiteUserSwitch"] = False
await bot.reply_to(message, "SETTING:whiteUserSwitch OFF")
save_csonfig()
logger.info("SETTING:whiteUser OFF")
if "/adduser" in command:
for group in Utils.extract_arg(command):
groupId = "".join(list(filter(str.isdigit, group)))
_csonfig["whiteUser"].append(str(groupId))
await bot.reply_to(message, 'White User Added' + str(groupId))
logger.info(f"SETTING:White User Added {group}")
save_csonfig()
if "/deluser" in command:
for group in Utils.extract_arg(command):
groupId = "".join(list(filter(str.isdigit, group)))
if int(groupId) in _csonfig["whiteUser"]:
_csonfig["whiteUser"].remove(str(groupId))
await bot.reply_to(message, 'White User Removed ' + str(groupId))
logger.info(f"SETTING:White User Removed {group}")
if isinstance(_csonfig["whiteUser"], list):
_csonfig["whiteUser"] = list(set(_csonfig["whiteUser"]))
save_csonfig()
if command == "/updetect":
keys, _error = InitCensor()
if _error:
error = '\n'.join(_error)
errors = f"Error:\n{error}"
else:
# Reload the AntiSpam wordlist
ContentDfa.change_words(path="./Data/Danger.form")
errors = "Success"
if message:
await bot.reply_to(message, f"{'|'.join(keys)}\n\n{errors}")
if not command.startswith("/"):
await Chat_P(bot, message, config)
except Exception as e:
logger.error(e)
async def Start(bot, message, config):
await bot.reply_to(message, f"Ping,Use /chat start a new chat loop")
async def About(bot, message, config):
await bot.reply_to(message, f"{config.ABOUT}")
| [
" "
] |
2024-01-10 | dnautiyal/AIMakerSpace | Week-3~Tuesday~fastapi~main-chainlit.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import PyMuPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
import os
import arxiv
import chainlit as cl
from chainlit import user_session
from MyCustomLLM import FastApiLLM
@cl.langchain_factory(use_async=True)
async def init():
arxiv_query = None
# Wait for the user to ask an Arxiv question
while arxiv_query == None:
arxiv_query = await cl.AskUserMessage(
content="Please enter a topic to begin!", timeout=15
).send()
# Obtain the top 3 results from Arxiv for the query
search = arxiv.Search(
query=arxiv_query["content"],
max_results=3,
sort_by=arxiv.SortCriterion.Relevance,
)
await cl.Message(content="Downloading and chunking articles...").send()
# download each of the pdfs
pdf_data = []
for result in search.results():
loader = PyMuPDFLoader(result.pdf_url)
loaded_pdf = loader.load()
for document in loaded_pdf:
document.metadata["source"] = result.entry_id
document.metadata["file_path"] = result.pdf_url
document.metadata["title"] = result.title
pdf_data.append(document)
# Create a Chroma vector store
embeddings = OpenAIEmbeddings(
disallowed_special=(),
)
# If operation takes too long, make_async allows to run in a thread
# docsearch = await cl.make_async(Chroma.from_documents)(pdf_data, embeddings)
docsearch = Chroma.from_documents(pdf_data, embeddings)
# Create a chain that uses the Chroma vector store
chain = RetrievalQAWithSourcesChain.from_chain_type(
FastApiLLM(),
chain_type="stuff",
retriever=docsearch.as_retriever(),
return_source_documents=True,
)
# Let the user know that the system is ready
await cl.Message(
content=f"We found a few papers about `{arxiv_query['content']}` you can now ask questions!"
).send()
return chain
@cl.langchain_postprocess
async def process_response(res):
answer = res["answer"]
source_elements_dict = {}
source_elements = []
for idx, source in enumerate(res["source_documents"]):
title = source.metadata["title"]
if title not in source_elements_dict:
source_elements_dict[title] = {
"page_number": [source.metadata["page"]],
"url": source.metadata["file_path"],
}
else:
source_elements_dict[title]["page_number"].append(source.metadata["page"])
# sort the page numbers
source_elements_dict[title]["page_number"].sort()
for title, source in source_elements_dict.items():
# create a string for the page numbers
page_numbers = ", ".join([str(x) for x in source["page_number"]])
text_for_source = f"Page Number(s): {page_numbers}\nURL: {source['url']}"
source_elements.append(
cl.Text(name=title, content=text_for_source, display="inline")
)
await cl.Message(content=answer, elements=source_elements).send() | [] |
2024-01-10 | dnautiyal/AIMakerSpace | Week-3~Tuesday~fastapi~MyCustomLLM.py | from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from run import generate_text, get_task_status
import time
class FastApiLLM(LLM):
@property
def _llm_type(self) -> str:
return "FastApiLLM"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
if stop is not None:
raise ValueError("stop kwargs are not permitted.")
if len(prompt) <= 250:
return prompt
try:
task_id = generate_text(prompt)
while True:
status = get_task_status(task_id)
if "Task Pending" not in status:
print(f"Task-id = {task_id} is ready. Time taken = {status['time']}, Memory used = {status['memory']}")
return status["result"]
time.sleep(2)
except Exception:
return "FastApiLLM: Exception encountered"
| [] |
2024-01-10 | rajaswa/guardrails | guardrails~validators.py | """This module contains the validators for the Guardrails framework.
The name with which a validator is registered is the name that is used
in the `RAIL` spec to specify formatters.
"""
import ast
import logging
import os
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Type, Union
import openai
from pydantic import BaseModel, ValidationError
from guardrails.datatypes import registry as types_registry
from guardrails.utils.reask_utils import ReAsk
try:
import numpy as np
except ImportError:
_HAS_NUMPY = False
else:
_HAS_NUMPY = True
validators_registry = {}
types_to_validators = defaultdict(list)
logger = logging.getLogger(__name__)
class Filter:
pass
class Refrain:
pass
def check_refrain_in_list(schema: List) -> bool:
"""Check if a Refrain object exists in a list.
Args:
schema: A list that can contain lists, dicts or scalars.
Returns:
bool: True if a Refrain object exists in the list.
"""
for item in schema:
if isinstance(item, Refrain):
return True
elif isinstance(item, list):
if check_refrain_in_list(item):
return True
elif isinstance(item, dict):
if check_refrain_in_dict(item):
return True
return False
def check_refrain_in_dict(schema: Dict) -> bool:
"""Check if a Refrain object exists in a dict.
Args:
schema: A dict that can contain lists, dicts or scalars.
Returns:
True if a Refrain object exists in the dict.
"""
for key, value in schema.items():
if isinstance(value, Refrain):
return True
elif isinstance(value, list):
if check_refrain_in_list(value):
return True
elif isinstance(value, dict):
if check_refrain_in_dict(value):
return True
return False
def filter_in_list(schema: List) -> List:
"""Remove out all Filter objects from a list.
Args:
schema: A list that can contain lists, dicts or scalars.
Returns:
A list with all Filter objects removed.
"""
filtered_list = []
for item in schema:
if isinstance(item, Filter):
pass
elif isinstance(item, PydanticReAsk):
filtered_list.append(item)
elif isinstance(item, list):
filtered_item = filter_in_list(item)
if len(filtered_item):
filtered_list.append(filtered_item)
elif isinstance(item, dict):
filtered_dict = filter_in_dict(item)
if len(filtered_dict):
filtered_list.append(filtered_dict)
else:
filtered_list.append(item)
return filtered_list
def filter_in_dict(schema: Dict) -> Dict:
"""Remove out all Filter objects from a dictionary.
Args:
schema: A dictionary that can contain lists, dicts or scalars.
Returns:
A dictionary with all Filter objects removed.
"""
filtered_dict = {}
for key, value in schema.items():
if isinstance(value, Filter):
pass
elif isinstance(value, PydanticReAsk):
filtered_dict[key] = value
elif isinstance(value, list):
filtered_item = filter_in_list(value)
if len(filtered_item):
filtered_dict[key] = filtered_item
elif isinstance(value, dict):
filtered_dict[key] = filter_in_dict(value)
else:
filtered_dict[key] = value
return filtered_dict
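# Illustrative behavior of the two filter helpers (a sketch with made-up values):
# filter_in_dict({"a": Filter(), "b": [1, Filter(), 2], "c": "ok"})
# returns {"b": [1, 2], "c": "ok"}; Filter placeholders are dropped at any nesting depth.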
def register_validator(name: str, data_type: Union[str, List[str]]):
"""Register a validator for a data type."""
def decorator(cls: type):
"""Register a validator for a data type."""
nonlocal data_type
if isinstance(data_type, str):
data_type = (
list(types_registry.keys()) if data_type == "all" else [data_type]
)
# Make sure that the data type string exists in the data types registry.
for dt in data_type:
if dt not in types_registry:
raise ValueError(f"Data type {dt} is not registered.")
types_to_validators[dt].append(name)
validators_registry[name] = cls
cls.rail_alias = name
return cls
return decorator
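# A minimal registration sketch (hypothetical "non-empty" validator, mirroring the
# commented-out examples further below; not part of the library):
# @register_validator(name="non-empty", data_type="string")
# class NonEmpty(Validator):
#     def validate(self, key, value, schema):
#         if not value:
#             raise EventDetail(key, value, schema, "Value is empty.", None)
#         return schema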
@dataclass
class EventDetail(BaseException):
"""Event detail."""
key: str
value: Any
schema: Dict[str, Any]
error_message: str
fix_value: Any
class Validator:
"""Base class for validators."""
def __init__(self, on_fail: Optional[Callable] = None, **kwargs):
if isinstance(on_fail, str):
self.on_fail = getattr(self, on_fail, self.noop)
else:
self.on_fail = on_fail or self.noop
# Store the kwargs for the validator.
self._kwargs = kwargs
assert (
self.rail_alias in validators_registry
), f"Validator {self.__class__.__name__} is not registered. "
def validate_with_correction(self, key, value, schema) -> Dict:
try:
return self.validate(key, value, schema)
except EventDetail as e:
logger.debug(
f"Validator {self.__class__.__name__} failed for {key} with error {e}."
)
return self.on_fail(e)
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
"""Validate a value."""
raise NotImplementedError
def fix(self, error: EventDetail) -> Dict:
"""Debug the incorrect value."""
error.schema[error.key] = error.fix_value
return error.schema
def reask(self, error: EventDetail) -> Dict:
"""Reask disambiguates the validation failure into a helpful error
message."""
error.schema[error.key] = ReAsk(
incorrect_value=error.value,
error_message=error.error_message,
fix_value=error.fix_value,
)
return error.schema
def filter(self, error: EventDetail) -> Dict:
"""If validation fails, filter the offending key from the schema."""
logger.debug(f"Filtering {error.key} from schema...")
error.schema[error.key] = Filter()
return error.schema
def refrain(self, error: EventDetail) -> Optional[Dict]:
"""If validation fails, refrain from answering."""
logger.debug(f"Refusing to answer {error.key}...")
error.schema[error.key] = Refrain()
return error.schema
def noop(self, error: EventDetail) -> Dict:
"""If validation fails, do nothing."""
logger.debug(
f"Validator {self.__class__.__name__} failed for {error.key}, "
"but doing nothing..."
)
return error.schema
def to_prompt(self, with_keywords: bool = True) -> str:
"""Convert the validator to a prompt."""
params = " ".join(list(self._kwargs.values()))
if with_keywords:
params = " ".join([f"{k}={v}" for k, v in self._kwargs.items()])
return f"{self.rail_alias}: {params}"
# @register_validator('required', 'all')
# class Required(Validator):
# """Validate that a value is not None."""
# def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> bool:
# """Validate that a value is not None."""
# return value is not None
# @register_validator('description', 'all')
# class Description(Validator):
# """Validate that a value is not None."""
# def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> bool:
# """Validate that a value is not None."""
# return value is not None
class PydanticReAsk(dict):
pass
@register_validator(name="pydantic", data_type="pydantic")
class Pydantic(Validator):
"""Validate an object using Pydantic."""
def __init__(
self,
model: Type[BaseModel],
on_fail: Optional[Callable] = None,
):
super().__init__(on_fail=on_fail)
self.model = model
def validate_with_correction(
self, key: str, value: Dict, schema: Union[Dict, List]
) -> Dict:
"""Validate an object using Pydantic.
For example, consider the following data for a `Person` model
with fields `name`, `age`, and `zipcode`:
{
"user" : {
"name": "John",
"age": 30,
"zipcode": "12345",
}
}
then `key` is "user", `value` is the value of the "user" key, and
`schema` is the entire schema.
If this validator succeeds, then the `schema` is returned and
looks like:
{
"user": Person(name="John", age=30, zipcode="12345")
}
If it fails, then the `schema` is returned and looks like e.g.
{
"user": {
"name": "John",
"age": 30,
"zipcode": ReAsk(
incorrect_value="12345",
error_message="...",
fix_value=None,
path=None,
)
}
}
"""
try:
# Run the Pydantic model on the value.
schema[key] = self.model(**value)
except ValidationError as e:
# Create a copy of the value so that we can modify it
# to insert e.g. ReAsk objects.
new_value = deepcopy(value)
for error in e.errors():
assert (
len(error["loc"]) == 1
), "Pydantic validation errors should only have one location."
field_name = error["loc"][0]
event_detail = EventDetail(
key=field_name,
value=new_value[field_name],
schema=new_value,
error_message=error["msg"],
fix_value=None,
)
# Call the on_fail method and reassign the value.
new_value = self.on_fail(event_detail)
# Insert the new `value` dictionary into the schema.
# This now contains e.g. ReAsk objects.
schema[key] = PydanticReAsk(new_value)
return schema
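# Illustrative wiring for the docstring example above (hypothetical Person model; a sketch only):
# class Person(BaseModel):
#     name: str
#     age: int
#     zipcode: str
# schema = Pydantic(model=Person, on_fail="reask").validate_with_correction(
#     "user", {"name": "John", "age": 30, "zipcode": "12345"}, {}
# )
# On success, schema == {"user": Person(name="John", age=30, zipcode="12345")}.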
@register_validator(name="valid-range", data_type=["integer", "float", "percentage"])
class ValidRange(Validator):
"""Validate that a value is within a range.
- Name for `format` attribute: `valid-range`
- Supported data types: `integer`, `float`, `percentage`
- Programmatic fix: Closest value within the range.
"""
def __init__(
self, min: int = None, max: int = None, on_fail: Optional[Callable] = None
):
super().__init__(on_fail=on_fail)
self._min = min
self._max = max
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
"""Validate that a value is within a range."""
logger.debug(f"Validating {value} is in range {self._min} - {self._max}...")
val_type = type(value)
if self._min is not None and value < val_type(self._min):
raise EventDetail(
key,
value,
schema,
f"Value {value} is less than {self._min}.",
self._min,
)
if self._max is not None and value > val_type(self._max):
raise EventDetail(
key,
value,
schema,
f"Value {value} is greater than {self._max}.",
self._max,
)
return schema
@register_validator(name="valid-choices", data_type="all")
class ValidChoices(Validator):
"""Validate that a value is within the acceptable choices.
- Name for `format` attribute: `valid-choices`
- Supported data types: `all`
- Programmatic fix: None.
"""
def __init__(self, choices: List[Any], on_fail: Optional[Callable] = None):
super().__init__(on_fail=on_fail)
self._choices = choices
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
"""Validate that a value is within a range."""
logger.debug(f"Validating {value} is in choices {self._choices}...")
if value not in self._choices:
raise EventDetail(
key,
value,
schema,
f"Value {value} is not in choices {self._choices}.",
None,
)
return schema
@register_validator(name="lower-case", data_type="string")
class LowerCase(Validator):
"""Validate that a value is lower case.
- Name for `format` attribute: `lower-case`
- Supported data types: `string`
- Programmatic fix: Manually convert to lower case.
"""
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
logger.debug(f"Validating {value} is lower case...")
if value.lower() != value:
raise EventDetail(
key,
value,
schema,
f"Value {value} is not lower case.",
value.lower(),
)
return schema
@register_validator(name="upper-case", data_type="string")
class UpperCase(Validator):
"""Validate that a value is upper case.
- Name for `format` attribute: `upper-case`
- Supported data types: `string`
- Programmatic fix: Manually convert to upper case.
"""
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
logger.debug(f"Validating {value} is upper case...")
if value.upper() != value:
raise EventDetail(
key,
value,
schema,
f"Value {value} is not upper case.",
value.upper(),
)
return schema
@register_validator(name="length", data_type=["string", "list", "object"])
class ValidLength(Validator):
"""Validate that the length of value is within the expected range.
- Name for `format` attribute: `length`
- Supported data types: `string`, `list`, `object`
- Programmatic fix: If shorter than the minimum, pad with empty last elements.
If longer than the maximum, truncate.
"""
def __init__(
self, min: int = None, max: int = None, on_fail: Optional[Callable] = None
):
super().__init__(on_fail=on_fail)
self._min = int(min) if min is not None else None
self._max = int(max) if max is not None else None
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
"""Validate that a value is within a range."""
logger.debug(
f"Validating {value} is in length range {self._min} - {self._max}..."
)
if self._min is not None and len(value) < self._min:
logger.debug(f"Value {value} is less than {self._min}.")
# Repeat the last character to make the value the correct length.
corrected_value = value + value[-1] * (self._min - len(value))
raise EventDetail(
key,
value,
schema,
f"Value has length less than {self._min}. "
f"Please return a longer output, "
f"that is shorter than {self._max} characters.",
corrected_value,
)
if self._max is not None and len(value) > self._max:
logger.debug(f"Value {value} is greater than {self._max}.")
raise EventDetail(
key,
value,
schema,
f"Value has length greater than {self._max}. "
f"Please return a shorter output, "
f"that is shorter than {self._max} characters.",
value[: self._max],
)
return schema
@register_validator(name="two-words", data_type="string")
class TwoWords(Validator):
"""Validate that a value is upper case.
- Name for `format` attribute: `two-words`
- Supported data types: `string`
- Programmatic fix: Pick the first two words.
"""
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
logger.debug(f"Validating {value} is two words...")
if len(value.split()) != 2:
raise EventDetail(
key,
value,
schema,
"must be exactly two words",
" ".join(value.split()[:2]),
)
return schema
@register_validator(name="one-line", data_type="string")
class OneLine(Validator):
"""Validate that a value is a single line or sentence.
- Name for `format` attribute: `one-line`
- Supported data types: `string`
- Programmatic fix: Pick the first line.
"""
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
logger.debug(f"Validating {value} is a single line...")
if len(value.splitlines()) != 1:
raise EventDetail(
key,
value,
schema,
f"Value {value} is not a single line.",
value.splitlines()[0],
)
return schema
@register_validator(name="valid-url", data_type=["string", "url"])
class ValidUrl(Validator):
"""Validate that a value is a valid URL.
- Name for `format` attribute: `valid-url`
- Supported data types: `string`, `url`
- Programmatic fix: None
"""
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
logger.debug(f"Validating {value} is a valid URL...")
import requests
# Check that the URL exists and can be reached
try:
response = requests.get(value)
if response.status_code != 200:
raise EventDetail(
key,
value,
schema,
f"URL {value} returned status code {response.status_code}",
None,
)
except requests.exceptions.ConnectionError:
raise EventDetail(
key,
value,
schema,
f"URL {value} could not be reached",
None,
)
return schema
@register_validator(name="bug-free-python", data_type="pythoncode")
class BugFreePython(Validator):
"""Validate that there are no Python syntactic bugs in the generated code.
This validator checks for syntax errors by running `ast.parse(code)`,
and will raise an exception if there are any.
Only the packages in the `python` environment are available to the code snippet.
- Name for `format` attribute: `bug-free-python`
- Supported data types: `pythoncode`
- Programmatic fix: None
"""
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
logger.debug(f"Validating {value} is not a bug...")
# The value is a Python code snippet. We need to check for syntax errors.
try:
ast.parse(value)
except SyntaxError as e:
raise EventDetail(
key,
value,
schema,
e,
None,
)
return schema
@register_validator(name="bug-free-sql", data_type="sql")
class BugFreeSQL(Validator):
"""Validate that there are no SQL syntactic bugs in the generated code.
This is a very minimal implementation that uses the PyPI `sqlvalidator` package
to check if the SQL query is valid. You can implement a custom SQL validator
that uses a database connection to check if the query is valid.
- Name for `format` attribute: `bug-free-sql`
- Supported data types: `sql`
- Programmatic fix: None
"""
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
import sqlvalidator
sql_query = sqlvalidator.parse(value)
if not sql_query.is_valid():
raise EventDetail(
key,
value,
schema,
". ".join(sql_query.errors),
None,
)
return schema
@register_validator(name="similar-to-document", data_type="string")
class SimilarToDocument(Validator):
"""Validate that a value is similar to the document.
This validator checks if the value is similar to the document by checking
the cosine similarity between the value and the document, using an
embedding.
- Name for `format` attribute: `similar-to-document`
- Supported data types: `string`
- Programmatic fix: None
"""
def __init__(
self,
document: str,
threshold: float = 0.7,
model: str = "text-embedding-ada-002",
on_fail: Optional[Callable] = None,
):
super().__init__(on_fail=on_fail)
if not _HAS_NUMPY:
raise ImportError(
f"The {self.__class__.__name__} validator requires the numpy package.\n"
"`pip install numpy` to install it."
)
self._document = document
embedding = openai.Embedding.create(input=[document], model=model)["data"][0][
"embedding"
]
self._document_embedding = np.array(embedding)
self._model = model
self._threshold = float(threshold)
@staticmethod
def cosine_similarity(a: "np.ndarray", b: "np.ndarray") -> float:
"""Calculate the cosine similarity between two vectors.
Args:
a: The first vector.
b: The second vector.
Returns:
float: The cosine similarity between the two vectors.
"""
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
logger.debug(f"Validating {value} is similar to document...")
value_embedding = np.array(
openai.Embedding.create(input=[value], model=self._model)["data"][0][
"embedding"
]
)
similarity = SimilarToDocument.cosine_similarity(
self._document_embedding,
value_embedding,
)
if similarity < self._threshold:
raise EventDetail(
key,
value,
schema,
f"Value {value} is not similar enough to document {self._document}.",
None,
)
return schema
def to_prompt(self, with_keywords: bool = True) -> str:
return ""
@register_validator(name="is-profanity-free", data_type="string")
class IsProfanityFree(Validator):
"""Validate that a translated text does not contain profanity language.
This validator uses the `alt-profanity-check` package to check if a string
contains profanity language.
- Name for `format` attribute: `is-profanity-free`
- Supported data types: `string`
- Programmatic fix: ""
"""
def validate(self, key, value, schema) -> Dict:
try:
from profanity_check import predict
except ImportError:
raise ImportError(
"`is-profanity-free` validator requires the `alt-profanity-check`"
"package. Please install it with `pip install profanity-check`."
)
prediction = predict([value])
if prediction[0] == 1:
raise EventDetail(
key,
value,
schema,
f"{value} contains profanity. Please return a profanity-free output.",
"",
)
return schema
@register_validator(name="is-high-quality-translation", data_type="string")
class IsHighQualityTranslation(Validator):
"""Using inpiredco.critique to check if a translation is high quality.
- Name for `format` attribute: `is-high-quality-translation`
- Supported data types: `string`
- Programmatic fix: ""
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
from inspiredco.critique import Critique
self.critique = Critique(api_key=os.environ["INSPIREDCO_API_KEY"])
except ImportError:
raise ImportError(
"`is-high-quality-translation` validator requires the `inspiredco`"
"package. Please install it with `pip install inspiredco`."
)
def validate(self, key, value, schema) -> Dict:
prediction = self.critique.evaluate(
metric="comet",
config={"model": "unbabel_comet/wmt21-comet-qe-da"},
dataset=[{"source": key, "target": value}],
)
quality = prediction["examples"][0]["value"]
if quality < -0.1:
raise EventDetail(
key,
value,
schema,
f"{value} is a low quality translation."
"Please return a higher quality output.",
"",
)
return schema
@register_validator(name="ends-with", data_type="list")
class EndsWith(Validator):
"""Validate that a list ends with a given value.
- Name for `format` attribute: `ends-with`
- Supported data types: `list`
- Programmatic fix: Append the given value to the list.
"""
def __init__(self, end: str, on_fail: str = "fix"):
super().__init__(on_fail=on_fail)
self._end = end
def validate(self, key: str, value: Any, schema: Union[Dict, List]) -> Dict:
logger.debug(f"Validating {value} ends with {self._end}...")
if not value[-1] == self._end:
raise EventDetail(
key,
value,
schema,
f"{value} must end with {self._end}",
value + [self._end],
)
return schema
| [] |
2024-01-10 | rajaswa/guardrails | tests~integration_tests~test_guard.py | import openai
import pytest
import guardrails as gd
from .mock_llm_outputs import entity_extraction, openai_completion_create
@pytest.fixture(scope="module")
def rail_spec():
return """
<rail version="0.1">
<output>
<string name="dummy_string" description="Any dummy string" />
<integer name="dummy_integer" description="Any dummy integer" />
<float name="dummy_float" description="Any dummy float" />
<bool name="dummy_boolean" description="Any dummy boolean" />
<email name="dummy_email" description="Any dummy email" />
<url name="dummy_url" description="Any dummy url" />
<date name="dummy_date" description="Any dummy date" />
<time name="dummy_time" description="Any dummy time" />
<list name="dummy_list" description="Any dummy list" />
<object name="dummy_object" description="Any dummy object" />
</output>
<prompt>
Generate a JSON of dummy data, where the data types are specified by the user.
@complete_json_suffix
</prompt>
</rail>
"""
@pytest.fixture(scope="module")
def llm_output():
return """
{
"dummy_string": "Some string",
"dummy_integer": 42,
"dummy_float": 3.14,
"dummy_boolean": true,
"dummy_email": "[email protected]",
"dummy_url": "https://www.example.com",
"dummy_date": "2020-01-01",
"dummy_time": "12:00:00",
"dummy_list": ["item1", "item2", "item3"],
"dummy_object": {
"key1": "value1",
"key2": "value2"
}
}
"""
@pytest.fixture(scope="module")
def validated_output():
return {
"dummy_string": "Some string",
"dummy_integer": 42,
"dummy_float": 3.14,
"dummy_boolean": True,
"dummy_email": "[email protected]",
"dummy_url": "https://www.example.com",
"dummy_date": "2020-01-01",
"dummy_time": "12:00:00",
"dummy_list": ["item1", "item2", "item3"],
"dummy_object": {"key1": "value1", "key2": "value2"},
}
def test_rail_spec_output_parse(rail_spec, llm_output, validated_output):
"""Test that the rail_spec fixture is working."""
guard = gd.Guard.from_rail_string(rail_spec)
assert guard.parse(llm_output) == validated_output
def test_entity_extraction_with_reask(mocker):
"""Test that the entity extraction works with re-asking."""
mocker.patch(
"guardrails.llm_providers.openai_wrapper", new=openai_completion_create
)
content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_REASK)
_, final_output = guard(
llm_api=openai.Completion.create,
prompt_params={"document": content[:6000]},
num_reasks=1,
)
# Assertions are made on the guard state object.
assert final_output == entity_extraction.VALIDATED_OUTPUT_REASK_2
guard_history = guard.guard_state.most_recent_call.history
# Check that the guard state object has the correct number of re-asks.
assert len(guard_history) == 2
# For original prompt and output
assert guard_history[0].prompt == entity_extraction.COMPILED_PROMPT
assert guard_history[0].output == entity_extraction.LLM_OUTPUT
assert (
guard_history[0].validated_output == entity_extraction.VALIDATED_OUTPUT_REASK_1
)
# For re-asked prompt and output
assert guard_history[1].prompt == entity_extraction.COMPILED_PROMPT_REASK
assert guard_history[1].output == entity_extraction.LLM_OUTPUT_REASK
assert (
guard_history[1].validated_output == entity_extraction.VALIDATED_OUTPUT_REASK_2
)
def test_entity_extraction_with_noop(mocker):
"""Test that the entity extraction works with re-asking."""
mocker.patch(
"guardrails.llm_providers.openai_wrapper", new=openai_completion_create
)
content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_NOOP)
_, final_output = guard(
llm_api=openai.Completion.create,
prompt_params={"document": content[:6000]},
num_reasks=1,
)
# Assertions are made on the guard state object.
assert final_output == entity_extraction.VALIDATED_OUTPUT_NOOP
guard_history = guard.guard_state.most_recent_call.history
# Check that the guard state object has the correct number of re-asks.
assert len(guard_history) == 1
# For original prompt and output
assert guard_history[0].prompt == entity_extraction.COMPILED_PROMPT
assert guard_history[0].output == entity_extraction.LLM_OUTPUT
assert guard_history[0].validated_output == entity_extraction.VALIDATED_OUTPUT_NOOP
def test_entity_extraction_with_filter(mocker):
"""Test that the entity extraction works with re-asking."""
mocker.patch(
"guardrails.llm_providers.openai_wrapper", new=openai_completion_create
)
content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_FILTER)
_, final_output = guard(
llm_api=openai.Completion.create,
prompt_params={"document": content[:6000]},
num_reasks=1,
)
# Assertions are made on the guard state object.
assert final_output == entity_extraction.VALIDATED_OUTPUT_FILTER
guard_history = guard.guard_state.most_recent_call.history
# Check that the guard state object has the correct number of re-asks.
assert len(guard_history) == 1
# For original prompt and output
assert guard_history[0].prompt == entity_extraction.COMPILED_PROMPT
assert guard_history[0].output == entity_extraction.LLM_OUTPUT
assert (
guard_history[0].validated_output == entity_extraction.VALIDATED_OUTPUT_FILTER
)
def test_entity_extraction_with_fix(mocker):
"""Test that the entity extraction works with re-asking."""
mocker.patch(
"guardrails.llm_providers.openai_wrapper", new=openai_completion_create
)
content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_FIX)
_, final_output = guard(
llm_api=openai.Completion.create,
prompt_params={"document": content[:6000]},
num_reasks=1,
)
# Assertions are made on the guard state object.
assert final_output == entity_extraction.VALIDATED_OUTPUT_FIX
guard_history = guard.guard_state.most_recent_call.history
# Check that the guard state object has the correct number of re-asks.
assert len(guard_history) == 1
# For original prompt and output
assert guard_history[0].prompt == entity_extraction.COMPILED_PROMPT
assert guard_history[0].output == entity_extraction.LLM_OUTPUT
assert guard_history[0].validated_output == entity_extraction.VALIDATED_OUTPUT_FIX
def test_entity_extraction_with_refrain(mocker):
"""Test that the entity extraction works with re-asking."""
mocker.patch(
"guardrails.llm_providers.openai_wrapper", new=openai_completion_create
)
content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_REFRAIN)
_, final_output = guard(
llm_api=openai.Completion.create,
prompt_params={"document": content[:6000]},
num_reasks=1,
)
# Assertions are made on the guard state object.
assert final_output == entity_extraction.VALIDATED_OUTPUT_REFRAIN
guard_history = guard.guard_state.most_recent_call.history
# Check that the guard state object has the correct number of re-asks.
assert len(guard_history) == 1
# For original prompt and output
assert guard_history[0].prompt == entity_extraction.COMPILED_PROMPT
assert guard_history[0].output == entity_extraction.LLM_OUTPUT
assert (
guard_history[0].validated_output == entity_extraction.VALIDATED_OUTPUT_REFRAIN
)
| [] |
2024-01-10 | rajaswa/guardrails | guardrails~llm_providers.py | import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import openai
class PromptCallableException(Exception):
pass
@dataclass
class PromptCallable:
"""A wrapper around a callable that takes in a prompt.
Catches exceptions to let the user know clearly if the callable
failed, and how to fix it.
"""
fn: Callable
def __call__(self, *args, **kwargs):
try:
result = self.fn(*args, **kwargs)
except Exception as e:
raise PromptCallableException(
"The callable `fn` passed to `Guard(fn, ...)` failed"
f" with the following error: `{e}`. "
"Make sure that `fn` can be called as a function that"
" takes in a single prompt string "
"and returns a string."
)
if not isinstance(result, str):
raise PromptCallableException(
"The callable `fn` passed to `Guard(fn, ...)` returned"
f" a non-string value: {result}. "
"Make sure that `fn` can be called as a function that"
" takes in a single prompt string "
"and returns a string."
)
return result
def openai_wrapper(text: str, *args, **kwargs):
api_key = os.environ.get("OPENAI_API_KEY")
openai_response = openai.Completion.create(
api_key=api_key,
prompt=text,
*args,
**kwargs,
)
return openai_response["choices"][0]["text"]
def openai_chat_wrapper(text: str, *args, model="gpt-3.5-turbo", **kwargs):
if "system_prompt" in kwargs:
system_prompt = kwargs.pop("system_prompt")
else:
system_prompt = (
"You are a helpful assistant, "
"able to express yourself purely through JSON, "
"strictly and precisely adhering to the provided XML schemas."
)
api_key = os.environ.get("OPENAI_API_KEY")
openai_response = openai.ChatCompletion.create(
api_key=api_key,
model=model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": text},
],
*args,
**kwargs,
)
return openai_response["choices"][0]["message"]["content"]
def get_llm_ask(llm_api: Callable, *args, **kwargs):
if llm_api == openai.Completion.create:
fn = partial(openai_wrapper, *args, **kwargs)
elif llm_api == openai.ChatCompletion.create:
fn = partial(openai_chat_wrapper, *args, **kwargs)
else:
# Let the user pass in an arbitrary callable.
fn = partial(llm_api, *args, **kwargs)
return PromptCallable(fn=fn)
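# Illustrative usage (the model and token values are assumptions, not defaults of this module;
# the wrapper reads OPENAI_API_KEY from the environment):
# ask = get_llm_ask(openai.Completion.create, engine="text-davinci-003", max_tokens=256)
# text = ask("Say this is a test")  # returns the first completion's text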
| [
"system_prompt",
"You are a helpful assistant, able to express yourself purely through JSON, strictly and precisely adhering to the provided XML schemas."
] |
2024-01-10 | IvanIsCoding/ResuLLMe | src~prompt_engineering~__init__.py | from openai import OpenAI
import json
from stqdm import stqdm
SYSTEM_PROMPT = "You are a smart assistant to career advisors at the Harvard Extension School. You will reply with JSON only."
CV_TEXT_PLACEHOLDER = "<CV_TEXT>"
SYSTEM_TAILORING = """
You are a smart assistant to career advisors at the Harvard Extension School. Your task is to rewrite
resumes to be more brief and convincing according to the Resumes and Cover Letters guide.
"""
TAILORING_PROMPT = """
Consider the following CV:
<CV_TEXT>
Your task is to rewrite the given CV. Follow these guidelines:
- Be truthful and objective to the experience listed in the CV
- Be specific rather than general
- Rewrite job highlight items using STAR methodology (but do not mention STAR explicitly)
- Fix spelling and grammar errors
- Write to express, not impress
- Articulate and don't be flowery
- Prefer active voice over passive voice
- Do not include a summary about the candidate
Improved CV:
"""
BASICS_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface Basics {
name: string;
email: string;
phone: string;
website: string;
address: string;
}
Write the basics section according to the Basic schema. On the response, include only the JSON.
"""
EDUCATION_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface EducationItem {
institution: string;
area: string;
additionalAreas: string[];
studyType: string;
startDate: string;
endDate: string;
score: string;
location: string;
}
interface Education {
education: EducationItem[];
}
Write the education section according to the Education schema. On the response, include only the JSON.
"""
AWARDS_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface AwardItem {
title: string;
date: string;
awarder: string;
summary: string;
}
interface Awards {
awards: AwardItem[];
}
Write the awards section according to the Awards schema. Include only the awards section. On the response, include only the JSON.
"""
PROJECTS_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface ProjectItem {
name: string;
description: string;
keywords: string[];
url: string;
}
interface Projects {
projects: ProjectItem[];
}
Write the projects section according to the Projects schema. Include all projects, but only the ones present in the CV. On the response, include only the JSON.
"""
SKILLS_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
type HardSkills = "Programming Languages" | "Tools" | "Frameworks" | "Computer Proficiency";
type SoftSkills = "Team Work" | "Communication" | "Leadership" | "Problem Solving" | "Creativity";
type OtherSkills = string;
Now consider the following TypeScript Interface for the JSON schema:
interface SkillItem {
name: HardSkills | SoftSkills | OtherSkills;
keywords: string[];
}
interface Skills {
skills: SkillItem[];
}
Write the skills section according to the Skills schema. Include only up to the top 4 skill names that are present in the CV and related with the education and work experience. On the response, include only the JSON.
"""
WORK_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface WorkItem {
company: string;
position: string;
startDate: string;
endDate: string;
location: string;
highlights: string[];
}
interface Work {
work: WorkItem[];
}
Write a work section for the candidate according to the Work schema. Include only the work experience and not the project experience. For each work experience, provide a company name, position name, start and end date, and bullet point for the highlights. Follow the Harvard Extension School Resume guidelines and phrase the highlights with the STAR methodology
"""
def generate_json_resume(cv_text, api_key, model="gpt-3.5-turbo"):
"""Generate a JSON resume from a CV text"""
sections = []
client = OpenAI(api_key=api_key)
for prompt in stqdm(
[
BASICS_PROMPT,
EDUCATION_PROMPT,
AWARDS_PROMPT,
PROJECTS_PROMPT,
SKILLS_PROMPT,
WORK_PROMPT,
],
desc="This may take a while...",
):
filled_prompt = prompt.replace(CV_TEXT_PLACEHOLDER, cv_text)
response = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": filled_prompt},
],
)
try:
answer = response.choices[0].message.content
answer = json.loads(answer)
if prompt == BASICS_PROMPT and "basics" not in answer:
answer = {"basics": answer} # common mistake GPT makes
sections.append(answer)
except Exception as e:
print(e)
final_json = {}
for section in sections:
final_json.update(section)
return final_json
def tailor_resume(cv_text, api_key, model="gpt-3.5-turbo"):
filled_prompt = TAILORING_PROMPT.replace("<CV_TEXT>", cv_text)
client = OpenAI(api_key=api_key)
try:
response = client.chat.completions.create(
            model=model,
messages=[
{"role": "system", "content": SYSTEM_TAILORING},
{"role": "user", "content": filled_prompt},
],
)
answer = response.choices[0].message.content
return answer
except Exception as e:
print(e)
print("Failed to tailor resume.")
return cv_text
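# Usage sketch (illustrative only); the sample CV text and the OPENAI_API_KEY
# environment variable are assumptions, not values defined in this module.
if __name__ == "__main__":
    import os

    sample_cv = "John Doe\njohn@example.com\nSoftware Engineer at Acme Corp, 2019-2023."
    key = os.environ.get("OPENAI_API_KEY", "")
    if key:
        improved = tailor_resume(sample_cv, key)
        print(improved)
        # resume_json = generate_json_resume(improved, key)  # builds the full JSON resume section by section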
| [
"\nYou are a smart assistant to career advisors at the Harvard Extension School. Your take is to rewrite\nresumes to be more brief and convincing according to the Resumes and Cover Letters guide.\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface EducationItem {\n institution: string;\n area: string;\n additionalAreas: string[];\n studyType: string;\n startDate: string;\n endDate: string;\n score: string;\n location: string;\n}\n\ninterface Education {\n education: EducationItem[];\n}\n\n\nWrite the education section according to the Education schema. On the response, include only the JSON.\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface WorkItem {\n company: string;\n position: string;\n startDate: string;\n endDate: string;\n location: string;\n highlights: string[];\n}\n\ninterface Work {\n work: WorkItem[];\n}\n\nWrite a work section for the candidate according to the Work schema. Include only the work experience and not the project experience. For each work experience, provide a company name, position name, start and end date, and bullet point for the highlights. Follow the Harvard Extension School Resume guidelines and phrase the highlights with the STAR methodology\n",
"You are a smart assistant to career advisors at the Harvard Extension School. You will reply with JSON only.",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface Basics {\n name: string;\n email: string;\n phone: string;\n website: string;\n address: string;\n}\n\nWrite the basics section according to the Basic schema. On the response, include only the JSON.\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface AwardItem {\n title: string;\n date: string;\n awarder: string;\n summary: string;\n}\n\ninterface Awards {\n awards: AwardItem[];\n}\n\nWrite the awards section according to the Awards schema. Include only the awards section. On the response, include only the JSON.\n",
"\nConsider the following CV:\n<CV_TEXT>\n\nYour task is to rewrite the given CV. Follow these guidelines:\n- Be truthful and objective to the experience listed in the CV\n- Be specific rather than general\n- Rewrite job highlight items using STAR methodology (but do not mention STAR explicitly)\n- Fix spelling and grammar errors\n- Writte to express not impress\n- Articulate and don't be flowery\n- Prefer active voice over passive voice\n- Do not include a summary about the candidate\n\nImproved CV:\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface ProjectItem {\n name: string;\n description: string;\n keywords: string[];\n url: string;\n}\n\ninterface Projects {\n projects: ProjectItem[];\n}\n\nWrite the projects section according to the Projects schema. Include all projects, but only the ones present in the CV. On the response, include only the JSON.\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\ntype HardSkills = \"Programming Languages\" | \"Tools\" | \"Frameworks\" | \"Computer Proficiency\";\ntype SoftSkills = \"Team Work\" | \"Communication\" | \"Leadership\" | \"Problem Solving\" | \"Creativity\";\ntype OtherSkills = string;\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface SkillItem {\n name: HardSkills | SoftSkills | OtherSkills;\n keywords: string[];\n}\n\ninterface Skills {\n skills: SkillItem[];\n}\n\nWrite the skills section according to the Skills schema. Include only up to the top 4 skill names that are present in the CV and related with the education and work experience. On the response, include only the JSON.\n"
] |
2024-01-10 | k-sml/AIstudent | backend~app~api~endpoints~execute_gpt_api.py | from fastapi import APIRouter
from crud.topic_crud import *
from crud.answer_crud import *
from schemas.execute_gpt_api_schema import Response
from typing import List
import os
from schemas.topic_schema import Topic
from openai import OpenAI
from dotenv import load_dotenv
load_dotenv()
router = APIRouter()
@router.post("/execute/new/{topic_id}", tags=["Execute"])
def first_execute_gpt_api(topic:Topic):
# topic = select_topic(topic_id)
client = OpenAI(api_key=os.getenv('OPEN_API_KEY'))
res = client.chat.completions.create(
model = "gpt-4",
messages = [
{"role": "system", "content": topic.first_header},
{"role": "user", "content": topic.first_prompt},
]
)
return res
@router.post("/execute/{answer_id}", tags=["Execute"])
def execute_openai_api(answer_id: str, res: List[Response]):
answer = get_answer(answer_id)
client = OpenAI(api_key=os.getenv('OPEN_API_KEY'))
res.append(
{"role": "user", "content": answer.content}
)
res = client.chat.completions.create(
model = "gpt-4",
messages = res
)
return res
@router.post("/execute/eval/{topic_id}", tags=["Execute"])
def execute_evaluation(res: List[Response]):
client = OpenAI(api_key=os.getenv('OPEN_API_KEY'))
res.append(
{"role": "system", "content": "ここまでのやりとりを踏まえて相手のトピックに対する理解度を0%から100%の間で決めてください。%を抜いた数値を出力してください。"}
)
res = client.chat.completions.create(
model = "gpt-4",
messages = res
)
return res | [
"ここまでのやりとりを踏まえて相手のトピックに対する理解度を0%から100%の間で決めてください。%を抜いた数値を出力してください。"
] |
2024-01-10 | Yfan719/Langchain-Chatchat | startup.py | from multiprocessing import Process, Queue
import multiprocessing as mp
import subprocess
import sys
import os
from pprint import pprint
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from configs.model_config import EMBEDDING_DEVICE, EMBEDDING_MODEL, llm_model_dict, LLM_MODEL, LLM_DEVICE, LOG_PATH, \
logger
from configs.server_config import (WEBUI_SERVER, API_SERVER, OPEN_CROSS_DOMAIN, FSCHAT_CONTROLLER, FSCHAT_MODEL_WORKERS,
FSCHAT_OPENAI_API, fschat_controller_address, fschat_model_worker_address,
fschat_openai_api_address, )
from server.utils import MakeFastAPIOffline, FastAPI
import argparse
from typing import Tuple, List
from configs import VERSION
def set_httpx_timeout(timeout=60.0):
import httpx
httpx._config.DEFAULT_TIMEOUT_CONFIG.connect = timeout
httpx._config.DEFAULT_TIMEOUT_CONFIG.read = timeout
httpx._config.DEFAULT_TIMEOUT_CONFIG.write = timeout
def create_controller_app(
dispatch_method: str,
) -> FastAPI:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.controller import app, Controller
controller = Controller(dispatch_method)
sys.modules["fastchat.serve.controller"].controller = controller
MakeFastAPIOffline(app)
app.title = "FastChat Controller"
return app
def create_model_worker_app(**kwargs) -> Tuple[argparse.ArgumentParser, FastAPI]:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.model_worker import app, GptqConfig, AWQConfig, ModelWorker, worker_id
import argparse
import threading
import fastchat.serve.model_worker
# workaround to make program exit with Ctrl+c
# it should be deleted after pr is merged by fastchat
def _new_init_heart_beat(self):
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=fastchat.serve.model_worker.heart_beat_worker, args=(self,), daemon=True,
)
self.heart_beat_thread.start()
ModelWorker.init_heart_beat = _new_init_heart_beat
parser = argparse.ArgumentParser()
args = parser.parse_args([])
# default args. should be deleted after pr is merged by fastchat
args.gpus = None
args.max_gpu_memory = "20GiB"
args.load_8bit = False
args.cpu_offloading = None
args.gptq_ckpt = None
args.gptq_wbits = 16
args.gptq_groupsize = -1
args.gptq_act_order = False
args.awq_ckpt = None
args.awq_wbits = 16
args.awq_groupsize = -1
args.num_gpus = 1
args.model_names = []
args.conv_template = None
args.limit_worker_concurrency = 5
args.stream_interval = 2
args.no_register = False
for k, v in kwargs.items():
setattr(args, k, v)
if args.gpus:
if args.num_gpus is None:
args.num_gpus = len(args.gpus.split(','))
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
gptq_config = GptqConfig(
ckpt=args.gptq_ckpt or args.model_path,
wbits=args.gptq_wbits,
groupsize=args.gptq_groupsize,
act_order=args.gptq_act_order,
)
awq_config = AWQConfig(
ckpt=args.awq_ckpt or args.model_path,
wbits=args.awq_wbits,
groupsize=args.awq_groupsize,
)
worker = ModelWorker(
controller_addr=args.controller_address,
worker_addr=args.worker_address,
worker_id=worker_id,
model_path=args.model_path,
model_names=args.model_names,
limit_worker_concurrency=args.limit_worker_concurrency,
no_register=args.no_register,
device=args.device,
num_gpus=args.num_gpus,
max_gpu_memory=args.max_gpu_memory,
load_8bit=args.load_8bit,
cpu_offloading=args.cpu_offloading,
gptq_config=gptq_config,
awq_config=awq_config,
stream_interval=args.stream_interval,
conv_template=args.conv_template,
)
sys.modules["fastchat.serve.model_worker"].worker = worker
sys.modules["fastchat.serve.model_worker"].args = args
sys.modules["fastchat.serve.model_worker"].gptq_config = gptq_config
MakeFastAPIOffline(app)
app.title = f"FastChat LLM Server ({LLM_MODEL})"
return app
def create_openai_api_app(
controller_address: str,
api_keys: List = [],
) -> FastAPI:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.openai_api_server import app, CORSMiddleware, app_settings
app.add_middleware(
CORSMiddleware,
allow_credentials=True,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
app_settings.controller_address = controller_address
app_settings.api_keys = api_keys
MakeFastAPIOffline(app)
    app.title = "FastChat OpenAI API Server"
return app
def _set_app_seq(app: FastAPI, q: Queue, run_seq: int):
if run_seq == 1:
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
q.put(run_seq)
elif run_seq > 1:
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
while True:
no = q.get()
if no != run_seq - 1:
q.put(no)
else:
break
q.put(run_seq)
def run_controller(q: Queue, run_seq: int = 1):
import uvicorn
app = create_controller_app(FSCHAT_CONTROLLER.get("dispatch_method"))
_set_app_seq(app, q, run_seq)
host = FSCHAT_CONTROLLER["host"]
port = FSCHAT_CONTROLLER["port"]
uvicorn.run(app, host=host, port=port)
def run_model_worker(
model_name: str = LLM_MODEL,
controller_address: str = "",
q: Queue = None,
run_seq: int = 2,
):
import uvicorn
kwargs = FSCHAT_MODEL_WORKERS[LLM_MODEL].copy()
host = kwargs.pop("host")
port = kwargs.pop("port")
model_path = llm_model_dict[model_name].get("local_model_path", "")
kwargs["model_path"] = model_path
kwargs["model_names"] = [model_name]
kwargs["controller_address"] = controller_address or fschat_controller_address()
kwargs["worker_address"] = fschat_model_worker_address()
app = create_model_worker_app(**kwargs)
_set_app_seq(app, q, run_seq)
uvicorn.run(app, host=host, port=port)
def run_openai_api(q: Queue, run_seq: int = 3):
import uvicorn
controller_addr = fschat_controller_address()
app = create_openai_api_app(controller_addr) # todo: not support keys yet.
_set_app_seq(app, q, run_seq)
host = FSCHAT_OPENAI_API["host"]
port = FSCHAT_OPENAI_API["port"]
uvicorn.run(app, host=host, port=port)
def run_api_server(q: Queue, run_seq: int = 4):
from server.api import create_app
import uvicorn
app = create_app()
_set_app_seq(app, q, run_seq)
host = API_SERVER["host"]
port = API_SERVER["port"]
uvicorn.run(app, host=host, port=port)
def run_webui(q: Queue, run_seq: int = 5):
host = WEBUI_SERVER["host"]
port = WEBUI_SERVER["port"]
while True:
no = q.get()
if no != run_seq - 1:
q.put(no)
else:
break
q.put(run_seq)
p = subprocess.Popen(["streamlit", "run", "webui.py",
"--server.address", host,
"--server.port", str(port)])
p.wait()
def parse_args() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--all-webui",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers, run api.py and webui.py",
dest="all_webui",
)
parser.add_argument(
"--all-api",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers, run api.py",
dest="all_api",
)
parser.add_argument(
"--llm-api",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers",
dest="llm_api",
)
parser.add_argument(
"-o",
"--openai-api",
action="store_true",
help="run fastchat's controller/openai_api servers",
dest="openai_api",
)
parser.add_argument(
"-m",
"--model-worker",
action="store_true",
help="run fastchat's model_worker server with specified model name. specify --model-name if not using default LLM_MODEL",
dest="model_worker",
)
parser.add_argument(
"-n",
"--model-name",
type=str,
default=LLM_MODEL,
help="specify model name for model worker.",
dest="model_name",
)
parser.add_argument(
"-c",
"--controller",
type=str,
help="specify controller address the worker is registered to. default is server_config.FSCHAT_CONTROLLER",
dest="controller_address",
)
parser.add_argument(
"--api",
action="store_true",
help="run api.py server",
dest="api",
)
parser.add_argument(
"-w",
"--webui",
action="store_true",
help="run webui.py server",
dest="webui",
)
args = parser.parse_args()
return args
def dump_server_info(after_start=False):
import platform
import langchain
import fastchat
from configs.server_config import api_address, webui_address
print("\n\n")
print("=" * 30 + "Langchain-Chatchat Configuration" + "=" * 30)
print(f"操作系统:{platform.platform()}.")
print(f"python版本:{sys.version}")
print(f"项目版本:{VERSION}")
print(f"langchain版本:{langchain.__version__}. fastchat版本:{fastchat.__version__}")
print("\n")
print(f"当前LLM模型:{LLM_MODEL} @ {LLM_DEVICE}")
pprint(llm_model_dict[LLM_MODEL])
print(f"当前Embbedings模型: {EMBEDDING_MODEL} @ {EMBEDDING_DEVICE}")
if after_start:
print("\n")
print(f"服务端运行信息:")
if args.openai_api:
print(f" OpenAI API Server: {fschat_openai_api_address()}/v1")
print(" (请确认llm_model_dict中配置的api_base_url与上面地址一致。)")
if args.api:
print(f" Chatchat API Server: {api_address()}")
if args.webui:
print(f" Chatchat WEBUI Server: {webui_address()}")
print("=" * 30 + "Langchain-Chatchat Configuration" + "=" * 30)
print("\n\n")
if __name__ == "__main__":
import time
mp.set_start_method("spawn")
queue = Queue()
args = parse_args()
if args.all_webui:
args.openai_api = True
args.model_worker = True
args.api = True
args.webui = True
elif args.all_api:
args.openai_api = True
args.model_worker = True
args.api = True
args.webui = False
elif args.llm_api:
args.openai_api = True
args.model_worker = True
args.api = False
args.webui = False
dump_server_info()
logger.info(f"正在启动服务:")
logger.info(f"如需查看 llm_api 日志,请前往 {LOG_PATH}")
processes = {}
if args.openai_api:
process = Process(
target=run_controller,
name=f"controller({os.getpid()})",
args=(queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["controller"] = process
process = Process(
target=run_openai_api,
name=f"openai_api({os.getpid()})",
args=(queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["openai_api"] = process
if args.model_worker:
process = Process(
target=run_model_worker,
name=f"model_worker({os.getpid()})",
args=(args.model_name, args.controller_address, queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["model_worker"] = process
if args.api:
process = Process(
target=run_api_server,
            name=f"API Server({os.getpid()})",
args=(queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["api"] = process
if args.webui:
process = Process(
target=run_webui,
            name=f"WEBUI Server({os.getpid()})",
args=(queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["webui"] = process
try:
# log infors
while True:
no = queue.get()
if no == len(processes):
time.sleep(0.5)
dump_server_info(True)
break
else:
queue.put(no)
if model_worker_process := processes.get("model_worker"):
model_worker_process.join()
for name, process in processes.items():
if name != "model_worker":
process.join()
except:
if model_worker_process := processes.get("model_worker"):
model_worker_process.terminate()
for name, process in processes.items():
if name != "model_worker":
process.terminate()
# Example of calling the OpenAI-compatible API after the services have started:
# import openai
# openai.api_key = "EMPTY" # Not support yet
# openai.api_base = "http://localhost:8888/v1"
# model = "chatglm2-6b"
# # create a chat completion
# completion = openai.ChatCompletion.create(
# model=model,
# messages=[{"role": "user", "content": "Hello! What is your name?"}]
# )
# # print the completion
# print(completion.choices[0].message.content)
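# Command-line usage sketch (illustrative only), based on the argparse flags defined above:
#   python startup.py -a          # controller + openai_api + model_worker + api.py + webui.py
#   python startup.py --all-api   # same as -a but without the webui
#   python startup.py --llm-api   # only fastchat controller/openai_api/model_worker servers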
| [] |
2024-01-10 | Yfan719/Langchain-Chatchat | server~knowledge_base~kb_service~faiss_kb_service.py | import os
import shutil
from configs.model_config import (
KB_ROOT_PATH,
CACHED_VS_NUM,
EMBEDDING_MODEL,
EMBEDDING_DEVICE,
SCORE_THRESHOLD
)
from server.knowledge_base.kb_service.base import KBService, SupportedVSType
from functools import lru_cache
from server.knowledge_base.utils import get_vs_path, load_embeddings, KnowledgeFile
from langchain.vectorstores import FAISS
from langchain.embeddings.base import Embeddings
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from typing import List
from langchain.docstore.document import Document
from server.utils import torch_gc
# make HuggingFaceEmbeddings hashable
def _embeddings_hash(self):
return hash(self.model_name)
HuggingFaceEmbeddings.__hash__ = _embeddings_hash
_VECTOR_STORE_TICKS = {}
@lru_cache(CACHED_VS_NUM)
def load_vector_store(
knowledge_base_name: str,
embed_model: str = EMBEDDING_MODEL,
embed_device: str = EMBEDDING_DEVICE,
embeddings: Embeddings = None,
tick: int = 0, # tick will be changed by upload_doc etc. and make cache refreshed.
):
print(f"loading vector store in '{knowledge_base_name}'.")
vs_path = get_vs_path(knowledge_base_name)
if embeddings is None:
embeddings = load_embeddings(embed_model, embed_device)
if not os.path.exists(vs_path):
os.makedirs(vs_path)
if "index.faiss" in os.listdir(vs_path):
search_index = FAISS.load_local(vs_path, embeddings, normalize_L2=True)
else:
# create an empty vector store
doc = Document(page_content="init", metadata={})
search_index = FAISS.from_documents([doc], embeddings, normalize_L2=True)
ids = [k for k, v in search_index.docstore._dict.items()]
search_index.delete(ids)
search_index.save_local(vs_path)
if tick == 0: # vector store is loaded first time
_VECTOR_STORE_TICKS[knowledge_base_name] = 0
return search_index
def refresh_vs_cache(kb_name: str):
"""
make vector store cache refreshed when next loading
"""
_VECTOR_STORE_TICKS[kb_name] = _VECTOR_STORE_TICKS.get(kb_name, 0) + 1
print(f"知识库 {kb_name} 缓存刷新:{_VECTOR_STORE_TICKS[kb_name]}")
class FaissKBService(KBService):
vs_path: str
kb_path: str
def vs_type(self) -> str:
return SupportedVSType.FAISS
@staticmethod
def get_vs_path(knowledge_base_name: str):
return os.path.join(FaissKBService.get_kb_path(knowledge_base_name), "vector_store")
@staticmethod
def get_kb_path(knowledge_base_name: str):
return os.path.join(KB_ROOT_PATH, knowledge_base_name)
def do_init(self):
self.kb_path = FaissKBService.get_kb_path(self.kb_name)
self.vs_path = FaissKBService.get_vs_path(self.kb_name)
def do_create_kb(self):
if not os.path.exists(self.vs_path):
os.makedirs(self.vs_path)
load_vector_store(self.kb_name)
def do_drop_kb(self):
self.clear_vs()
shutil.rmtree(self.kb_path)
def do_search(self,
query: str,
top_k: int,
score_threshold: float = SCORE_THRESHOLD,
embeddings: Embeddings = None,
) -> List[Document]:
search_index = load_vector_store(self.kb_name,
embeddings=embeddings,
tick=_VECTOR_STORE_TICKS.get(self.kb_name))
docs = search_index.similarity_search_with_score(query, k=top_k, score_threshold=score_threshold)
return docs
def do_add_doc(self,
docs: List[Document],
embeddings: Embeddings,
**kwargs,
):
vector_store = load_vector_store(self.kb_name,
embeddings=embeddings,
tick=_VECTOR_STORE_TICKS.get(self.kb_name, 0))
vector_store.add_documents(docs)
torch_gc()
if not kwargs.get("not_refresh_vs_cache"):
vector_store.save_local(self.vs_path)
refresh_vs_cache(self.kb_name)
def do_delete_doc(self,
kb_file: KnowledgeFile,
**kwargs):
embeddings = self._load_embeddings()
vector_store = load_vector_store(self.kb_name,
embeddings=embeddings,
tick=_VECTOR_STORE_TICKS.get(self.kb_name, 0))
ids = [k for k, v in vector_store.docstore._dict.items() if v.metadata["source"] == kb_file.filepath]
if len(ids) == 0:
return None
vector_store.delete(ids)
if not kwargs.get("not_refresh_vs_cache"):
vector_store.save_local(self.vs_path)
refresh_vs_cache(self.kb_name)
return True
def do_clear_vs(self):
shutil.rmtree(self.vs_path)
os.makedirs(self.vs_path)
refresh_vs_cache(self.kb_name)
def exist_doc(self, file_name: str):
if super().exist_doc(file_name):
return "in_db"
content_path = os.path.join(self.kb_path, "content")
if os.path.isfile(os.path.join(content_path, file_name)):
return "in_folder"
else:
return False
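# Minimal local test sketch (illustrative only), mirroring the `__main__` blocks
# of the Milvus/PG services; the knowledge base name and sample file are assumptions.
if __name__ == '__main__':
    from server.db.base import Base, engine

    Base.metadata.create_all(bind=engine)
    faissService = FaissKBService("test")
    faissService.create_kb()
    faissService.add_doc(KnowledgeFile("README.md", "test"))
    faissService.delete_doc(KnowledgeFile("README.md", "test"))
    faissService.drop_kb()
    print(faissService.search_docs("test query"))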
| [] |
2024-01-10 | Yfan719/Langchain-Chatchat | server~knowledge_base~kb_service~milvus_kb_service.py | from typing import List
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import Milvus
from configs.model_config import SCORE_THRESHOLD, kbs_config
from server.knowledge_base.kb_service.base import KBService, SupportedVSType
from server.knowledge_base.utils import KnowledgeFile
class MilvusKBService(KBService):
milvus: Milvus
@staticmethod
def get_collection(milvus_name):
from pymilvus import Collection
return Collection(milvus_name)
@staticmethod
def search(milvus_name, content, limit=3):
search_params = {
"metric_type": "L2",
"params": {"nprobe": 10},
}
c = MilvusKBService.get_collection(milvus_name)
return c.search(content, "embeddings", search_params, limit=limit, output_fields=["content"])
def do_create_kb(self):
pass
def vs_type(self) -> str:
return SupportedVSType.MILVUS
def _load_milvus(self, embeddings: Embeddings = None):
if embeddings is None:
embeddings = self._load_embeddings()
self.milvus = Milvus(embedding_function=embeddings,
collection_name=self.kb_name, connection_args=kbs_config.get("milvus"))
def do_init(self):
self._load_milvus()
def do_drop_kb(self):
self.milvus.col.drop()
def do_search(self, query: str, top_k: int, embeddings: Embeddings):
# todo: support score threshold
self._load_milvus(embeddings=embeddings)
return self.milvus.similarity_search_with_score(query, top_k)
def add_doc(self, kb_file: KnowledgeFile):
"""
        Add a file to the knowledge base
"""
docs = kb_file.file2text()
self.milvus.add_documents(docs)
from server.db.repository.knowledge_file_repository import add_doc_to_db
status = add_doc_to_db(kb_file)
return status
def do_add_doc(self, docs: List[Document], embeddings: Embeddings):
pass
def do_delete_doc(self, kb_file: KnowledgeFile):
filepath = kb_file.filepath.replace('\\', '\\\\')
delete_list = [item.get("pk") for item in
self.milvus.col.query(expr=f'source == "{filepath}"', output_fields=["pk"])]
self.milvus.col.delete(expr=f'pk in {delete_list}')
def do_clear_vs(self):
self.milvus.col.drop()
if __name__ == '__main__':
    # For testing: create the database tables
from server.db.base import Base, engine
Base.metadata.create_all(bind=engine)
milvusService = MilvusKBService("test")
milvusService.add_doc(KnowledgeFile("README.md", "test"))
milvusService.delete_doc(KnowledgeFile("README.md", "test"))
milvusService.do_drop_kb()
print(milvusService.search_docs("测试"))
| [] |
2024-01-10 | Yfan719/Langchain-Chatchat | server~knowledge_base~kb_service~pg_kb_service.py | from typing import List
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import PGVector
from sqlalchemy import text
from configs.model_config import EMBEDDING_DEVICE, kbs_config
from server.knowledge_base.kb_service.base import SupportedVSType, KBService
from server.knowledge_base.utils import load_embeddings, KnowledgeFile
class PGKBService(KBService):
pg_vector: PGVector
def _load_pg_vector(self, embedding_device: str = EMBEDDING_DEVICE, embeddings: Embeddings = None):
_embeddings = embeddings
if _embeddings is None:
_embeddings = load_embeddings(self.embed_model, embedding_device)
self.pg_vector = PGVector(embedding_function=_embeddings,
collection_name=self.kb_name,
connection_string=kbs_config.get("pg").get("connection_uri"))
def do_init(self):
self._load_pg_vector()
def do_create_kb(self):
pass
def vs_type(self) -> str:
return SupportedVSType.PG
def do_drop_kb(self):
with self.pg_vector.connect() as connect:
connect.execute(text(f'''
                -- Delete the rows in langchain_pg_embedding that reference this collection in langchain_pg_collection
DELETE FROM langchain_pg_embedding
WHERE collection_id IN (
SELECT uuid FROM langchain_pg_collection WHERE name = '{self.kb_name}'
);
                -- Delete the collection's own row from langchain_pg_collection
DELETE FROM langchain_pg_collection WHERE name = '{self.kb_name}';
'''))
connect.commit()
def do_search(self, query: str, top_k: int, embeddings: Embeddings):
# todo: support score threshold
self._load_pg_vector(embeddings=embeddings)
return self.pg_vector.similarity_search_with_score(query, top_k)
def add_doc(self, kb_file: KnowledgeFile):
"""
        Add a file to the knowledge base
"""
docs = kb_file.file2text()
self.pg_vector.add_documents(docs)
from server.db.repository.knowledge_file_repository import add_doc_to_db
status = add_doc_to_db(kb_file)
return status
def do_add_doc(self, docs: List[Document], embeddings: Embeddings):
pass
def do_delete_doc(self, kb_file: KnowledgeFile):
with self.pg_vector.connect() as connect:
filepath = kb_file.filepath.replace('\\', '\\\\')
connect.execute(
text(
''' DELETE FROM langchain_pg_embedding WHERE cmetadata::jsonb @> '{"source": "filepath"}'::jsonb;'''.replace(
"filepath", filepath)))
connect.commit()
def do_clear_vs(self):
self.pg_vector.delete_collection()
if __name__ == '__main__':
from server.db.base import Base, engine
Base.metadata.create_all(bind=engine)
pGKBService = PGKBService("test")
pGKBService.create_kb()
pGKBService.add_doc(KnowledgeFile("README.md", "test"))
pGKBService.delete_doc(KnowledgeFile("README.md", "test"))
pGKBService.drop_kb()
print(pGKBService.search_docs("测试"))
| [] |
2024-01-10 | Yfan719/Langchain-Chatchat | server~chat~knowledge_base_chat.py | from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import (llm_model_dict, LLM_MODEL, PROMPT_TEMPLATE,
VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD)
from server.chat.utils import wrap_done
from server.utils import BaseResponse
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, List, Optional
import asyncio
from langchain.prompts.chat import ChatPromptTemplate
from server.chat.utils import History
from server.knowledge_base.kb_service.base import KBService, KBServiceFactory
import json
import os
from urllib.parse import urlencode
from server.knowledge_base.kb_doc_api import search_docs
def knowledge_base_chat(query: str = Body(..., description="用户输入", examples=["你好"]),
knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
top_k: int = Body(VECTOR_SEARCH_TOP_K, description="匹配向量数"),
score_threshold: float = Body(SCORE_THRESHOLD, description="知识库匹配相关度阈值,取值范围在0-1之间,SCORE越小,相关度越高,取到1相当于不筛选,建议设置在0.5左右", ge=0, le=1),
history: List[History] = Body([],
description="历史对话",
examples=[[
{"role": "user",
"content": "我们来玩成语接龙,我先来,生龙活虎"},
{"role": "assistant",
"content": "虎头虎脑"}]]
),
stream: bool = Body(False, description="流式输出"),
local_doc_url: bool = Body(False, description="知识文件返回本地路径(true)或URL(false)"),
request: Request = None,
):
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
history = [History(**h) if isinstance(h, dict) else h for h in history]
async def knowledge_base_chat_iterator(query: str,
kb: KBService,
top_k: int,
history: Optional[List[History]],
) -> AsyncIterable[str]:
callback = AsyncIteratorCallbackHandler()
model = ChatOpenAI(
streaming=True,
verbose=True,
callbacks=[callback],
openai_api_key=llm_model_dict[LLM_MODEL]["api_key"],
openai_api_base=llm_model_dict[LLM_MODEL]["api_base_url"],
model_name=LLM_MODEL
)
docs = search_docs(query, knowledge_base_name, top_k, score_threshold)
context = "\n".join([doc.page_content for doc in docs])
chat_prompt = ChatPromptTemplate.from_messages(
[i.to_msg_tuple() for i in history] + [("human", PROMPT_TEMPLATE)])
chain = LLMChain(prompt=chat_prompt, llm=model)
# Begin a task that runs in the background.
task = asyncio.create_task(wrap_done(
chain.acall({"context": context, "question": query}),
callback.done),
)
source_documents = []
for inum, doc in enumerate(docs):
filename = os.path.split(doc.metadata["source"])[-1]
if local_doc_url:
url = "file://" + doc.metadata["source"]
else:
parameters = urlencode({"knowledge_base_name": knowledge_base_name, "file_name":filename})
url = f"{request.base_url}knowledge_base/download_doc?" + parameters
text = f"""出处 [{inum + 1}] [{filename}]({url}) \n\n{doc.page_content}\n\n"""
source_documents.append(text)
if stream:
async for token in callback.aiter():
# Use server-sent-events to stream the response
yield json.dumps({"answer": token,
"docs": source_documents},
ensure_ascii=False)
else:
answer = ""
async for token in callback.aiter():
answer += token
yield json.dumps({"answer": answer,
"docs": source_documents},
ensure_ascii=False)
await task
return StreamingResponse(knowledge_base_chat_iterator(query, kb, top_k, history),
media_type="text/event-stream")
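# Client-side usage sketch (illustrative only). The host/port and route path are
# assumptions — use the API address reported by the project's startup script;
# the payload values come from the Body(...) examples above.
#
#   import requests
#   resp = requests.post(
#       "http://127.0.0.1:7861/chat/knowledge_base_chat",
#       json={"query": "你好", "knowledge_base_name": "samples", "stream": False},
#   )
#   print(resp.text)  # a JSON string with "answer" and "docs"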
| [
"虎头虎脑",
"human",
"我们来玩成语接龙,我先来,生龙活虎"
] |
2024-01-10 | Yfan719/Langchain-Chatchat | server~chat~github_chat.py | from langchain.document_loaders.github import GitHubIssuesLoader
from fastapi import Body
from fastapi.responses import StreamingResponse
from configs.model_config import (llm_model_dict, LLM_MODEL, SEARCH_ENGINE_TOP_K, PROMPT_TEMPLATE)
from server.chat.utils import wrap_done
from server.utils import BaseResponse
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable
import asyncio
from langchain.prompts.chat import ChatPromptTemplate
from typing import List, Optional, Literal
from server.chat.utils import History
from langchain.docstore.document import Document
import json
import os
from functools import lru_cache
from datetime import datetime
GITHUB_PERSONAL_ACCESS_TOKEN = os.environ.get("GITHUB_PERSONAL_ACCESS_TOKEN")
@lru_cache(1)
def load_issues(tick: str):
'''
set tick to a periodic value to refresh cache
'''
loader = GitHubIssuesLoader(
repo="chatchat-space/langchain-chatglm",
access_token=GITHUB_PERSONAL_ACCESS_TOKEN,
include_prs=True,
state="all",
)
docs = loader.load()
return docs
def github_chat(query: str = Body(..., description="用户输入", examples=["本项目最新进展"]),
top_k: int = Body(SEARCH_ENGINE_TOP_K, description="检索结果数量"),
include_prs: bool = Body(True, description="是否包含PR"),
state: Literal['open', 'closed', 'all'] = Body(None, description="Issue/PR状态"),
creator: str = Body(None, description="创建者"),
history: List[History] = Body([],
description="历史对话",
examples=[[
{"role": "user",
"content": "介绍一下本项目"},
{"role": "assistant",
"content": "LangChain-Chatchat (原 Langchain-ChatGLM): 基于 Langchain 与 ChatGLM 等大语言模型的本地知识库问答应用实现。"}]]
),
stream: bool = Body(False, description="流式输出"),
):
if GITHUB_PERSONAL_ACCESS_TOKEN is None:
return BaseResponse(code=404, msg=f"使用本功能需要 GITHUB_PERSONAL_ACCESS_TOKEN")
async def chat_iterator(query: str,
search_engine_name: str,
top_k: int,
history: Optional[List[History]],
) -> AsyncIterable[str]:
callback = AsyncIteratorCallbackHandler()
model = ChatOpenAI(
streaming=True,
verbose=True,
callbacks=[callback],
openai_api_key=llm_model_dict[LLM_MODEL]["api_key"],
openai_api_base=llm_model_dict[LLM_MODEL]["api_base_url"],
model_name=LLM_MODEL
)
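        # NOTE: `lookup_search_engine` below is neither defined nor imported in this
        # module; it presumably lives in the project's search-engine chat module
        # (or should be replaced by a search over `load_issues()`), so an import is
        # required before this endpoint can run.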
docs = lookup_search_engine(query, search_engine_name, top_k)
context = "\n".join([doc.page_content for doc in docs])
chat_prompt = ChatPromptTemplate.from_messages(
[i.to_msg_tuple() for i in history] + [("human", PROMPT_TEMPLATE)])
chain = LLMChain(prompt=chat_prompt, llm=model)
# Begin a task that runs in the background.
task = asyncio.create_task(wrap_done(
chain.acall({"context": context, "question": query}),
callback.done),
)
source_documents = [
f"""出处 [{inum + 1}] [{doc.metadata["source"]}]({doc.metadata["source"]}) \n\n{doc.page_content}\n\n"""
for inum, doc in enumerate(docs)
]
if stream:
async for token in callback.aiter():
# Use server-sent-events to stream the response
yield json.dumps({"answer": token,
"docs": source_documents},
ensure_ascii=False)
else:
answer = ""
async for token in callback.aiter():
answer += token
            yield json.dumps({"answer": answer,
"docs": source_documents},
ensure_ascii=False)
await task
    return StreamingResponse(chat_iterator(query, "github", top_k, history),  # "github" is a placeholder source name for chat_iterator
media_type="text/event-stream")
| [
"LangChain-Chatchat (原 Langchain-ChatGLM): 基于 Langchain 与 ChatGLM 等大语言模型的本地知识库问答应用实现。",
"介绍一下本项目",
"human"
] |
2024-01-10 | jkeam/semantic-searcher | searcher~models.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.llms import OpenAI
from chromadb import HttpClient
from chromadb.api.models.Collection import Collection
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from openai.error import AuthenticationError, RateLimitError
from logging import getLogger
from searcher.extensions import db
from datetime import datetime
class Fact(db.Model):
id = db.Column(db.String(40), primary_key=True)
title = db.Column(db.String(256))
body = db.Column(db.Text)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow)
author_id = db.Column(db.Integer)
def __repr__(self):
return f'<Fact "{self.title}">'
class TrainingError(Exception):
def __init__(self, message) -> None:
self.message = message
super().__init__(self.message)
class Searcher:
def __init__(self, openai_api_key:str, open_ai_model_name:str, chroma_host:str, chroma_port:str):
openai:OpenAI = OpenAI(temperature=0, openai_api_key=openai_api_key)
self._chain:BaseCombineDocumentsChain = load_qa_chain(openai, chain_type='stuff')
self._dbclient = HttpClient(host=chroma_host, port=chroma_port)
self._collection_name = "chroma"
self._embedding_function = OpenAIEmbeddingFunction(
api_key=openai_api_key,
model_name=open_ai_model_name
)
self._collection = self._dbclient.get_or_create_collection(name=self._collection_name)
self._logger = getLogger(__name__)
def train(self, values):
"""
Train the model
"""
doc_str = "\n\n".join(values)
self._collection = self._generate_index(doc_str)
def _generate_index(self, text:str) -> Collection:
"""
Index the document and return the indexed db
"""
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=100)
documents = text_splitter.split_text(text)
self._dbclient.get_or_create_collection(name=self._collection_name)
self._dbclient.delete_collection(name=self._collection_name)
collection = self._dbclient.create_collection(name=self._collection_name, embedding_function=self._embedding_function)
try:
collection.add(documents=documents, ids=list(map(lambda num: str(num), range(len(documents)))))
except AuthenticationError as e:
self._logger.error(e)
raise TrainingError('Invalid OPENAI Key')
except RateLimitError as e:
self._logger.error(e)
raise TrainingError('Rate Limit Error while using OPENAI Key')
return collection
def _answer_question(self, query:str, collection:Collection, chain:BaseCombineDocumentsChain) -> str:
"""
Takes in query, index to search from, and llm chain to generate answer
"""
query_db = Chroma(client=self._dbclient, collection_name=self._collection_name, embedding_function=OpenAIEmbeddings())
docs = query_db.similarity_search(query)
answer:dict[str, str] = chain({'input_documents': docs, 'question': query}, return_only_outputs=True)
return answer['output_text']
def ask(self, query:str) -> str:
"""
Ask the model a query
"""
return self._answer_question(query, self._collection, self._chain)
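# Minimal usage sketch (illustrative only). Requires a running Chroma server and
# an OPENAI_API_KEY; the host/port, embedding model name, and sample facts are
# assumptions rather than values defined in this module.
if __name__ == "__main__":
    import os

    searcher = Searcher(
        openai_api_key=os.environ.get("OPENAI_API_KEY", ""),
        open_ai_model_name="text-embedding-ada-002",
        chroma_host=os.environ.get("CHROMA_HOST", "localhost"),
        chroma_port=os.environ.get("CHROMA_PORT", "8000"),
    )
    searcher.train(["Chroma stores document embeddings.", "The searcher answers questions over them."])
    print(searcher.ask("What does Chroma store?"))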
| [] |
2024-01-10 | Guiliang/ICRL-benchmarks-public | stable_baselines3~ppo_lag~ppo_lag.py | from typing import Any, Callable, Dict, Optional, Type, Union
import numpy as np
import torch as th
from gym import spaces
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.dual_variable import DualVariable, PIDLagrangian
from stable_baselines3.common.on_policy_algorithm import \
OnPolicyWithCostAlgorithm
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback
from stable_baselines3.common.utils import explained_variance, get_schedule_fn
class PPOLagrangian(OnPolicyWithCostAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) augmented with a Lagrangian (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
and Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param batch_size: Minibatch size
:param n_epochs: Number of epoch when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
algo_type: str = 'lagrangian', # lagrangian or pidlagrangian
learning_rate: Union[float, Callable] = 3e-4,
n_steps: int = 2048,
batch_size: Optional[int] = 64,
n_epochs: int = 10,
reward_gamma: float = 0.99,
reward_gae_lambda: float = 0.95,
cost_gamma: float = 0.99,
cost_gae_lambda: float = 0.95,
clip_range: float = 0.2,
clip_range_reward_vf: Optional[float] = None,
clip_range_cost_vf: Optional[float] = None,
ent_coef: float = 0.0,
reward_vf_coef: float = 0.5,
cost_vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
penalty_initial_value: float = 1,
penalty_learning_rate: float = 0.01,
penalty_min_value: Optional[float] = None,
update_penalty_after: int = 1,
budget: float = 0.,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
pid_kwargs: Optional[Dict[str, Any]] = None,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(PPOLagrangian, self).__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
reward_gamma=reward_gamma,
reward_gae_lambda=reward_gae_lambda,
cost_gamma=cost_gamma,
cost_gae_lambda=cost_gae_lambda,
ent_coef=ent_coef,
reward_vf_coef=reward_vf_coef,
cost_vf_coef=cost_vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
_init_setup_model=False,
)
self.algo_type = algo_type
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_reward_vf = clip_range_reward_vf
self.clip_range_cost_vf = clip_range_cost_vf
self.target_kl = target_kl
self.penalty_initial_value = penalty_initial_value
self.penalty_learning_rate = penalty_learning_rate
self.penalty_min_value = penalty_min_value
self.update_penalty_after = update_penalty_after
self.budget = budget
self.pid_kwargs = pid_kwargs
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(PPOLagrangian, self)._setup_model()
if self.algo_type == 'lagrangian':
self.dual = DualVariable(self.budget, self.penalty_learning_rate, self.penalty_initial_value, self.penalty_min_value)
elif self.algo_type == 'pidlagrangian':
self.dual = PIDLagrangian(alpha=self.pid_kwargs['alpha'],
penalty_init=self.pid_kwargs['penalty_init'],
Kp=self.pid_kwargs['Kp'],
Ki=self.pid_kwargs['Ki'],
Kd=self.pid_kwargs['Kd'],
pid_delay=self.pid_kwargs['pid_delay'],
delta_p_ema_alpha=self.pid_kwargs['delta_p_ema_alpha'],
delta_d_ema_alpha=self.pid_kwargs['delta_d_ema_alpha'])
else:
raise ValueError("Unrecognized value for argument 'algo_type' in PPOLagrangian")
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_reward_vf is not None:
if isinstance(self.clip_range_reward_vf, (float, int)):
assert self.clip_range_reward_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_reward_vf = get_schedule_fn(self.clip_range_reward_vf)
if self.clip_range_cost_vf is not None:
if isinstance(self.clip_range_cost_vf, (float, int)):
assert self.clip_range_cost_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_cost_vf = get_schedule_fn(self.clip_range_cost_vf)
def train(self) -> None:
"""
Update policy using the currently gathered
rollout buffer.
"""
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value functions
if self.clip_range_reward_vf is not None:
clip_range_reward_vf = self.clip_range_reward_vf(self._current_progress_remaining)
if self.clip_range_cost_vf is not None:
clip_range_cost_vf = self.clip_range_cost_vf(self._current_progress_remaining)
entropy_losses, all_kl_divs = [], []
pg_losses, reward_value_losses, cost_value_losses = [], [], []
clip_fractions = []
# Train for gradient_steps epochs
early_stop_epoch = self.n_epochs
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(self.batch_size):
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
# Re-sample the noise matrix because the log_std has changed
# if that line is commented (as in SAC)
if self.use_sde:
self.policy.reset_noise(self.batch_size)
reward_values, cost_values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
reward_values = reward_values.flatten()
cost_values = cost_values.flatten()
# Normalize reward advantages
reward_advantages = rollout_data.reward_advantages - rollout_data.reward_advantages.mean()
reward_advantages /= (rollout_data.reward_advantages.std() + 1e-8)
# Center but NOT rescale cost advantages
cost_advantages = rollout_data.cost_advantages - rollout_data.cost_advantages.mean()
#cost_advantages /= (rollout_data.cost_advantages.std() + 1e-8)
# Ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# Clipped surrogate loss
policy_loss_1 = reward_advantages * ratio
policy_loss_2 = reward_advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Add cost to loss
current_penalty = self.dual.nu().item()
policy_loss += current_penalty * th.mean(cost_advantages * ratio)
policy_loss /= (1 + current_penalty)
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_reward_vf is None:
# No clipping
reward_values_pred = reward_values
else:
# Clip the difference between old and new value
# NOTE: this depends on the reward scaling
reward_values_pred = rollout_data.old_reward_values + th.clamp(
reward_values - rollout_data.old_reward_values, -clip_range_reward_vf, clip_range_reward_vf
)
if self.clip_range_cost_vf is None:
# No clipping
cost_values_pred = cost_values
else:
# Clip the difference between old and new value
# NOTE: this depends on the cost scaling
cost_values_pred = rollout_data.old_cost_values + th.clamp(
cost_values - rollout_data.old_cost_values, -clip_range_cost_vf, clip_range_cost_vf
)
# Value loss using the TD(gae_lambda) target
reward_value_loss = F.mse_loss(rollout_data.reward_returns, reward_values_pred)
cost_value_loss = F.mse_loss(rollout_data.cost_returns, cost_values_pred)
reward_value_losses.append(reward_value_loss.item())
cost_value_losses.append(cost_value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
loss = (policy_loss
+ self.ent_coef * entropy_loss
+ self.reward_vf_coef * reward_value_loss
+ self.cost_vf_coef * cost_value_loss)
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
approx_kl_divs.append(th.mean(rollout_data.old_log_prob - log_prob).detach().cpu().numpy())
all_kl_divs.append(np.mean(approx_kl_divs))
if self.target_kl is not None and np.mean(approx_kl_divs) > 1.5 * self.target_kl:
early_stop_epoch = epoch
if self.verbose > 0:
print(f"Early stopping at step {epoch} due to reaching max kl: {np.mean(approx_kl_divs):.2f}")
break
self._n_updates += self.n_epochs
# Update dual variable using original (unnormalized) cost
# TODO: Experiment with discounted cost.
average_cost = np.mean(self.rollout_buffer.orig_costs)
total_cost = np.sum(self.rollout_buffer.orig_costs)
if self.update_penalty_after is None or ((self._n_updates/self.n_epochs) % self.update_penalty_after == 0):
self.dual.update_parameter(average_cost)
mean_reward_advantages = np.mean(self.rollout_buffer.reward_advantages.flatten())
mean_cost_advantages = np.mean(self.rollout_buffer.cost_advantages.flatten())
explained_reward_var = explained_variance(self.rollout_buffer.reward_returns.flatten(), self.rollout_buffer.reward_values.flatten())
explained_cost_var = explained_variance(self.rollout_buffer.cost_returns.flatten(), self.rollout_buffer.cost_values.flatten())
# Logs
logger.record("train/entropy_loss", np.mean(entropy_losses))
logger.record("train/policy_gradient_loss", np.mean(pg_losses))
logger.record("train/reward_value_loss", np.mean(reward_value_losses))
logger.record("train/cost_value_loss", np.mean(cost_value_losses))
logger.record("train/approx_kl", np.mean(approx_kl_divs))
logger.record("train/clip_fraction", np.mean(clip_fractions))
logger.record("train/loss", loss.item())
logger.record("train/mean_reward_advantages", mean_reward_advantages)
logger.record("train/mean_cost_advantages", mean_cost_advantages)
logger.record("train/reward_explained_variance", explained_reward_var)
logger.record("train/cost_explained_variance", explained_cost_var)
logger.record("train/nu", self.dual.nu().item())
logger.record("train/nu_loss", self.dual.loss.item())
logger.record("train/average_cost", average_cost)
logger.record("train/total_cost", total_cost)
logger.record("train/early_stop_epoch", early_stop_epoch)
if hasattr(self.policy, "log_std"):
logger.record("train/std", th.exp(self.policy.log_std).mean().item())
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/clip_range", clip_range)
if self.clip_range_reward_vf is not None:
logger.record("train/clip_range_reward_vf", clip_range_reward_vf)
if self.clip_range_cost_vf is not None:
logger.record("train/clip_range_cost_vf", clip_range_cost_vf)
def learn(
self,
total_timesteps: int,
cost_function: Union[str,Callable],
callback: MaybeCallback = None,
log_interval: int = 1,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "PPOLagrangian",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> "PPOLagrangian":
return super(PPOLagrangian, self).learn(
total_timesteps=total_timesteps,
cost_function=cost_function,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
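# ---------------------------------------------------------------------------
# Usage sketch (illustrative only); the environment id, budget value and the
# exact cost-function signature expected by OnPolicyWithCostAlgorithm are
# assumptions here.
#
#   model = PPOLagrangian("MlpPolicy", "HalfCheetah-v3", budget=25.0, verbose=1)
#   model.learn(total_timesteps=100_000,
#               cost_function=lambda obs, acs: np.zeros(len(obs)))
# ---------------------------------------------------------------------------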
| [] |
2024-01-10 | schroedermatthew/Math-389-Grades | Assignments~Assingment%208~Sarah%20(Yiping)%20Lu~luyiping_10451_675998_HW10.py | #!/usr/bin/env python
# coding: utf-8
# In[7]:
get_ipython().system('python -m pip install -U gensim')
# In[6]:
import sys
# !{sys.executable} -m spacy download en
import re, numpy as np, pandas as pd
from pprint import pprint
# Gensim
import gensim, spacy, logging, warnings
import gensim.corpora as corpora
from gensim.utils import simple_preprocess  # lemmatize was removed in gensim 4.x and is not needed here
from gensim.models import CoherenceModel
import matplotlib.pyplot as plt
# NLTK Stop words
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use', 'not', 'would', 'say', 'could', '_', 'be', 'know', 'good', 'go', 'get', 'do', 'done', 'try', 'many', 'some', 'nice', 'thank', 'think', 'see', 'rather', 'easy', 'easily', 'lot', 'lack', 'make', 'want', 'seem', 'run', 'need', 'even', 'right', 'line', 'even', 'also', 'may', 'take', 'come'])
get_ipython().run_line_magic('matplotlib', 'inline')
warnings.filterwarnings("ignore",category=DeprecationWarning)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
# In[ ]:
df = pd.read_json('https://raw.githubusercontent.com/selva86/datasets/master/newsgroups.json')
df = df.loc[df.target_names.isin(['soc.religion.christian', 'rec.sport.hockey', 'talk.politics.mideast', 'rec.motorcycles']) , :]
print(df.shape) #> (2361, 3)
df.head()
# In[ ]:
def sent_to_words(sentences):
for sent in sentences:
sent = re.sub('\S*@\S*\s?', '', sent) # remove emails
sent = re.sub('\s+', ' ', sent) # remove newline chars
sent = re.sub("\'", "", sent) # remove single quotes
sent = gensim.utils.simple_preprocess(str(sent), deacc=True)
yield(sent)
# Convert to list
data = df.content.values.tolist()
data_words = list(sent_to_words(data))
print(data_words[:1])
# In[ ]:
# Build the bigram and trigram models
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# !python3 -m spacy download en # run in terminal once
# or do
# !conda install -c conda-forge spacy-model-en_core_web_md
# and use nlp=spacy.load('en_core_web_sm') instead in below function.
def process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""Remove Stopwords, Form Bigrams, Trigrams and Lemmatization"""
texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
texts = [bigram_mod[doc] for doc in texts]
texts = [trigram_mod[bigram_mod[doc]] for doc in texts]
texts_out = []
nlp = spacy.load('en', disable=['parser', 'ner'])
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
# remove stopwords once more after lemmatization
texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out]
return texts_out
data_ready = process_words(data_words) # processed Text Data!
# In[ ]:
# Create Dictionary
id2word = corpora.Dictionary(data_ready)
# Create Corpus: Term Document Frequency
corpus = [id2word.doc2bow(text) for text in data_ready]
# Build LDA model
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=4,
random_state=100,
update_every=1,
chunksize=10,
passes=10,
alpha='symmetric',
iterations=100,
per_word_topics=True)
pprint(lda_model.print_topics())
#> [(0,
#> '0.017*"write" + 0.015*"people" + 0.014*"organization" + 0.014*"article" + '
#> '0.013*"time" + 0.008*"give" + 0.008*"first" + 0.007*"tell" + 0.007*"new" + '
#> '0.007*"question"'),
#> (1,
#> '0.008*"christian" + 0.008*"believe" + 0.007*"god" + 0.007*"law" + '
#> '0.006*"state" + 0.006*"israel" + 0.006*"israeli" + 0.005*"exist" + '
#> '0.005*"way" + 0.004*"bible"'),
#> (2,
#> '0.024*"armenian" + 0.012*"bike" + 0.006*"kill" + 0.006*"work" + '
#> '0.005*"well" + 0.005*"year" + 0.005*"sumgait" + 0.005*"soldier" + '
#> '0.004*"way" + 0.004*"ride"'),
#> (3,
#> '0.019*"team" + 0.019*"game" + 0.013*"hockey" + 0.010*"player" + '
#> '0.009*"play" + 0.009*"win" + 0.009*"nhl" + 0.009*"year" + 0.009*"hawk" + '
#> '0.009*"season"')]
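# In[ ]:
# Sketch of a coherence check (not in the original notebook): CoherenceModel is
# imported above but never used. Assuming lda_model, data_ready and id2word from
# the earlier cells, the c_v coherence of the fitted model can be computed as:
coherence_model_lda = CoherenceModel(model=lda_model, texts=data_ready,
                                     dictionary=id2word, coherence='c_v')
print('Coherence Score: ', coherence_model_lda.get_coherence())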
# In[ ]:
def format_topics_sentences(ldamodel=None, corpus=corpus, texts=data):
# Init output
sent_topics_df = pd.DataFrame()
# Get main topic in each document
for i, row_list in enumerate(ldamodel[corpus]):
row = row_list[0] if ldamodel.per_word_topics else row_list
# print(row)
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']
# Add original text to the end of the output
contents = pd.Series(texts)
sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
return(sent_topics_df)
df_topic_sents_keywords = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data_ready)
# Format
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
df_dominant_topic.head(10)
# In[ ]:
# Display setting to show more characters in column
pd.options.display.max_colwidth = 100
sent_topics_sorteddf_mallet = pd.DataFrame()
sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')
for i, grp in sent_topics_outdf_grpd:
sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet,
grp.sort_values(['Perc_Contribution'], ascending=False).head(1)],
axis=0)
# Reset Index
sent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)
# Format
sent_topics_sorteddf_mallet.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Representative Text"]
# Show
sent_topics_sorteddf_mallet.head(10)
# In[ ]:
doc_lens = [len(d) for d in df_dominant_topic.Text]
# Plot
plt.figure(figsize=(16,7), dpi=160)
plt.hist(doc_lens, bins = 1000, color='navy')
plt.text(750, 100, "Mean : " + str(round(np.mean(doc_lens))))
plt.text(750, 90, "Median : " + str(round(np.median(doc_lens))))
plt.text(750, 80, "Stdev : " + str(round(np.std(doc_lens))))
plt.text(750, 70, "1%ile : " + str(round(np.quantile(doc_lens, q=0.01))))
plt.text(750, 60, "99%ile : " + str(round(np.quantile(doc_lens, q=0.99))))
plt.gca().set(xlim=(0, 1000), ylabel='Number of Documents', xlabel='Document Word Count')
plt.tick_params(size=16)
plt.xticks(np.linspace(0,1000,9))
plt.title('Distribution of Document Word Counts', fontdict=dict(size=22))
plt.show()
# In[ ]:
import seaborn as sns
import matplotlib.colors as mcolors
cols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS'
fig, axes = plt.subplots(2,2,figsize=(16,14), dpi=160, sharex=True, sharey=True)
for i, ax in enumerate(axes.flatten()):
df_dominant_topic_sub = df_dominant_topic.loc[df_dominant_topic.Dominant_Topic == i, :]
doc_lens = [len(d) for d in df_dominant_topic_sub.Text]
ax.hist(doc_lens, bins = 1000, color=cols[i])
ax.tick_params(axis='y', labelcolor=cols[i], color=cols[i])
sns.kdeplot(doc_lens, color="black", shade=False, ax=ax.twinx())
ax.set(xlim=(0, 1000), xlabel='Document Word Count')
ax.set_ylabel('Number of Documents', color=cols[i])
ax.set_title('Topic: '+str(i), fontdict=dict(size=16, color=cols[i]))
fig.tight_layout()
fig.subplots_adjust(top=0.90)
plt.xticks(np.linspace(0,1000,9))
fig.suptitle('Distribution of Document Word Counts by Dominant Topic', fontsize=22)
plt.show()
# In[ ]:
# 1. Wordcloud of Top N words in each topic
from matplotlib import pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import matplotlib.colors as mcolors
cols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS'
cloud = WordCloud(stopwords=stop_words,
background_color='white',
width=2500,
height=1800,
max_words=10,
colormap='tab10',
color_func=lambda *args, **kwargs: cols[i],
prefer_horizontal=1.0)
topics = lda_model.show_topics(formatted=False)
fig, axes = plt.subplots(2, 2, figsize=(10,10), sharex=True, sharey=True)
for i, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
topic_words = dict(topics[i][1])
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(i), fontdict=dict(size=16))
plt.gca().axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.axis('off')
plt.margins(x=0, y=0)
plt.tight_layout()
plt.show()
# In[ ]:
from collections import Counter
topics = lda_model.show_topics(formatted=False)
data_flat = [w for w_list in data_ready for w in w_list]
counter = Counter(data_flat)
out = []
for i, topic in topics:
for word, weight in topic:
out.append([word, i , weight, counter[word]])
df = pd.DataFrame(out, columns=['word', 'topic_id', 'importance', 'word_count'])
# Plot Word Count and Weights of Topic Keywords
fig, axes = plt.subplots(2, 2, figsize=(16,10), sharey=True, dpi=160)
cols = [color for name, color in mcolors.TABLEAU_COLORS.items()]
for i, ax in enumerate(axes.flatten()):
ax.bar(x='word', height="word_count", data=df.loc[df.topic_id==i, :], color=cols[i], width=0.5, alpha=0.3, label='Word Count')
ax_twin = ax.twinx()
ax_twin.bar(x='word', height="importance", data=df.loc[df.topic_id==i, :], color=cols[i], width=0.2, label='Weights')
ax.set_ylabel('Word Count', color=cols[i])
ax_twin.set_ylim(0, 0.030); ax.set_ylim(0, 3500)
ax.set_title('Topic: ' + str(i), color=cols[i], fontsize=16)
ax.tick_params(axis='y', left=False)
ax.set_xticklabels(df.loc[df.topic_id==i, 'word'], rotation=30, horizontalalignment= 'right')
ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')
fig.tight_layout(w_pad=2)
fig.suptitle('Word Count and Importance of Topic Keywords', fontsize=22, y=1.05)
plt.show()
# In[ ]:
# Sentence Coloring of N Sentences
from matplotlib.patches import Rectangle
def sentences_chart(lda_model=lda_model, corpus=corpus, start = 0, end = 13):
corp = corpus[start:end]
mycolors = [color for name, color in mcolors.TABLEAU_COLORS.items()]
fig, axes = plt.subplots(end-start, 1, figsize=(20, (end-start)*0.95), dpi=160)
axes[0].axis('off')
for i, ax in enumerate(axes):
if i > 0:
corp_cur = corp[i-1]
topic_percs, wordid_topics, wordid_phivalues = lda_model[corp_cur]
word_dominanttopic = [(lda_model.id2word[wd], topic[0]) for wd, topic in wordid_topics]
ax.text(0.01, 0.5, "Doc " + str(i-1) + ": ", verticalalignment='center',
fontsize=16, color='black', transform=ax.transAxes, fontweight=700)
# Draw Rectange
topic_percs_sorted = sorted(topic_percs, key=lambda x: (x[1]), reverse=True)
ax.add_patch(Rectangle((0.0, 0.05), 0.99, 0.90, fill=None, alpha=1,
color=mycolors[topic_percs_sorted[0][0]], linewidth=2))
word_pos = 0.06
for j, (word, topics) in enumerate(word_dominanttopic):
if j < 14:
ax.text(word_pos, 0.5, word,
horizontalalignment='left',
verticalalignment='center',
fontsize=16, color=mycolors[topics],
transform=ax.transAxes, fontweight=700)
word_pos += .009 * len(word) # to move the word for the next iter
ax.axis('off')
ax.text(word_pos, 0.5, '. . .',
horizontalalignment='left',
verticalalignment='center',
fontsize=16, color='black',
transform=ax.transAxes)
plt.subplots_adjust(wspace=0, hspace=0)
plt.suptitle('Sentence Topic Coloring for Documents: ' + str(start) + ' to ' + str(end-2), fontsize=22, y=0.95, fontweight=700)
plt.tight_layout()
plt.show()
sentences_chart()
| [] |
2024-01-10 | OEFDataScience/KCNA-LDA | LDA_modeling.py | # -*- coding: utf-8 -*-
"""
LDA Topic Modeling Training and pre-canned dashboard creation
k=10
Created on Wed Apr 29 10:56:40 2020
@author: Claytonious
"""
#modules
import pandas as pd
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
import gensim.test.utils
import spacy
import en_core_web_sm
import nltk
#nltk.download('stopwords')
from nltk.corpus import stopwords
import tqdm
import re
import numpy as np
import pickle
from pprint import pprint  # used below when printing the discovered topics
########################
#utility functions
def remove_stopwords(texts):
return[[word for word in simple_preprocess(str(doc)) if word not in stop_words]
for doc in texts]
def make_bigrams(texts):
return [bigram_mod[doc] for doc in texts]
def make_trigrams(texts):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB',
'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
##################################
#DATA IMPORT AND SETUP
df2 = pd.read_pickle("dprk_cleaned.pkl")
df2['year'] = pd.DatetimeIndex(df2['Date']).year
years = range(1996, 2020)
###########################################
#LDA w/ GENSIM FRAMEWORK
#part 1: setup bigrams/trigrams
#################################################
#bigrams and trigram phrase models
###################################################
bigram = gensim.models.Phrases(df2['text_processed2'], min_count = 5,
threshold = 100)
trigram = gensim.models.Phrases(bigram[df2['text_processed2']],
                                threshold = 100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
#####################################################
#part 2: Remove stopwords and lemmatize text
######################################################
#stopword model setup
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
#remove stopwords
data_words_nostops = remove_stopwords(df2['text_processed2'])
#form bigram
data_words_bigram = make_bigrams(data_words_nostops)
#initialize spacy 'en' model
nlp = en_core_web_sm.load(disable = ['parser', 'ner'])
#lemmatization, keeping only noun, adj, vb, adv
data_lemma = lemmatization(data_words_bigram,
allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
#######################################################
#part 3: Train LDA model
#######################################################
#model object setup
#dictionary
id2word = corpora.Dictionary(data_lemma)
#save dictionary
id2word.save("id2word.pkl")
#corpus
texts = data_lemma
#term document matrix
corpus = [id2word.doc2bow(text) for text in texts]
#save DTM/corpus
with open('corpus.pkl', "wb") as fp:
pickle.dump(corpus, fp)
#train model with k=10, random seed = 100
lda_model = gensim.models.LdaMulticore(corpus = corpus,
id2word = id2word,
num_topics = 10,
random_state = 100,
chunksize = 100,
passes = 10,
                                       per_word_topics=True)
#save lda_model
lda_model.save("lda_10.pkl")
#lda_model = gensim.models.LdaModel.load("lda_baseline.pkl")
#inspect for coherence score
pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
#coherence
coherence_model_lda = CoherenceModel(model=lda_model,
texts = data_lemma,
dictionary = id2word,
coherence = 'c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
#baseline coherence score is 0.5407 with k = 10
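# Optional sketch (not part of the original script): compare a few values of k by
# c_v coherence. RUN_K_SWEEP is an assumed flag name; everything else reuses the
# objects built above. Retraining is slow, hence the guard.
RUN_K_SWEEP = False
if RUN_K_SWEEP:
    for k in [5, 10, 15, 20]:
        candidate = gensim.models.LdaMulticore(corpus=corpus, id2word=id2word,
                                               num_topics=k, random_state=100,
                                               passes=10)
        cm = CoherenceModel(model=candidate, texts=data_lemma,
                            dictionary=id2word, coherence='c_v')
        print(k, cm.get_coherence())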
#create dashboard visualization hosted on github repo page
import pyLDAvis.gensim
import pickle
import pyLDAvis
import os
#first visualize the 10 topic baseline model
LDAvis_data_filepath = os.path.join('./ldavis_prepared_'+str(10))
# # this is a bit time consuming - make the if statement True
# # if you want to execute visualization prep yourself
if 1 == 1:
LDAvis_prepared = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)
with open(LDAvis_data_filepath, 'w') as f:
pickle.dump(LDAvis_prepared, f)
# load the pre-prepared pyLDAvis data from disk
with open(LDAvis_data_filepath) as f:
LDAvis_prepared = pickle.load(f)
pyLDAvis.save_html(LDAvis_prepared, './ldavis_prepared_'+ str(10) +'.html')
| [] |
2024-01-10 | jumasheff/seasoned_care | webapp~app~chatbot~chains.py | """Factory helpers for the chatbot's LangChain chains (symptom QA, intents, general chat, appointments)."""
from langchain.agents import AgentExecutor, LLMSingleActionAgent
from langchain.callbacks.base import AsyncCallbackManager
from langchain.callbacks.base import CallbackManager
from langchain.callbacks.tracers import LangChainTracer
from langchain.chains import ConversationalRetrievalChain, ConversationChain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores.base import VectorStoreRetriever
from .utils import (
AppointmentsOutputParser,
get_appointment_chat_prompt,
get_appointment_json_prompt,
get_appointment_tools,
get_general_chat_prompt,
get_intent_prompt,
get_symptoms_qa_prompt,
)
def get_symptoms_chain(
retriever: VectorStoreRetriever,
question_handler,
stream_handler,
tracing: bool = False,
) -> ConversationalRetrievalChain:
"""Create a ChatVectorDBChain for question/answering."""
# Construct a ChatVectorDBChain with a streaming llm for combine docs
# and a separate, non-streaming llm for question generation
manager = AsyncCallbackManager([])
question_manager = AsyncCallbackManager([question_handler])
stream_manager = AsyncCallbackManager([stream_handler])
if tracing:
tracer = LangChainTracer()
tracer.load_default_session()
manager.add_handler(tracer)
question_manager.add_handler(tracer)
stream_manager.add_handler(tracer)
question_gen_llm = OpenAI(
temperature=0,
verbose=True,
callback_manager=question_manager,
)
streaming_llm = OpenAI(
streaming=True,
callback_manager=stream_manager,
verbose=True,
temperature=0,
)
question_generator = LLMChain(
llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager
)
doc_chain = load_qa_chain(
streaming_llm,
chain_type="stuff",
prompt=get_symptoms_qa_prompt(),
callback_manager=manager,
)
qa = ConversationalRetrievalChain(
retriever=retriever,
combine_docs_chain=doc_chain,
question_generator=question_generator,
callback_manager=manager,
)
return qa
def get_intents_chain():
chat = ChatOpenAI(
temperature=0,
verbose=True,
)
chat_prompt = get_intent_prompt()
return LLMChain(llm=chat, prompt=chat_prompt)
def get_general_chat_chain(stream_handler, memory: ConversationBufferMemory = None):
if memory is None:
memory = ConversationBufferMemory()
chat_prompt = get_general_chat_prompt()
stream_manager = AsyncCallbackManager([stream_handler])
chat = ChatOpenAI(
streaming=True,
callback_manager=stream_manager,
verbose=True,
temperature=0,
)
return LLMChain(llm=chat, prompt=chat_prompt)
def get_appointment_agent_executor(
stream_handler,
memory: ConversationBufferMemory = None,
):
stream_manager = AsyncCallbackManager([stream_handler])
if memory is None:
memory = ConversationBufferMemory()
tools = get_appointment_tools(stream_manager)
tool_names = [tool.name for tool in tools]
chat_prompt = get_appointment_chat_prompt(tools=tools)
chat = ChatOpenAI(
streaming=True,
callback_manager=stream_manager,
temperature=0,
)
llm_chain = LLMChain(llm=chat, prompt=chat_prompt)
output_parser = AppointmentsOutputParser()
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names,
)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True
)
return agent_executor
def get_appointment_chain(memory: ConversationBufferMemory = None):
if memory is None:
memory = ConversationBufferMemory()
chat_prompt = get_appointment_json_prompt()
chat = ChatOpenAI(
streaming=False,
verbose=True,
temperature=0,
)
return ConversationChain(llm=chat, prompt=chat_prompt, memory=memory)
| [] |
2024-01-10 | jumasheff/seasoned_care | utils~write_data_to_vector_db.py | import os
import csv
import chromadb
from dotenv import load_dotenv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.schema import Document
load_dotenv('../.env')
PERSIST_DIRECTORY = "../vector_db"
ABS_PATH = os.path.dirname(os.path.abspath(__file__))
DB_DIR = os.path.join(ABS_PATH, PERSIST_DIRECTORY)
CONDITIONS = "conditions"
settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=DB_DIR,
anonymized_telemetry=False,
)
def get_client():
return chromadb.Client(settings=settings)
def create_health_conditions_qa_db():
EMBEDDINGS = OpenAIEmbeddings()
docs = []
with open("../clean_data/ProcessedData.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
for row in csv_reader:
question, answer, focus = row
if question == "Questions":
# skip header
continue
text = f"{question}\n{answer}"
doc = Document(
page_content=text, metadata={"focus": focus, "question": question}
)
docs.append(doc)
db = Chroma(
collection_name=CONDITIONS,
embedding_function=EMBEDDINGS,
client_settings=settings,
persist_directory=DB_DIR,
)
db.add_documents(documents=docs, embedding=EMBEDDINGS)
db.persist()
return db
def get_health_conditions_qa_db(client):
EMBEDDINGS = OpenAIEmbeddings()
collections = [col.name for col in client.list_collections()]
if CONDITIONS in collections:
return Chroma(
collection_name=CONDITIONS,
embedding_function=EMBEDDINGS,
client_settings=settings,
persist_directory=DB_DIR,
)
return create_health_conditions_qa_db()
def match_condition(client, inp):
db = get_health_conditions_qa_db(client)
docs = db.similarity_search(inp)
return docs
def retriever(client, inp):
db = get_health_conditions_qa_db(client)
retriever = db.as_retriever(search_type="mmr")
docs = retriever.get_relevant_documents(inp)
return docs
if __name__ == "__main__":
# client = get_client()
# time.sleep(5)
text = """Back of my neck is hurting.
I didn't have any injuries. I work as a software engineer.
I have been experiencing this for 2-3 months."""
# docs = match_condition(client, text)
# for doc in docs:
# print(doc.metadata["focus"])
# print(doc.page_content)
# print("=========================")
print("@" * 20)
client = get_client()
    docs = retriever(client, text)
for doc in docs:
print(doc.metadata["focus"])
print(doc.page_content)
print("=========================")
| [] |
2024-01-10 | jumasheff/seasoned_care | webapp~app~consumers.py | import json
from asgiref.sync import sync_to_async
from channels.db import database_sync_to_async
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from langchain.memory import ConversationBufferMemory
from .chatbot.schemas import ChatResponse
from .chatbot.tools import AppointmentJSONException, create_appointment_from_json_str
from .chatbot.callback import (
QuestionGenCallbackHandler,
StreamingLLMCallbackHandler,
)
from .chatbot.chains import (
get_appointment_chain,
get_general_chat_chain,
get_intents_chain,
get_symptoms_chain,
)
from .chatbot.utils import init_retriever
# TODO(murat): Use a single chat history
# TODO(murat): Use a vectorstore for storing chat history
chat_history = []
memory = ConversationBufferMemory()
@database_sync_to_async
def get_profile(user):
prof = user.healthprofile
data = f"""User health data:
Gender: {prof.gender};
Age: {prof.age};
Weight: {prof.weight} kilograms;
Height: {prof.height} centimeters;
Health condition notes: {prof.health_conditions_notes}"""
return data
class ChatRoomConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
# TODO(murat): check if user is authenticated.
# TODO(murat): create a chat session and use session id as chat_box_name.
self.health_data = await get_profile(self.scope["user"])
self.chat_box_name = self.scope["url_route"]["kwargs"]["chat_box_name"]
self.group_name = "chat_%s" % self.chat_box_name
await self.channel_layer.group_add(self.group_name, self.channel_name)
await self.accept()
resp = ChatResponse(username="bot", message="Loading stuff...", type="info")
await self.send(text_data=json.dumps(resp.dict()))
question_handler = QuestionGenCallbackHandler(self)
stream_handler = StreamingLLMCallbackHandler(self)
retriever = await sync_to_async(init_retriever)()
self.intents_chain = await sync_to_async(get_intents_chain)()
self.symptopms_qa_chain = await sync_to_async(get_symptoms_chain)(
retriever, question_handler, stream_handler, tracing=True
)
self.general_chat_chain = await sync_to_async(get_general_chat_chain)(
stream_handler, memory=memory
)
self.appointment_chain = await sync_to_async(get_appointment_chain)(memory)
resp = ChatResponse(
username="bot", message="Ready to accept questions", type="info"
)
await self.send(text_data=json.dumps(resp.dict()))
async def disconnect(self, close_code):
await self.channel_layer.group_discard(self.group_name, self.channel_name)
async def receive(self, text_data):
data = await self.decode_json(text_data)
message = data.get("message", "")
type_of_msg = data.get("type", "")
intent = await self.intents_chain.arun(input=message)
if intent == "appointment" or type_of_msg == "clarification":
chat_hanlder = "appointment_message"
elif intent == "symptom":
chat_hanlder = "symptom_message"
else:
chat_hanlder = "general_chat_message"
await self.channel_layer.group_send(
self.group_name,
{
"type": chat_hanlder,
"message": message,
"username": "you", # TODO: get username from session
},
)
async def appointment_message(self, event):
print("IN APPOINTMENT MESSAGE")
message = event["message"]
username = event["username"]
# send message and username of sender to websocket
resp = ChatResponse(username=username, message=message, type="stream")
await self.send(text_data=json.dumps(resp.dict()))
# Construct a response
start_resp = ChatResponse(username="bot", message="", type="start")
await self.send(text_data=json.dumps(start_resp.dict()))
result = await self.appointment_chain.arun(input=message)
print("@@@@@@@@@@= RESULT: ", result)
try:
output = await sync_to_async(create_appointment_from_json_str)(result)
output_msg = f'Created an appointment with title "{output.name}" on {output.date} at {output.time}.'
chat_history.append((message, output_msg))
resp = ChatResponse(username="bot", message=output_msg, type="stream")
await self.send(text_data=json.dumps(resp.dict()))
end_resp = ChatResponse(username="bot", message="", type="end")
await self.send(text_data=json.dumps(end_resp.dict()))
except AppointmentJSONException as e:
error_msg = str(e)
is_field_error = any([x in error_msg for x in ["date", "time", "name"]])
if is_field_error:
resp = ChatResponse(
username="bot",
message=error_msg,
type="clarification",
)
# Make sure that the bot knows there was
# a missing value and that we asked a user to provide it.
serialized_result = ""
appointment_dict = json.loads(result)
for key, value in appointment_dict.items():
serialized_result += f"{key}: {value}\n"
await sync_to_async(memory.save_context)(
{"input": serialized_result}, {"ouput": error_msg}
)
chat_history.append((message, error_msg))
await self.send(text_data=json.dumps(resp.dict()))
else:
resp = ChatResponse(
username="bot",
message="Sorry, something went wrong. Please try again.",
type="stream",
)
await self.send(text_data=json.dumps(resp.dict()))
end_resp = ChatResponse(username="bot", message="", type="end")
await self.send(text_data=json.dumps(end_resp.dict()))
async def general_chat_message(self, event):
print("IN GENERAL CHAT MESSAGE")
message = event["message"]
username = event["username"]
# send message and username of sender to websocket
resp = ChatResponse(username=username, message=message, type="stream")
await self.send(text_data=json.dumps(resp.dict()))
# Construct a response
start_resp = ChatResponse(username="bot", message="", type="start")
await self.send(text_data=json.dumps(start_resp.dict()))
result = await self.general_chat_chain.acall(
{"text": message, "chat_history": chat_history}
)
chat_history.append((message, result["text"]))
end_resp = ChatResponse(username="bot", message="", type="end")
await self.send(text_data=json.dumps(end_resp.dict()))
async def symptom_message(self, event):
print("IN SYMPTOM MESSAGE")
message = event["message"]
username = event["username"]
# send message and username of sender to websocket
resp = ChatResponse(username=username, message=message, type="stream")
await self.send(text_data=json.dumps(resp.dict()))
# Construct a response
start_resp = ChatResponse(username="bot", message="", type="start")
await self.send(text_data=json.dumps(start_resp.dict()))
question = (
f"Original question: {message}.\nPatient health data: {self.health_data}"
)
result = await self.symptopms_qa_chain.acall(
{"question": question, "chat_history": chat_history}
)
chat_history.append((question, result["answer"]))
end_resp = ChatResponse(username="bot", message="", type="end")
await self.send(text_data=json.dumps(end_resp.dict()))
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~tests~test_exceptions.py | import pickle
import pytest
import openaipro
EXCEPTION_TEST_CASES = [
openaipro.InvalidRequestError(
"message",
"param",
code=400,
http_body={"test": "test1"},
http_status="fail",
json_body={"text": "iono some text"},
headers={"request-id": "asasd"},
),
openaipro.error.AuthenticationError(),
openaipro.error.PermissionError(),
openaipro.error.RateLimitError(),
openaipro.error.ServiceUnavailableError(),
openaipro.error.SignatureVerificationError("message", "sig_header?"),
openaipro.error.APIConnectionError("message!", should_retry=True),
openaipro.error.TryAgain(),
openaipro.error.Timeout(),
openaipro.error.APIError(
message="message",
code=400,
http_body={"test": "test1"},
http_status="fail",
json_body={"text": "iono some text"},
headers={"request-id": "asasd"},
),
openaipro.error.OpenAIError(),
]
class TestExceptions:
@pytest.mark.parametrize("error", EXCEPTION_TEST_CASES)
def test_exceptions_are_pickleable(self, error) -> None:
assert error.__repr__() == pickle.loads(pickle.dumps(error)).__repr__()
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~abstract~updateable_api_resource.py | from urllib.parse import quote_plus
from typing import Awaitable
from openaipro.api_resources.abstract.api_resource import APIResource
class UpdateableAPIResource(APIResource):
@classmethod
def modify(cls, sid, **params):
url = "%s/%s" % (cls.class_url(), quote_plus(sid))
return cls._static_request("post", url, **params)
@classmethod
def amodify(cls, sid, **params) -> Awaitable:
url = "%s/%s" % (cls.class_url(), quote_plus(sid))
return cls._astatic_request("patch", url, **params)
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~tests~asyncio~test_endpoints.py | import io
import json
import pytest
from aiohttp import ClientSession
import openaipro
from openaipro import error
pytestmark = [pytest.mark.asyncio]
# FILE TESTS
async def test_file_upload():
result = await openaipro.File.acreate(
file=io.StringIO(json.dumps({"text": "test file data"})),
purpose="search",
)
assert result.purpose == "search"
assert "id" in result
result = await openaipro.File.aretrieve(id=result.id)
assert result.status == "uploaded"
# COMPLETION TESTS
async def test_completions():
result = await openaipro.Completion.acreate(
prompt="This was a test", n=5, engine="ada"
)
assert len(result.choices) == 5
async def test_completions_multiple_prompts():
result = await openaipro.Completion.acreate(
prompt=["This was a test", "This was another test"], n=5, engine="ada"
)
assert len(result.choices) == 10
async def test_completions_model():
result = await openaipro.Completion.acreate(prompt="This was a test", n=5, model="ada")
assert len(result.choices) == 5
assert result.model.startswith("ada")
async def test_timeout_raises_error():
# A query that should take awhile to return
with pytest.raises(error.Timeout):
await openaipro.Completion.acreate(
prompt="test" * 1000,
n=10,
model="ada",
max_tokens=100,
request_timeout=0.01,
)
async def test_timeout_does_not_error():
# A query that should be fast
await openaipro.Completion.acreate(
prompt="test",
model="ada",
request_timeout=10,
)
async def test_completions_stream_finishes_global_session():
async with ClientSession() as session:
openaipro.aiosession.set(session)
# A query that should be fast
parts = []
async for part in await openaipro.Completion.acreate(
prompt="test", model="ada", request_timeout=3, stream=True
):
parts.append(part)
assert len(parts) > 1
async def test_completions_stream_finishes_local_session():
# A query that should be fast
parts = []
async for part in await openaipro.Completion.acreate(
prompt="test", model="ada", request_timeout=3, stream=True
):
parts.append(part)
assert len(parts) > 1
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~object_classes.py | from openaipro import api_resources
from openaipro.api_resources.experimental.completion_config import CompletionConfig
OBJECT_CLASSES = {
"engine": api_resources.Engine,
"experimental.completion_config": CompletionConfig,
"file": api_resources.File,
"fine-tune": api_resources.FineTune,
"model": api_resources.Model,
"deployment": api_resources.Deployment,
}
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~moderation.py | from typing import List, Optional, Union
from openaipro.openai_object import OpenAIObject
class Moderation(OpenAIObject):
VALID_MODEL_NAMES: List[str] = ["text-moderation-stable", "text-moderation-latest"]
@classmethod
def get_url(cls):
return "/moderations"
@classmethod
def _prepare_create(cls, input, model, api_key):
if model is not None and model not in cls.VALID_MODEL_NAMES:
raise ValueError(
f"The parameter model should be chosen from {cls.VALID_MODEL_NAMES} "
f"and it is default to be None."
)
instance = cls(api_key=api_key)
params = {"input": input}
if model is not None:
params["model"] = model
return instance, params
@classmethod
def create(
cls,
input: Union[str, List[str]],
model: Optional[str] = None,
api_key: Optional[str] = None,
):
instance, params = cls._prepare_create(input, model, api_key)
return instance.request("post", cls.get_url(), params)
@classmethod
def acreate(
cls,
input: Union[str, List[str]],
model: Optional[str] = None,
api_key: Optional[str] = None,
):
instance, params = cls._prepare_create(input, model, api_key)
return instance.arequest("post", cls.get_url(), params)
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~deployment.py | from openaipro import util
from openaipro.api_resources.abstract import (
DeletableAPIResource,
ListableAPIResource,
CreateableAPIResource,
)
from openaipro.error import InvalidRequestError, APIError
class Deployment(CreateableAPIResource, ListableAPIResource, DeletableAPIResource):
OBJECT_NAME = "deployments"
@classmethod
def _check_create(cls, *args, **kwargs):
typed_api_type, _ = cls._get_api_type_and_version(
kwargs.get("api_type", None), None
)
if typed_api_type not in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise APIError(
"Deployment operations are only available for the Azure API type."
)
if kwargs.get("model", None) is None:
raise InvalidRequestError(
"Must provide a 'model' parameter to create a Deployment.",
param="model",
)
scale_settings = kwargs.get("scale_settings", None)
if scale_settings is None:
raise InvalidRequestError(
"Must provide a 'scale_settings' parameter to create a Deployment.",
param="scale_settings",
)
if "scale_type" not in scale_settings or (
scale_settings["scale_type"].lower() == "manual"
and "capacity" not in scale_settings
):
raise InvalidRequestError(
"The 'scale_settings' parameter contains invalid or incomplete values.",
param="scale_settings",
)
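    # Illustrative payload that satisfies the checks above (values are examples only):
    #   Deployment.create(model="some-model-id",
    #                     scale_settings={"scale_type": "manual", "capacity": 1})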
@classmethod
def create(cls, *args, **kwargs):
"""
Creates a new deployment for the provided prompt and parameters.
"""
cls._check_create(*args, **kwargs)
return super().create(*args, **kwargs)
@classmethod
def acreate(cls, *args, **kwargs):
"""
Creates a new deployment for the provided prompt and parameters.
"""
cls._check_create(*args, **kwargs)
return super().acreate(*args, **kwargs)
@classmethod
def _check_list(cls, *args, **kwargs):
typed_api_type, _ = cls._get_api_type_and_version(
kwargs.get("api_type", None), None
)
if typed_api_type not in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise APIError(
"Deployment operations are only available for the Azure API type."
)
@classmethod
def list(cls, *args, **kwargs):
cls._check_list(*args, **kwargs)
return super().list(*args, **kwargs)
@classmethod
def alist(cls, *args, **kwargs):
cls._check_list(*args, **kwargs)
return super().alist(*args, **kwargs)
@classmethod
def _check_delete(cls, *args, **kwargs):
typed_api_type, _ = cls._get_api_type_and_version(
kwargs.get("api_type", None), None
)
if typed_api_type not in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise APIError(
"Deployment operations are only available for the Azure API type."
)
@classmethod
def delete(cls, *args, **kwargs):
cls._check_delete(*args, **kwargs)
return super().delete(*args, **kwargs)
@classmethod
def adelete(cls, *args, **kwargs):
cls._check_delete(*args, **kwargs)
return super().adelete(*args, **kwargs)
@classmethod
def _check_retrieve(cls, *args, **kwargs):
typed_api_type, _ = cls._get_api_type_and_version(
kwargs.get("api_type", None), None
)
if typed_api_type not in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise APIError(
"Deployment operations are only available for the Azure API type."
)
@classmethod
def retrieve(cls, *args, **kwargs):
cls._check_retrieve(*args, **kwargs)
return super().retrieve(*args, **kwargs)
@classmethod
def aretrieve(cls, *args, **kwargs):
cls._check_retrieve(*args, **kwargs)
return super().aretrieve(*args, **kwargs)
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~fine_tune.py | from urllib.parse import quote_plus
from openaipro import api_requestor, util, error
from openaipro.api_resources.abstract import (
CreateableAPIResource,
ListableAPIResource,
nested_resource_class_methods,
)
from openaipro.api_resources.abstract.deletable_api_resource import DeletableAPIResource
from openaipro.openai_response import OpenAIResponse
from openaipro.util import ApiType
@nested_resource_class_methods("event", operations=["list"])
class FineTune(ListableAPIResource, CreateableAPIResource, DeletableAPIResource):
OBJECT_NAME = "fine-tunes"
@classmethod
def _prepare_cancel(
cls,
id,
api_key=None,
api_type=None,
request_id=None,
api_version=None,
**params,
):
base = cls.class_url()
extn = quote_plus(id)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
url = "/%s%s/%s/cancel?api-version=%s" % (
cls.azure_api_prefix,
base,
extn,
api_version,
)
elif typed_api_type == ApiType.OPEN_AI:
url = "%s/%s/cancel" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
instance = cls(id, api_key, **params)
return instance, url
@classmethod
def cancel(
cls,
id,
api_key=None,
api_type=None,
request_id=None,
api_version=None,
**params,
):
instance, url = cls._prepare_cancel(
id,
api_key,
api_type,
request_id,
api_version,
**params,
)
return instance.request("post", url, request_id=request_id)
@classmethod
def acancel(
cls,
id,
api_key=None,
api_type=None,
request_id=None,
api_version=None,
**params,
):
instance, url = cls._prepare_cancel(
id,
api_key,
api_type,
request_id,
api_version,
**params,
)
return instance.arequest("post", url, request_id=request_id)
@classmethod
def _prepare_stream_events(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
base = cls.class_url()
extn = quote_plus(id)
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
url = "/%s%s/%s/events?stream=true&api-version=%s" % (
cls.azure_api_prefix,
base,
extn,
api_version,
)
elif typed_api_type == ApiType.OPEN_AI:
url = "%s/%s/events?stream=true" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
return requestor, url
@classmethod
def stream_events(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor, url = cls._prepare_stream_events(
id,
api_key,
api_base,
api_type,
request_id,
api_version,
organization,
**params,
)
response, _, api_key = requestor.request(
"get", url, params, stream=True, request_id=request_id
)
assert not isinstance(response, OpenAIResponse) # must be an iterator
return (
util.convert_to_openai_object(
line,
api_key,
api_version,
organization,
)
for line in response
)
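    # Illustrative consumption (the id is a placeholder):
    #   for event in FineTune.stream_events("ft-..."):
    #       print(event["message"])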
@classmethod
async def astream_events(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor, url = cls._prepare_stream_events(
id,
api_key,
api_base,
api_type,
request_id,
api_version,
organization,
**params,
)
response, _, api_key = await requestor.arequest(
"get", url, params, stream=True, request_id=request_id
)
assert not isinstance(response, OpenAIResponse) # must be an iterator
return (
util.convert_to_openai_object(
line,
api_key,
api_version,
organization,
)
async for line in response
)
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~wandb_logger.py | try:
import wandb
WANDB_AVAILABLE = True
except:
WANDB_AVAILABLE = False
if WANDB_AVAILABLE:
import datetime
import io
import json
import re
from pathlib import Path
    from openaipro import File, FineTune
from openaipro.datalib import numpy as np
from openaipro.datalib import pandas as pd
class WandbLogger:
"""
Log fine-tunes to [Weights & Biases](https://wandb.me/openai-docs)
"""
if not WANDB_AVAILABLE:
print("Logging requires wandb to be installed. Run `pip install wandb`.")
else:
_wandb_api = None
_logged_in = False
@classmethod
def sync(
cls,
id=None,
n_fine_tunes=None,
project="GPT-3",
entity=None,
force=False,
**kwargs_wandb_init,
):
"""
Sync fine-tunes to Weights & Biases.
:param id: The id of the fine-tune (optional)
:param n_fine_tunes: Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced.
:param project: Name of the project where you're sending runs. By default, it is "GPT-3".
:param entity: Username or team name where you're sending runs. By default, your default entity is used, which is usually your username.
:param force: Forces logging and overwrite existing wandb run of the same fine-tune.
"""
if not WANDB_AVAILABLE:
return
if id:
fine_tune = FineTune.retrieve(id=id)
fine_tune.pop("events", None)
fine_tunes = [fine_tune]
else:
# get list of fine_tune to log
fine_tunes = FineTune.list()
if not fine_tunes or fine_tunes.get("data") is None:
print("No fine-tune has been retrieved")
return
fine_tunes = fine_tunes["data"][
-n_fine_tunes if n_fine_tunes is not None else None :
]
# log starting from oldest fine_tune
show_individual_warnings = (
False if id is None and n_fine_tunes is None else True
)
fine_tune_logged = [
cls._log_fine_tune(
fine_tune,
project,
entity,
force,
show_individual_warnings,
**kwargs_wandb_init,
)
for fine_tune in fine_tunes
]
if not show_individual_warnings and not any(fine_tune_logged):
print("No new successful fine-tunes were found")
return "🎉 wandb sync completed successfully"
@classmethod
def _log_fine_tune(
cls,
fine_tune,
project,
entity,
force,
show_individual_warnings,
**kwargs_wandb_init,
):
fine_tune_id = fine_tune.get("id")
status = fine_tune.get("status")
# check run completed successfully
if status != "succeeded":
if show_individual_warnings:
print(
f'Fine-tune {fine_tune_id} has the status "{status}" and will not be logged'
)
return
# check results are present
try:
results_id = fine_tune["result_files"][0]["id"]
results = File.download(id=results_id).decode("utf-8")
except:
if show_individual_warnings:
print(f"Fine-tune {fine_tune_id} has no results and will not be logged")
return
# check run has not been logged already
run_path = f"{project}/{fine_tune_id}"
if entity is not None:
run_path = f"{entity}/{run_path}"
wandb_run = cls._get_wandb_run(run_path)
if wandb_run:
wandb_status = wandb_run.summary.get("status")
if show_individual_warnings:
if wandb_status == "succeeded":
print(
f"Fine-tune {fine_tune_id} has already been logged successfully at {wandb_run.url}"
)
if not force:
print(
'Use "--force" in the CLI or "force=True" in python if you want to overwrite previous run'
)
else:
print(
f"A run for fine-tune {fine_tune_id} was previously created but didn't end successfully"
)
if wandb_status != "succeeded" or force:
print(
f"A new wandb run will be created for fine-tune {fine_tune_id} and previous run will be overwritten"
)
if wandb_status == "succeeded" and not force:
return
# start a wandb run
wandb.init(
job_type="fine-tune",
config=cls._get_config(fine_tune),
project=project,
entity=entity,
name=fine_tune_id,
id=fine_tune_id,
**kwargs_wandb_init,
)
# log results
df_results = pd.read_csv(io.StringIO(results))
for _, row in df_results.iterrows():
metrics = {k: v for k, v in row.items() if not np.isnan(v)}
step = metrics.pop("step")
if step is not None:
step = int(step)
wandb.log(metrics, step=step)
fine_tuned_model = fine_tune.get("fine_tuned_model")
if fine_tuned_model is not None:
wandb.summary["fine_tuned_model"] = fine_tuned_model
# training/validation files and fine-tune details
cls._log_artifacts(fine_tune, project, entity)
# mark run as complete
wandb.summary["status"] = "succeeded"
wandb.finish()
return True
@classmethod
def _ensure_logged_in(cls):
if not cls._logged_in:
if wandb.login():
cls._logged_in = True
else:
raise Exception("You need to log in to wandb")
@classmethod
def _get_wandb_run(cls, run_path):
cls._ensure_logged_in()
try:
if cls._wandb_api is None:
cls._wandb_api = wandb.Api()
return cls._wandb_api.run(run_path)
except Exception:
return None
@classmethod
def _get_wandb_artifact(cls, artifact_path):
cls._ensure_logged_in()
try:
if cls._wandb_api is None:
cls._wandb_api = wandb.Api()
return cls._wandb_api.artifact(artifact_path)
except Exception:
return None
@classmethod
def _get_config(cls, fine_tune):
config = dict(fine_tune)
for key in ("training_files", "validation_files", "result_files"):
if config.get(key) and len(config[key]):
config[key] = config[key][0]
if config.get("created_at"):
config["created_at"] = datetime.datetime.fromtimestamp(config["created_at"])
return config
@classmethod
def _log_artifacts(cls, fine_tune, project, entity):
# training/validation files
training_file = (
fine_tune["training_files"][0]
if fine_tune.get("training_files") and len(fine_tune["training_files"])
else None
)
validation_file = (
fine_tune["validation_files"][0]
if fine_tune.get("validation_files") and len(fine_tune["validation_files"])
else None
)
for file, prefix, artifact_type in (
(training_file, "train", "training_files"),
(validation_file, "valid", "validation_files"),
):
if file is not None:
cls._log_artifact_inputs(file, prefix, artifact_type, project, entity)
# fine-tune details
fine_tune_id = fine_tune.get("id")
artifact = wandb.Artifact(
"fine_tune_details",
type="fine_tune_details",
metadata=fine_tune,
)
with artifact.new_file(
"fine_tune_details.json", mode="w", encoding="utf-8"
) as f:
json.dump(fine_tune, f, indent=2)
wandb.run.log_artifact(
artifact,
aliases=["latest", fine_tune_id],
)
@classmethod
def _log_artifact_inputs(cls, file, prefix, artifact_type, project, entity):
file_id = file["id"]
filename = Path(file["filename"]).name
stem = Path(file["filename"]).stem
# get input artifact
artifact_name = f"{prefix}-{filename}"
# sanitize name to valid wandb artifact name
artifact_name = re.sub(r"[^a-zA-Z0-9_\-.]", "_", artifact_name)
artifact_alias = file_id
artifact_path = f"{project}/{artifact_name}:{artifact_alias}"
if entity is not None:
artifact_path = f"{entity}/{artifact_path}"
artifact = cls._get_wandb_artifact(artifact_path)
# create artifact if file not already logged previously
if artifact is None:
# get file content
try:
file_content = File.download(id=file_id).decode("utf-8")
except:
print(
f"File {file_id} could not be retrieved. Make sure you are allowed to download training/validation files"
)
return
artifact = wandb.Artifact(artifact_name, type=artifact_type, metadata=file)
with artifact.new_file(filename, mode="w", encoding="utf-8") as f:
f.write(file_content)
# create a Table
try:
table, n_items = cls._make_table(file_content)
artifact.add(table, stem)
wandb.config.update({f"n_{prefix}": n_items})
artifact.metadata["items"] = n_items
except:
print(f"File {file_id} could not be read as a valid JSON file")
else:
# log number of items
wandb.config.update({f"n_{prefix}": artifact.metadata.get("items")})
wandb.run.use_artifact(artifact, aliases=["latest", artifact_alias])
@classmethod
def _make_table(cls, file_content):
df = pd.read_json(io.StringIO(file_content), orient="records", lines=True)
return wandb.Table(dataframe=df), len(df)
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~embeddings_utils.py | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openaipro
from openaipro.datalib import numpy as np
from openaipro.datalib import pandas as pd
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_embedding(text: str, engine="text-similarity-davinci-001") -> List[float]:
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return openaipro.Embedding.create(input=[text], engine=engine)["data"][0]["embedding"]
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
async def aget_embedding(
text: str, engine="text-similarity-davinci-001"
) -> List[float]:
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return (await openaipro.Embedding.acreate(input=[text], engine=engine))["data"][0][
"embedding"
]
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_embeddings(
list_of_text: List[str], engine="text-similarity-babbage-001"
) -> List[List[float]]:
assert len(list_of_text) <= 2048, "The batch size should not be larger than 2048."
# replace newlines, which can negatively affect performance.
list_of_text = [text.replace("\n", " ") for text in list_of_text]
data = openaipro.Embedding.create(input=list_of_text, engine=engine).data
data = sorted(data, key=lambda x: x["index"]) # maintain the same order as input.
return [d["embedding"] for d in data]
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
async def aget_embeddings(
list_of_text: List[str], engine="text-similarity-babbage-001"
) -> List[List[float]]:
assert len(list_of_text) <= 2048, "The batch size should not be larger than 2048."
# replace newlines, which can negatively affect performance.
list_of_text = [text.replace("\n", " ") for text in list_of_text]
data = (await openaipro.Embedding.acreate(input=list_of_text, engine=engine)).data
data = sorted(data, key=lambda x: x["index"]) # maintain the same order as input.
return [d["embedding"] for d in data]
def cosine_similarity(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
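# Quick sanity check (illustrative): identical unit vectors give 1.0, orthogonal ones give 0.0.
#   cosine_similarity([1.0, 0.0], [1.0, 0.0]) -> 1.0
#   cosine_similarity([1.0, 0.0], [0.0, 1.0]) -> 0.0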
def plot_multiclass_precision_recall(
y_score, y_true_untransformed, class_list, classifier_name
):
"""
Precision-Recall plotting for a multiclass problem. It plots average precision-recall, per class precision recall and reference f1 contours.
Code slightly modified, but heavily based on https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
"""
n_classes = len(class_list)
y_true = pd.concat(
[(y_true_untransformed == class_list[i]) for i in range(n_classes)], axis=1
).values
# For each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_true[:, i], y_score[:, i])
average_precision[i] = average_precision_score(y_true[:, i], y_score[:, i])
# A "micro-average": quantifying score on all classes jointly
precision_micro, recall_micro, _ = precision_recall_curve(
y_true.ravel(), y_score.ravel()
)
average_precision_micro = average_precision_score(y_true, y_score, average="micro")
print(
str(classifier_name)
+ " - Average precision score over all classes: {0:0.2f}".format(
average_precision_micro
)
)
# setup plot details
plt.figure(figsize=(9, 10))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
(l,) = plt.plot(x[y >= 0], y[y >= 0], color="gray", alpha=0.2)
plt.annotate("f1={0:0.1f}".format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append("iso-f1 curves")
(l,) = plt.plot(recall_micro, precision_micro, color="gold", lw=2)
lines.append(l)
labels.append(
"average Precision-recall (auprc = {0:0.2f})" "".format(average_precision_micro)
)
for i in range(n_classes):
(l,) = plt.plot(recall[i], precision[i], lw=2)
lines.append(l)
labels.append(
"Precision-recall for class `{0}` (auprc = {1:0.2f})"
"".format(class_list[i], average_precision[i])
)
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title(f"{classifier_name}: Precision-Recall curve for each class")
plt.legend(lines, labels)
def distances_from_embeddings(
query_embedding: List[float],
embeddings: List[List[float]],
distance_metric="cosine",
) -> List[List]:
"""Return the distances between a query embedding and a list of embeddings."""
distance_metrics = {
"cosine": spatial.distance.cosine,
"L1": spatial.distance.cityblock,
"L2": spatial.distance.euclidean,
"Linf": spatial.distance.chebyshev,
}
distances = [
distance_metrics[distance_metric](query_embedding, embedding)
for embedding in embeddings
]
return distances
def indices_of_nearest_neighbors_from_distances(distances) -> np.ndarray:
"""Return a list of indices of nearest neighbors from a list of distances."""
return np.argsort(distances)
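# Illustrative nearest-neighbor lookup built from the two helpers above
# (query_embedding and embeddings are assumed to already exist):
#   distances = distances_from_embeddings(query_embedding, embeddings, distance_metric="cosine")
#   top5 = indices_of_nearest_neighbors_from_distances(distances)[:5]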
def pca_components_from_embeddings(
embeddings: List[List[float]], n_components=2
) -> np.ndarray:
"""Return the PCA components of a list of embeddings."""
pca = PCA(n_components=n_components)
array_of_embeddings = np.array(embeddings)
return pca.fit_transform(array_of_embeddings)
def tsne_components_from_embeddings(
embeddings: List[List[float]], n_components=2, **kwargs
) -> np.ndarray:
"""Returns t-SNE components of a list of embeddings."""
# use better defaults if not specified
if "init" not in kwargs.keys():
kwargs["init"] = "pca"
if "learning_rate" not in kwargs.keys():
kwargs["learning_rate"] = "auto"
tsne = TSNE(n_components=n_components, **kwargs)
array_of_embeddings = np.array(embeddings)
return tsne.fit_transform(array_of_embeddings)
def chart_from_components(
components: np.ndarray,
labels: Optional[List[str]] = None,
strings: Optional[List[str]] = None,
x_title="Component 0",
y_title="Component 1",
mark_size=5,
**kwargs,
):
"""Return an interactive 2D chart of embedding components."""
empty_list = ["" for _ in components]
data = pd.DataFrame(
{
x_title: components[:, 0],
y_title: components[:, 1],
"label": labels if labels else empty_list,
"string": ["<br>".join(tr.wrap(string, width=30)) for string in strings]
if strings
else empty_list,
}
)
chart = px.scatter(
data,
x=x_title,
y=y_title,
color="label" if labels else None,
symbol="label" if labels else None,
hover_data=["string"] if strings else None,
**kwargs,
).update_traces(marker=dict(size=mark_size))
return chart
def chart_from_components_3D(
components: np.ndarray,
labels: Optional[List[str]] = None,
strings: Optional[List[str]] = None,
x_title: str = "Component 0",
y_title: str = "Component 1",
z_title: str = "Compontent 2",
mark_size: int = 5,
**kwargs,
):
"""Return an interactive 3D chart of embedding components."""
empty_list = ["" for _ in components]
data = pd.DataFrame(
{
x_title: components[:, 0],
y_title: components[:, 1],
z_title: components[:, 2],
"label": labels if labels else empty_list,
"string": ["<br>".join(tr.wrap(string, width=30)) for string in strings]
if strings
else empty_list,
}
)
chart = px.scatter_3d(
data,
x=x_title,
y=y_title,
z=z_title,
color="label" if labels else None,
symbol="label" if labels else None,
hover_data=["string"] if strings else None,
**kwargs,
).update_traces(marker=dict(size=mark_size))
return chart
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~experimental~completion_config.py | from openaipro.api_resources.abstract import (
CreateableAPIResource,
DeletableAPIResource,
ListableAPIResource,
)
class CompletionConfig(
CreateableAPIResource, ListableAPIResource, DeletableAPIResource
):
OBJECT_NAME = "experimental.completion_configs"
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~error_object.py | from typing import Optional
from openaipro.openai_object import OpenAIObject
from openaipro.util import merge_dicts
class ErrorObject(OpenAIObject):
def refresh_from(
self,
values,
api_key=None,
api_version=None,
api_type=None,
organization=None,
response_ms: Optional[int] = None,
):
# Unlike most other API resources, the API will omit attributes in
# error objects when they have a null value. We manually set default
# values here to facilitate generic error handling.
values = merge_dicts({"message": None, "type": None}, values)
return super(ErrorObject, self).refresh_from(
values=values,
api_key=api_key,
api_version=api_version,
api_type=api_type,
organization=organization,
response_ms=response_ms,
)
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~tests~test_util.py | from tempfile import NamedTemporaryFile
import pytest
import openaipro
from openaipro import util
@pytest.fixture(scope="function")
def api_key_file():
saved_path = openaipro.api_key_path
try:
with NamedTemporaryFile(prefix="openai-api-key", mode="wt") as tmp:
openaipro.api_key_path = tmp.name
yield tmp
finally:
openaipro.api_key_path = saved_path
def test_openai_api_key_path(api_key_file) -> None:
print("sk-foo", file=api_key_file)
api_key_file.flush()
assert util.default_api_key() == "sk-foo"
def test_openai_api_key_path_with_malformed_key(api_key_file) -> None:
print("malformed-api-key", file=api_key_file)
api_key_file.flush()
with pytest.raises(ValueError, match="Malformed API key"):
util.default_api_key()
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~abstract~api_resource.py | from urllib.parse import quote_plus
import openaipro
from openaipro import api_requestor, error, util
from openaipro.openai_object import OpenAIObject
from openaipro.util import ApiType
from typing import Optional
class APIResource(OpenAIObject):
api_prefix = ""
azure_api_prefix = "openai"
azure_deployments_prefix = "deployments"
@classmethod
def retrieve(
cls, id, api_key=None, request_id=None, request_timeout=None, **params
):
instance = cls(id, api_key, **params)
instance.refresh(request_id=request_id, request_timeout=request_timeout)
return instance
@classmethod
def aretrieve(
cls, id, api_key=None, request_id=None, request_timeout=None, **params
):
instance = cls(id, api_key, **params)
return instance.arefresh(request_id=request_id, request_timeout=request_timeout)
def refresh(self, request_id=None, request_timeout=None):
self.refresh_from(
self.request(
"get",
self.instance_url(),
request_id=request_id,
request_timeout=request_timeout,
)
)
return self
async def arefresh(self, request_id=None, request_timeout=None):
self.refresh_from(
await self.arequest(
"get",
self.instance_url(operation="refresh"),
request_id=request_id,
request_timeout=request_timeout,
)
)
return self
@classmethod
def class_url(cls):
if cls == APIResource:
raise NotImplementedError(
"APIResource is an abstract class. You should perform actions on its subclasses."
)
# Namespaces are separated in object names with periods (.) and in URLs
# with forward slashes (/), so replace the former with the latter.
base = cls.OBJECT_NAME.replace(".", "/") # type: ignore
if cls.api_prefix:
return "/%s/%s" % (cls.api_prefix, base)
return "/%s" % (base)
def instance_url(self, operation=None):
id = self.get("id")
if not isinstance(id, str):
raise error.InvalidRequestError(
"Could not determine which URL to request: %s instance "
"has invalid ID: %r, %s. ID should be of type `str` (or"
" `unicode`)" % (type(self).__name__, id, type(id)),
"id",
)
api_version = self.api_version or openaipro.api_version
extn = quote_plus(id)
if self.typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
if not api_version:
raise error.InvalidRequestError(
"An API version is required for the Azure API type."
)
if not operation:
base = self.class_url()
return "/%s%s/%s?api-version=%s" % (
self.azure_api_prefix,
base,
extn,
api_version,
)
return "/%s/%s/%s/%s?api-version=%s" % (
self.azure_api_prefix,
self.azure_deployments_prefix,
extn,
operation,
api_version,
)
elif self.typed_api_type == ApiType.OPEN_AI:
base = self.class_url()
return "%s/%s" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % self.api_type)
# The `method_` and `url_` arguments are suffixed with an underscore to
# avoid conflicting with actual request parameters in `params`.
@classmethod
def _static_request(
cls,
method_,
url_,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_version=api_version,
organization=organization,
api_base=api_base,
api_type=api_type,
)
response, _, api_key = requestor.request(
method_, url_, params, request_id=request_id
)
return util.convert_to_openai_object(
response, api_key, api_version, organization
)
@classmethod
async def _astatic_request(
cls,
method_,
url_,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_version=api_version,
organization=organization,
api_base=api_base,
api_type=api_type,
)
response, _, api_key = await requestor.arequest(
method_, url_, params, request_id=request_id
)
return response
@classmethod
def _get_api_type_and_version(
cls, api_type: Optional[str] = None, api_version: Optional[str] = None
):
typed_api_type = (
ApiType.from_str(api_type)
if api_type
else ApiType.from_str(openaipro.api_type)
)
typed_api_version = api_version or openaipro.api_version
return (typed_api_type, typed_api_version)
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~abstract~listable_api_resource.py | from openaipro import api_requestor, util, error
from openaipro.api_resources.abstract.api_resource import APIResource
from openaipro.util import ApiType
class ListableAPIResource(APIResource):
@classmethod
def auto_paging_iter(cls, *args, **params):
return cls.list(*args, **params).auto_paging_iter()
@classmethod
def __prepare_list_requestor(
cls,
api_key=None,
api_version=None,
organization=None,
api_base=None,
api_type=None,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base or cls.api_base(),
api_version=api_version,
api_type=api_type,
organization=organization,
)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
base = cls.class_url()
url = "/%s%s?api-version=%s" % (cls.azure_api_prefix, base, api_version)
elif typed_api_type == ApiType.OPEN_AI:
url = cls.class_url()
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
return requestor, url
@classmethod
def list(
cls,
api_key=None,
request_id=None,
api_version=None,
organization=None,
api_base=None,
api_type=None,
**params,
):
requestor, url = cls.__prepare_list_requestor(
api_key,
api_version,
organization,
api_base,
api_type,
)
response, _, api_key = requestor.request(
"get", url, params, request_id=request_id
)
openai_object = util.convert_to_openai_object(
response, api_key, api_version, organization
)
openai_object._retrieve_params = params
return openai_object
@classmethod
async def alist(
cls,
api_key=None,
request_id=None,
api_version=None,
organization=None,
api_base=None,
api_type=None,
**params,
):
requestor, url = cls.__prepare_list_requestor(
api_key,
api_version,
organization,
api_base,
api_type,
)
response, _, api_key = await requestor.arequest(
"get", url, params, request_id=request_id
)
openai_object = util.convert_to_openai_object(
response, api_key, api_version, organization
)
openai_object._retrieve_params = params
return openai_object
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~engine.py | import time
import warnings
from openaipro import util
from openaipro.api_resources.abstract import ListableAPIResource, UpdateableAPIResource
from openaipro.error import TryAgain
class Engine(ListableAPIResource, UpdateableAPIResource):
OBJECT_NAME = "engines"
def generate(self, timeout=None, **params):
start = time.time()
while True:
try:
return self.request(
"post",
self.instance_url() + "/generate",
params,
stream=params.get("stream"),
plain_old_data=True,
)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
async def agenerate(self, timeout=None, **params):
start = time.time()
while True:
try:
return await self.arequest(
"post",
self.instance_url() + "/generate",
params,
stream=params.get("stream"),
plain_old_data=True,
)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
def embeddings(self, **params):
warnings.warn(
"Engine.embeddings is deprecated, use Embedding.create", DeprecationWarning
)
return self.request("post", self.instance_url() + "/embeddings", params)
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~abstract~deletable_api_resource.py | from urllib.parse import quote_plus
from typing import Awaitable
from openaipro import error
from openaipro.api_resources.abstract.api_resource import APIResource
from openaipro.util import ApiType
class DeletableAPIResource(APIResource):
@classmethod
def __prepare_delete(cls, sid, api_type=None, api_version=None):
if isinstance(cls, APIResource):
raise ValueError(".delete may only be called as a class method now.")
base = cls.class_url()
extn = quote_plus(sid)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
url = "/%s%s/%s?api-version=%s" % (
cls.azure_api_prefix,
base,
extn,
api_version,
)
elif typed_api_type == ApiType.OPEN_AI:
url = "%s/%s" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
return url
@classmethod
def delete(cls, sid, api_type=None, api_version=None, **params):
url = cls.__prepare_delete(sid, api_type, api_version)
return cls._static_request(
"delete", url, api_type=api_type, api_version=api_version, **params
)
@classmethod
def adelete(cls, sid, api_type=None, api_version=None, **params) -> Awaitable:
url = cls.__prepare_delete(sid, api_type, api_version)
return cls._astatic_request(
"delete", url, api_type=api_type, api_version=api_version, **params
)
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~error.py | import openaipro
class OpenAIError(Exception):
def __init__(
self,
message=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
):
super(OpenAIError, self).__init__(message)
if http_body and hasattr(http_body, "decode"):
try:
http_body = http_body.decode("utf-8")
except BaseException:
http_body = (
"<Could not decode body as utf-8. "
"Please report to [email protected]>"
)
self._message = message
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
self.headers = headers or {}
self.code = code
self.request_id = self.headers.get("request-id", None)
self.error = self.construct_error_object()
self.organization = self.headers.get("openai-organization", None)
def __str__(self):
msg = self._message or "<empty message>"
if self.request_id is not None:
return "Request {0}: {1}".format(self.request_id, msg)
else:
return msg
# Returns the underlying `Exception` (base class) message, which is usually
# the raw message returned by OpenAI's API. This was previously available
# in python2 via `error.message`. Unlike `str(error)`, it omits "Request
# req_..." from the beginning of the string.
@property
def user_message(self):
return self._message
def __repr__(self):
return "%s(message=%r, http_status=%r, request_id=%r)" % (
self.__class__.__name__,
self._message,
self.http_status,
self.request_id,
)
def construct_error_object(self):
if (
self.json_body is None
or "error" not in self.json_body
or not isinstance(self.json_body["error"], dict)
):
return None
return openaipro.api_resources.error_object.ErrorObject.construct_from(
self.json_body["error"]
)
class APIError(OpenAIError):
pass
class TryAgain(OpenAIError):
pass
class Timeout(OpenAIError):
pass
class APIConnectionError(OpenAIError):
def __init__(
self,
message,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
should_retry=False,
):
super(APIConnectionError, self).__init__(
message, http_body, http_status, json_body, headers, code
)
self.should_retry = should_retry
class InvalidRequestError(OpenAIError):
def __init__(
self,
message,
param,
code=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body, headers, code
)
self.param = param
def __repr__(self):
return "%s(message=%r, param=%r, code=%r, http_status=%r, " "request_id=%r)" % (
self.__class__.__name__,
self._message,
self.param,
self.code,
self.http_status,
self.request_id,
)
def __reduce__(self):
return type(self), (
self._message,
self.param,
self.code,
self.http_body,
self.http_status,
self.json_body,
self.headers,
)
class AuthenticationError(OpenAIError):
pass
class PermissionError(OpenAIError):
pass
class RateLimitError(OpenAIError):
pass
class ServiceUnavailableError(OpenAIError):
pass
class InvalidAPIType(OpenAIError):
pass
class SignatureVerificationError(OpenAIError):
def __init__(self, message, sig_header, http_body=None):
super(SignatureVerificationError, self).__init__(message, http_body)
self.sig_header = sig_header
def __reduce__(self):
return type(self), (
self._message,
self.sig_header,
self.http_body,
)
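# Illustrative sketch (not part of the original module): typical client-side handling of
# the exception hierarchy above. `openaipro.Completion.create` is assumed to mirror the
# upstream openai-python surface; model and prompt are placeholder values.
#
#   try:
#       openaipro.Completion.create(model="text-davinci-003", prompt="hello")
#   except RateLimitError:
#       pass  # back off and retry later
#   except (APIConnectionError, ServiceUnavailableError):
#       pass  # transient network/server problems; retrying may help
#   except InvalidRequestError as err:
#       raise  # inspect err.param / err.code; retrying will not help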
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~tests~test_url_composition.py | from sys import api_version
import pytest
from openaipro import Completion, Engine
from openaipro.util import ApiType
@pytest.mark.url
def test_completions_url_composition_azure() -> None:
url = Completion.class_url("test_engine", "azure", "2021-11-01-preview")
assert (
url
== "/openai/deployments/test_engine/completions?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_completions_url_composition_azure_ad() -> None:
url = Completion.class_url("test_engine", "azure_ad", "2021-11-01-preview")
assert (
url
== "/openai/deployments/test_engine/completions?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_completions_url_composition_default() -> None:
url = Completion.class_url("test_engine")
assert url == "/engines/test_engine/completions"
@pytest.mark.url
def test_completions_url_composition_open_ai() -> None:
url = Completion.class_url("test_engine", "open_ai")
assert url == "/engines/test_engine/completions"
@pytest.mark.url
def test_completions_url_composition_invalid_type() -> None:
with pytest.raises(Exception):
url = Completion.class_url("test_engine", "invalid")
@pytest.mark.url
def test_completions_url_composition_instance_url_azure() -> None:
completion = Completion(
id="test_id",
engine="test_engine",
api_type="azure",
api_version="2021-11-01-preview",
)
url = completion.instance_url()
assert (
url
== "/openai/deployments/test_engine/completions/test_id?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_completions_url_composition_instance_url_azure_ad() -> None:
completion = Completion(
id="test_id",
engine="test_engine",
api_type="azure_ad",
api_version="2021-11-01-preview",
)
url = completion.instance_url()
assert (
url
== "/openai/deployments/test_engine/completions/test_id?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_completions_url_composition_instance_url_azure_no_version() -> None:
completion = Completion(
id="test_id", engine="test_engine", api_type="azure", api_version=None
)
with pytest.raises(Exception):
completion.instance_url()
@pytest.mark.url
def test_completions_url_composition_instance_url_default() -> None:
completion = Completion(id="test_id", engine="test_engine")
url = completion.instance_url()
assert url == "/engines/test_engine/completions/test_id"
@pytest.mark.url
def test_completions_url_composition_instance_url_open_ai() -> None:
completion = Completion(
id="test_id",
engine="test_engine",
api_type="open_ai",
api_version="2021-11-01-preview",
)
url = completion.instance_url()
assert url == "/engines/test_engine/completions/test_id"
@pytest.mark.url
def test_completions_url_composition_instance_url_invalid() -> None:
completion = Completion(id="test_id", engine="test_engine", api_type="invalid")
with pytest.raises(Exception):
url = completion.instance_url()
@pytest.mark.url
def test_completions_url_composition_instance_url_timeout_azure() -> None:
completion = Completion(
id="test_id",
engine="test_engine",
api_type="azure",
api_version="2021-11-01-preview",
)
completion["timeout"] = 12
url = completion.instance_url()
assert (
url
== "/openai/deployments/test_engine/completions/test_id?api-version=2021-11-01-preview&timeout=12"
)
@pytest.mark.url
def test_completions_url_composition_instance_url_timeout_openai() -> None:
completion = Completion(id="test_id", engine="test_engine", api_type="open_ai")
completion["timeout"] = 12
url = completion.instance_url()
assert url == "/engines/test_engine/completions/test_id?timeout=12"
@pytest.mark.url
def test_engine_search_url_composition_azure() -> None:
engine = Engine(id="test_id", api_type="azure", api_version="2021-11-01-preview")
assert engine.api_type == "azure"
assert engine.typed_api_type == ApiType.AZURE
url = engine.instance_url("test_operation")
assert (
url
== "/openai/deployments/test_id/test_operation?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_engine_search_url_composition_azure_ad() -> None:
engine = Engine(id="test_id", api_type="azure_ad", api_version="2021-11-01-preview")
assert engine.api_type == "azure_ad"
assert engine.typed_api_type == ApiType.AZURE_AD
url = engine.instance_url("test_operation")
assert (
url
== "/openai/deployments/test_id/test_operation?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_engine_search_url_composition_azure_no_version() -> None:
engine = Engine(id="test_id", api_type="azure", api_version=None)
assert engine.api_type == "azure"
assert engine.typed_api_type == ApiType.AZURE
with pytest.raises(Exception):
engine.instance_url("test_operation")
@pytest.mark.url
def test_engine_search_url_composition_azure_no_operation() -> None:
engine = Engine(id="test_id", api_type="azure", api_version="2021-11-01-preview")
assert engine.api_type == "azure"
assert engine.typed_api_type == ApiType.AZURE
assert (
engine.instance_url()
== "/openai/engines/test_id?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_engine_search_url_composition_default() -> None:
engine = Engine(id="test_id")
assert engine.api_type == None
assert engine.typed_api_type == ApiType.OPEN_AI
url = engine.instance_url()
assert url == "/engines/test_id"
@pytest.mark.url
def test_engine_search_url_composition_open_ai() -> None:
engine = Engine(id="test_id", api_type="open_ai")
assert engine.api_type == "open_ai"
assert engine.typed_api_type == ApiType.OPEN_AI
url = engine.instance_url()
assert url == "/engines/test_id"
@pytest.mark.url
def test_engine_search_url_composition_invalid_type() -> None:
engine = Engine(id="test_id", api_type="invalid")
assert engine.api_type == "invalid"
with pytest.raises(Exception):
assert engine.typed_api_type == ApiType.OPEN_AI
@pytest.mark.url
def test_engine_search_url_composition_invalid_search() -> None:
engine = Engine(id="test_id", api_type="invalid")
assert engine.api_type == "invalid"
with pytest.raises(Exception):
engine.search()
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_requestor.py | import asyncio
import json
import platform
import sys
import threading
import warnings
from contextlib import asynccontextmanager
from json import JSONDecodeError
from typing import (
AsyncGenerator,
AsyncIterator,
Dict,
Iterator,
Optional,
Tuple,
Union,
overload,
)
from urllib.parse import urlencode, urlsplit, urlunsplit
import aiohttp
import requests
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
import openaipro
from openaipro import error, util, version
from openaipro.openai_response import OpenAIResponse
from openaipro.util import ApiType
TIMEOUT_SECS = 600
MAX_CONNECTION_RETRIES = 2
# Has one attribute per thread, 'session'.
_thread_context = threading.local()
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlsplit(url)
if base_query:
query = "%s&%s" % (base_query, query)
return urlunsplit((scheme, netloc, path, query, fragment))
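# Worked example (illustrative placeholder URL): an already-encoded query string is merged
# into a URL that may itself carry one.
#
#   _build_api_url("https://api.example.com/v1/models?limit=5", "after=obj_123")
#   -> "https://api.example.com/v1/models?limit=5&after=obj_123"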
def _requests_proxies_arg(proxy) -> Optional[Dict[str, str]]:
"""Returns a value suitable for the 'proxies' argument to 'requests.request."""
if proxy is None:
return None
elif isinstance(proxy, str):
return {"http": proxy, "https": proxy}
elif isinstance(proxy, dict):
return proxy.copy()
else:
raise ValueError(
"'openaipro.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
)
def _aiohttp_proxies_arg(proxy) -> Optional[str]:
"""Returns a value suitable for the 'proxies' argument to 'aiohttp.ClientSession.request."""
if proxy is None:
return None
elif isinstance(proxy, str):
return proxy
elif isinstance(proxy, dict):
return proxy["https"] if "https" in proxy else proxy["http"]
else:
raise ValueError(
"'openaipro.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
)
def _make_session() -> requests.Session:
if not openaipro.verify_ssl_certs:
warnings.warn("verify_ssl_certs is ignored; openai always verifies.")
s = requests.Session()
proxies = _requests_proxies_arg(openaipro.proxy)
if proxies:
s.proxies = proxies
s.mount(
"https://",
requests.adapters.HTTPAdapter(max_retries=MAX_CONNECTION_RETRIES),
)
return s
def parse_stream_helper(line: bytes) -> Optional[str]:
if line:
if line.strip() == b"data: [DONE]":
# return here will cause GeneratorExit exception in urllib3
# and it will close http connection with TCP Reset
return None
if line.startswith(b"data: "):
line = line[len(b"data: "):]
return line.decode("utf-8")
else:
return None
return None
def parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
for line in rbody:
_line = parse_stream_helper(line)
if _line is not None:
yield _line
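# Worked example (illustrative payloads): how individual server-sent-event lines are
# normalized by the helpers above.
#
#   parse_stream_helper(b'data: {"id": "cmpl-1"}')  -> '{"id": "cmpl-1"}'
#   parse_stream_helper(b"data: [DONE]")            -> None  (end-of-stream marker)
#   parse_stream_helper(b"")                        -> None  (blank keep-alive line)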
async def parse_stream_async(rbody: aiohttp.StreamReader):
async for line in rbody:
_line = parse_stream_helper(line)
if _line is not None:
yield _line
class APIRequestor:
def __init__(
self,
key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
):
self.api_base = api_base or openaipro.api_base
self.api_key = key or util.default_api_key()
self.api_type = (
ApiType.from_str(api_type)
if api_type
else ApiType.from_str(openaipro.api_type)
)
self.api_version = api_version or openaipro.api_version
self.organization = organization or openaipro.organization
@classmethod
def format_app_info(cls, info):
str = info["name"]
if info["version"]:
str += "/%s" % (info["version"],)
if info["url"]:
str += " (%s)" % (info["url"],)
return str
@overload
def request(
self,
method,
url,
params,
headers,
files,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Iterator[OpenAIResponse], bool, str]:
pass
@overload
def request(
self,
method,
url,
params=...,
headers=...,
files=...,
*,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Iterator[OpenAIResponse], bool, str]:
pass
@overload
def request(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: Literal[False] = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[OpenAIResponse, bool, str]:
pass
@overload
def request(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: bool = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
pass
def request(
self,
method,
url,
params=None,
headers=None,
files=None,
stream: bool = False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
result = self.request_raw(
method.lower(),
url,
params=params,
supplied_headers=headers,
files=files,
stream=stream,
request_id=request_id,
request_timeout=request_timeout,
)
resp, got_stream = self._interpret_response(result, stream)
return resp, got_stream, self.api_key
@overload
async def arequest(
self,
method,
url,
params,
headers,
files,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[AsyncGenerator[OpenAIResponse, None], bool, str]:
pass
@overload
async def arequest(
self,
method,
url,
params=...,
headers=...,
files=...,
*,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[AsyncGenerator[OpenAIResponse, None], bool, str]:
pass
@overload
async def arequest(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: Literal[False] = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[OpenAIResponse, bool, str]:
pass
@overload
async def arequest(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: bool = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Union[OpenAIResponse, AsyncGenerator[OpenAIResponse, None]], bool, str]:
pass
async def arequest(
self,
method,
url,
params=None,
headers=None,
files=None,
stream: bool = False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> Tuple[Union[OpenAIResponse, AsyncGenerator[OpenAIResponse, None]], bool, str]:
ctx = aiohttp_session()
session = await ctx.__aenter__()
try:
result = await self.arequest_raw(
method.lower(),
url,
session,
params=params,
supplied_headers=headers,
files=files,
request_id=request_id,
request_timeout=request_timeout,
)
resp, got_stream = await self._interpret_async_response(result, stream)
except Exception:
await ctx.__aexit__(None, None, None)
raise
if got_stream:
async def wrap_resp():
assert isinstance(resp, AsyncGenerator)
try:
async for r in resp:
yield r
finally:
await ctx.__aexit__(None, None, None)
return wrap_resp(), got_stream, self.api_key
else:
await ctx.__aexit__(None, None, None)
return resp, got_stream, self.api_key
def handle_error_response(self, rbody, rcode, resp, rheaders, stream_error=False):
try:
error_data = resp["error"]
except (KeyError, TypeError):
raise error.APIError(
"Invalid response object from API: %r (HTTP response code "
"was %d)" % (rbody, rcode),
rbody,
rcode,
resp,
)
if "internal_message" in error_data:
error_data["message"] += "\n\n" + error_data["internal_message"]
util.log_info(
"OpenAI API error received",
error_code=error_data.get("code"),
error_type=error_data.get("type"),
error_message=error_data.get("message"),
error_param=error_data.get("param"),
stream_error=stream_error,
)
# Rate limits were previously coded as 400's with code 'rate_limit'
if rcode == 429:
return error.RateLimitError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode in [400, 404, 415]:
return error.InvalidRequestError(
error_data.get("message"),
error_data.get("param"),
error_data.get("code"),
rbody,
rcode,
resp,
rheaders,
)
elif rcode == 401:
return error.AuthenticationError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode == 403:
return error.PermissionError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode == 409:
return error.TryAgain(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif stream_error:
# TODO: we will soon attach status codes to stream errors
parts = [error_data.get("message"), "(Error occurred while streaming.)"]
message = " ".join([p for p in parts if p is not None])
return error.APIError(message, rbody, rcode, resp, rheaders)
else:
return error.APIError(
f"{error_data.get('message')} {rbody} {rcode} {resp} {rheaders}",
rbody,
rcode,
resp,
rheaders,
)
def request_headers(
self, method: str, extra, request_id: Optional[str]
) -> Dict[str, str]:
user_agent = "OpenAI/v1 PythonBindings/%s" % (version.VERSION,)
if openaipro.app_info:
user_agent += " " + self.format_app_info(openaipro.app_info)
uname_without_node = " ".join(
v for k, v in platform.uname()._asdict().items() if k != "node"
)
ua = {
"bindings_version": version.VERSION,
"httplib": "requests",
"lang": "python",
"lang_version": platform.python_version(),
"platform": platform.platform(),
"publisher": "openai",
"uname": uname_without_node,
}
if openaipro.app_info:
ua["application"] = openaipro.app_info
headers = {
"X-OpenAI-Client-User-Agent": json.dumps(ua),
"User-Agent": user_agent,
}
headers.update(util.api_key_to_header(self.api_type, self.api_key))
if self.organization:
headers["OpenAI-Organization"] = self.organization
if self.api_version is not None and self.api_type == ApiType.OPEN_AI:
headers["OpenAI-Version"] = self.api_version
if request_id is not None:
headers["X-Request-Id"] = request_id
if openaipro.debug:
headers["OpenAI-Debug"] = "true"
headers.update(extra)
return headers
def _validate_headers(
self, supplied_headers: Optional[Dict[str, str]]
) -> Dict[str, str]:
headers: Dict[str, str] = {}
if supplied_headers is None:
return headers
if not isinstance(supplied_headers, dict):
raise TypeError("Headers must be a dictionary")
for k, v in supplied_headers.items():
if not isinstance(k, str):
raise TypeError("Header keys must be strings")
if not isinstance(v, str):
raise TypeError("Header values must be strings")
headers[k] = v
# NOTE: It is possible to do more validation of the headers, but a request could always
# be made to the API manually with invalid headers, so we need to handle them server side.
return headers
def _prepare_request_raw(
self,
url,
supplied_headers,
method,
params,
files,
request_id: Optional[str],
) -> Tuple[str, Dict[str, str], Optional[bytes]]:
abs_url = "%s%s" % (self.api_base, url)
headers = self._validate_headers(supplied_headers)
data = None
if method == "get" or method == "delete":
if params:
encoded_params = urlencode(
[(k, v) for k, v in params.items() if v is not None]
)
abs_url = _build_api_url(abs_url, encoded_params)
elif method in {"post", "put"}:
if params and files:
raise ValueError("At most one of params and files may be specified.")
if params:
data = json.dumps(params).encode()
headers["Content-Type"] = "application/json"
else:
raise error.APIConnectionError(
"Unrecognized HTTP method %r. This may indicate a bug in the "
"OpenAI bindings. Please contact [email protected] for "
"assistance." % (method,)
)
headers = self.request_headers(method, headers, request_id)
util.log_debug("Request to OpenAI API", method=method, path=abs_url)
util.log_debug("Post details", data=data, api_version=self.api_version)
return abs_url, headers, data
def request_raw(
self,
method,
url,
*,
params=None,
supplied_headers: Optional[Dict[str, str]] = None,
files=None,
stream: bool = False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> requests.Response:
abs_url, headers, data = self._prepare_request_raw(
url, supplied_headers, method, params, files, request_id
)
if not hasattr(_thread_context, "session"):
_thread_context.session = _make_session()
try:
result = _thread_context.session.request(
method,
abs_url,
headers=headers,
data=data,
files=files,
stream=stream,
timeout=request_timeout if request_timeout else TIMEOUT_SECS,
)
except requests.exceptions.Timeout as e:
raise error.Timeout("Request timed out: {}".format(e)) from e
except requests.exceptions.RequestException as e:
raise error.APIConnectionError(
"Error communicating with OpenAI: {}".format(e)
) from e
util.log_debug(
"OpenAI API response",
path=abs_url,
response_code=result.status_code,
processing_ms=result.headers.get("OpenAI-Processing-Ms"),
request_id=result.headers.get("X-Request-Id"),
)
# Don't read the whole stream for debug logging unless necessary.
if openaipro.log == "debug":
util.log_debug(
"API response body", body=result.content, headers=result.headers
)
return result
async def arequest_raw(
self,
method,
url,
session,
*,
params=None,
supplied_headers: Optional[Dict[str, str]] = None,
files=None,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> aiohttp.ClientResponse:
abs_url, headers, data = self._prepare_request_raw(
url, supplied_headers, method, params, files, request_id
)
if isinstance(request_timeout, tuple):
timeout = aiohttp.ClientTimeout(
connect=request_timeout[0],
total=request_timeout[1],
)
else:
timeout = aiohttp.ClientTimeout(
total=request_timeout if request_timeout else TIMEOUT_SECS
)
if files:
# TODO: Use `aiohttp.MultipartWriter` to create the multipart form data here.
# For now we use the private `requests` method that is known to have worked so far.
data, content_type = requests.models.RequestEncodingMixin._encode_files( # type: ignore
files, data
)
headers["Content-Type"] = content_type
request_kwargs = {
"method": method,
"url": abs_url,
"headers": headers,
"data": data,
"proxy": _aiohttp_proxies_arg(openaipro.proxy),
"timeout": timeout,
}
try:
result = await session.request(**request_kwargs)
util.log_info(
"OpenAI API response",
path=abs_url,
response_code=result.status,
processing_ms=result.headers.get("OpenAI-Processing-Ms"),
request_id=result.headers.get("X-Request-Id"),
)
# Don't read the whole stream for debug logging unless necessary.
if openaipro.log == "debug":
util.log_debug(
"API response body", body=result.content, headers=result.headers
)
return result
except (aiohttp.ServerTimeoutError, asyncio.TimeoutError) as e:
raise error.Timeout("Request timed out") from e
except aiohttp.ClientError as e:
raise error.APIConnectionError("Error communicating with OpenAI") from e
def _interpret_response(
self, result: requests.Response, stream: bool
) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool]:
"""Returns the response(s) and a bool indicating whether it is a stream."""
if stream and "text/event-stream" in result.headers.get("Content-Type", ""):
return (
self._interpret_response_line(
line, result.status_code, result.headers, stream=True
)
for line in parse_stream(result.iter_lines())
), True
else:
return (
self._interpret_response_line(
result.content.decode("utf-8"),
result.status_code,
result.headers,
stream=False,
),
False,
)
async def _interpret_async_response(
self, result: aiohttp.ClientResponse, stream: bool
) -> Tuple[Union[OpenAIResponse, AsyncGenerator[OpenAIResponse, None]], bool]:
"""Returns the response(s) and a bool indicating whether it is a stream."""
if stream and "text/event-stream" in result.headers.get("Content-Type", ""):
return (
self._interpret_response_line(
line, result.status, result.headers, stream=True
)
async for line in parse_stream_async(result.content)
), True
else:
try:
await result.read()
except aiohttp.ClientError as e:
util.log_warn(e, body=result.content)
return (
self._interpret_response_line(
(await result.read()).decode("utf-8"),
result.status,
result.headers,
stream=False,
),
False,
)
def _interpret_response_line(
self, rbody: str, rcode: int, rheaders, stream: bool
) -> OpenAIResponse:
# HTTP 204 response code does not have any content in the body.
if rcode == 204:
return OpenAIResponse(None, rheaders)
if rcode == 503:
raise error.ServiceUnavailableError(
"The server is overloaded or not ready yet.",
rbody,
rcode,
headers=rheaders,
)
try:
data = json.loads(rbody)
except (JSONDecodeError, UnicodeDecodeError) as e:
raise error.APIError(
f"HTTP code {rcode} from API ({rbody})", rbody, rcode, headers=rheaders
) from e
resp = OpenAIResponse(data, rheaders)
# In the future, we might add a "status" parameter to errors
# to better handle the "error while streaming" case.
stream_error = stream and "error" in resp.data
if stream_error or not 200 <= rcode < 300:
raise self.handle_error_response(
rbody, rcode, resp.data, rheaders, stream_error=stream_error
)
return resp
@asynccontextmanager
async def aiohttp_session() -> AsyncIterator[aiohttp.ClientSession]:
user_set_session = openaipro.aiosession.get()
if user_set_session:
yield user_set_session
else:
async with aiohttp.ClientSession() as session:
yield session
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~customer.py | from openaipro.openai_object import OpenAIObject
class Customer(OpenAIObject):
@classmethod
def get_url(cls, customer, endpoint):
return f"/customer/{customer}/{endpoint}"
@classmethod
def create(cls, customer, endpoint, **params):
instance = cls()
return instance.request("post", cls.get_url(customer, endpoint), params)
@classmethod
def acreate(cls, customer, endpoint, **params):
instance = cls()
return instance.arequest("post", cls.get_url(customer, endpoint), params)
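# Illustrative sketch (the customer id and endpoint below are placeholders):
#
#   Customer.get_url("cus_123", "events")   -> "/customer/cus_123/events"
#   Customer.create("cus_123", "events", payload={"type": "ping"})  # POSTs params to that path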
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~validators.py | import os
import sys
from typing import Any, Callable, NamedTuple, Optional
from openaipro.datalib import pandas as pd, assert_has_pandas
class Remediation(NamedTuple):
name: str
immediate_msg: Optional[str] = None
necessary_msg: Optional[str] = None
necessary_fn: Optional[Callable[[Any], Any]] = None
optional_msg: Optional[str] = None
optional_fn: Optional[Callable[[Any], Any]] = None
error_msg: Optional[str] = None
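# How these fields are consumed by the driver helpers later in this module (see
# apply_necessary_remediation and apply_optional_remediation): a non-None error_msg aborts
# validation, immediate_msg (if any) is printed right away, necessary_fn is applied
# unconditionally with necessary_msg echoed, and optional_fn is applied only when the user
# accepts the optional_msg prompt.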
def num_examples_validator(df):
"""
This validator will only print out the number of examples and recommend to the user to increase the number of examples if less than 100.
"""
MIN_EXAMPLES = 100
optional_suggestion = (
""
if len(df) >= MIN_EXAMPLES
else ". In general, we recommend having at least a few hundred examples. We've found that performance tends to linearly increase for every doubling of the number of examples"
)
immediate_msg = (
f"\n- Your file contains {len(df)} prompt-completion pairs{optional_suggestion}"
)
return Remediation(name="num_examples", immediate_msg=immediate_msg)
def necessary_column_validator(df, necessary_column):
"""
This validator will ensure that the necessary column is present in the dataframe.
"""
def lower_case_column(df, column):
cols = [c for c in df.columns if str(c).lower() == column]
df.rename(columns={cols[0]: column.lower()}, inplace=True)
return df
immediate_msg = None
necessary_fn = None
necessary_msg = None
error_msg = None
if necessary_column not in df.columns:
if necessary_column in [str(c).lower() for c in df.columns]:
def lower_case_column_creator(df):
return lower_case_column(df, necessary_column)
necessary_fn = lower_case_column_creator
immediate_msg = (
f"\n- The `{necessary_column}` column/key should be lowercase"
)
necessary_msg = f"Lower case column name to `{necessary_column}`"
else:
error_msg = f"`{necessary_column}` column/key is missing. Please make sure you name your columns/keys appropriately, then retry"
return Remediation(
name="necessary_column",
immediate_msg=immediate_msg,
necessary_msg=necessary_msg,
necessary_fn=necessary_fn,
error_msg=error_msg,
)
def additional_column_validator(df, fields=["prompt", "completion"]):
"""
This validator will remove additional columns from the dataframe.
"""
additional_columns = []
necessary_msg = None
immediate_msg = None
necessary_fn = None
if len(df.columns) > 2:
additional_columns = [c for c in df.columns if c not in fields]
warn_message = ""
for ac in additional_columns:
dups = [c for c in additional_columns if ac in c]
if len(dups) > 0:
warn_message += f"\n WARNING: Some of the additional columns/keys contain `{ac}` in their name. These will be ignored, and the column/key `{ac}` will be used instead. This could also result from a duplicate column/key in the provided file."
immediate_msg = f"\n- The input file should contain exactly two columns/keys per row. Additional columns/keys present are: {additional_columns}{warn_message}"
necessary_msg = f"Remove additional columns/keys: {additional_columns}"
def necessary_fn(x):
return x[fields]
return Remediation(
name="additional_column",
immediate_msg=immediate_msg,
necessary_msg=necessary_msg,
necessary_fn=necessary_fn,
)
def non_empty_field_validator(df, field="completion"):
"""
This validator will ensure that no completion is empty.
"""
necessary_msg = None
necessary_fn = None
immediate_msg = None
if df[field].apply(lambda x: x == "").any() or df[field].isnull().any():
empty_rows = (df[field] == "") | (df[field].isnull())
empty_indexes = df.reset_index().index[empty_rows].tolist()
immediate_msg = f"\n- `{field}` column/key should not contain empty strings. These are rows: {empty_indexes}"
def necessary_fn(x):
return x[x[field] != ""].dropna(subset=[field])
necessary_msg = f"Remove {len(empty_indexes)} rows with empty {field}s"
return Remediation(
name=f"empty_{field}",
immediate_msg=immediate_msg,
necessary_msg=necessary_msg,
necessary_fn=necessary_fn,
)
def duplicated_rows_validator(df, fields=["prompt", "completion"]):
"""
This validator will suggest to the user to remove duplicate rows if they exist.
"""
duplicated_rows = df.duplicated(subset=fields)
duplicated_indexes = df.reset_index().index[duplicated_rows].tolist()
immediate_msg = None
optional_msg = None
optional_fn = None
if len(duplicated_indexes) > 0:
immediate_msg = f"\n- There are {len(duplicated_indexes)} duplicated {'-'.join(fields)} sets. These are rows: {duplicated_indexes}"
optional_msg = f"Remove {len(duplicated_indexes)} duplicate rows"
def optional_fn(x):
return x.drop_duplicates(subset=fields)
return Remediation(
name="duplicated_rows",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def long_examples_validator(df):
"""
This validator will suggest to the user to remove examples that are too long.
"""
immediate_msg = None
optional_msg = None
optional_fn = None
ft_type = infer_task_type(df)
if ft_type != "open-ended generation":
def get_long_indexes(d):
long_examples = d.apply(
lambda x: len(x.prompt) + len(x.completion) > 10000, axis=1
)
return d.reset_index().index[long_examples].tolist()
long_indexes = get_long_indexes(df)
if len(long_indexes) > 0:
immediate_msg = f"\n- There are {len(long_indexes)} examples that are very long. These are rows: {long_indexes}\nFor conditional generation, and for classification the examples shouldn't be longer than 2048 tokens."
optional_msg = f"Remove {len(long_indexes)} long examples"
def optional_fn(x):
long_indexes_to_drop = get_long_indexes(x)
if long_indexes != long_indexes_to_drop:
sys.stdout.write(f"The indices of the long examples has changed as a result of a previously applied recommendation.\nThe {len(long_indexes_to_drop)} long examples to be dropped are now at the following indices: {long_indexes_to_drop}\n")
return x.drop(long_indexes_to_drop)
return Remediation(
name="long_examples",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def common_prompt_suffix_validator(df):
"""
This validator will suggest to add a common suffix to the prompt if one doesn't already exist in case of classification or conditional generation.
"""
error_msg = None
immediate_msg = None
optional_msg = None
optional_fn = None
# Find a suffix which is not contained within the prompt otherwise
suggested_suffix = "\n\n### =>\n\n"
suffix_options = [
" ->",
"\n\n###\n\n",
"\n\n===\n\n",
"\n\n---\n\n",
"\n\n===>\n\n",
"\n\n--->\n\n",
]
for suffix_option in suffix_options:
if suffix_option == " ->":
if df.prompt.str.contains("\n").any():
continue
if df.prompt.str.contains(suffix_option, regex=False).any():
continue
suggested_suffix = suffix_option
break
display_suggested_suffix = suggested_suffix.replace("\n", "\\n")
ft_type = infer_task_type(df)
if ft_type == "open-ended generation":
return Remediation(name="common_suffix")
def add_suffix(x, suffix):
x["prompt"] += suffix
return x
common_suffix = get_common_xfix(df.prompt, xfix="suffix")
if (df.prompt == common_suffix).all():
error_msg = f"All prompts are identical: `{common_suffix}`\nConsider leaving the prompts blank if you want to do open-ended generation, otherwise ensure prompts are different"
return Remediation(name="common_suffix", error_msg=error_msg)
if common_suffix != "":
common_suffix_new_line_handled = common_suffix.replace("\n", "\\n")
immediate_msg = (
f"\n- All prompts end with suffix `{common_suffix_new_line_handled}`"
)
if len(common_suffix) > 10:
immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`"
if (
df.prompt.str[: -len(common_suffix)]
.str.contains(common_suffix, regex=False)
.any()
):
immediate_msg += f"\n WARNING: Some of your prompts contain the suffix `{common_suffix}` more than once. We strongly suggest that you review your prompts and add a unique suffix"
else:
immediate_msg = "\n- Your data does not contain a common separator at the end of your prompts. Having a separator string appended to the end of the prompt makes it clearer to the fine-tuned model where the completion should begin. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples. If you intend to do open-ended generation, then you should leave the prompts empty"
if common_suffix == "":
optional_msg = (
f"Add a suffix separator `{display_suggested_suffix}` to all prompts"
)
def optional_fn(x):
return add_suffix(x, suggested_suffix)
return Remediation(
name="common_completion_suffix",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
error_msg=error_msg,
)
def common_prompt_prefix_validator(df):
"""
This validator will suggest to remove a common prefix from the prompt if a long one exist.
"""
MAX_PREFIX_LEN = 12
immediate_msg = None
optional_msg = None
optional_fn = None
common_prefix = get_common_xfix(df.prompt, xfix="prefix")
if common_prefix == "":
return Remediation(name="common_prefix")
def remove_common_prefix(x, prefix):
x["prompt"] = x["prompt"].str[len(prefix) :]
return x
if (df.prompt == common_prefix).all():
# already handled by common_suffix_validator
return Remediation(name="common_prefix")
if common_prefix != "":
immediate_msg = f"\n- All prompts start with prefix `{common_prefix}`"
if MAX_PREFIX_LEN < len(common_prefix):
immediate_msg += ". Fine-tuning doesn't require the instruction specifying the task, or a few-shot example scenario. Most of the time you should only add the input data into the prompt, and the desired output into the completion"
optional_msg = f"Remove prefix `{common_prefix}` from all prompts"
def optional_fn(x):
return remove_common_prefix(x, common_prefix)
return Remediation(
name="common_prompt_prefix",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def common_completion_prefix_validator(df):
"""
This validator will suggest to remove a common prefix from the completion if a long one exist.
"""
MAX_PREFIX_LEN = 5
common_prefix = get_common_xfix(df.completion, xfix="prefix")
ws_prefix = len(common_prefix) > 0 and common_prefix[0] == " "
if len(common_prefix) < MAX_PREFIX_LEN:
return Remediation(name="common_prefix")
def remove_common_prefix(x, prefix, ws_prefix):
x["completion"] = x["completion"].str[len(prefix) :]
if ws_prefix:
# keep the single whitespace as prefix
x["completion"] = " " + x["completion"]
return x
if (df.completion == common_prefix).all():
# already handled by common_suffix_validator
return Remediation(name="common_prefix")
immediate_msg = f"\n- All completions start with prefix `{common_prefix}`. Most of the time you should only add the output data into the completion, without any prefix"
optional_msg = f"Remove prefix `{common_prefix}` from all completions"
def optional_fn(x):
return remove_common_prefix(x, common_prefix, ws_prefix)
return Remediation(
name="common_completion_prefix",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def common_completion_suffix_validator(df):
"""
This validator will suggest to add a common suffix to the completion if one doesn't already exist in case of classification or conditional generation.
"""
error_msg = None
immediate_msg = None
optional_msg = None
optional_fn = None
ft_type = infer_task_type(df)
if ft_type == "open-ended generation" or ft_type == "classification":
return Remediation(name="common_suffix")
common_suffix = get_common_xfix(df.completion, xfix="suffix")
if (df.completion == common_suffix).all():
error_msg = f"All completions are identical: `{common_suffix}`\nEnsure completions are different, otherwise the model will just repeat `{common_suffix}`"
return Remediation(name="common_suffix", error_msg=error_msg)
# Find a suffix which is not contained within the completion otherwise
suggested_suffix = " [END]"
suffix_options = [
"\n",
".",
" END",
"***",
"+++",
"&&&",
"$$$",
"@@@",
"%%%",
]
for suffix_option in suffix_options:
if df.completion.str.contains(suffix_option, regex=False).any():
continue
suggested_suffix = suffix_option
break
display_suggested_suffix = suggested_suffix.replace("\n", "\\n")
def add_suffix(x, suffix):
x["completion"] += suffix
return x
if common_suffix != "":
common_suffix_new_line_handled = common_suffix.replace("\n", "\\n")
immediate_msg = (
f"\n- All completions end with suffix `{common_suffix_new_line_handled}`"
)
if len(common_suffix) > 10:
immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`"
if (
df.completion.str[: -len(common_suffix)]
.str.contains(common_suffix, regex=False)
.any()
):
immediate_msg += f"\n WARNING: Some of your completions contain the suffix `{common_suffix}` more than once. We suggest that you review your completions and add a unique ending"
else:
immediate_msg = "\n- Your data does not contain a common ending at the end of your completions. Having a common ending string appended to the end of the completion makes it clearer to the fine-tuned model where the completion should end. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples."
if common_suffix == "":
optional_msg = (
f"Add a suffix ending `{display_suggested_suffix}` to all completions"
)
def optional_fn(x):
return add_suffix(x, suggested_suffix)
return Remediation(
name="common_completion_suffix",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
error_msg=error_msg,
)
def completions_space_start_validator(df):
"""
This validator will suggest to add a space at the start of the completion if it doesn't already exist. This helps with tokenization.
"""
def add_space_start(x):
x["completion"] = x["completion"].apply(
lambda x: ("" if x[0] == " " else " ") + x
)
return x
optional_msg = None
optional_fn = None
immediate_msg = None
if df.completion.str[:1].nunique() != 1 or df.completion.values[0][0] != " ":
immediate_msg = "\n- The completion should start with a whitespace character (` `). This tends to produce better results due to the tokenization we use. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details"
optional_msg = "Add a whitespace character to the beginning of the completion"
optional_fn = add_space_start
return Remediation(
name="completion_space_start",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def lower_case_validator(df, column):
"""
This validator will suggest to lowercase the column values, if more than a third of letters are uppercase.
"""
def lower_case(x):
x[column] = x[column].str.lower()
return x
count_upper = (
df[column]
.apply(lambda x: sum(1 for c in x if c.isalpha() and c.isupper()))
.sum()
)
count_lower = (
df[column]
.apply(lambda x: sum(1 for c in x if c.isalpha() and c.islower()))
.sum()
)
if count_upper * 2 > count_lower:
return Remediation(
name="lower_case",
immediate_msg=f"\n- More than a third of your `{column}` column/key is uppercase. Uppercase {column}s tends to perform worse than a mixture of case encountered in normal language. We recommend to lower case the data if that makes sense in your domain. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details",
optional_msg=f"Lowercase all your data in column/key `{column}`",
optional_fn=lower_case,
)
def read_any_format(fname, fields=["prompt", "completion"]):
"""
This function will read a file saved in .csv, .json, .txt, .xlsx or .tsv format using pandas.
- for .xlsx it will read the first sheet
- for .txt it will assume completions and split on newline
"""
assert_has_pandas()
remediation = None
necessary_msg = None
immediate_msg = None
error_msg = None
df = None
if os.path.isfile(fname):
try:
if fname.lower().endswith(".csv") or fname.lower().endswith(".tsv"):
file_extension_str, separator = (
("CSV", ",") if fname.lower().endswith(".csv") else ("TSV", "\t")
)
immediate_msg = f"\n- Based on your file extension, your file is formatted as a {file_extension_str} file"
necessary_msg = (
f"Your format `{file_extension_str}` will be converted to `JSONL`"
)
df = pd.read_csv(fname, sep=separator, dtype=str).fillna("")
elif fname.lower().endswith(".xlsx"):
immediate_msg = "\n- Based on your file extension, your file is formatted as an Excel file"
necessary_msg = "Your format `XLSX` will be converted to `JSONL`"
xls = pd.ExcelFile(fname)
sheets = xls.sheet_names
if len(sheets) > 1:
immediate_msg += "\n- Your Excel file contains more than one sheet. Please either save as csv or ensure all data is present in the first sheet. WARNING: Reading only the first sheet..."
df = pd.read_excel(fname, dtype=str).fillna("")
elif fname.lower().endswith(".txt"):
immediate_msg = (
"\n- Based on your file extension, you provided a text file"
)
necessary_msg = "Your format `TXT` will be converted to `JSONL`"
with open(fname, "r") as f:
content = f.read()
df = pd.DataFrame(
[["", line] for line in content.split("\n")],
columns=fields,
dtype=str,
).fillna("")
elif fname.lower().endswith(".jsonl"):
df = pd.read_json(fname, lines=True, dtype=str).fillna("")
if len(df) == 1:
# this is NOT what we expect for a .jsonl file
immediate_msg = "\n- Your JSONL file appears to be in a JSON format. Your file will be converted to JSONL format"
necessary_msg = "Your format `JSON` will be converted to `JSONL`"
df = pd.read_json(fname, dtype=str).fillna("")
else:
pass # this is what we expect for a .jsonl file
elif fname.lower().endswith(".json"):
df = pd.read_json(fname, lines=True, dtype=str).fillna("")
if len(df) == 1:
# this is what we expect for a .json file
df = pd.read_json(fname, dtype=str).fillna("")
else:
# this is NOT what we expect for a .json file
immediate_msg = "\n- Your JSON file appears to be in a JSONL format. Your file will be converted to JSONL format"
necessary_msg = "Your format `JSON` will be converted to `JSONL`"
else:
error_msg = "Your file must have one of the following extensions: .CSV, .TSV, .XLSX, .TXT, .JSON or .JSONL"
if "." in fname:
error_msg += f" Your file `{fname}` ends with the extension `.{fname.split('.')[-1]}` which is not supported."
else:
error_msg += f" Your file `{fname}` is missing a file extension."
except (ValueError, TypeError):
file_extension_str = fname.split(".")[-1].upper()
error_msg = f"Your file `{fname}` does not appear to be in valid {file_extension_str} format. Please ensure your file is formatted as a valid {file_extension_str} file."
else:
error_msg = f"File {fname} does not exist."
remediation = Remediation(
name="read_any_format",
necessary_msg=necessary_msg,
immediate_msg=immediate_msg,
error_msg=error_msg,
)
return df, remediation
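# Illustrative usage sketch ("training_data.csv" is a placeholder path):
#
#   df, remediation = read_any_format("training_data.csv")
#   df = apply_necessary_remediation(df, remediation)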
def format_inferrer_validator(df):
"""
This validator will infer the likely fine-tuning format of the data, and display it to the user if it is classification.
It will also suggest to use ada and explain train/validation split benefits.
"""
ft_type = infer_task_type(df)
immediate_msg = None
if ft_type == "classification":
immediate_msg = f"\n- Based on your data it seems like you're trying to fine-tune a model for {ft_type}\n- For classification, we recommend you try one of the faster and cheaper models, such as `ada`\n- For classification, you can estimate the expected model performance by keeping a held out dataset, which is not used for training"
return Remediation(name="num_examples", immediate_msg=immediate_msg)
def apply_necessary_remediation(df, remediation):
"""
This function will apply a necessary remediation to a dataframe, or print an error message if one exists.
"""
if remediation.error_msg is not None:
sys.stderr.write(
f"\n\nERROR in {remediation.name} validator: {remediation.error_msg}\n\nAborting..."
)
sys.exit(1)
if remediation.immediate_msg is not None:
sys.stdout.write(remediation.immediate_msg)
if remediation.necessary_fn is not None:
df = remediation.necessary_fn(df)
return df
def accept_suggestion(input_text, auto_accept):
sys.stdout.write(input_text)
if auto_accept:
sys.stdout.write("Y\n")
return True
return input().lower() != "n"
def apply_optional_remediation(df, remediation, auto_accept):
"""
This function will apply an optional remediation to a dataframe, based on the user input.
"""
optional_applied = False
input_text = f"- [Recommended] {remediation.optional_msg} [Y/n]: "
if remediation.optional_msg is not None:
if accept_suggestion(input_text, auto_accept):
df = remediation.optional_fn(df)
optional_applied = True
if remediation.necessary_msg is not None:
sys.stdout.write(f"- [Necessary] {remediation.necessary_msg}\n")
return df, optional_applied
def estimate_fine_tuning_time(df):
"""
Estimate the time it'll take to fine-tune the dataset
"""
ft_format = infer_task_type(df)
expected_time = 1.0
if ft_format == "classification":
num_examples = len(df)
expected_time = num_examples * 1.44
else:
size = df.memory_usage(index=True).sum()
expected_time = size * 0.0515
def format_time(time):
if time < 60:
return f"{round(time, 2)} seconds"
elif time < 3600:
return f"{round(time / 60, 2)} minutes"
elif time < 86400:
return f"{round(time / 3600, 2)} hours"
else:
return f"{round(time / 86400, 2)} days"
time_string = format_time(expected_time + 140)
sys.stdout.write(
f"Once your model starts training, it'll approximately take {time_string} to train a `curie` model, and less for `ada` and `babbage`. Queue will approximately take half an hour per job ahead of you.\n"
)
def get_outfnames(fname, split):
suffixes = ["_train", "_valid"] if split else [""]
i = 0
while True:
index_suffix = f" ({i})" if i > 0 else ""
candidate_fnames = [
os.path.splitext(fname)[0] + "_prepared" + suffix + index_suffix + ".jsonl"
for suffix in suffixes
]
if not any(os.path.isfile(f) for f in candidate_fnames):
return candidate_fnames
i += 1
def get_classification_hyperparams(df):
n_classes = df.completion.nunique()
pos_class = None
if n_classes == 2:
pos_class = df.completion.value_counts().index[0]
return n_classes, pos_class
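# --- Illustrative sketch, not part of the original module ---
# A tiny, hypothetical dataframe showing what get_classification_hyperparams
# returns for a binary dataset: two classes, with the most frequent completion
# (" yes") picked as the positive class.
def _example_classification_hyperparams():
    import pandas as pd

    df = pd.DataFrame({"completion": [" yes", " yes", " no"]})
    n_classes, pos_class = get_classification_hyperparams(df)
    assert (n_classes, pos_class) == (2, " yes")
    return n_classes, pos_class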
def write_out_file(df, fname, any_remediations, auto_accept):
"""
This function will write out a dataframe to a file, if the user would like to proceed, and also offer a fine-tuning command with the newly created file.
For classification it will optionally ask the user if they would like to split the data into train/valid files, and modify the suggested command to include the valid set.
"""
ft_format = infer_task_type(df)
common_prompt_suffix = get_common_xfix(df.prompt, xfix="suffix")
common_completion_suffix = get_common_xfix(df.completion, xfix="suffix")
split = False
input_text = "- [Recommended] Would you like to split into training and validation set? [Y/n]: "
if ft_format == "classification":
if accept_suggestion(input_text, auto_accept):
split = True
additional_params = ""
common_prompt_suffix_new_line_handled = common_prompt_suffix.replace("\n", "\\n")
common_completion_suffix_new_line_handled = common_completion_suffix.replace(
"\n", "\\n"
)
optional_ending_string = (
        f' Make sure to include `stop=["{common_completion_suffix_new_line_handled}"]` so that the generated text ends at the expected place.'
if len(common_completion_suffix_new_line_handled) > 0
else ""
)
input_text = "\n\nYour data will be written to a new JSONL file. Proceed [Y/n]: "
if not any_remediations and not split:
sys.stdout.write(
f'\nYou can use your file for fine-tuning:\n> openai api fine_tunes.create -t "{fname}"{additional_params}\n\nAfter you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt.{optional_ending_string}\n'
)
estimate_fine_tuning_time(df)
elif accept_suggestion(input_text, auto_accept):
fnames = get_outfnames(fname, split)
if split:
assert len(fnames) == 2 and "train" in fnames[0] and "valid" in fnames[1]
MAX_VALID_EXAMPLES = 1000
n_train = max(len(df) - MAX_VALID_EXAMPLES, int(len(df) * 0.8))
df_train = df.sample(n=n_train, random_state=42)
df_valid = df.drop(df_train.index)
df_train[["prompt", "completion"]].to_json(
fnames[0], lines=True, orient="records", force_ascii=False
)
df_valid[["prompt", "completion"]].to_json(
fnames[1], lines=True, orient="records", force_ascii=False
)
n_classes, pos_class = get_classification_hyperparams(df)
additional_params += " --compute_classification_metrics"
if n_classes == 2:
additional_params += f' --classification_positive_class "{pos_class}"'
else:
additional_params += f" --classification_n_classes {n_classes}"
else:
assert len(fnames) == 1
df[["prompt", "completion"]].to_json(
fnames[0], lines=True, orient="records", force_ascii=False
)
# Add -v VALID_FILE if we split the file into train / valid
files_string = ("s" if split else "") + " to `" + ("` and `".join(fnames))
valid_string = f' -v "{fnames[1]}"' if split else ""
separator_reminder = (
""
if len(common_prompt_suffix_new_line_handled) == 0
else f"After you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt."
)
sys.stdout.write(
f'\nWrote modified file{files_string}`\nFeel free to take a look!\n\nNow use that file when fine-tuning:\n> openai api fine_tunes.create -t "{fnames[0]}"{valid_string}{additional_params}\n\n{separator_reminder}{optional_ending_string}\n'
)
estimate_fine_tuning_time(df)
else:
sys.stdout.write("Aborting... did not write the file\n")
def infer_task_type(df):
"""
Infer the likely fine-tuning task type from the data
"""
    CLASSIFICATION_THRESHOLD = 3  # minimum average number of instances per class
if sum(df.prompt.str.len()) == 0:
return "open-ended generation"
if len(df.completion.unique()) < len(df) / CLASSIFICATION_THRESHOLD:
return "classification"
return "conditional generation"
def get_common_xfix(series, xfix="suffix"):
"""
Finds the longest common suffix or prefix of all the values in a series
"""
common_xfix = ""
while True:
common_xfixes = (
series.str[-(len(common_xfix) + 1) :]
if xfix == "suffix"
else series.str[: len(common_xfix) + 1]
) # first few or last few characters
if (
common_xfixes.nunique() != 1
): # we found the character at which we don't have a unique xfix anymore
break
elif (
common_xfix == common_xfixes.values[0]
): # the entire first row is a prefix of every other row
break
else: # the first or last few characters are still common across all rows - let's try to add one more
common_xfix = common_xfixes.values[0]
return common_xfix
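# --- Illustrative sketch, not part of the original module ---
# The made-up prompts below all end with " ->", so get_common_xfix walks the
# strings from the right and stops as soon as the trailing characters diverge,
# returning " ->" as the longest shared suffix.
def _example_common_suffix():
    import pandas as pd

    prompts = pd.Series(["What is 2+2? ->", "Name a color ->", "Say hi ->"])
    assert get_common_xfix(prompts, xfix="suffix") == " ->"
    return get_common_xfix(prompts, xfix="suffix")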
def get_validators():
return [
num_examples_validator,
lambda x: necessary_column_validator(x, "prompt"),
lambda x: necessary_column_validator(x, "completion"),
additional_column_validator,
non_empty_field_validator,
format_inferrer_validator,
duplicated_rows_validator,
long_examples_validator,
lambda x: lower_case_validator(x, "prompt"),
lambda x: lower_case_validator(x, "completion"),
common_prompt_suffix_validator,
common_prompt_prefix_validator,
common_completion_prefix_validator,
common_completion_suffix_validator,
completions_space_start_validator,
]
def apply_validators(
df,
fname,
remediation,
validators,
auto_accept,
write_out_file_func,
):
optional_remediations = []
if remediation is not None:
optional_remediations.append(remediation)
for validator in validators:
remediation = validator(df)
if remediation is not None:
optional_remediations.append(remediation)
df = apply_necessary_remediation(df, remediation)
any_optional_or_necessary_remediations = any(
[
remediation
for remediation in optional_remediations
if remediation.optional_msg is not None
or remediation.necessary_msg is not None
]
)
any_necessary_applied = any(
[
remediation
for remediation in optional_remediations
if remediation.necessary_msg is not None
]
)
any_optional_applied = False
if any_optional_or_necessary_remediations:
sys.stdout.write(
"\n\nBased on the analysis we will perform the following actions:\n"
)
for remediation in optional_remediations:
df, optional_applied = apply_optional_remediation(
df, remediation, auto_accept
)
any_optional_applied = any_optional_applied or optional_applied
else:
sys.stdout.write("\n\nNo remediations found.\n")
any_optional_or_necessary_applied = any_optional_applied or any_necessary_applied
write_out_file_func(df, fname, any_optional_or_necessary_applied, auto_accept)
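# --- Illustrative sketch, not part of the original module ---
# A rough sketch of how the helpers above are typically wired together: read
# the input file, run every validator, then hand off to write_out_file. The
# filename is a placeholder, and read_any_format is assumed to return
# (df, remediation) as its usage above suggests.
def _example_prepare_data(fname="my_data.csv", auto_accept=False):
    df, read_remediation = read_any_format(fname)
    apply_validators(
        df,
        fname,
        read_remediation,
        get_validators(),
        auto_accept,
        write_out_file_func=write_out_file,
    )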
| [
"\n",
"\\n"
] |
2024-01-10 | tedxnet/python-openai | openaipro~_openai_scripts.py | #!/usr/bin/env python
import argparse
import logging
import sys
import openaipro
from openaipro.cli import api_register, display_error, tools_register, wandb_register
logger = logging.getLogger()
formatter = logging.Formatter("[%(asctime)s] %(message)s")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
def main():
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
"-v",
"--verbose",
action="count",
dest="verbosity",
default=0,
help="Set verbosity.",
)
parser.add_argument("-b", "--api-base", help="What API base url to use.")
parser.add_argument("-k", "--api-key", help="What API key to use.")
parser.add_argument(
"-o",
"--organization",
help="Which organization to run as (will use your default organization if not specified)",
)
def help(args):
parser.print_help()
parser.set_defaults(func=help)
subparsers = parser.add_subparsers()
sub_api = subparsers.add_parser("api", help="Direct API calls")
sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience")
sub_wandb = subparsers.add_parser("wandb", help="Logging with Weights & Biases")
api_register(sub_api)
tools_register(sub_tools)
wandb_register(sub_wandb)
args = parser.parse_args()
if args.verbosity == 1:
logger.setLevel(logging.INFO)
elif args.verbosity >= 2:
logger.setLevel(logging.DEBUG)
openaipro.debug = True
if args.api_key is not None:
openaipro.api_key = args.api_key
if args.api_base is not None:
openaipro.api_base = args.api_base
if args.organization is not None:
openaipro.organization = args.organization
try:
args.func(args)
except openaipro.error.OpenAIError as e:
display_error(e)
return 1
except KeyboardInterrupt:
sys.stderr.write("\n")
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
| [] |
2024-01-10 | tedxnet/python-openai | openaipro~tests~test_endpoints.py | import io
import json
import pytest
import openaipro
from openaipro import error
# FILE TESTS
def test_file_upload():
result = openaipro.File.create(
file=io.StringIO(
json.dumps({"prompt": "test file data", "completion": "tada"})
),
purpose="fine-tune",
)
assert result.purpose == "fine-tune"
assert "id" in result
result = openaipro.File.retrieve(id=result.id)
assert result.status == "uploaded"
# COMPLETION TESTS
def test_completions():
result = openaipro.Completion.create(prompt="This was a test", n=5, engine="ada")
assert len(result.choices) == 5
def test_completions_multiple_prompts():
result = openaipro.Completion.create(
prompt=["This was a test", "This was another test"], n=5, engine="ada"
)
assert len(result.choices) == 10
def test_completions_model():
result = openaipro.Completion.create(prompt="This was a test", n=5, model="ada")
assert len(result.choices) == 5
assert result.model.startswith("ada")
def test_timeout_raises_error():
# A query that should take awhile to return
with pytest.raises(error.Timeout):
openaipro.Completion.create(
prompt="test" * 1000,
n=10,
model="ada",
max_tokens=100,
request_timeout=0.01,
)
def test_timeout_does_not_error():
# A query that should be fast
openaipro.Completion.create(
prompt="test",
model="ada",
request_timeout=10,
)
| [
"This was a test",
"['This was a test', 'This was another test']",
"testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttestte
sttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest",
"test"
] |
2024-01-10 | tedxnet/python-openai | openaipro~api_resources~edit.py | import time
from openaipro import util, error
from openaipro.api_resources.abstract.engine_api_resource import EngineAPIResource
from openaipro.error import TryAgain
class Edit(EngineAPIResource):
OBJECT_NAME = "edits"
@classmethod
def create(cls, *args, **kwargs):
"""
Creates a new edit for the provided input, instruction, and parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
api_type = kwargs.pop("api_type", None)
typed_api_type = cls._get_api_type_and_version(api_type=api_type)[0]
if typed_api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise error.InvalidAPIType(
"This operation is not supported by the Azure OpenAI API yet."
)
while True:
try:
return super().create(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
@classmethod
async def acreate(cls, *args, **kwargs):
"""
Creates a new edit for the provided input, instruction, and parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
api_type = kwargs.pop("api_type", None)
typed_api_type = cls._get_api_type_and_version(api_type=api_type)[0]
if typed_api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise error.InvalidAPIType(
"This operation is not supported by the Azure OpenAI API yet."
)
while True:
try:
return await super().acreate(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
| [] |
2024-01-10 | k-d-7/SmartHome-M5Stack | testtraindata~fine_tune.py | import openai
openai.api_key = "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"
path = 'train_data/ft_data.jsonl'
def upload_ftdata():
with open(path, "rb") as file:
response = openai.File.create(
file = file,
purpose = 'fine-tune'
)
file_id = response['id']
print(f"File uload with id: {file_id}")
return file_id
def finetune_create(file_id):
model_name = "gpt-3.5-turbo"
response = openai.FineTuningJob.create(
training_file = file_id,
model = model_name
)
job_id = response['id']
print(f"Job id: {job_id}")
return job_id
def delete_model(model_name):
openai.Model.delete(model_name)
# Retrieve the state of a fine-tune
def retrieve_model(job_id):
response = openai.FineTuningJob.retrieve(job_id)
print(response)
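# --- Illustrative sketch, an assumption rather than part of the original script ---
# Polls a fine-tuning job until it leaves the in-progress states; the sleep
# interval is arbitrary and the status names follow the openai 0.x API.
def wait_for_job(job_id, poll_seconds=30):
    import time

    while True:
        status = openai.FineTuningJob.retrieve(job_id)["status"]
        print(f"Job {job_id} status: {status}")
        if status not in ("validating_files", "queued", "running"):
            return status
        time.sleep(poll_seconds)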
# file_id = upload_ftdata()
# job_id = finetune_create(file_id)
#retrieve_model('ftjob-ezyrT0OCLXSeHcaFtmbYuVZp')
#print(openai.FineTuningJob.list_events('ftjob-rRfqapvOAFZis79HXFPE9NIY',10))
#print(openai.FineTuningJob.cancel(""))
#delete_model('ft:gpt-3.5-turbo-0613:personal::81TtQzRO')
| [] |
2024-01-10 | k-d-7/SmartHome-M5Stack | testtraindata~run_chatgpt.py | import openai
# openai.api_key = "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"
#
# #model_id = 'ft:gpt-3.5-turbo-0613:personal::81Tsc1dF'
# model_id = 'gpt-3.5-turbo'
openai.api_key = "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"
model_id = 'ft:gpt-3.5-turbo-0613:personal::82L6L6nv'
# message_content = 'Interpret the following user input and convert it into JSON of the form { "intent": ["string"], ' \
# '"device":["string"], "location":[ "string"]} ' \
# '.Only return JSON with 2 lines. User input:'
def generate_response(user_input, role="user"):
array_exit = ["", "Bye ChatGPT", " Bye ChatGPT", "bye", "bye chat", " bye", " see you"]
if user_input in array_exit:
return None
    # Add the system prompt only once instead of re-appending it on every turn.
    if not message_history:
        message_history.append({'role': 'system', 'content': 'You are a helpful assistant.'})
message_history.append({"role": role, "content": f"{user_input}"})
completion = openai.ChatCompletion.create(
model=model_id,
messages=message_history
)
response = completion.choices[0].message.content
print(completion.choices[0].message.content.strip())
message_history.append({"role": "assistant", "content": f"{response}"})
return response
message_history = []
while True:
prompt = input('User:')
conversation = generate_response(prompt, role="user")
if conversation is None:
break
| [
"PLACEHOLDER",
"You are a helpful assistant."
] |
2024-01-10 | fetchai/uAgents | integrations~fetch-holiday~src~agents~activities~top_activities.py | from langchain.chat_models import ChatOpenAI
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.prompts import PromptTemplate
from messages import UAgentResponse, UAgentResponseType, TopActivities, KeyValue
from uagents import Agent, Context, Protocol
from uagents.setup import fund_agent_if_low
import os
TOP_ACTIVITIES_SEED = os.getenv("TOP_ACTIVITIES_SEED", "top_activities really secret phrase :)")
agent = Agent(
name="top_activities",
seed=TOP_ACTIVITIES_SEED
)
fund_agent_if_low(agent.wallet.address())
output_parser = CommaSeparatedListOutputParser()
format_instructions = output_parser.get_format_instructions()
prompt = PromptTemplate(
template="""
You are an expert AI in suggesting travel, holiday activities based on the date and city specified in user input.\n
The question that SerpAPI has to answer: What are the top 5 tourist activities in {city} on {date}?\n
{preferred_activities_str}\n
You should find tourist attractions and programs which are available exactly on the specified date.\n
{format_instructions}""",
input_variables=["city", "date", "preferred_activities_str"],
partial_variables={"format_instructions": format_instructions}
)
llm = ChatOpenAI(temperature=0.1)
tools = load_tools(["serpapi"], llm=llm)
langchain_agent = initialize_agent(tools, llm, agent="chat-zero-shot-react-description", verbose=True)
top_activities_protocol = Protocol("TopActivities")
@top_activities_protocol.on_message(model=TopActivities, replies=UAgentResponse)
async def get_top_activity(ctx: Context, sender: str, msg: TopActivities):
ctx.logger.info(f"Received message from {sender}, session: {ctx.session}")
preferred_activities_str = f"You should only offer programs and activities related to {msg.preferred_activities}" if msg.preferred_activities else ""
_input = prompt.format(city=msg.city, date=msg.date, preferred_activities_str = preferred_activities_str)
try:
output = await langchain_agent.arun(_input)
result = output_parser.parse(output)
options = list(map(lambda x: KeyValue(key=x, value=x), result))
ctx.logger.info(f"Agent executed and got following result: {result}. Mapped to options: {options}")
await ctx.send(
sender,
UAgentResponse(
options=options,
type=UAgentResponseType.FINAL_OPTIONS,
)
)
except Exception as exc:
ctx.logger.warn(exc)
await ctx.send(sender, UAgentResponse(message=str(exc), type=UAgentResponseType.ERROR))
agent.include(top_activities_protocol)
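# --- Illustrative sketch, an assumption rather than part of the original file ---
# The agent is usually started as its own process; uagents exposes a blocking
# run() loop for that.
if __name__ == "__main__":
    agent.run()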
| [
"\n You are an expert AI in suggesting travel, holiday activities based on the date and city specified in user input.\n\n The question that SerpAPI has to answer: What are the top 5 tourist activities in {city} on {date}?\n\n {preferred_activities_str}\n\n You should find tourist attractions and programs which are available exactly on the specified date.\n\n {format_instructions}",
"format_instructions",
"preferred_activities_str"
] |
2024-01-10 | aronweiler/langchain | libs~langchain~langchain~vectorstores~pinecone.py | from __future__ import annotations
import logging
import uuid
import warnings
from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Tuple, Union
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils.iter import batch_iterate
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import DistanceStrategy, maximal_marginal_relevance
if TYPE_CHECKING:
from pinecone import Index
logger = logging.getLogger(__name__)
class Pinecone(VectorStore):
"""`Pinecone` vector store.
To use, you should have the ``pinecone-client`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
index = pinecone.Index("langchain-demo")
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone(index, embeddings.embed_query, "text")
"""
def __init__(
self,
index: Any,
embedding: Union[Embeddings, Callable],
text_key: str,
namespace: Optional[str] = None,
distance_strategy: Optional[DistanceStrategy] = DistanceStrategy.COSINE,
):
"""Initialize with Pinecone client."""
try:
import pinecone
except ImportError:
raise ImportError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
if not isinstance(embedding, Embeddings):
warnings.warn(
"Passing in `embedding` as a Callable is deprecated. Please pass in an"
" Embeddings object instead."
)
if not isinstance(index, pinecone.index.Index):
raise ValueError(
f"client should be an instance of pinecone.index.Index, "
f"got {type(index)}"
)
self._index = index
self._embedding = embedding
self._text_key = text_key
self._namespace = namespace
self.distance_strategy = distance_strategy
@property
def embeddings(self) -> Optional[Embeddings]:
"""Access the query embedding object if available."""
if isinstance(self._embedding, Embeddings):
return self._embedding
return None
def _embed_documents(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search docs."""
if isinstance(self._embedding, Embeddings):
return self._embedding.embed_documents(list(texts))
return [self._embedding(t) for t in texts]
def _embed_query(self, text: str) -> List[float]:
"""Embed query text."""
if isinstance(self._embedding, Embeddings):
return self._embedding.embed_query(text)
return self._embedding(text)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
namespace: Optional[str] = None,
batch_size: int = 32,
embedding_chunk_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Upsert optimization is done by chunking the embeddings and upserting them.
This is done to avoid memory issues and optimize using HTTP based embeddings.
For OpenAI embeddings, use pool_threads>4 when constructing the pinecone.Index,
embedding_chunk_size>1000 and batch_size~64 for best performance.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
namespace: Optional pinecone namespace to add the texts to.
batch_size: Batch size to use when adding the texts to the vectorstore.
embedding_chunk_size: Chunk size to use when embedding the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if namespace is None:
namespace = self._namespace
texts = list(texts)
ids = ids or [str(uuid.uuid4()) for _ in texts]
metadatas = metadatas or [{} for _ in texts]
for metadata, text in zip(metadatas, texts):
metadata[self._text_key] = text
        # Two loops keep memory usage bounded and make the best use of HTTP-based
        # embeddings: the first computes the embeddings in chunks (which benefits
        # OpenAI's batched endpoint), and the second issues the Pinecone upserts
        # asynchronously.
for i in range(0, len(texts), embedding_chunk_size):
chunk_texts = texts[i : i + embedding_chunk_size]
chunk_ids = ids[i : i + embedding_chunk_size]
chunk_metadatas = metadatas[i : i + embedding_chunk_size]
embeddings = self._embed_documents(chunk_texts)
async_res = [
self._index.upsert(
vectors=batch,
namespace=namespace,
async_req=True,
**kwargs,
)
for batch in batch_iterate(
batch_size, zip(chunk_ids, embeddings, chunk_metadatas)
)
]
[res.get() for res in async_res]
return ids
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
"""Return pinecone documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
return self.similarity_search_by_vector_with_score(
self._embed_query(query), k=k, filter=filter, namespace=namespace
)
def similarity_search_by_vector_with_score(
self,
embedding: List[float],
*,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
"""Return pinecone documents most similar to embedding, along with scores."""
if namespace is None:
namespace = self._namespace
docs = []
results = self._index.query(
[embedding],
top_k=k,
include_metadata=True,
namespace=namespace,
filter=filter,
)
for res in results["matches"]:
metadata = res["metadata"]
if self._text_key in metadata:
text = metadata.pop(self._text_key)
score = res["score"]
docs.append((Document(page_content=text, metadata=metadata), score))
else:
logger.warning(
f"Found document with no `{self._text_key}` key. Skipping."
)
return docs
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return pinecone documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, filter=filter, namespace=namespace, **kwargs
)
return [doc for doc, _ in docs_and_scores]
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return self._euclidean_relevance_score_fn
else:
raise ValueError(
"Unknown distance strategy, must be cosine, max_inner_product "
"(dot product), or euclidean"
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if namespace is None:
namespace = self._namespace
results = self._index.query(
[embedding],
top_k=fetch_k,
include_values=True,
include_metadata=True,
namespace=namespace,
filter=filter,
)
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
[item["values"] for item in results["matches"]],
k=k,
lambda_mult=lambda_mult,
)
selected = [results["matches"][i]["metadata"] for i in mmr_selected]
return [
Document(page_content=metadata.pop((self._text_key)), metadata=metadata)
for metadata in selected
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult, filter, namespace
)
@classmethod
def get_pinecone_index(
cls,
index_name: Optional[str],
pool_threads: int = 4,
) -> Index:
"""Return a Pinecone Index instance.
Args:
index_name: Name of the index to use.
pool_threads: Number of threads to use for index upsert.
Returns:
Pinecone Index instance."""
try:
import pinecone
except ImportError:
raise ValueError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
indexes = pinecone.list_indexes() # checks if provided index exists
if index_name in indexes:
index = pinecone.Index(index_name, pool_threads=pool_threads)
elif len(indexes) == 0:
raise ValueError(
"No active indexes found in your Pinecone project, "
"are you sure you're using the right Pinecone API key and Environment? "
"Please double check your Pinecone dashboard."
)
else:
raise ValueError(
f"Index '{index_name}' not found in your Pinecone project. "
f"Did you mean one of the following indexes: {', '.join(indexes)}"
)
return index
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
batch_size: int = 32,
text_key: str = "text",
namespace: Optional[str] = None,
index_name: Optional[str] = None,
upsert_kwargs: Optional[dict] = None,
pool_threads: int = 4,
embeddings_chunk_size: int = 1000,
**kwargs: Any,
) -> Pinecone:
"""Construct Pinecone wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Pinecone index
This is intended to be a quick way to get started.
The `pool_threads` affects the speed of the upsert operations.
Example:
.. code-block:: python
from langchain import Pinecone
from langchain.embeddings import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
embeddings = OpenAIEmbeddings()
pinecone = Pinecone.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
pinecone_index = cls.get_pinecone_index(index_name, pool_threads)
pinecone = cls(pinecone_index, embedding, text_key, namespace, **kwargs)
pinecone.add_texts(
texts,
metadatas=metadatas,
ids=ids,
namespace=namespace,
batch_size=batch_size,
embedding_chunk_size=embeddings_chunk_size,
**(upsert_kwargs or {}),
)
return pinecone
@classmethod
def from_existing_index(
cls,
index_name: str,
embedding: Embeddings,
text_key: str = "text",
namespace: Optional[str] = None,
pool_threads: int = 4,
) -> Pinecone:
"""Load pinecone vectorstore from index name."""
pinecone_index = cls.get_pinecone_index(index_name, pool_threads)
return cls(pinecone_index, embedding, text_key, namespace)
def delete(
self,
ids: Optional[List[str]] = None,
delete_all: Optional[bool] = None,
namespace: Optional[str] = None,
filter: Optional[dict] = None,
**kwargs: Any,
) -> None:
"""Delete by vector IDs or filter.
Args:
ids: List of ids to delete.
filter: Dictionary of conditions to filter vectors to delete.
"""
if namespace is None:
namespace = self._namespace
if delete_all:
self._index.delete(delete_all=True, namespace=namespace, **kwargs)
elif ids is not None:
chunk_size = 1000
for i in range(0, len(ids), chunk_size):
chunk = ids[i : i + chunk_size]
self._index.delete(ids=chunk, namespace=namespace, **kwargs)
elif filter is not None:
self._index.delete(filter=filter, namespace=namespace, **kwargs)
else:
raise ValueError("Either ids, delete_all, or filter must be provided.")
return None
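# --- Illustrative sketch, not part of the original module ---
# Hypothetical cleanup flow: connect to an existing index and delete every
# vector whose metadata marks it as stale. The index name, namespace and
# metadata filter are placeholders; the filter uses Pinecone's Mongo-style
# operators.
def _example_delete_by_filter():
    from langchain.embeddings.openai import OpenAIEmbeddings

    store = Pinecone.from_existing_index("langchain-demo", OpenAIEmbeddings())
    store.delete(filter={"stale": {"$eq": True}}, namespace="docs")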
| [] |
2024-01-10 | aronweiler/langchain | libs~langchain~tests~integration_tests~llms~test_opaqueprompts.py | import langchain.utilities.opaqueprompts as op
from langchain import LLMChain, PromptTemplate
from langchain.llms import OpenAI
from langchain.llms.opaqueprompts import OpaquePrompts
from langchain.memory import ConversationBufferWindowMemory
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableMap
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is [email protected]
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he
noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided
his website as https://johndoeportfolio.com. John also discussed
some of his US-specific details. He said his bank account number is
1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321,
and he recently renewed his passport,
the number for which is 123456789. He emphasized not to share his SSN, which is
669-45-6789. Furthermore, he mentioned that he accesses his work files remotely
through the IP 192.168.1.1 and has a medical license number MED-123456. ```
Question: ```{question}```
"""
def test_opaqueprompts() -> None:
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
)
output = chain.run(
{
"question": "Write a text message to remind John to do password reset \
for his website through his email to stay secure."
}
)
assert isinstance(output, str)
def test_opaqueprompts_functions() -> None:
    prompt = PromptTemplate.from_template(prompt_template)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnableMap(
{
"response": (lambda x: x["sanitized_input"]) # type: ignore
| prompt
| llm
| StrOutputParser(),
"secure_context": lambda x: x["secure_context"],
}
)
| (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
pg_chain.invoke(
{
"question": "Write a text message to remind John to do password reset\
for his website through his email to stay secure.",
"history": "",
}
)
| [
"PERSON_998 is good.",
"PERSON_998,",
"\nAs an AI assistant, you will answer questions according to given context.\n\nSensitive personal information in the question is masked for privacy.\nFor instance, if the original text says \"Giana is good,\" it will be changed\nto \"PERSON_998 is good.\"\n\nHere's how to handle these changes:\n* Consider these masked phrases just as placeholders, but still refer to\nthem in a relevant way when answering.\n* It's possible that different masked terms might mean the same thing.\nStick with the given term and don't modify it.\n* All masked terms follow the \"TYPE_ID\" pattern.\n* Please don't invent new masked terms. For instance, if you see \"PERSON_998,\"\ndon't come up with \"PERSON_997\" or \"PERSON_999\" unless they're already in the question.\n\nConversation History: ```{history}```\nContext : ```During our recent meeting on February 23, 2023, at 10:30 AM,\nJohn Doe provided me with his personal details. His email is [email protected]\nand his contact number is 650-456-7890. He lives in New York City, USA, and\nbelongs to the American nationality with Christian beliefs and a leaning towards\nthe Democratic party. He mentioned that he recently made a transaction using his\ncredit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address\n1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he\nnoted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided\nhis website as https://johndoeportfolio.com. John also discussed\nsome of his US-specific details. He said his bank account number is\n1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321,\nand he recently renewed his passport,\nthe number for which is 123456789. He emphasized not to share his SSN, which is\n669-45-6789. Furthermore, he mentioned that he accesses his work files remotely\nthrough the IP 192.168.1.1 and has a medical license number MED-123456. ```\nQuestion: ```{question}```\n",
"Giana is good,",
"t come up with \"PERSON_997\" or \"PERSON_999\" unless they"
] |
2024-01-10 | aronweiler/langchain | libs~langchain~tests~integration_tests~llms~test_fireworks.py | """Test Fireworks AI API Wrapper."""
from pathlib import Path
import pytest
from langchain import LLMChain, PromptTemplate
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAIChat
from langchain.llms.fireworks import Fireworks, FireworksChat
from langchain.llms.loading import load_llm
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import LLMResult
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import DeepLake
def test_fireworks_call() -> None:
"""Test valid call to fireworks."""
llm = Fireworks(
model_id="accounts/fireworks/models/fireworks-llama-v2-13b-chat", max_tokens=900
)
output = llm("What is the weather in NYC")
assert isinstance(output, str)
def test_fireworks_in_chain() -> None:
"""Tests fireworks AI in a Langchain chain"""
human_message_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template="What is a good name for a company that makes {product}?",
input_variables=["product"],
)
)
chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
chat = Fireworks()
chain = LLMChain(llm=chat, prompt=chat_prompt_template)
output = chain.run("football helmets")
assert isinstance(output, str)
@pytest.mark.asyncio
async def test_openai_chat_async_generate() -> None:
"""Test async chat."""
llm = OpenAIChat(max_tokens=10)
output = await llm.agenerate(["Hello, how are you?"])
assert isinstance(output, LLMResult)
def test_fireworks_model_param() -> None:
"""Tests model parameters for Fireworks"""
llm = Fireworks(model="foo")
assert llm.model_id == "foo"
llm = Fireworks(model_id="foo")
assert llm.model_id == "foo"
def test_fireworkschat_model_param() -> None:
"""Tests model parameters for FireworksChat"""
llm = FireworksChat(model="foo")
assert llm.model_id == "foo"
llm = FireworksChat(model_id="foo")
assert llm.model_id == "foo"
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an Fireworks LLM."""
llm = Fireworks(max_tokens=10)
llm.save(file_path=tmp_path / "fireworks.yaml")
loaded_llm = load_llm(tmp_path / "fireworks.yaml")
assert loaded_llm == llm
def test_fireworks_multiple_prompts() -> None:
"""Test completion with multiple prompts."""
llm = Fireworks()
output = llm.generate(["How is the weather in New York today?", "I'm pickle rick"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 2
def test_fireworks_chat() -> None:
"""Test FireworksChat."""
llm = FireworksChat()
output = llm("Name me 3 quick facts about the New England Patriots")
assert isinstance(output, str)
async def test_fireworks_agenerate() -> None:
llm = Fireworks()
output = await llm.agenerate(["I'm a pickle", "I'm a pickle"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 2
async def test_fireworkschat_agenerate() -> None:
llm = FireworksChat(max_tokens=10)
output = await llm.agenerate(["Hello, how are you?"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 1
def test_fireworkschat_chain() -> None:
embeddings = OpenAIEmbeddings()
loader = TextLoader(
"[workspace]/langchain-internal/docs/extras/modules/state_of_the_union.txt"
)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = DeepLake(
dataset_path="./my_deeplake/", embedding_function=embeddings, overwrite=True
)
db.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
qa = RetrievalQA.from_chain_type(
llm=FireworksChat(),
chain_type="stuff",
retriever=db.as_retriever(),
)
query = "What did the president say about Ketanji Brown Jackson"
output = qa.run(query)
assert isinstance(output, str)
_EXPECTED_NUM_TOKENS = {
"accounts/fireworks/models/fireworks-llama-v2-13b": 17,
"accounts/fireworks/models/fireworks-llama-v2-7b": 17,
"accounts/fireworks/models/fireworks-llama-v2-13b-chat": 17,
"accounts/fireworks/models/fireworks-llama-v2-7b-chat": 17,
}
_MODELS = [
"accounts/fireworks/models/fireworks-llama-v2-13b",
"accounts/fireworks/models/fireworks-llama-v2-7b",
"accounts/fireworks/models/fireworks-llama-v2-13b-chat",
"accounts/fireworks/models/fireworks-llama-v2-7b-chat",
]
@pytest.mark.parametrize("model", _MODELS)
def test_fireworks_get_num_tokens(model: str) -> None:
"""Test get_tokens."""
llm = Fireworks(model=model)
assert llm.get_num_tokens("表情符号是\n🦜🔗") == _EXPECTED_NUM_TOKENS[model]
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | aronweiler/langchain | libs~experimental~tests~integration_tests~chains~test_pal.py | """Test PAL chain."""
from langchain import OpenAI
from langchain.chains.pal.base import PALChain
def test_math_prompt() -> None:
"""Test math prompt."""
llm = OpenAI(temperature=0, max_tokens=512)
pal_chain = PALChain.from_math_prompt(llm, timeout=None)
question = (
"Jan has three times the number of pets as Marcia. "
"Marcia has two more pets than Cindy. "
"If Cindy has four pets, how many total pets do the three have?"
)
output = pal_chain.run(question)
assert output == "28"
def test_colored_object_prompt() -> None:
"""Test colored object prompt."""
llm = OpenAI(temperature=0, max_tokens=512)
pal_chain = PALChain.from_colored_object_prompt(llm, timeout=None)
question = (
"On the desk, you see two blue booklets, "
"two purple booklets, and two yellow pairs of sunglasses. "
"If I remove all the pairs of sunglasses from the desk, "
"how many purple items remain on it?"
)
output = pal_chain.run(question)
assert output == "2"
| [] |
2024-01-10 | aronweiler/langchain | libs~langchain~tests~integration_tests~llms~test_huggingface_text_gen_inference.py | from langchain import HuggingFaceTextGenInference
def test_invocation_params_stop_sequences() -> None:
llm = HuggingFaceTextGenInference()
assert llm._default_params["stop_sequences"] == []
runtime_stop = None
assert llm._invocation_params(runtime_stop)["stop_sequences"] == []
assert llm._default_params["stop_sequences"] == []
runtime_stop = ["stop"]
assert llm._invocation_params(runtime_stop)["stop_sequences"] == ["stop"]
assert llm._default_params["stop_sequences"] == []
llm = HuggingFaceTextGenInference(stop_sequences=["."])
runtime_stop = ["stop"]
assert llm._invocation_params(runtime_stop)["stop_sequences"] == [".", "stop"]
assert llm._default_params["stop_sequences"] == ["."]
| [] |
2024-01-10 | aronweiler/langchain | libs~langchain~langchain~callbacks~manager.py | from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from uuid import UUID
from tenacity import RetryCallState
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers import run_collector
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.callbacks.tracers.wandb import WandbTracer
from langchain.schema import (
AgentAction,
AgentFinish,
Document,
LLMResult,
)
from langchain.schema.messages import BaseMessage, get_buffer_string
from langchain.schema.output import ChatGenerationChunk, GenerationChunk
if TYPE_CHECKING:
from langsmith import Client as LangSmithClient
logger = logging.getLogger(__name__)
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
tracing_callback_var: ContextVar[
Optional[LangChainTracerV1]
] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
wandb_tracing_callback_var: ContextVar[
Optional[WandbTracer]
] = ContextVar( # noqa: E501
"tracing_wandb_callback", default=None
)
tracing_v2_callback_var: ContextVar[
Optional[LangChainTracer]
] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
run_collector_var: ContextVar[
Optional[run_collector.RunCollectorCallbackHandler]
] = ContextVar( # noqa: E501
"run_collector", default=None
)
def _get_debug() -> bool:
return langchain.debug
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get the OpenAI callback handler in a context manager.
which conveniently exposes token and cost information.
Returns:
OpenAICallbackHandler: The OpenAI callback handler.
Example:
>>> with get_openai_callback() as cb:
... # Use the OpenAI callback handler
"""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get the Deprecated LangChainTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
TracerSessionV1: The LangChainTracer session.
Example:
>>> with tracing_enabled() as session:
... # Use the LangChainTracer session
"""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
@contextmanager
def wandb_tracing_enabled(
session_name: str = "default",
) -> Generator[None, None, None]:
"""Get the WandbTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
None
Example:
>>> with wandb_tracing_enabled() as session:
... # Use the WandbTracer session
"""
cb = WandbTracer()
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
project_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
tags: Optional[List[str]] = None,
client: Optional[LangSmithClient] = None,
) -> Generator[None, None, None]:
"""Instruct LangChain to log all runs in context to LangSmith.
Args:
project_name (str, optional): The name of the project.
Defaults to "default".
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
tags (List[str], optional): The tags to add to the run.
Defaults to None.
Returns:
None
Example:
>>> with tracing_v2_enabled():
... # LangChain code will automatically be traced
"""
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
example_id=example_id,
project_name=project_name,
tags=tags,
client=client,
)
tracing_v2_callback_var.set(cb)
yield
tracing_v2_callback_var.set(None)
@contextmanager
def collect_runs() -> Generator[run_collector.RunCollectorCallbackHandler, None, None]:
"""Collect all run traces in context.
Returns:
run_collector.RunCollectorCallbackHandler: The run collector callback handler.
Example:
>>> with collect_runs() as runs_cb:
chain.invoke("foo")
run_id = runs_cb.traced_runs[0].id
"""
cb = run_collector.RunCollectorCallbackHandler()
run_collector_var.set(cb)
yield cb
run_collector_var.set(None)
@contextmanager
def trace_as_chain_group(
group_name: str,
callback_manager: Optional[CallbackManager] = None,
*,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> Generator[CallbackManager, None, None]:
"""Get a callback manager for a chain group in a context manager.
Useful for grouping different calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
CallbackManager: The callback manager for the chain group.
Example:
>>> with trace_as_chain_group("group_name") as manager:
... # Use the callback manager for the chain group
... llm.predict("Foo", callbacks=manager)
"""
cb = cast(
Callbacks,
[
LangChainTracer(
project_name=project_name,
example_id=example_id,
)
]
if callback_manager is None
else callback_manager,
)
cm = CallbackManager.configure(
inheritable_callbacks=cb,
inheritable_tags=tags,
)
run_manager = cm.on_chain_start({"name": group_name}, {}, run_id=run_id)
yield run_manager.get_child()
run_manager.on_chain_end({})
@asynccontextmanager
async def atrace_as_chain_group(
group_name: str,
callback_manager: Optional[AsyncCallbackManager] = None,
*,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> AsyncGenerator[AsyncCallbackManager, None]:
"""Get an async callback manager for a chain group in a context manager.
Useful for grouping different async calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
AsyncCallbackManager: The async callback manager for the chain group.
Example:
>>> async with atrace_as_chain_group("group_name") as manager:
... # Use the async callback manager for the chain group
... await llm.apredict("Foo", callbacks=manager)
"""
cb = cast(
Callbacks,
[
LangChainTracer(
project_name=project_name,
example_id=example_id,
)
]
if callback_manager is None
else callback_manager,
)
cm = AsyncCallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags)
run_manager = await cm.on_chain_start({"name": group_name}, {}, run_id=run_id)
try:
yield run_manager.get_child()
finally:
await run_manager.on_chain_end({})
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(
handler, ignore_condition_name
):
getattr(handler, event_name)(*args, **kwargs)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(
f"NotImplementedError in {handler.__class__.__name__}.{event_name}"
f" callback: {e}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
)
if handler.raise_error:
raise e
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
if handler.run_inline:
event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None, functools.partial(event, *args, **kwargs)
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(
f"NotImplementedError in {handler.__class__.__name__}.{event_name}"
f" callback: {e}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
)
if handler.raise_error:
raise e
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
for handler in [h for h in handlers if h.run_inline]:
await _ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
await asyncio.gather(
*(
_ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
for handler in handlers
if not handler.run_inline
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
*,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
inheritable_tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
) -> None:
"""Initialize the run manager.
Args:
run_id (UUID): The ID of the run.
handlers (List[BaseCallbackHandler]): The list of handlers.
inheritable_handlers (List[BaseCallbackHandler]):
The list of inheritable handlers.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
tags (Optional[List[str]]): The list of tags.
inheritable_tags (Optional[List[str]]): The list of inheritable tags.
metadata (Optional[Dict[str, Any]]): The metadata.
inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata.
"""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
self.tags = tags or []
self.inheritable_tags = inheritable_tags or []
self.metadata = metadata or {}
self.inheritable_metadata = inheritable_metadata or {}
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations.
Returns:
BaseRunManager: The noop manager.
"""
return cls(
run_id=uuid.uuid4(),
handlers=[],
inheritable_handlers=[],
tags=[],
inheritable_tags=[],
metadata={},
inheritable_metadata={},
)
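    # A small illustrative sketch of the noop manager in use: it carries a fresh
    # run_id but no handlers, so every callback it fans out is a harmless no-op.
    #
    #   run_manager = CallbackManagerForLLMRun.get_noop_manager()
    #   run_manager.on_llm_new_token("hi")   # dispatches to zero handlers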
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
_handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
_handle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class ParentRunManager(RunManager):
"""Sync Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
CallbackManager: The child callback manager.
"""
manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
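    # A usage sketch (hypothetical `parent_run_manager`): the child manager keeps
    # the inheritable handlers/tags/metadata and points parent_run_id at this run,
    # so anything started from it nests under the current run.
    #
    #   child_cm = parent_run_manager.get_child(tag="retrieval")
    #   child_cm.on_chain_start({"name": "lookup"}, {"query": "..."})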
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
await _ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
await _ahandle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncParentRunManager(AsyncRunManager):
"""Async Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
AsyncCallbackManager: The child callback manager.
"""
manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
_handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
chunk=chunk,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
_handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
_handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
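# A rough sketch (hypothetical streaming loop, not part of this module) of how an
# LLM implementation typically drives a CallbackManagerForLLMRun while
# generating; LLMResult and Generation come from langchain.schema:
#
#   def _stream(run_manager: CallbackManagerForLLMRun) -> None:
#       for token in ["Hel", "lo"]:              # stand-in for real decoding
#           run_manager.on_llm_new_token(token)
#       run_manager.on_llm_end(LLMResult(generations=[[Generation(text="Hello")]]))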
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
await _ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
chunk=chunk,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
await _ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await _ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
_handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
            error (BaseException): The error.
"""
_handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
_handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
_handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
async def on_chain_end(
self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
await _ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
            error (BaseException): The error.
"""
await _ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
await _ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
await _ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
_handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
_handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
await _ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await _ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin):
"""Callback manager for retriever run."""
def on_retriever_end(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> None:
"""Run when retriever ends running."""
_handle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retriever_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
_handle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForRetrieverRun(
AsyncParentRunManager,
RetrieverManagerMixin,
):
"""Async callback manager for retriever run."""
async def on_retriever_end(
self, documents: Sequence[Document], **kwargs: Any
) -> None:
"""Run when retriever ends running."""
await _ahandle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retriever_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
await _ahandle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that handles callbacks from langchain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
prompt as an LLM run.
"""
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
_handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
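    # Usage sketch (MyHandler is hypothetical): one CallbackManagerForLLMRun comes
    # back per prompt, each with its own freshly generated run_id.
    #
    #   cm = CallbackManager.configure(inheritable_callbacks=[MyHandler()])
    #   run_managers = cm.on_llm_start({"name": "my-llm"}, ["prompt A", "prompt B"])
    #   assert len(run_managers) == 2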
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
list of messages as an LLM run.
"""
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
_handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
CallbackManagerForChainRun: The callback manager for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
_handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run. Defaults to None.
Returns:
CallbackManagerForToolRun: The callback manager for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
_handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
_handle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> CallbackManager:
"""Configure the callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
CallbackManager: The configured callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
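    # A configuration sketch (MyHandler is hypothetical). Inheritable callbacks and
    # tags propagate to child runs; local ones apply only to this manager, and
    # verbose=True adds a StdOutCallbackHandler if one isn't already present.
    #
    #   cm = CallbackManager.configure(
    #       inheritable_callbacks=[MyHandler()],
    #       local_tags=["experiment-1"],
    #       verbose=True,
    #   )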
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that handles callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of async
callback managers, one for each LLM Run corresponding
to each prompt.
"""
tasks = []
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
tasks.append(
_ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of
async callback managers, one for each LLM Run
corresponding to each inner message list.
"""
tasks = []
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
tasks.append(
_ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
AsyncCallbackManagerForChainRun: The async callback manager
for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
await _ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
Returns:
AsyncCallbackManagerForToolRun: The async callback manager
for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
await _ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
await _ahandle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> AsyncCallbackManager:
"""Configure the async callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
AsyncCallbackManager: The configured async callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
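# Examples of the convention above (illustrative): "", "0", "false" and "False"
# all count as unset.
#
#   os.environ["LANGCHAIN_TRACING_V2"] = "true"   # env_var_is_set(...) -> True
#   os.environ["LANGCHAIN_TRACING_V2"] = "false"  # env_var_is_set(...) -> False
#   # variable not present at all                 # env_var_is_set(...) -> False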
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> T:
"""Configure the callback manager.
Args:
callback_manager_cls (Type[T]): The callback manager class.
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
T: The configured callback manager.
"""
callback_manager = callback_manager_cls(handlers=[])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers,
inheritable_handlers=inheritable_callbacks.inheritable_handlers,
parent_run_id=inheritable_callbacks.parent_run_id,
tags=inheritable_callbacks.tags,
inheritable_tags=inheritable_callbacks.inheritable_tags,
metadata=inheritable_callbacks.metadata,
inheritable_metadata=inheritable_callbacks.inheritable_metadata,
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
if inheritable_tags or local_tags:
callback_manager.add_tags(inheritable_tags or [])
callback_manager.add_tags(local_tags or [], False)
if inheritable_metadata or local_metadata:
callback_manager.add_metadata(inheritable_metadata or {})
callback_manager.add_metadata(local_metadata or {}, False)
tracer = tracing_callback_var.get()
wandb_tracer = wandb_tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING")
or tracer is not None
or env_var_is_set("LANGCHAIN_HANDLER")
)
wandb_tracing_enabled_ = (
env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None
)
tracer_project = os.environ.get(
"LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default")
)
run_collector_ = run_collector_var.get()
debug = _get_debug()
if (
verbose
or debug
or tracing_enabled_
or tracing_v2_enabled_
or wandb_tracing_enabled_
or open_ai is not None
):
if verbose and not any(
isinstance(handler, StdOutCallbackHandler)
for handler in callback_manager.handlers
):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(
isinstance(handler, ConsoleCallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1)
for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_project)
callback_manager.add_handler(handler, True)
if wandb_tracing_enabled_ and not any(
isinstance(handler, WandbTracer) for handler in callback_manager.handlers
):
if wandb_tracer:
callback_manager.add_handler(wandb_tracer, True)
else:
handler = WandbTracer()
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(project_name=tracer_project)
callback_manager.add_handler(handler, True)
except Exception as e:
logger.warning(
"Unable to load requested LangChainTracer."
" To disable this warning,"
" unset the LANGCHAIN_TRACING_V2 environment variables.",
e,
)
if open_ai is not None and not any(
isinstance(handler, OpenAICallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
if run_collector_ is not None and not any(
handler is run_collector_ # direct pointer comparison
for handler in callback_manager.handlers
):
callback_manager.add_handler(run_collector_, False)
return callback_manager
| [] |
2024-01-10 | aronweiler/langchain | libs~langchain~tests~unit_tests~chains~test_combine_documents.py | """Test functionality related to combining documents."""
from typing import Any, List
import pytest
from langchain import PromptTemplate
from langchain.chains.combine_documents.reduce import (
_collapse_docs,
_split_list_of_docs,
)
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
from langchain.schema import format_document
from tests.unit_tests.llms.fake_llm import FakeLLM
def _fake_docs_len_func(docs: List[Document]) -> int:
return len(_fake_combine_docs_func(docs))
def _fake_combine_docs_func(docs: List[Document], **kwargs: Any) -> str:
return "".join([d.page_content for d in docs])
def test_multiple_input_keys() -> None:
chain = load_qa_with_sources_chain(FakeLLM(), chain_type="stuff")
assert chain.input_keys == ["input_documents", "question"]
def test__split_list_long_single_doc() -> None:
"""Test splitting of a long single doc."""
docs = [Document(page_content="foo" * 100)]
with pytest.raises(ValueError):
_split_list_of_docs(docs, _fake_docs_len_func, 100)
def test__split_list_single_doc() -> None:
"""Test splitting works with just a single doc."""
docs = [Document(page_content="foo")]
doc_list = _split_list_of_docs(docs, _fake_docs_len_func, 100)
assert doc_list == [docs]
def test__split_list_double_doc() -> None:
"""Test splitting works with just two docs."""
docs = [Document(page_content="foo"), Document(page_content="bar")]
doc_list = _split_list_of_docs(docs, _fake_docs_len_func, 100)
assert doc_list == [docs]
def test__split_list_works_correctly() -> None:
"""Test splitting works correctly."""
docs = [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
Document(page_content="foo" * 2),
Document(page_content="bar"),
Document(page_content="baz"),
]
doc_list = _split_list_of_docs(docs, _fake_docs_len_func, 10)
expected_result = [
# Test a group of three.
[
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
],
# Test a group of two, where one is bigger.
[Document(page_content="foo" * 2), Document(page_content="bar")],
# Test no errors on last
[Document(page_content="baz")],
]
assert doc_list == expected_result
def test__collapse_docs_no_metadata() -> None:
"""Test collapse documents functionality when no metadata."""
docs = [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
output = _collapse_docs(docs, _fake_combine_docs_func)
expected_output = Document(page_content="foobarbaz")
assert output == expected_output
def test__collapse_docs_one_doc() -> None:
"""Test collapse documents functionality when only one document present."""
# Test with no metadata.
docs = [Document(page_content="foo")]
output = _collapse_docs(docs, _fake_combine_docs_func)
assert output == docs[0]
# Test with metadata.
docs = [Document(page_content="foo", metadata={"source": "a"})]
output = _collapse_docs(docs, _fake_combine_docs_func)
assert output == docs[0]
def test__collapse_docs_metadata() -> None:
"""Test collapse documents functionality when metadata exists."""
metadata1 = {"source": "a", "foo": 2, "bar": "1", "extra1": "foo"}
metadata2 = {"source": "b", "foo": "3", "bar": 2, "extra2": "bar"}
docs = [
Document(page_content="foo", metadata=metadata1),
Document(page_content="bar", metadata=metadata2),
]
output = _collapse_docs(docs, _fake_combine_docs_func)
expected_metadata = {
"source": "a, b",
"foo": "2, 3",
"bar": "1, 2",
"extra1": "foo",
"extra2": "bar",
}
expected_output = Document(page_content="foobar", metadata=expected_metadata)
assert output == expected_output
def test_format_doc_with_metadata() -> None:
"""Test format doc on a valid document."""
doc = Document(page_content="foo", metadata={"bar": "baz"})
prompt = PromptTemplate(
input_variables=["page_content", "bar"], template="{page_content}, {bar}"
)
expected_output = "foo, baz"
output = format_document(doc, prompt)
assert output == expected_output
def test_format_doc_missing_metadata() -> None:
"""Test format doc on a document with missing metadata."""
doc = Document(page_content="foo")
prompt = PromptTemplate(
input_variables=["page_content", "bar"], template="{page_content}, {bar}"
)
with pytest.raises(ValueError):
format_document(doc, prompt)
| [
"{page_content}, {bar}",
"page_content"
] |
2024-01-10 | davedavis/llama-whisperer | step_04_whisper_with_chat_gpt.py | import asyncio
import pyaudio
import wave
import whisper
from pydub import AudioSegment
from dotenv import load_dotenv
import openai
import os
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
WAKE_TERM = os.getenv("WAKETERM")
# Parameters for recording audio
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK = 2048
# Initialize the audio interface
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
frames_per_buffer=CHUNK)
# Load the Whisper model
model = whisper.load_model("medium")
def get_completion(prompt, model="gpt-4"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
        temperature=0,  # the degree of randomness of the model's output
    )
return response.choices[0].message["content"]
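# A minimal usage sketch (assumes OPENAI_API_KEY is set and the "gpt-4" model is
# available to the account):
#
#   reply = get_completion("Summarize the last command in one sentence.")
#   print(reply)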
async def record_audio(filename, duration=4):
"""
An asynchronous generator that records audio continuously and yields
filenames of audio chunks. Each chunk is approximately 4 seconds long.
"""
while True:
frames = []
for _ in range(0, int(RATE / CHUNK * duration)):
data = stream.read(CHUNK)
frames.append(data)
with wave.open(filename, 'wb') as wf:
wf.setnchannels(CHANNELS)
wf.setsampwidth(audio.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
yield filename
await asyncio.sleep(0)
async def transcribe_audio():
"""
Continuously transcribes audio from the microphone. Audio is recorded
in chunks (approximately 4 seconds each), and each chunk is transcribed
separately.
"""
audio_generator = record_audio("chunk.wav")
async for filename in audio_generator:
audio = whisper.load_audio(filename)
audio = whisper.pad_or_trim(audio)
mel = whisper.log_mel_spectrogram(audio).to(model.device)
options = whisper.DecodingOptions()
result = whisper.decode(model, mel, options)
if WAKE_TERM in result.text.lower():
await wake_word_detected()
print("Transcription:", result.text)
async def wake_word_detected():
"""
Function to be called when the wake word is detected. It will listen
for 20 seconds or until a 2-second silence is detected.
"""
print("Wake word detected. Listening for command...")
audio_generator = record_audio("command.wav", duration=10)
async for filename in audio_generator:
audio_segment = AudioSegment.from_wav(filename)
silence_threshold = -40 # dB
if audio_segment.dBFS < silence_threshold:
print("Silence detected. Stopping recording...")
break
audio = whisper.load_audio(filename)
audio = whisper.pad_or_trim(audio)
mel = whisper.log_mel_spectrogram(audio).to(model.device)
options = whisper.DecodingOptions()
result = whisper.decode(model, mel, options)
print("Command transcription:", result.text)
response = get_completion(result.text)
print("Response:", response)
break
def main():
"""
The main function of the script. Starts the transcription process.
"""
asyncio.run(transcribe_audio())
if __name__ == "__main__":
main()
| [] |
2024-01-10 | PKU-YuanGroup/LanguageBind | data~process_video.py |
import io
import logging
import os
import cv2
import numpy as np
import torch
import decord
import torchvision.transforms
from PIL import Image
from decord import VideoReader, cpu
try:
from petrel_client.client import Client
petrel_backend_imported = True
except (ImportError, ModuleNotFoundError):
petrel_backend_imported = False
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision.transforms import Compose, Lambda, ToTensor
from torchvision.transforms._transforms_video import NormalizeVideo, RandomCropVideo, RandomHorizontalFlipVideo
from pytorchvideo.transforms import ApplyTransformToKey, ShortSideScale, UniformTemporalSubsample
import sys
sys.path.append('../')
from open_clip import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from os.path import join as opj
def get_video_loader(use_petrel_backend: bool = True,
enable_mc: bool = True,
conf_path: str = None):
if petrel_backend_imported and use_petrel_backend:
_client = Client(conf_path=conf_path, enable_mc=enable_mc)
else:
_client = None
def _loader(video_path):
if _client is not None and 's3:' in video_path:
video_path = io.BytesIO(_client.get(video_path))
vr = VideoReader(video_path, num_threads=1, ctx=cpu(0))
return vr
return _loader
decord.bridge.set_bridge('torch')
# video_loader = get_video_loader()
def get_video_transform(args):
if args.video_decode_backend == 'pytorchvideo':
transform = ApplyTransformToKey(
key="video",
transform=Compose(
[
UniformTemporalSubsample(args.num_frames),
Lambda(lambda x: x / 255.0),
NormalizeVideo(mean=OPENAI_DATASET_MEAN, std=OPENAI_DATASET_STD),
ShortSideScale(size=224),
RandomCropVideo(size=224),
RandomHorizontalFlipVideo(p=0.5),
]
),
)
elif args.video_decode_backend == 'decord':
transform = Compose(
[
# UniformTemporalSubsample(num_frames),
Lambda(lambda x: x / 255.0),
NormalizeVideo(mean=OPENAI_DATASET_MEAN, std=OPENAI_DATASET_STD),
ShortSideScale(size=224),
RandomCropVideo(size=224),
RandomHorizontalFlipVideo(p=0.5),
]
)
elif args.video_decode_backend == 'opencv':
transform = Compose(
[
# UniformTemporalSubsample(num_frames),
Lambda(lambda x: x / 255.0),
NormalizeVideo(mean=OPENAI_DATASET_MEAN, std=OPENAI_DATASET_STD),
ShortSideScale(size=224),
RandomCropVideo(size=224),
RandomHorizontalFlipVideo(p=0.5),
]
)
elif args.video_decode_backend == 'imgs':
transform = Compose(
[
# UniformTemporalSubsample(num_frames),
# Lambda(lambda x: x / 255.0),
NormalizeVideo(mean=OPENAI_DATASET_MEAN, std=OPENAI_DATASET_STD),
ShortSideScale(size=224),
RandomCropVideo(size=224),
RandomHorizontalFlipVideo(p=0.5),
]
)
else:
raise NameError('video_decode_backend should specify in (pytorchvideo, decord, opencv, imgs)')
return transform
def load_and_transform_video(
video_path,
transform,
video_decode_backend='opencv',
clip_start_sec=0.0,
clip_end_sec=None,
num_frames=8,
):
if video_decode_backend == 'pytorchvideo':
# decord pyav
video = EncodedVideo.from_path(video_path, decoder="decord", decode_audio=False)
duration = video.duration
start_sec = clip_start_sec # secs
end_sec = clip_end_sec if clip_end_sec is not None else duration # secs
video_data = video.get_clip(start_sec=start_sec, end_sec=end_sec)
video_outputs = transform(video_data)
elif video_decode_backend == 'decord':
decord_vr = VideoReader(video_path, ctx=cpu(0))
duration = len(decord_vr)
frame_id_list = np.linspace(0, duration-1, num_frames, dtype=int)
video_data = decord_vr.get_batch(frame_id_list)
video_data = video_data.permute(3, 0, 1, 2) # (T, H, W, C) -> (C, T, H, W)
video_outputs = transform(video_data)
elif video_decode_backend == 'opencv':
cv2_vr = cv2.VideoCapture(video_path)
duration = int(cv2_vr.get(cv2.CAP_PROP_FRAME_COUNT))
frame_id_list = np.linspace(0, duration-1, num_frames, dtype=int)
video_data = []
for frame_idx in frame_id_list:
cv2_vr.set(1, frame_idx)
_, frame = cv2_vr.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
video_data.append(torch.from_numpy(frame).permute(2, 0, 1))
cv2_vr.release()
video_data = torch.stack(video_data, dim=1)
video_outputs = transform(video_data)
elif video_decode_backend == 'imgs':
resize256_folder = video_path.replace('.mp4', '_resize256_folder')
video_data = [ToTensor()(Image.open(opj(resize256_folder, f'{i}.jpg'))) for i in range(8)]
video_data = torch.stack(video_data, dim=1)
# print(video_data.shape, video_data.max(), video_data.min())
video_outputs = transform(video_data)
else:
raise NameError('video_decode_backend should specify in (pytorchvideo, decord, opencv, imgs)')
return {'pixel_values': video_outputs}
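# A usage sketch for the decord path (assumes `sample.mp4` exists and `args` has
# video_decode_backend='decord'); 8 uniformly sampled frames come back normalized
# and cropped to 224x224:
#
#   transform = get_video_transform(args)
#   out = load_and_transform_video('sample.mp4', transform,
#                                  video_decode_backend='decord', num_frames=8)
#   out['pixel_values'].shape  # -> torch.Size([3, 8, 224, 224])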
if __name__ == '__main__':
load_and_transform_video(r"D:\ONE-PEACE-main\lb_test\zHSOYcZblvY.mp4") | [] |
2024-01-10 | PKU-YuanGroup/LanguageBind | open_clip~factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
import torch
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
resize_pos_embed, get_cast_dtype
from .coca_model import CoCa
from .loss import ClipLoss, DistillClipLoss, CoCaLoss
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained,\
list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform, AugmentationCfg
from .tokenizer import HFTokenizer, tokenize
HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {}  # dictionary (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def get_tokenizer(model_name, cache_dir):
if model_name.startswith(HF_HUB_PREFIX):
tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):], cache_dir)
else:
config = get_model_config(model_name)
tokenizer = HFTokenizer(
config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
return tokenizer
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def load_checkpoint(model, checkpoint_path, strict=True):
state_dict = load_state_dict(checkpoint_path)
# detect old format and make compatible with new format
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
state_dict = convert_to_custom_text_state_dict(state_dict)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
require_pretrained: bool = False,
):
has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
if has_hf_hub_prefix:
model_id = model_name[len(HF_HUB_PREFIX):]
checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir)
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
pretrained_cfg = config['preprocess_cfg']
model_cfg = config['model_cfg']
else:
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
checkpoint_path = None
pretrained_cfg = {}
model_cfg = None
if isinstance(device, str):
device = torch.device(device)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(
model_name,
precision=precision,
device=device,
cache_dir=cache_dir,
)
else:
model_cfg = model_cfg or get_model_config(model_name)
if model_cfg is not None:
logging.info(f'Loaded {model_name} model config.')
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout
if force_image_size is not None:
# override model config's image size
model_cfg["vision_cfg"]["image_size"] = force_image_size
is_timm_model = 'timm_model_name' in model_cfg.get('vision_cfg', {})
if pretrained_image:
if is_timm_model:
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
# cast_dtype set for fp16 and bf16 (manual mixed-precision), not set for 'amp' or 'pure' modes
cast_dtype = get_cast_dtype(precision)
is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model
if custom_text:
if is_hf_model:
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
if "coca" in model_name:
model = CoCa(**model_cfg, cast_dtype=cast_dtype)
else:
model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
else:
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
if precision in ("fp16", "bf16"):
dtype = torch.float16 if 'fp16' in precision else torch.bfloat16
# manual mixed precision that matches original OpenAI behaviour
if is_timm_model:
# FIXME this is a bit janky, create timm based model in low-precision and
# then cast only LayerNormFp32 instances back to float32 so they don't break.
# Why? The convert_weights_to_lp fn only works with native models.
model.to(device=device, dtype=dtype)
from .transformer import LayerNormFp32
def _convert_ln(m):
if isinstance(m, LayerNormFp32):
m.weight.data = m.weight.data.to(torch.float32)
m.bias.data = m.bias.data.to(torch.float32)
model.apply(_convert_ln)
else:
model.to(device=device)
convert_weights_to_lp(model, dtype=dtype)
elif precision in ("pure_fp16", "pure_bf16"):
dtype = torch.float16 if 'fp16' in precision else torch.bfloat16
model.to(device=device, dtype=dtype)
else:
model.to(device=device)
pretrained_loaded = False
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
error_str = (
                    f'Pretrained weights ({pretrained}) not found for model {model_name}. '
                    f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}).')
logging.warning(error_str)
raise RuntimeError(error_str)
pretrained_loaded = True
elif has_hf_hub_prefix:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
pretrained_loaded = True
if require_pretrained and not pretrained_loaded:
# callers of create_model_from_pretrained always expect pretrained weights
raise RuntimeError(
f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.')
# set image / mean metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
if jit:
model = torch.jit.script(model)
return model
def create_loss(args):
if args.distill:
return DistillClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
elif "coca" in args.model.lower():
return CoCaLoss(
caption_loss_weight=args.coca_caption_loss_weight,
clip_loss_weight=args.coca_contrastive_loss_weight,
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
return ClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
def create_model_and_transforms(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
):
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_patch_dropout=force_patch_dropout,
force_image_size=force_image_size,
pretrained_image=pretrained_image,
pretrained_hf=pretrained_hf,
cache_dir=cache_dir,
output_dict=output_dict,
)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess_train = image_transform(
model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg,
)
preprocess_val = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess_train, preprocess_val
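# A usage sketch (assumes the 'ViT-B-32' config and the 'laion2b_s34b_b79k'
# pretrained tag are available in this checkout):
#
#   model, preprocess_train, preprocess_val = create_model_and_transforms(
#       'ViT-B-32', pretrained='laion2b_s34b_b79k', device='cpu')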
def create_model_from_pretrained(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
return_transform: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
cache_dir: Optional[str] = None,
):
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_image_size=force_image_size,
cache_dir=cache_dir,
require_pretrained=True,
)
if not return_transform:
return model
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess
| [] |
2024-01-10 | PKU-YuanGroup/LanguageBind | v_cls~zero_shot.py | import logging
import os
import torch
import torch.nn.functional as F
from tqdm import tqdm
from open_clip import get_input_dtype, get_tokenizer
from open_clip.factory import HF_HUB_PREFIX
from training.distributed import is_master
from v_cls.zero_shot_classifier import build_zero_shot_classifier
from v_cls.zero_shot_metadata import OPENAI_IMAGENET_TEMPLATES, CLASSNAMES
from training.precision import get_autocast
def accuracy(output, target, topk=(1,)):
pred = output.topk(max(topk), 1, True, True)[1].t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
return [float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk]
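# Note: accuracy() returns correct-prediction *counts* per k (the caller divides
# by n later). Illustrative values:
#
#   logits = torch.tensor([[0.1, 0.7, 0.2], [0.8, 0.15, 0.05]])
#   target = torch.tensor([1, 2])
#   accuracy(logits, target, topk=(1, 2))  # -> [1.0, 1.0]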
def run(model, classifier, dataloader, args):
autocast = get_autocast(args.precision)
input_dtype = get_input_dtype(args.precision)
file = os.path.join(args.output_dir, str(args.rank) + '.txt')
final_result = []
with torch.no_grad():
top1, top5, n = 0., 0., 0.
for batch in tqdm(dataloader, unit_scale=args.batch_size):
images = batch[0]
target = batch[1]
ids = batch[2]
chunk_nb = batch[3]
split_nb = batch[4]
images = images.to(device=args.device, dtype=input_dtype)
target = target.to(args.device)
with autocast():
# predict
output = model(image=images)
image_features = output['image_features'] if isinstance(output, dict) else output[0]
logits = 100. * image_features @ classifier
output = logits
# print(output.shape)
for i in range(output.size(0)):
string = "{} {} {} {} {}\n".format(
ids[i], str(output.data[i].cpu().numpy().tolist()),
str(int(target[i].cpu().numpy())),
str(int(chunk_nb[i].cpu().numpy())),
str(int(split_nb[i].cpu().numpy())))
final_result.append(string)
# measure accuracy
acc1, acc5 = accuracy(logits, target, topk=(1, 5))
top1 += acc1
top5 += acc5
n += images.size(0)
top1 = (top1 / n)
top5 = (top5 / n)
if not os.path.exists(file):
os.mknod(file)
with open(file, 'w') as f:
f.write("{}, {}\n".format(top1, top5))
for line in final_result:
f.write(line)
return top1, top5
def zero_shot_eval(model, dataloader, epoch, args):
if args.zeroshot_frequency == 0:
return {}
if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
return {}
if args.distributed and not args.horovod:
model = model.module
if is_master(args):
logging.info(f'Starting zero-shot {args.val_v_cls_data[0].upper()}')
logging.info('Building zero-shot classifier')
autocast = get_autocast(args.precision)
with autocast():
tokenizer = get_tokenizer(HF_HUB_PREFIX+args.model, cache_dir=args.cache_dir)
classifier = build_zero_shot_classifier(
model,
tokenizer=tokenizer,
classnames=CLASSNAMES[args.val_v_cls_data[0]],
templates=OPENAI_IMAGENET_TEMPLATES,
num_classes_per_batch=10,
device=args.device,
use_tqdm=True,
)
if is_master(args):
logging.info('Using classifier')
# results = {}
run(model, classifier, dataloader, args)
# results['kinetics400-zeroshot-val-top1'] = top1
# results['kinetics400-zeroshot-val-top5'] = top5
if is_master(args):
logging.info(f'Finished zero-shot {args.val_v_cls_data[0].upper()}.')
# return results
| [] |
2024-01-10 | davila7/file-gpt | embeddings.py | """Wrapper around OpenAI embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from tenacity import (
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from openai.error import Timeout, APIError, APIConnectionError, RateLimitError
class OpenAIEmbeddings(BaseModel, Embeddings):
"""Wrapper around OpenAI embedding models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
openai = OpenAIEmbeddings(openai_api_key="my-api-key")
"""
client: Any #: :meta private:
document_model_name: str = "text-embedding-ada-002"
query_model_name: str = "text-embedding-ada-002"
openai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
# TODO: deprecate this
@root_validator(pre=True, allow_reuse=True)
def get_model_names(cls, values: Dict) -> Dict:
"""Get model names from just old model name."""
if "model_name" in values:
if "document_model_name" in values:
raise ValueError(
"Both `model_name` and `document_model_name` were provided, "
"but only one should be."
)
if "query_model_name" in values:
raise ValueError(
"Both `model_name` and `query_model_name` were provided, "
"but only one should be."
)
model_name = values.pop("model_name")
values["document_model_name"] = f"text-search-{model_name}-doc-001"
values["query_model_name"] = f"text-search-{model_name}-query-001"
return values
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
try:
import openai
openai.api_key = openai_api_key
values["client"] = openai.Embedding
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please it install it with `pip install openai`."
)
return values
@retry(
reraise=True,
stop=stop_after_attempt(100),
wait=wait_exponential(multiplier=1, min=10, max=60),
retry=(
retry_if_exception_type(Timeout)
| retry_if_exception_type(APIError)
| retry_if_exception_type(APIConnectionError)
| retry_if_exception_type(RateLimitError)
),
)
def _embedding_func(self, text: str, *, engine: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint with exponential backoff."""
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return self.client.create(input=[text], engine=engine)["data"][0]["embedding"]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to OpenAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
responses = [
self._embedding_func(text, engine=self.document_model_name)
for text in texts
]
return responses
def embed_query(self, text: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self._embedding_func(text, engine=self.query_model_name)
return embedding | [] |
2024-01-10 | enoreyes/langchain | tests~integration_tests~embeddings~test_huggingface.py | """Test huggingface embeddings."""
import unittest
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
@unittest.skip("This test causes a segfault.")
def test_huggingface_embedding_documents() -> None:
"""Test huggingface embeddings."""
documents = ["foo bar"]
embedding = HuggingFaceEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
@unittest.skip("This test causes a segfault.")
def test_huggingface_embedding_query() -> None:
"""Test huggingface embeddings."""
document = "foo bar"
embedding = HuggingFaceEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 768
def test_huggingface_instructor_embedding_documents() -> None:
"""Test huggingface embeddings."""
documents = ["foo bar"]
embedding = HuggingFaceEmbeddings(model_name="hkunlp/instructor-large")
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
| [] |
2024-01-10 | Wilson-ZheLin/GPT-4-Web-Browsing | src~llm_answer.py | import time
import os
import yaml
from fetch_web_content import WebContentFetcher
from retrieval import EmbeddingRetriever
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
class GPTAnswer:
TOP_K = 10 # Top K documents to retrieve
def __init__(self):
# Load configuration from a YAML file
config_path = os.path.join(os.path.dirname(__file__), 'config', 'config.yaml')
with open(config_path, 'r') as file:
self.config = yaml.safe_load(file)
self.model_name = self.config["model_name"]
self.api_key = self.config["openai_api_key"]
def _format_reference(self, relevant_docs_list, link_list):
# Format the references from the retrieved documents for use in the prompt
reference_url_list = [(relevant_docs_list[i].metadata)['url'] for i in range(self.TOP_K)]
reference_content_list = [relevant_docs_list[i].page_content for i in range(self.TOP_K)]
reference_index_list = [link_list.index(link)+1 for link in reference_url_list]
rearranged_index_list = self._rearrange_index(reference_index_list)
# Create a formatted string of references
formatted_reference = "\n"
for i in range(self.TOP_K):
formatted_reference += ('Webpage[' + str(rearranged_index_list[i]) + '], url: ' + reference_url_list[i] + ':\n' + reference_content_list[i] + '\n\n\n')
return formatted_reference
def _rearrange_index(self, original_index_list):
# Rearrange indices to ensure they are unique and sequential
index_dict = {}
rearranged_index_list = []
for index in original_index_list:
if index not in index_dict:
index_dict.update({index: len(index_dict)+1})
rearranged_index_list.append(len(index_dict))
else:
rearranged_index_list.append(index_dict[index])
return rearranged_index_list
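    # Worked example (hypothetical input): _rearrange_index([3, 1, 3, 2]) returns [1, 2, 1, 3]:
    # each distinct source index is renumbered in order of first appearance, and repeated
    # sources keep the number they were first assigned.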
def get_answer(self, query, relevant_docs, language, output_format, profile):
# Create an instance of ChatOpenAI and generate an answer
llm = ChatOpenAI(model_name=self.model_name, openai_api_key=self.api_key, temperature=0.0, streaming=True, callbacks=[StreamingStdOutCallbackHandler()])
template = self.config["template"]
prompt_template = PromptTemplate(
input_variables=["profile", "context_str", "language", "query", "format"],
template=template
)
profile = "conscientious researcher" if not profile else profile
summary_prompt = prompt_template.format(context_str=relevant_docs, language=language, query=query, format=output_format, profile=profile)
print("\n\nThe message sent to LLM:\n", summary_prompt)
print("\n\n", "="*30, "GPT's Answer: ", "="*30, "\n")
gpt_answer = llm([HumanMessage(content=summary_prompt)])
return gpt_answer
# Example usage
if __name__ == "__main__":
content_processor = GPTAnswer()
query = "What happened to Silicon Valley Bank"
output_format = "" # User can specify output format
profile = "" # User can define the role for LLM
# Fetch web content based on the query
web_contents_fetcher = WebContentFetcher(query)
web_contents, serper_response = web_contents_fetcher.fetch()
# Retrieve relevant documents using embeddings
retriever = EmbeddingRetriever()
relevant_docs_list = retriever.retrieve_embeddings(web_contents, serper_response['links'], query)
formatted_relevant_docs = content_processor._format_reference(relevant_docs_list, serper_response['links'])
print(formatted_relevant_docs)
# Measure the time taken to get an answer from the GPT model
start = time.time()
# Generate answer from ChatOpenAI
ai_message_obj = content_processor.get_answer(query, formatted_relevant_docs, serper_response['language'], output_format, profile)
answer = ai_message_obj.content + '\n'
end = time.time()
print("\n\nGPT Answer time:", end - start, "s") | [
"context_str",
"language"
] |
2024-01-10 | sushiswap/sushi-operations | analytics~nomi~text-embeddings~src~utils~token_embeddings.py |
import openai
import pandas as pd
import numpy as np
import os
from utils.config import OPEN_API_KEY
EMBEDDING_MODEL = "text-embedding-ada-002"
openai.api_key = OPEN_API_KEY
def get_embedding(text, model=EMBEDDING_MODEL):
return openai.Embedding.create(input=[text], model=model)['data'][0]['embedding']
def compute_doc_embeddings(df: pd.DataFrame) -> dict[tuple[str, str], np.ndarray]:
return {
idx: get_embedding(r.content) for idx, r in df.iterrows()
}
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
def order_by_similarity(query: str, contexts: dict[tuple[str, str], np.ndarray]) -> list[tuple[float, tuple[str, str]]]:
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
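# Minimal usage sketch (assumes a DataFrame with a `content` column and a valid OPEN_API_KEY):
#   df = pd.DataFrame({"content": ["SushiSwap is a DEX on Ethereum", "Uniswap is another DEX"]})
#   doc_embeddings = compute_doc_embeddings(df)
#   ranked = order_by_similarity("what is sushiswap?", doc_embeddings)
#   # `ranked` is a list of (similarity, row_index) pairs, most similar document first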
| [] |
2024-01-10 | dylanneve1/gpt-telegram | test~test_Faucet.py | from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
def gptsearch(result, llm):
response = llm([HumanMessage(content=result)])
response = response.content
return response
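# The original snippet references `chainllm` without defining it; a minimal sketch,
# assuming a default ChatOpenAI chat model is what was intended:
chainllm = ChatOpenAI()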
print(gptsearch("鲁迅和周树人为什么打架", chainllm)) | [] |
2024-01-10 | mehrdadzakershahrak/MindForge-Framework | src~LanguageModelService.py | import os
from dotenv import load_dotenv
from openai import OpenAI
# Load environment variables
load_dotenv()
class LanguageModelService:
def __init__(self, provider='openai'):
self.provider = provider
self.openai_api_key = os.getenv("OPENAI_API_KEY")
if not self.openai_api_key:
raise ValueError("OpenAI API key not found.")
self.openai_client = OpenAI() if self.provider == 'openai' else None
def query_language_model(self, prompt, max_tokens=1000):
if self.provider == 'openai':
return self.query_openai(prompt, max_tokens)
elif self.provider == 'huggingface':
return self.query_huggingface(prompt, max_tokens)
else:
raise ValueError("Unsupported language model provider.")
def query_openai(self, prompt, max_tokens):
response = self.openai_client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": prompt}],
max_tokens=max_tokens,
temperature=0.75
)
if response.choices:
last_message = response.choices[-1].message
return last_message.content if last_message else ''
return ''
def query_huggingface(self, prompt, max_tokens):
# Implementation for Hugging Face API call
# Placeholder: return a formatted string for now
return f"Hugging Face response to: {prompt}" | [] |
2024-01-10 | fredrikskatland/loplabbet_scraping | loplabbet~update_index.py | import requests
from bs4 import BeautifulSoup
from langchain.indexes import SQLRecordManager
from langchain.indexes import index as LangchainIndex
from langchain.schema import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
import os
import pinecone
def get_windows_env(var_name):
try:
result = os.popen(f"powershell.exe -c 'echo $env:{var_name}'").read().strip()
return result
except Exception as e:
print(f"An error occurred: {e}")
return None
pinecone.init(
api_key=get_windows_env("PINECONE_API_FINN"), # find at app.pinecone.io
environment = get_windows_env("PINECONE_ENV_FINN") # next to api key in console
)
embeddings = OpenAIEmbeddings(openai_api_key=get_windows_env("OPENAI_API_KEY"))
collection_name = "loplabbet-produkter"
index_name = "loplabbet-produkter"
pinecone_index = pinecone.Index(index_name)
#index_stats_response = pinecone_index.describe_index_stats()
#index_name = "loplabbet-produkter"
vectorstore = Pinecone.from_existing_index(index_name = index_name, embedding=embeddings)
# Setting up a record manager
namespace = f"pinecone/{collection_name}"
record_manager = SQLRecordManager(
namespace, db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()
def extract_text_from(url):
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
try:
# Headline
headline = soup.select_one('html > body > div:nth-of-type(1) > main > div > section:nth-of-type(2) > div > div:nth-of-type(2) > div:nth-of-type(1) > div > h1').text
# Headline description
headline_description = soup.select_one('html > body > div:nth-of-type(1) > main > div > section:nth-of-type(2) > div > div:nth-of-type(2) > div:nth-of-type(1) > div > div:nth-of-type(3) > p').text
# Beskrivelse
beskrivelse = soup.select_one('html > body > div:nth-of-type(1) > main > div > section:nth-of-type(2) > div > div:nth-of-type(1) > div > div:nth-of-type(2) > div > div:nth-of-type(2) > div')
if beskrivelse is not None:
beskrivelse_tekst = '\n'.join(child.get_text().strip() for child in beskrivelse.children if child.name)
else:
beskrivelse_tekst = ''
# Ratings
rating_sections = soup.select('.ll-product-review__rating')
ratings_dict = {}
for section in rating_sections:
# Extract the rating category name
category_name = section.select_one('.ll-product-rating__label').get_text().strip()
# Extract the star rating
star_container = section.select_one('.ll-product-rating__stars')
#star_rating = str(int(star_container['class'][-1].split('-')[-1])) + " av 6"
star_rating = int(star_container['class'][-1].split('-')[-1])
ratings_dict[category_name] = star_rating
if len(ratings_dict) == 0:
ratings_dict = {'Stabilitet':'', 'Støtdemping':'', 'Løpsfølelse':''}
# Details
detail_sections = soup.select('.ll-product-detail--container')
details_dict = {}
for section in detail_sections:
# Extract the detail description
detail_description_raw = section.select_one('div > div:not(.ll-product-detail--bold)').get_text().strip()
# Split the description at the non-breaking space and take the first portion
detail_description = detail_description_raw.split('\xa0')[0]
# Extract the detail value and remove any non-numeric characters like spaces
detail_value_raw = section.select_one('.ll-product-detail--bold').get_text().strip()
# Check if the value is numeric (like '8') or a string (like '30/22')
if '/' in detail_value_raw:
detail_value = detail_value_raw
else:
detail_value = int(detail_value_raw)
details_dict[detail_description] = detail_value
output = {
'headline': headline,
'headline_description': headline_description,
'beskrivelse_tekst': beskrivelse_tekst,
'url': url,
'ratings': ratings_dict,
'details': details_dict,
}
except:
output = {
'headline': 'Parse error',
'headline_description': 'Parse error',
'beskrivelse_tekst': 'Parse error',
'url': url,
'ratings': {'Stabilitet':'', 'Støtdemping':'', 'Løpsfølelse':''},
'details': {},
}
return output
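# extract_text_from always returns a dict with the same keys: 'headline', 'headline_description',
# 'beskrivelse_tekst', 'url', a 'ratings' dict (Stabilitet/Støtdemping/Løpsfølelse) and a
# 'details' dict; parse failures fall back to 'Parse error' placeholders so the downstream
# markdown formatting never breaks.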
def format_to_markdown(data):
details_with_linebreaks = data['beskrivelse_tekst'].replace('\n', ' \n') # Ensuring line breaks are respected in markdown
markdown_text = f"## {data['headline']}\n"
markdown_text += f"\n**Beskrivelse:** {data['headline_description']}\n"
markdown_text += f"\n### Detaljer:\n"
markdown_text += f"\n{details_with_linebreaks}\n"
for key, value in data['details'].items():
markdown_text += f"\n- **{key}**: {value}"
markdown_text += f"\n### Vurderinger:\n"
for key, value in data['ratings'].items():
markdown_text += f"\n- **{key}**: {value}"
markdown_text += f"\n[Kilde]({data['url']})"
return markdown_text
def update_index(df, indexing_type="incremental"):
product_urls = df['links'].tolist()
sex = df['gender'].tolist()
categories = df['category'].tolist()
ranks = range(1, len(product_urls)+1)
# Extract texts
texts = []
numberOfUrls = len(product_urls)
counter = 0
# Loop through dataframe
for index, row in df.iterrows():
texts.append(extract_text_from(row['links']))
counter += 1
print(f"Extracted text from {row['links']}. {counter} of {numberOfUrls} done.")
# Format to markdown
markdown_texts = [format_to_markdown(text) for text in texts]
# Set url and headline as metadata
metadatas = [{'url': text['url']
, 'produktnavn': text['headline']
, 'ratings_Stabilitet': text['ratings'].get('Stabilitet', '')
, 'ratings_Demping': text['ratings'].get('Støtdemping', '')
, 'ratings_Løpsfølelse': text['ratings'].get('Løpsfølelse', '')
#, 'details': text['details']
, 'sex': sex
, 'category': category
, 'rank': rank} for text, sex, category, rank in zip(texts, sex, categories, ranks)]
documents = [Document(page_content=string, metadata=meta) for string, meta in zip(markdown_texts, metadatas)]
print(LangchainIndex(
documents,
record_manager,
vectorstore,
cleanup=indexing_type,
source_id_key="url"
))
| [] |
2024-01-10 | carlodandan/chatgpt-telegram-bot | gpt_bot.py | # This script was written using Python.
# Using python-telegram-bot (v20.5) and openai api (chat-completion, gpt-3.5-turbo) libraries.
# Import necessary libraries and modules.
import openai
import os
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeVar, Union
from telegram import Update
from telegram._utils.defaultvalue import DEFAULT_TRUE
from telegram._utils.types import DVType
from telegram.ext import filters as filters_module
from telegram.ext._basehandler import BaseHandler
from telegram.ext._utils.types import CCT, HandlerCallback
from telegram.ext import ApplicationBuilder, ContextTypes
# Fetch OPENAI_API_KEY and TELEGRAM_BOT_TOKEN from environment variables
openai.api_key = os.getenv('OPENAI_API_KEY')
telegram_bot_token = os.getenv('TELEGRAM_BOT_TOKEN')
# Setup logging
logging.basicConfig(
level=logging.INFO,
format='%(name)s - %(levelname)s - %(message)s'
)
if TYPE_CHECKING:
from telegram.ext import Application
RT = TypeVar("RT")
class MessageHandler(BaseHandler[Update, CCT]):
__slots__ = ("filters",)
def __init__(
self,
filters: filters_module.BaseFilter,
callback: HandlerCallback[Update, CCT, RT],
block: DVType[bool] = DEFAULT_TRUE,
):
super().__init__(callback, block=block)
self.filters: filters_module.BaseFilter = (
filters if filters is not None else filters_module.ALL
)
def check_update(self, update: object) -> Optional[Union[bool, Dict[str, List[Any]]]]:
if isinstance(update, Update):
return self.filters.check_update(update) or False
return None
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
user_input = update.message.text
# Make "typing..." status visible under bots name
await context.bot.send_chat_action(chat_id=update.effective_chat.id, action='typing')
    # Use the OpenAI API to generate a response (gpt-3.5-turbo is a chat model,
    # so it is called through the ChatCompletion endpoint)
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": f"{user_input}\n"}],
        max_tokens=1024,  # must be an integer and leave room for the prompt in the context window
    )
    # Send the response back to the user
    await context.bot.send_message(chat_id=update.effective_chat.id, text=response["choices"][0]["message"]["content"])
await context.bot.send_message(chat_id=update.effective_chat.id, text="Any more questions?")
if __name__ == "__main__":
# Create the Telegram bot instance
application = ApplicationBuilder().token(telegram_bot_token).build()
# Register the message handler
handler = MessageHandler(filters=None, callback=handle_message)
application.add_handler(handler)
# Start the bot
application.run_polling()
| [
"PLACEHOLDER\n"
] |
2024-01-10 | AdamKAstro/babyagi | babyagi.py | #!/usr/bin/env python3
import os
import openai
import pinecone
import time
import sys
from collections import deque
from typing import Dict, List
from dotenv import load_dotenv
import os
#Set Variables
load_dotenv()
# Set API Keys
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
assert OPENAI_API_KEY, "OPENAI_API_KEY environment variable is missing from .env"
# Use GPT-3 model
USE_GPT4 = False
if USE_GPT4:
print("\033[91m\033[1m"+"\n*****USING GPT-4. POTENTIALLY EXPENSIVE. MONITOR YOUR COSTS*****"+"\033[0m\033[0m")
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY", "")
assert PINECONE_API_KEY, "PINECONE_API_KEY environment variable is missing from .env"
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT", "us-east1-gcp")
assert PINECONE_ENVIRONMENT, "PINECONE_ENVIRONMENT environment variable is missing from .env"
# Table config
YOUR_TABLE_NAME = os.getenv("TABLE_NAME", "")
assert YOUR_TABLE_NAME, "TABLE_NAME environment variable is missing from .env"
# Project config
OBJECTIVE = sys.argv[1] if len(sys.argv) > 1 else os.getenv("OBJECTIVE", "")
assert OBJECTIVE, "OBJECTIVE environment variable is missing from .env"
YOUR_FIRST_TASK = os.getenv("FIRST_TASK", "")
assert YOUR_FIRST_TASK, "FIRST_TASK environment variable is missing from .env"
#Print OBJECTIVE
print("\033[96m\033[1m"+"\n*****OBJECTIVE*****\n"+"\033[0m\033[0m")
print(OBJECTIVE)
# Configure OpenAI and Pinecone
openai.api_key = OPENAI_API_KEY
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
# Create Pinecone index
table_name = YOUR_TABLE_NAME
dimension = 1536
metric = "cosine"
pod_type = "p1"
if table_name not in pinecone.list_indexes():
pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
# Connect to the index
index = pinecone.Index(table_name)
# Task list
task_list = deque([])
def add_task(task: Dict):
task_list.append(task)
def get_ada_embedding(text):
text = text.replace("\n", " ")
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
def openai_call(prompt: str, use_gpt4: bool = False, temperature: float = 0.5, max_tokens: int = 100):
if not use_gpt4:
#Call GPT-3 DaVinci model
response = openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response.choices[0].text.strip()
else:
#Call GPT-4 chat model
messages=[{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model="gpt-4",
messages = messages,
temperature=temperature,
max_tokens=max_tokens,
n=1,
stop=None,
)
return response.choices[0].message.content.strip()
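# Usage sketch (assumes OPENAI_API_KEY is set): openai_call("Summarize the objective in one line")
# returns a plain completion string from text-davinci-003; passing use_gpt4=True routes the same
# prompt through the chat-completion endpoint instead.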
def task_creation_agent(objective: str, result: Dict, task_description: str, task_list: List[str], gpt_version: str = 'gpt-3'):
prompt = f"You are an task creation AI that uses the result of an execution agent to create new tasks with the following objective: {objective}, The last completed task has the result: {result}. This result was based on this task description: {task_description}. These are incomplete tasks: {', '.join(task_list)}. Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array."
response = openai_call(prompt, USE_GPT4)
new_tasks = response.split('\n')
return [{"task_name": task_name} for task_name in new_tasks]
def prioritization_agent(this_task_id:int, gpt_version: str = 'gpt-3'):
global task_list
task_names = [t["task_name"] for t in task_list]
next_task_id = int(this_task_id)+1
prompt = f"""You are an task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: {task_names}. Consider the ultimate objective of your team:{OBJECTIVE}. Do not remove any tasks. Return the result as a numbered list, like:
#. First task
#. Second task
Start the task list with number {next_task_id}."""
response = openai_call(prompt, USE_GPT4)
new_tasks = response.split('\n')
task_list = deque()
for task_string in new_tasks:
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
task_list.append({"task_id": task_id, "task_name": task_name})
def execution_agent(objective:str,task: str, gpt_version: str = 'gpt-3') -> str:
#context = context_agent(index="quickstart", query="my_search_query", n=5)
context=context_agent(index=YOUR_TABLE_NAME, query=objective, n=5)
#print("\n*******RELEVANT CONTEXT******\n")
#print(context)
prompt =f"You are an AI who performs one task based on the following objective: {objective}.\nTake into account these previously completed tasks: {context}\nYour task: {task}\nResponse:"
return openai_call(prompt, USE_GPT4, 0.7, 2000)
def context_agent(query: str, index: str, n: int):
query_embedding = get_ada_embedding(query)
index = pinecone.Index(index_name=index)
results = index.query(query_embedding, top_k=n,
include_metadata=True)
#print("***** RESULTS *****")
#print(results)
sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
return [(str(item.metadata['task'])) for item in sorted_results]
# Add the first task
first_task = {
"task_id": 1,
"task_name": YOUR_FIRST_TASK
}
add_task(first_task)
# Main loop
task_id_counter = 1
while True:
if task_list:
# Print the task list
print("\033[95m\033[1m"+"\n*****TASK LIST*****\n"+"\033[0m\033[0m")
for t in task_list:
print(str(t['task_id'])+": "+t['task_name'])
# Step 1: Pull the first task
task = task_list.popleft()
print("\033[92m\033[1m"+"\n*****NEXT TASK*****\n"+"\033[0m\033[0m")
print(str(task['task_id'])+": "+task['task_name'])
# Send to execution function to complete the task based on the context
result = execution_agent(OBJECTIVE,task["task_name"])
this_task_id = int(task["task_id"])
print("\033[93m\033[1m"+"\n*****TASK RESULT*****\n"+"\033[0m\033[0m")
print(result)
# Step 2: Enrich result and store in Pinecone
enriched_result = {'data': result} # This is where you should enrich the result if needed
result_id = f"result_{task['task_id']}"
vector = enriched_result['data'] # extract the actual result from the dictionary
index.upsert([(result_id, get_ada_embedding(vector),{"task":task['task_name'],"result":result})])
# Step 3: Create new tasks and reprioritize task list
new_tasks = task_creation_agent(OBJECTIVE,enriched_result, task["task_name"], [t["task_name"] for t in task_list])
for new_task in new_tasks:
task_id_counter += 1
new_task.update({"task_id": task_id_counter})
add_task(new_task)
prioritization_agent(this_task_id)
time.sleep(1) # Sleep before checking the task list again
| [
", ",
"You are an task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: PLACEHOLDER. Consider the ultimate objective of your team:PLACEHOLDER. Do not remove any tasks. Return the result as a numbered list, like:\n #. First task\n #. Second task\n Start the task list with number PLACEHOLDER.",
"You are an AI who performs one task based on the following objective: PLACEHOLDER.\nTake into account these previously completed tasks: PLACEHOLDER\nYour task: PLACEHOLDER\nResponse:"
] |
2024-01-10 | uetuluk/code-interpreter-lite | assistant_trials~assistantkobold.py | import gradio as gr
from dotenv import load_dotenv
import os
import requests
import io
import re
from langchain import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.llms import TextGen
from langchain.chat_models import ChatAnthropic
from langchain.agents import ZeroShotAgent, initialize_agent, AgentType, AgentExecutor
from langchain.tools import StructuredTool
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, LLMResult, AgentFinish, OutputParserException
from langchain.prompts import MessagesPlaceholder
from langchain.memory import ConversationBufferMemory
# from langchain.experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner
from langchain.tools.python.tool import PythonREPLTool
from langchain.agents.agent_toolkits import create_python_agent
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from typing import Any, Dict, List
from langchain.llms.human import HumanInputLLM
def setup_assistant():
# class CustomLLM(LLM):
# n: int
# @property
# def _llm_type(self) -> str:
# return "custom"
# def _call(
# self,
# prompt: str,
# stop: Optional[List[str]] = None,
# run_manager: Optional[CallbackManagerForLLMRun] = None,
# ) -> str:
# if stop is not None:
# raise ValueError("stop kwargs are not permitted.")
# return prompt[: self.n]
# @property
# def _identifying_params(self) -> Mapping[str, Any]:
# """Get the identifying parameters."""
# return {"n": self.n}
# llm = HumanInputLLM(
# prompt_func=lambda prompt: print(
# f"\n===PROMPT====\n{prompt}\n=====END OF PROMPT======"
# )
# )
# tools = [PythonREPLTool()]
# memory = ConversationBufferMemory(
# memory_key="chat_history")
# agent_executor_instance = create_python_agent(
# llm=custom_llm,
# tool=PythonREPLTool(),
# verbose=True,
# agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
# )
# agent_executor_instance = initialize_agent(
# tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
from langchain.llms import KoboldApiLLM
from langchain.agents import XMLAgent, tool, AgentExecutor
@tool
def search(query: str) -> str:
"""Search things about current events."""
return "32 degrees"
llm = KoboldApiLLM(endpoint="http://localhost:5001", max_length=80)
chain = LLMChain(
llm=llm,
prompt=XMLAgent.get_default_prompt(),
output_parser=XMLAgent.get_default_output_parser()
)
agent = XMLAgent(tools=[search], llm_chain=chain)
agent_executor_instance = AgentExecutor(
agent=agent, tools=[search], verbose=True)
return agent_executor_instance
agent_executor = setup_assistant()
with gr.Blocks() as demo:
with gr.Column() as chatbot_column:
chatbot = gr.Chatbot()
with gr.Row() as chatbot_input:
msg = gr.Textbox(placeholder="Type your message here", lines=8)
send = gr.Button(value="Send", variant="primary")
def chatbot_handle(chatbot_instance, msg_instance):
class ChatbotHandler(BaseCallbackHandler):
def __init__(self):
self.chatbot_response = ""
super().__init__()
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
self.chatbot_response += outputs.get("output", "") + '\n'
def on_tool_end(self, output: str, **kwargs: Any) -> Any:
self.chatbot_response += f'```\n{output}\n```\n'
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
chatbot_thought = action.log.split("\n")[0]
chatbot_thought = chatbot_thought.replace("Thought: ", "")
if isinstance(action.tool_input, str):
chatbot_tool_input_code_string = action.tool_input
else:
chatbot_tool_input_code_string = action.tool_input.get(
"code")
self.chatbot_response += f"{chatbot_thought}\n"
self.chatbot_response += f'```\n{chatbot_tool_input_code_string}\n```\n'
def get_chatbot_response(self):
return self.chatbot_response
try:
chatbotHandler = ChatbotHandler()
agent_executor(
msg_instance, callbacks=[chatbotHandler])
chatbot_response = chatbotHandler.get_chatbot_response()
except OutputParserException as e:
raise gr.Error(
"Assistant could not handle the request. Error: " + str(e))
chatbot_instance.append((msg_instance, chatbot_response))
return {
chatbot: chatbot_instance,
msg: "",
}
send.click(chatbot_handle, [chatbot, msg], [
chatbot, msg])
if __name__ == "__main__":
demo.launch(server_port=7860)
| [
"Search things about current events."
] |
2024-01-10 | uetuluk/code-interpreter-lite | assistant_trials~assistant.py | from markdown import Markdown
import gradio as gr
from dotenv import load_dotenv
import os
import requests
import io
import re
import base64
import langchain
from langchain import PromptTemplate, LLMChain
from langchain.llms import TextGen
from typing import Any, Dict, List, Optional, Iterator, Tuple
import json
from langchain.schema.output import GenerationChunk
from langchain.callbacks.manager import CallbackManagerForLLMRun
import websocket
from langchain.tools import StructuredTool
from langchain.agents import ZeroShotAgent
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks.base import BaseCallbackHandler
from langchain.agents import AgentExecutor
from langchain.schema import AgentAction, LLMResult, AgentFinish, OutputParserException
from threading import Thread
from queue import Queue, Empty
SUPERVISOR_API = "http://localhost:3000"
MODEL_URL = "wss://api.openchat.ritsdev.top"
def setup_assistant():
class CustomTextGen(TextGen):
def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by textgen.
Args:
stop (Optional[List[str]]): List of stop sequences for textgen.
Returns:
Dictionary containing the combined parameters.
"""
# Raise error if stop sequences are in both input and default params
# if self.stop and stop is not None:
combined_stop = []
if self.stopping_strings and stop is not None:
# combine
combined_stop = self.stopping_strings + stop
# raise ValueError("`stop` found in both the input and default params.")
if self.preset is None:
params = self._default_params
else:
params = {"preset": self.preset}
# then sets it as configured, or default to an empty list:
params["stop"] = combined_stop or self.stopping_strings or stop or []
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
print(prompt + combined_text_output)
result = combined_text_output
else:
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
print(prompt + result)
else:
print(f"ERROR: response: {response}")
result = ""
return result
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
print(chunk, end='', flush=True)
"""
params = {**self._get_parameters(stop), **kwargs}
url = f"{self.model_url}/api/v1/stream"
request = params.copy()
request["prompt"] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
while True:
result = websocket_client.recv()
result = json.loads(result)
if result["event"] == "text_stream":
chunk = GenerationChunk(
text=result["text"],
generation_info=None,
)
yield chunk
elif result["event"] == "stream_end":
websocket_client.close()
return
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
token_response = requests.post(
f'{SUPERVISOR_API}/token', data={'username': "a", 'password': "a"}, timeout=600)
token_text = token_response.json().get('access_token')
token_instance = token_text
container_response = requests.post(
f'{SUPERVISOR_API}/container', headers={
'Authorization': f'Bearer {token_text}'
}, timeout=600
)
def code_interpreter_lite(code: str) -> str:
"""Execute the python code and return the result."""
# handle markdown
def extract_code_from_markdown(md_text):
# Using regex to extract text between ```
pattern = r"```[\w]*\n(.*?)```"
match = re.search(pattern, md_text, re.DOTALL)
if match:
return match.group(1).strip()
else:
# might not be markdown
return md_text
code = extract_code_from_markdown(code)
code_response = requests.post(
f'{SUPERVISOR_API}/run', headers={
'Authorization': f'Bearer {token_instance}'
}, json={
'code_string': code
}, timeout=600)
if code_response.status_code != 200:
raise Exception("No container created yet", code_response.text)
# return {
# chatbot: chatbot_instance,
# msg: msg_instance,
# }
result = code_response.json()
def is_base64(string):
try:
# Try to decode the string as base64
base64.b64decode(string, validate=True)
return True
except:
return False
# handle base64 results - ie images
print("Result from tool:", result)
if len(result) > 1024:
result = "The result is too long to display."
return result
tool = StructuredTool.from_function(
func=code_interpreter_lite, name="CIL", description="useful for running python code. The input should be a string of python code.")
tools = [tool]
prefix = """<|im_start|>system
You are an assistant to a user who is trying to solve a question. You can write and execute Python code to find the solution.
You should only use the name of the tool to call it.
If you need to output any kind of graph to the user, you should save it in a file and return the file location.
You have access to the following tools:"""
suffix = """Begin! Remember to use the tools with the correct format which is:
Action: CIL
Action Input: ```python
your code
```<|im_end|>
<|im_start|>user
Question: {input}<|im_end|>
<|im_start|>assistant
Thought: {agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools=[tool], prefix=prefix, suffix=suffix, input_variables=[
"input", "agent_scratchpad"]
)
model_url = MODEL_URL
llm = CustomTextGen(model_url=model_url, temperature=0.1, max_new_tokens=1024, streaming=True, callbacks=[
StreamingStdOutCallbackHandler()], stopping_strings=["<|im_end|>", "<|im_sep|>", "Observation:"])
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
agent_executor_instance = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, return_intermediate_steps=True, max_iterations=2
)
return agent_executor_instance
agent_executor = setup_assistant()
# chatbot style
ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"
def insert_newline_before_triple_backtick(text):
modified_text = text.replace(" ```", "\n```")
return modified_text
def insert_summary_block(text):
pattern = r"(Action: CIL)(.*?)(Observation:|$)"
replacement = r"<details><summary>CIL Code</summary>\n\1\2\n</details>\n\3"
return re.sub(pattern, replacement, text, flags=re.DOTALL)
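# Example for insert_summary_block: a bot message of the form
# "Thought ...\nAction: CIL\nAction Input: ...\nObservation: ..." has its "Action: CIL ..." span
# wrapped in a collapsible <details><summary>CIL Code</summary> block, so the raw tool call is
# hidden behind a summary in the chat UI.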
def postprocess(
self, history: List[Tuple[str | None, str | None]]
) -> List[Tuple[str | None, str | None]]:
markdown_converter = Markdown(
extensions=["nl2br", "fenced_code"])
if history is None or history == []:
return []
print(history)
formatted_history = []
for conversation in history:
user, bot = conversation
if user == None or user.endswith(ALREADY_CONVERTED_MARK):
formatted_user = user
else:
formatted_user = markdown_converter.convert(
user) + ALREADY_CONVERTED_MARK
if bot == None or bot.endswith(ALREADY_CONVERTED_MARK):
formatted_bot = bot
else:
preformatted_bot = insert_newline_before_triple_backtick(bot)
summary_bot = insert_summary_block(preformatted_bot)
print(summary_bot)
formatted_bot = markdown_converter.convert(
summary_bot) + ALREADY_CONVERTED_MARK
formatted_history.append((formatted_user, formatted_bot))
return formatted_history
gr.Chatbot.postprocess = postprocess
with gr.Blocks() as demo:
with gr.Column() as chatbot_column:
chatbot = gr.Chatbot()
with gr.Row() as chatbot_input:
with gr.Column():
msg = gr.Textbox(placeholder="Type your message here")
with gr.Column():
send = gr.Button(value="Send", variant="primary")
regenerate = gr.Button(
value="Regenerate", variant="secondary", interactive=False)
def message_handle(chatbot_instance, msg_instance):
return {
chatbot: chatbot_instance + [[msg_instance, None]],
msg: "",
regenerate: gr.update(interactive=True),
}
def regenerate_message_handle(chatbot_instance):
previous_message = chatbot_instance[-1][0]
chatbot_instance[-1] = [previous_message, None]
return {
chatbot: chatbot_instance,
}
def chatbot_handle(chatbot_instance):
# class ChatbotHandler(BaseCallbackHandler):
# def __init__(self):
# self.chatbot_response = ""
# super().__init__()
# def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
# self.chatbot_response += outputs.get("output", "") + '\n'
# def on_tool_end(self, output: str, **kwargs: Any) -> Any:
# self.chatbot_response += f'```\n{output}\n```\n'
# def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
# chatbot_thought = action.log.split("\n")[0]
# chatbot_thought = chatbot_thought.replace("Thought: ", "")
# if isinstance(action.tool_input, str):
# chatbot_tool_input_code_string = action.tool_input
# else:
# chatbot_tool_input_code_string = action.tool_input.get(
# "code")
# self.chatbot_response += f"{chatbot_thought}\n"
# self.chatbot_response += f'```\n{chatbot_tool_input_code_string}\n```\n'
# def get_chatbot_response(self):
# return self.chatbot_response
class QueueCallback(BaseCallbackHandler):
"""Callback handler for streaming LLM responses to a queue."""
def __init__(self, queue):
self.queue = queue
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
self.queue.put(token)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
self.queue.put(f'Observation: \n```\n{output}\n```\n')
def on_llm_end(self, *args, **kwargs: Any) -> None:
return self.queue.empty()
streaming_queue = Queue()
job_done = object()
user_message = chatbot_instance[-1][0]
def task():
try:
agent_executor(
user_message, callbacks=[QueueCallback(streaming_queue)])
streaming_queue.put(job_done)
except OutputParserException as error:
streaming_queue.put(job_done)
raise gr.Error(
"Assistant could not handle the request. Error: " + str(error))
streaming_thread = Thread(target=task)
streaming_thread.start()
chatbot_instance[-1][1] = ""
while True:
try:
next_token = streaming_queue.get(True, timeout=1)
if next_token is job_done:
break
chatbot_instance[-1][1] += next_token
yield chatbot_instance
except Empty:
continue
send.click(message_handle, [chatbot, msg], [
chatbot, msg, regenerate]).then(
chatbot_handle, [chatbot], [chatbot]
)
regenerate.click(regenerate_message_handle, [chatbot], [
chatbot]).then(
chatbot_handle, [chatbot], [chatbot]
)
demo.queue()
demo.launch(server_port=7861)
| [
"input",
"agent_scratchpad"
] |
2024-01-10 | uetuluk/code-interpreter-lite | assistant_trials~assistant_koboldcpp.py | from langchain.agents import XMLAgent, tool, AgentExecutor
# from langchain.chat_models import ChatAnthropic
from langchain.chains import LLMChain
from langchain.llms import KoboldApiLLM
model = KoboldApiLLM(endpoint="http://localhost:5001")
# XML
# @tool
# def search(query: str) -> str:
# """Search things about current events."""
# return "32 degrees"
# tool_list = [search]
# chain = LLMChain(
# llm=model,
# prompt=XMLAgent.get_default_prompt(),
# output_parser=XMLAgent.get_default_output_parser()
# )
# agent = XMLAgent(tools=tool_list, llm_chain=chain)
# agent_executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True)
# agent_executor.run("whats the weather in New york?")
| [] |
2024-01-10 | chrisreeves-/scripts | python~openai~ask-question.py | import os
import openai
import sys
# Set the OpenAI API key
print("You can create an API key from here: https://beta.openai.com/account/api-keys")
key = input("Please enter the OpenAI API key: ")
if key == "":
os.getenv("OPENAI_API_KEY")
else:
openai.api_key = key
# Define the model and prompt to use
model = input("""
text = ['text-davinci-002'], ['text-currie-001'], ['text-babbage-001'], ['text-ada-001']
What OpenAI model do you want to use?: """)
if model != "text-davinci-002" or "text-currie-001" or "text-babbage-001" or "text-ada-001":
model_engine = str.lower(model)
else:
print("Model input error")
sys.exit()
# Ask a question
message = input("What do you want to ask?: ")
prompt = message
# Generate a response
response = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
# Print the response
message = response.choices[0].text
print(message)
| [] |
2024-01-10 | developerisnow/codeinterpreter-api | codeinterpreterapi~chains~rm_dl_link.py | from langchain.base_language import BaseLanguageModel
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import AIMessage, OutputParserException
from codeinterpreterapi.prompts import remove_dl_link_prompt
def remove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = llm.predict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
async def aremove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = await llm.apredict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
def test():
llm = ChatOpenAI(model="gpt-3.5-turbo-0613") # type: ignore
example = (
"I have created the plot to your dataset.\n\n"
"Link to the file [here](sandbox:/plot.png)."
)
print(remove_download_link(example, llm))
if __name__ == "__main__":
from dotenv import load_dotenv
load_dotenv()
test()
| [] |
2024-01-10 | developerisnow/codeinterpreter-api | codeinterpreterapi~schema~response.py | from langchain.schema import AIMessage, HumanMessage # type: ignore
from .file import File
class UserRequest(HumanMessage):
files: list[File] = []
def __str__(self):
return self.content
def __repr__(self):
return f"UserRequest(content={self.content}, files={self.files})"
class CodeInterpreterResponse(AIMessage):
"""
Response from the code interpreter agent.
files: list of files to be sent to the user (File )
code_log: list[tuple[str, str]] = []
"""
files: list[File] = []
code_log: list[tuple[str, str]] = []
def show(self):
print("AI: ", self.content)
for file in self.files:
file.show_image()
def __str__(self):
return self.content
def __repr__(self):
return f"CodeInterpreterResponse(content={self.content}, files={self.files})"
| [] |
2024-01-10 | developerisnow/codeinterpreter-api | codeinterpreterapi~session.py | import base64
import re
import traceback
from io import BytesIO
from os import getenv
from typing import Optional
from uuid import UUID, uuid4
from codeboxapi import CodeBox # type: ignore
from codeboxapi.schema import CodeBoxOutput # type: ignore
from langchain.agents import (
AgentExecutor,
BaseSingleActionAgent,
ConversationalAgent,
ConversationalChatAgent,
)
from langchain.base_language import BaseLanguageModel
from langchain.chat_models import AzureChatOpenAI, ChatAnthropic, ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import (
ChatMessageHistory,
PostgresChatMessageHistory,
RedisChatMessageHistory,
)
from langchain.prompts.chat import MessagesPlaceholder
from langchain.schema import BaseChatMessageHistory, SystemMessage
from langchain.tools import BaseTool, StructuredTool
from codeinterpreterapi.agents import OpenAIFunctionsAgent
from codeinterpreterapi.chains import (
aget_file_modifications,
aremove_download_link,
get_file_modifications,
remove_download_link,
)
from codeinterpreterapi.chat_history import CodeBoxChatMessageHistory
from codeinterpreterapi.config import settings
from codeinterpreterapi.parser import CodeAgentOutputParser, CodeChatAgentOutputParser
from codeinterpreterapi.prompts import code_interpreter_system_message
from codeinterpreterapi.schema import (
CodeInput,
CodeInterpreterResponse,
File,
SessionStatus,
UserRequest,
)
class CodeInterpreterSession:
def __init__(
self,
llm: Optional[BaseLanguageModel] = None,
system_message: SystemMessage = code_interpreter_system_message,
max_iterations: int = 9,
additional_tools: list[BaseTool] = [],
**kwargs,
) -> None:
self.codebox = CodeBox()
self.verbose = kwargs.get("verbose", settings.VERBOSE)
self.tools: list[BaseTool] = self._tools(additional_tools)
self.llm: BaseLanguageModel = llm or self._choose_llm(**kwargs)
self.max_iterations = max_iterations
self.system_message = system_message
self.agent_executor: Optional[AgentExecutor] = None
self.input_files: list[File] = []
self.output_files: list[File] = []
self.code_log: list[tuple[str, str]] = []
@classmethod
def from_id(cls, session_id: UUID, **kwargs) -> "CodeInterpreterSession":
session = cls(**kwargs)
session.codebox = CodeBox.from_id(session_id)
session.agent_executor = session._agent_executor()
return session
@property
def session_id(self) -> Optional[UUID]:
return self.codebox.session_id
def start(self) -> SessionStatus:
status = SessionStatus.from_codebox_status(self.codebox.start())
self.agent_executor = self._agent_executor()
return status
async def astart(self) -> SessionStatus:
status = SessionStatus.from_codebox_status(await self.codebox.astart())
self.agent_executor = self._agent_executor()
return status
def _tools(self, additional_tools: list[BaseTool]) -> list[BaseTool]:
return additional_tools + [
StructuredTool(
name="python",
description="Input a string of code to a ipython interpreter. "
"Write the entire code in a single string. This string can "
"be really long, so you can use the `;` character to split lines. "
"Variables are preserved between runs. ",
func=self._run_handler,
coroutine=self._arun_handler,
args_schema=CodeInput, # type: ignore
),
]
def _choose_llm(
self, model: str = "gpt-4", openai_api_key: Optional[str] = None, **kwargs
) -> BaseChatModel:
if "gpt" in model:
openai_api_key = (
openai_api_key
or settings.OPENAI_API_KEY
or getenv("OPENAI_API_KEY", None)
)
if openai_api_key is None:
raise ValueError(
"OpenAI API key missing. Set OPENAI_API_KEY env variable "
"or pass `openai_api_key` to session."
)
openai_api_version = getenv("OPENAI_API_VERSION")
openai_api_base = getenv("OPENAI_API_BASE")
deployment_name = getenv("DEPLOYMENT_NAME")
openapi_type = getenv("OPENAI_API_TYPE")
if (
openapi_type == "azure"
and openai_api_version
and openai_api_base
and deployment_name
):
return AzureChatOpenAI(
temperature=0.03,
openai_api_base=openai_api_base,
openai_api_version=openai_api_version,
deployment_name=deployment_name,
openai_api_key=openai_api_key,
max_retries=3,
request_timeout=60 * 3,
) # type: ignore
else:
return ChatOpenAI(
temperature=0.03,
model=model,
openai_api_key=openai_api_key,
max_retries=3,
request_timeout=60 * 3,
) # type: ignore
elif "claude" in model:
return ChatAnthropic(model=model)
else:
raise ValueError(f"Unknown model: {model} (expected gpt or claude model)")
def _choose_agent(self) -> BaseSingleActionAgent:
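        # Selection falls back in order: the OpenAI function-calling agent for ChatOpenAI models,
        # a conversational chat agent for other chat models (e.g. Anthropic), and a plain
        # conversational agent for non-chat LLMs.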
return (
OpenAIFunctionsAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
system_message=self.system_message,
extra_prompt_messages=[
MessagesPlaceholder(variable_name="chat_history")
],
)
if isinstance(self.llm, ChatOpenAI)
else ConversationalChatAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
system_message=code_interpreter_system_message.content,
output_parser=CodeChatAgentOutputParser(self.llm),
)
if isinstance(self.llm, BaseChatModel)
else ConversationalAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
prefix=code_interpreter_system_message.content,
output_parser=CodeAgentOutputParser(),
)
)
def _history_backend(self) -> BaseChatMessageHistory:
return (
CodeBoxChatMessageHistory(codebox=self.codebox)
if settings.HISTORY_BACKEND == "codebox"
else RedisChatMessageHistory(
session_id=str(self.session_id),
url=settings.REDIS_URL,
)
if settings.HISTORY_BACKEND == "redis"
else PostgresChatMessageHistory(
session_id=str(self.session_id),
connection_string=settings.POSTGRES_URL,
)
if settings.HISTORY_BACKEND == "postgres"
else ChatMessageHistory()
)
def _agent_executor(self) -> AgentExecutor:
return AgentExecutor.from_agent_and_tools(
agent=self._choose_agent(),
max_iterations=self.max_iterations,
tools=self.tools,
verbose=self.verbose,
memory=ConversationBufferMemory(
memory_key="chat_history",
return_messages=True,
chat_memory=self._history_backend(),
),
)
def show_code(self, code: str) -> None:
if self.verbose:
print(code)
async def ashow_code(self, code: str) -> None:
"""Callback function to show code to the user."""
if self.verbose:
print(code)
def _run_handler(self, code: str):
"""Run code in container and send the output to the user"""
self.show_code(code)
output: CodeBoxOutput = self.codebox.run(code)
self.code_log.append((code, output.content))
if not isinstance(output.content, str):
raise TypeError("Expected output.content to be a string.")
if output.type == "image/png":
filename = f"image-{uuid4()}.png"
file_buffer = BytesIO(base64.b64decode(output.content))
file_buffer.name = filename
self.output_files.append(File(name=filename, content=file_buffer.read()))
return f"Image {filename} got send to the user."
elif output.type == "error":
if "ModuleNotFoundError" in output.content:
if package := re.search(
r"ModuleNotFoundError: No module named '(.*)'", output.content
):
self.codebox.install(package.group(1))
return (
f"{package.group(1)} was missing but "
"got installed now. Please try again."
)
else:
# TODO: pre-analyze error to optimize next code generation
pass
if self.verbose:
print("Error:", output.content)
elif modifications := get_file_modifications(code, self.llm):
for filename in modifications:
if filename in [file.name for file in self.input_files]:
continue
fileb = self.codebox.download(filename)
if not fileb.content:
continue
file_buffer = BytesIO(fileb.content)
file_buffer.name = filename
self.output_files.append(
File(name=filename, content=file_buffer.read())
)
return output.content
async def _arun_handler(self, code: str):
"""Run code in container and send the output to the user"""
await self.ashow_code(code)
output: CodeBoxOutput = await self.codebox.arun(code)
self.code_log.append((code, output.content))
if not isinstance(output.content, str):
raise TypeError("Expected output.content to be a string.")
if output.type == "image/png":
filename = f"image-{uuid4()}.png"
file_buffer = BytesIO(base64.b64decode(output.content))
file_buffer.name = filename
self.output_files.append(File(name=filename, content=file_buffer.read()))
return f"Image {filename} got send to the user."
elif output.type == "error":
if "ModuleNotFoundError" in output.content:
if package := re.search(
r"ModuleNotFoundError: No module named '(.*)'", output.content
):
await self.codebox.ainstall(package.group(1))
return (
f"{package.group(1)} was missing but "
"got installed now. Please try again."
)
else:
# TODO: pre-analyze error to optimize next code generation
pass
if self.verbose:
print("Error:", output.content)
elif modifications := await aget_file_modifications(code, self.llm):
for filename in modifications:
if filename in [file.name for file in self.input_files]:
continue
fileb = await self.codebox.adownload(filename)
if not fileb.content:
continue
file_buffer = BytesIO(fileb.content)
file_buffer.name = filename
self.output_files.append(
File(name=filename, content=file_buffer.read())
)
return output.content
def _input_handler(self, request: UserRequest) -> None:
"""Callback function to handle user input."""
if not request.files:
return
if not request.content:
request.content = (
"I uploaded, just text me back and confirm that you got the file(s)."
)
request.content += "\n**The user uploaded the following files: **\n"
for file in request.files:
self.input_files.append(file)
request.content += f"[Attachment: {file.name}]\n"
self.codebox.upload(file.name, file.content)
request.content += "**File(s) are now available in the cwd. **\n"
async def _ainput_handler(self, request: UserRequest):
# TODO: variables as context to the agent
# TODO: current files as context to the agent
if not request.files:
return
if not request.content:
request.content = (
"I uploaded, just text me back and confirm that you got the file(s)."
)
request.content += "\n**The user uploaded the following files: **\n"
for file in request.files:
self.input_files.append(file)
request.content += f"[Attachment: {file.name}]\n"
await self.codebox.aupload(file.name, file.content)
request.content += "**File(s) are now available in the cwd. **\n"
def _output_handler(self, final_response: str) -> CodeInterpreterResponse:
"""Embed images in the response"""
for file in self.output_files:
if str(file.name) in final_response:
# rm  from the response
final_response = re.sub(r"\n\n!\[.*\]\(.*\)", "", final_response)
if self.output_files and re.search(r"\n\[.*\]\(.*\)", final_response):
try:
final_response = remove_download_link(final_response, self.llm)
except Exception as e:
if self.verbose:
print("Error while removing download links:", e)
output_files = self.output_files
code_log = self.code_log
self.output_files = []
self.code_log = []
return CodeInterpreterResponse(
content=final_response, files=output_files, code_log=code_log
)
async def _aoutput_handler(self, final_response: str) -> CodeInterpreterResponse:
"""Embed images in the response"""
for file in self.output_files:
if str(file.name) in final_response:
# rm  from the response
final_response = re.sub(r"\n\n!\[.*\]\(.*\)", "", final_response)
if self.output_files and re.search(r"\n\[.*\]\(.*\)", final_response):
try:
final_response = await aremove_download_link(final_response, self.llm)
except Exception as e:
if self.verbose:
print("Error while removing download links:", e)
output_files = self.output_files
code_log = self.code_log
self.output_files = []
self.code_log = []
return CodeInterpreterResponse(
content=final_response, files=output_files, code_log=code_log
)
def generate_response_sync(
self,
user_msg: str,
files: list[File] = [],
detailed_error: bool = False,
) -> CodeInterpreterResponse:
"""Generate a Code Interpreter response based on the user's input."""
user_request = UserRequest(content=user_msg, files=files)
try:
self._input_handler(user_request)
assert self.agent_executor, "Session not initialized."
response = self.agent_executor.run(input=user_request.content)
return self._output_handler(response)
except Exception as e:
if self.verbose:
traceback.print_exc()
if detailed_error:
return CodeInterpreterResponse(
content="Error in CodeInterpreterSession: "
f"{e.__class__.__name__} - {e}"
)
else:
return CodeInterpreterResponse(
content="Sorry, something went while generating your response."
"Please try again or restart the session."
)
async def generate_response(
self,
user_msg: str,
files: list[File] = [],
detailed_error: bool = False,
) -> CodeInterpreterResponse:
print(
"DEPRECATION WARNING: Use agenerate_response for async generation.\n"
"This function will be converted to sync in the future.\n"
"You can use generate_response_sync for now.",
)
return await self.agenerate_response(
user_msg=user_msg,
files=files,
detailed_error=detailed_error,
)
async def agenerate_response(
self,
user_msg: str,
files: list[File] = [],
detailed_error: bool = False,
) -> CodeInterpreterResponse:
"""Generate a Code Interpreter response based on the user's input."""
user_request = UserRequest(content=user_msg, files=files)
try:
await self._ainput_handler(user_request)
assert self.agent_executor, "Session not initialized."
response = await self.agent_executor.arun(input=user_request.content)
return await self._aoutput_handler(response)
except Exception as e:
if self.verbose:
traceback.print_exc()
if detailed_error:
return CodeInterpreterResponse(
content="Error in CodeInterpreterSession: "
f"{e.__class__.__name__} - {e}"
)
else:
return CodeInterpreterResponse(
content="Sorry, something went while generating your response."
"Please try again or restart the session."
)
def is_running(self) -> bool:
return self.codebox.status() == "running"
async def ais_running(self) -> bool:
return await self.codebox.astatus() == "running"
def stop(self) -> SessionStatus:
return SessionStatus.from_codebox_status(self.codebox.stop())
async def astop(self) -> SessionStatus:
return SessionStatus.from_codebox_status(await self.codebox.astop())
def __enter__(self) -> "CodeInterpreterSession":
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.stop()
async def __aenter__(self) -> "CodeInterpreterSession":
await self.astart()
return self
async def __aexit__(self, exc_type, exc_value, traceback) -> None:
await self.astop()
| [] |
2024-01-10 | developerisnow/codeinterpreter-api | codeinterpreterapi~agents~functions_agent.py | """Module implements an agent that uses OpenAI's APIs function enabled API."""
import json
from dataclasses import dataclass
from json import JSONDecodeError
from typing import Any, List, Optional, Sequence, Tuple, Union
from langchain.agents import BaseSingleActionAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks # type: ignore
from langchain.chat_models.openai import ChatOpenAI
from langchain.prompts.chat import (
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain.pydantic_v1 import root_validator
from langchain.schema import (
AgentAction,
AgentFinish,
BasePromptTemplate,
OutputParserException,
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import (
AIMessage,
BaseMessage,
FunctionMessage,
SystemMessage,
)
from langchain.tools import BaseTool
from langchain.tools.convert_to_openai import format_tool_to_openai_function
@dataclass
class _FunctionsAgentAction(AgentAction):
message_log: List[BaseMessage]
def _convert_agent_action_to_messages(
agent_action: AgentAction, observation: str
) -> List[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
Returns:
AIMessage that corresponds to the original tool invocation.
"""
if isinstance(agent_action, _FunctionsAgentAction):
return agent_action.message_log + [
_create_function_message(agent_action, observation)
]
else:
return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
def _format_intermediate_steps(
intermediate_steps: List[Tuple[AgentAction, str]],
) -> List[BaseMessage]:
"""Format intermediate steps.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
"""
messages = []
for intermediate_step in intermediate_steps:
agent_action, observation = intermediate_step
messages.extend(_convert_agent_action_to_messages(agent_action, observation))
return messages
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_name = function_call["name"]
try:
_tool_input = json.loads(function_call["arguments"])
except JSONDecodeError:
if function_name == "python":
code = function_call["arguments"]
_tool_input = {
"code": code,
}
else:
raise OutputParserException(
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = "responded: {content}\n" if message.content else "\n"
return _FunctionsAgentAction(
tool=function_name,
tool_input=tool_input,
log=f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n",
message_log=[message],
)
return AgentFinish(return_values={"output": message.content}, log=message.content)
class OpenAIFunctionsAgent(BaseSingleActionAgent):
"""An Agent driven by OpenAIs function powered API.
Args:
llm: This should be an instance of ChatOpenAI, specifically a model
that supports using `functions`.
tools: The tools this agent has access to.
prompt: The prompt for this agent, should support agent_scratchpad as one
of the variables. For an easy way to construct this prompt, use
`OpenAIFunctionsAgent.create_prompt(...)`
"""
llm: BaseLanguageModel
tools: Sequence[BaseTool]
prompt: BasePromptTemplate
def get_allowed_tools(self) -> List[str]:
"""Get allowed tools."""
        return [t.name for t in self.tools]
@root_validator
def validate_llm(cls, values: dict) -> dict:
if not isinstance(values["llm"], ChatOpenAI):
raise ValueError("Only supported with ChatOpenAI models.")
return values
@root_validator
def validate_prompt(cls, values: dict) -> dict:
prompt: BasePromptTemplate = values["prompt"]
if "agent_scratchpad" not in prompt.input_variables:
raise ValueError(
"`agent_scratchpad` should be one of the variables in the prompt, "
f"got {prompt.input_variables}"
)
return values
@property
def input_keys(self) -> List[str]:
"""Get input keys. Input refers to user input here."""
return ["input"]
@property
def functions(self) -> List[dict]:
return [dict(format_tool_to_openai_function(t)) for t in self.tools]
def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
with_functions: bool = True,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = _format_intermediate_steps(intermediate_steps)
selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
if with_functions:
predicted_message = self.llm.predict_messages(
messages,
functions=self.functions,
callbacks=callbacks,
)
else:
predicted_message = self.llm.predict_messages(
messages,
callbacks=callbacks,
)
agent_decision = _parse_ai_message(predicted_message)
return agent_decision
async def aplan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = _format_intermediate_steps(intermediate_steps)
selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_message = await self.llm.apredict_messages(
messages, functions=self.functions, callbacks=callbacks
)
agent_decision = _parse_ai_message(predicted_message)
return agent_decision
def return_stopped_response(
self,
early_stopping_method: str,
intermediate_steps: List[Tuple[AgentAction, str]],
**kwargs: Any,
) -> AgentFinish:
"""Return response when agent has been stopped due to max iterations."""
if early_stopping_method == "force":
# `force` just returns a constant string
return AgentFinish(
{"output": "Agent stopped due to iteration limit or time limit."}, ""
)
elif early_stopping_method == "generate":
# Generate does one final forward pass
agent_decision = self.plan(
intermediate_steps, with_functions=False, **kwargs
)
if type(agent_decision) == AgentFinish: # noqa: E721
return agent_decision
else:
raise ValueError(
f"got AgentAction with no functions provided: {agent_decision}"
)
else:
raise ValueError(
"early_stopping_method should be one of `force` or `generate`, "
f"got {early_stopping_method}"
)
@classmethod
def create_prompt(
cls,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
) -> BasePromptTemplate:
"""Create prompt for this agent.
Args:
system_message: Message to use as the system message that will be the
first in the prompt.
extra_prompt_messages: Prompt messages that will be placed between the
system message and the new human input.
Returns:
A prompt template to pass into this agent.
"""
_prompts = extra_prompt_messages or []
messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]
if system_message:
messages = [system_message]
else:
messages = []
messages.extend(
[
*_prompts,
HumanMessagePromptTemplate.from_template("{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
return ChatPromptTemplate(messages=messages) # type: ignore
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
**kwargs: Any,
) -> BaseSingleActionAgent:
"""Construct an agent from an LLM and tools."""
if not isinstance(llm, ChatOpenAI):
raise ValueError("Only supported with ChatOpenAI models.")
prompt = cls.create_prompt(
extra_prompt_messages=extra_prompt_messages,
system_message=system_message,
)
return cls( # type: ignore
llm=llm,
prompt=prompt,
tools=tools,
callback_manager=callback_manager, # type: ignore
**kwargs,
)
| [
"You are a helpful AI assistant.",
"{input}"
] |
2024-01-10 | fredrikskatland/jorgen-pdh-thesis | testing_callbacks.py | import streamlit as st
from langchain.document_loaders import RecursiveUrlLoader
from langchain.document_transformers import Html2TextTransformer
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain.callbacks import StreamlitCallbackHandler
from langchain.agents import OpenAIFunctionsAgent, AgentExecutor
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage, AIMessage, HumanMessage
from langchain.prompts import MessagesPlaceholder
from langsmith import Client
import pinecone
from langchain.vectorstores import Pinecone
import os
from langchain.vectorstores import Vectara
client = Client()
st.set_page_config(
page_title="ChatLangChain",
page_icon="🦜",
layout="wide",
initial_sidebar_state="collapsed",
)
"# Chat🦜🔗"
local = False
vectara_customer_id=st.secrets["vectara_customer_id"]
vectara_corpus_id=st.secrets["vectara_corpus_id"]
vectara_api_key=st.secrets["vectara_api_key"]
@st.cache_resource(ttl="1h")
def configure_retriever():
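    """Build a retriever over the Vectara corpus configured in Streamlit secrets.

    Cached as a resource for one hour so the vector store is not rebuilt on every rerun.
    """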
vectorstore = Vectara(
vectara_customer_id=vectara_customer_id,
vectara_corpus_id=vectara_corpus_id,
vectara_api_key=vectara_api_key
)
retriever = vectorstore.as_retriever()
return retriever
tool = create_retriever_tool(
configure_retriever(),
"search_pdh_thesis",
"Searches and returns text from PhD thesis. This tool should be used to answer questions about the PhD thesis.",
)
tools = [tool]
if local:
llm = ChatOpenAI(temperature=0, streaming=True, model="gpt-4", )
else:
llm = ChatOpenAI(temperature=0, streaming=True, model="gpt-4", openai_api_key=st.secrets["openai_api_key"])
message = SystemMessage(
content=(
"You are a helpful chatbot who is tasked with answering questions about the contents of the PhD thesis. "
"Unless otherwise explicitly stated, it is probably fair to assume that questions are about the PhD thesis. "
"If there is any ambiguity, you probably assume they are about that."
)
)
prompt = OpenAIFunctionsAgent.create_prompt(
system_message=message,
extra_prompt_messages=[MessagesPlaceholder(variable_name="history")],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
agent_executor = AgentExecutor(
agent=agent,
tools=tools,
verbose=True,
return_intermediate_steps=True,
)
memory = AgentTokenBufferMemory(llm=llm)
starter_message = "Ask me the PhD thesis!"
if "messages" not in st.session_state or st.sidebar.button("Clear message history"):
st.session_state["messages"] = [AIMessage(content=starter_message)]
def send_feedback(run_id, score):
client.create_feedback(run_id, "user_score", score=score)
for msg in st.session_state.messages:
if isinstance(msg, AIMessage):
st.chat_message("assistant").write(msg.content)
elif isinstance(msg, HumanMessage):
st.chat_message("user").write(msg.content)
memory.chat_memory.add_message(msg)
if prompt := st.chat_input(placeholder=starter_message):
st.chat_message("user").write(prompt)
with st.chat_message("assistant"):
st_callback = StreamlitCallbackHandler(st.container())
response = agent_executor(
{"input": prompt, "history": st.session_state.messages},
callbacks=[st_callback],
include_run_info=True,
)
st.session_state.messages.append(AIMessage(content=response["output"]))
st.write(response["output"])
memory.save_context({"input": prompt}, response)
st.session_state["messages"] = memory.buffer
run_id = response["__run"].run_id
| [
"You are a helpful chatbot who is tasked with answering questions about the contents of the PhD thesis. Unless otherwise explicitly stated, it is probably fair to assume that questions are about the PhD thesis. If there is any ambiguity, you probably assume they are about that.",
"Ask me the PhD thesis!"
] |
2024-01-10 | shroominic/fastui-chat | examples~simple_chatbot.py | from fastui_chat import ChatUI, basic_chat_handler
from langchain.chat_models import ChatOpenAI
from langchain.memory import ChatMessageHistory
history = ChatMessageHistory()
handler = basic_chat_handler(
llm=ChatOpenAI(),
chat_history=history,
)
history.add_ai_message("How can I help you today?")
app = ChatUI(
chat_history=history,
chat_handler=handler,
)
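# Assumes an OpenAI API key is available to ChatOpenAI (e.g. via the
# OPENAI_API_KEY environment variable); start_with_uvicorn() then serves the
# chat UI on a local uvicorn server.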
app.start_with_uvicorn()
| [] |
2024-01-10 | shroominic/fastui-chat | src~fastui_chat~session.py | from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable
from langchain_core.runnables.history import RunnableWithMessageHistory
class ChatSession:
def __init__(
self,
*,
chat_handler: Runnable[HumanMessage, AIMessage],
history: BaseChatMessageHistory,
) -> None:
self.history = history
self.chat_handler = chat_handler
async def astream(self, user_msg: str):
async for message in self.chat_handler.astream(
HumanMessage(content=user_msg),
config={
"run_name": "ChatMessage",
"configurable": {"session_id": ""},
},
):
yield message
def basic_chat_handler(
llm: BaseChatModel,
chat_history: BaseChatMessageHistory,
system_message: str = "",
) -> Runnable[HumanMessage, AIMessage]:
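    """Build a minimal prompt -> LLM chain wrapped with message history.

    The returned runnable takes a HumanMessage, formats it together with the
    stored history (and optional system message), and yields the model's
    AIMessage output while persisting both sides of the exchange.
    """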
handler = (
ChatPromptTemplate.from_messages(
[
*(("system", system_message) if system_message else []),
MessagesPlaceholder(variable_name="history"),
("human", "{user_msg}"),
]
)
| llm
)
return {
"user_msg": lambda x: x.content,
} | RunnableWithMessageHistory(
handler,
lambda _: chat_history,
input_messages_key="user_msg",
history_messages_key="history",
)
| [
"human",
"{user_msg}"
] |
2024-01-10 | VoltaicGRiD/Recipe_Manager | RecipeManager.py | import re
import json
import os
import openai
import webbrowser
def get_minutes(mseconds):
return mseconds / 60000000.0
def read_recipe(file):
y = json.loads(file.read())
for x in range(4):
print('')
print(y['name'])
print(f'Serves: {y["servings"]}')
print('------')
if 'cookTime' in y:
print(f'Cooking time: {get_minutes(y["cookTime"])}')
if 'prepTime' in y:
print(f'Prep time: {get_minutes(y["prepTime"])}')
print('------')
print('Ingredients')
for ingredient in y['ingredients']:
print(f'-- {ingredient["name"]}')
print('------')
print('Steps')
index = 0
for step in y['instructions']:
index += 1
print(f'{index}) {step["text"]}')
for x in range(4):
print('')
def parse_ingredient(ingredient):
match = re.search(
r'\b(?:\d+\.?\d*|\d*\.?\d+)\s*[a-zA-Z]*\s*([a-zA-Z\- ]+)',
ingredient)
if match:
return match.group(1).strip()
return None
def get_config(key):
with open('config', 'r') as config_file:
config = config_file.read()
for line in config.split('\n'):
if line.startswith(key):
return line.split(' : ')[1]
def gather_ingredients(files):
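    """Collect the ingredients of the selected recipe files and ask GPT-4 to
    merge them into a categorized shopping list.

    Returns the raw ingredient names if no OpenAI API key is configured,
    otherwise the model's formatted list split into lines.
    """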
ingredients = []
for recipe in files:
with open(recipe, 'r') as file:
y = json.loads(file.read())
for ingredient in y['ingredients']:
ingredients.append(ingredient['name'])
key = get_config('openai_api')
if len(key) > 0:
openai.api_key = get_config('openai_api')
else:
print('API key not found')
return ingredients
outingredients = ''
for ingredient in ingredients:
outingredients = outingredients + ingredient + '\n'
city = ''
city = get_config('city')
prompt = ''
if len(city) == 0:
prompt = f'''
Ingredients:
{outingredients}
Tasks:
1. Merge like-items and convert measurements.
2. Format as:
**CATEGORY**
[INGREDIENT]: [QUANTITY]
'''
else:
prompt = f'''
Ingredients:
{outingredients}
Tasks:
1. Merge like-items and convert measurements.
2. Recommend substitutes for {city} availability
a. Substitutions should take up one line per substitution suggestion
3. Format as:
**CATEGORY**
[INGREDIENT]: [QUANTITY]
'''
messages = [
{"role": "user", "content": "You are a professional grocery shopper, making the most efficient, time-saving lists in the whole world. Remain brief and highly-efficient."},
{"role": "user", "content": "Only use these categories: Produce, Canned Goods, Dairy, Meat, Deli, Seafood, Condiments & Spices, Bakery, Grains"},
{"role": "user", "content": prompt}
]
max_tokens = int(get_config('tokens'))
response = openai.ChatCompletion.create(
model='gpt-4',
messages=messages,
temperature=0.5,
max_tokens=max_tokens
)
print(response.choices[0].message.content)
return response.choices[0].message.content.split('\n')
# Open the recipe card through 'JustTheRecipe.com'
def open_card(files):
# For each of the files that the user's requested, open in read-only mode, load the JSON, find the 'sourceURL' value, and open in the browser
for recipe in files:
with open(recipe, 'r') as file:
y = json.loads(file.read())
# TODO: Noticed that, rarely, the 'sourceUrl' isn't formatted, need to create a catch for this
url = y['sourceUrl']
webbrowser.open(f'https://www.justtherecipe.com/?url={url}')
# Implementation of Python code for use in a Terminal emulator
if __name__ == '__main__':
print('')
print('Select an option: ')
print('1) Select recipes for shopping')
print('2) Output recipe to console')
print('3) Open recipe card')
choice = input()
# Empty array that contains all of our recipes in the ~/Recipes folder
allrecipes = []
# Iterate and add to array
for file in os.listdir(os.path.expanduser('~/Recipes/')):
if file.endswith('.recipe'):
allrecipes.append(file)
# The user wants to go shopping!
# Gather all the recipes the user wants from a comma-separated input, and process them with 'gather_ingredients' filter
# Also outputs a Markdown file that contains a Markdown checklist with ingredients and measurements
if int(choice) == 1:
recipes = []
print('')
for file in allrecipes:
print(f'{allrecipes.index(file) + 1}) {file}')
print('')
print('Input comma-separated index of the recipes')
# Re-retrieve our input
choice = input()
# Separate input to get our comma-separated list
for val in choice.strip().split(','):
recipe = allrecipes[int(val.strip()) - 1]
recipes.append(os.path.expanduser(f'~/Recipes/{recipe}'))
# Get our consolidated ingredients list
results = gather_ingredients(recipes)
# Open our 'Shopping.md' file in write-create mode and write our output
with open(os.path.expanduser(f'~/Recipes/Shopping.md'), 'w+') as file:
for result in results:
file.write(f'- [ ] {result}\n')
elif int(choice) == 2:
print('')
for file in allrecipes:
print(f'{allrecipes.index(file) + 1}) {file}')
print('')
print('Input the file index')
index = int(input())
recipe = allrecipes[index - 1]
file = open(os.path.expanduser(f'~/Recipes/{recipe}'))
read_recipe(file)
elif int(choice) == 3:
recipes = []
print('')
for file in allrecipes:
print(f'{allrecipes.index(file) + 1}) {file}')
print('')
print('Input comma-separated index of the recipes')
choice = input()
for val in choice.split(','):
recipe = allrecipes[int(val) - 1]
recipes.append(os.path.expanduser(f'~/Recipes/{recipe}'))
open_card(recipes)
| [
"You are a professional grocery shopper, making the most efficient, time-saving lists in the whole world. Remain brief and highly-efficient.",
"\n Ingredients:\n PLACEHOLDER\n\n Tasks:\n 1. Merge like-items and convert measurements.\n 2. Format as:\n **CATEGORY**\n [INGREDIENT]: [QUANTITY]\n ",
"\n Ingredients:\n PLACEHOLDER\n\n Tasks:\n 1. Merge like-items and convert measurements.\n 2. Recommend substitutes for PLACEHOLDER availability\n a. Substitutions should take up one line per substitution suggestion\n 3. Format as:\n **CATEGORY**\n [INGREDIENT]: [QUANTITY]\n ",
"Only use these categories: Produce, Canned Goods, Dairy, Meat, Deli, Seafood, Condiments & Spices, Bakery, Grains"
] |
2024-01-10 | PAIXAI/superagent | app~lib~agents~base.py | import json
from typing import Any
from decouple import config
from langchain import HuggingFaceHub
from langchain.chat_models import AzureChatOpenAI, ChatAnthropic, ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import Cohere, OpenAI
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.prompts.prompt import PromptTemplate
from app.lib.callbacks import StreamingCallbackHandler
from app.lib.prisma import prisma
from app.lib.prompts import (
CustomPromptTemplate,
agent_template,
default_chat_prompt,
qa_prompt,
)
from app.lib.tools import get_search_tool, get_wolfram_alpha_tool
from app.lib.vectorstores.base import VectorStoreBase
class AgentBase:
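    """Assembles the LLM, tools, prompt, memory and documents for an agent
    based on its stored configuration."""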
def __init__(
self,
agent: dict,
has_streaming: bool = False,
on_llm_new_token=None,
on_llm_end=None,
on_chain_end=None,
):
self.id = agent.id
self.userId = agent.userId
self.document = agent.document
self.has_memory = agent.hasMemory
self.type = agent.type
self.llm = agent.llm
self.prompt = agent.prompt
self.tool = agent.tool
self.has_streaming = has_streaming
self.on_llm_new_token = on_llm_new_token
self.on_llm_end = on_llm_end
self.on_chain_end = on_chain_end
def _get_api_key(self) -> str:
if self.llm["provider"] == "openai-chat" or self.llm["provider"] == "openai":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("OPENAI_API_KEY")
)
if self.llm["provider"] == "anthropic":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("ANTHROPIC_API_KEY")
)
if self.llm["provider"] == "cohere":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("COHERE_API_KEY")
)
if self.llm["provider"] == "azure-openai":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("AZURE_API_KEY")
)
if self.llm["provider"] == "huggingface":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("HUGGINGFACEHUB_API_TOKEN")
)
def _get_tool(self) -> Any:
try:
if self.tool.type == "SEARCH":
tools = get_search_tool()
if self.tool.type == "WOLFRAM_ALPHA":
tools = get_wolfram_alpha_tool()
return tools
except Exception:
return None
def _get_prompt(self) -> Any:
if self.prompt:
if self.tool:
prompt = CustomPromptTemplate(
template=self.prompt.template,
tools=self._get_tool(),
input_variables=[
"human_input",
"intermediate_steps",
"chat_history",
],
)
else:
prompt = PromptTemplate(
input_variables=self.prompt.input_variables,
template=self.prompt.template,
)
return prompt
else:
if self.tool:
return CustomPromptTemplate(
template=agent_template,
tools=self._get_tool(),
input_variables=[
"human_input",
"intermediate_steps",
"chat_history",
],
)
elif self.document:
return qa_prompt
return default_chat_prompt
def _get_llm(self) -> Any:
if self.llm["provider"] == "openai-chat":
return (
ChatOpenAI(
temperature=0,
openai_api_key=self._get_api_key(),
model_name=self.llm["model"],
streaming=self.has_streaming,
callbacks=[
StreamingCallbackHandler(
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming
else ChatOpenAI(model_name=self.llm["model"])
)
if self.llm["provider"] == "openai":
return OpenAI(
model_name=self.llm["model"], openai_api_key=self._get_api_key()
)
if self.llm["provider"] == "anthropic":
return (
ChatAnthropic(
streaming=self.has_streaming,
anthropic_api_key=self._get_api_key(),
callbacks=[
StreamingCallbackHandler(
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming
else ChatAnthropic(anthropic_api_key=self._get_api_key())
)
if self.llm["provider"] == "cohere":
return (
Cohere(
cohere_api_key=self._get_api_key(),
model=self.llm["model"],
callbacks=[
StreamingCallbackHandler(
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming
else Cohere(cohere_api_key=self._get_api_key(), model=self.llm["model"])
)
if self.llm["provider"] == "azure-openai":
return (
AzureChatOpenAI(
openai_api_key=self._get_api_key(),
openai_api_base=config("AZURE_API_BASE"),
openai_api_type=config("AZURE_API_TYPE"),
openai_api_version=config("AZURE_API_VERSION"),
deployment_name=self.llm["model"],
streaming=self.has_streaming,
callbacks=[
StreamingCallbackHandler(
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming
else AzureChatOpenAI(
deployment_name=self.llm["model"],
openai_api_key=self._get_api_key(),
openai_api_base=config("AZURE_API_BASE"),
openai_api_type=config("AZURE_API_TYPE"),
openai_api_version=config("AZURE_API_VERSION"),
)
)
if self.llm["provider"] == "huggingface":
return HuggingFaceHub(
repo_id=self.llm["model"], huggingfacehub_api_token=self._get_api_key()
)
# Use ChatOpenAI as default llm in agents
return ChatOpenAI(temperature=0, openai_api_key=self._get_api_key())
def _get_memory(self) -> Any:
if self.has_memory:
memories = prisma.agentmemory.find_many(
where={"agentId": self.id},
order={"createdAt": "desc"},
take=5,
)
history = ChatMessageHistory()
[
history.add_ai_message(memory.message)
if memory.agent == "AI"
else history.add_user_message(memory.message)
for memory in memories
]
memory_key = "chat_history"
output_key = "output"
memory = (
ConversationBufferMemory(
chat_memory=history, memory_key=memory_key, output_key=output_key
)
if (self.document and self.document.type == "OPENAPI") or self.tool
else ConversationBufferMemory(
chat_memory=history, memory_key=memory_key
)
)
return memory
return None
def _get_document(self) -> Any:
if self.document.type != "OPENAPI":
embeddings = OpenAIEmbeddings()
docsearch = (
VectorStoreBase()
.get_database()
.from_existing_index(embeddings, self.document.id)
)
return docsearch
return self.document
def save_intermediate_steps(self, trace: Any) -> None:
if (self.document and self.document.type == "OPENAPI") or self.tool:
json_array = json.dumps(
{
"output": trace["output"],
"steps": [
{
"action": step[0].tool,
"input": step[0].tool_input,
"log": step[0].log,
"observation": step[1],
}
for step in trace["intermediate_steps"]
],
}
)
else:
json_array = json.dumps({"output": trace["output"], "steps": [trace]})
prisma.agenttrace.create(
{
"userId": self.userId,
"agentId": self.id,
"data": json_array,
}
)
def get_agent(self) -> Any:
pass
| [
"chat_history",
"intermediate_steps",
"human_input"
] |
2024-01-10 | PAIXAI/superagent | app~lib~splitters.py | from langchain.text_splitter import (
CharacterTextSplitter,
NLTKTextSplitter,
RecursiveCharacterTextSplitter,
SpacyTextSplitter,
TokenTextSplitter,
)
class TextSplitters:
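    """Thin wrapper that picks a LangChain text splitter from a config dict
    (type, chunk_size, chunk_overlap), defaulting to a character splitter."""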
def __init__(self, documents, text_splitter):
self.documents = documents
if text_splitter is None:
self.split_type = "character"
self.chunk_size = 256
self.chunk_overlap = 0
else:
self.split_type = text_splitter["type"]
self.chunk_size = text_splitter["chunk_size"]
self.chunk_overlap = text_splitter["chunk_overlap"]
def document_splitter(self):
if self.split_type == "character":
return self.character_splitter()
elif self.split_type == "recursive":
return self.recursive_splitter()
elif self.split_type == "token":
return self.token_splitter()
elif self.split_type == "spacy":
return self.spacy_splitter()
elif self.split_type == "nltk":
return self.nltk_splitter()
elif self.split_type == "huggingface":
return self.huggingface_splitter()
else:
return self.character_splitter()
def character_splitter(self):
"""
Splits a document into chunks of characters using the
character text splitter (default)
"""
text_splitter = CharacterTextSplitter(
chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
)
docs = text_splitter.split_documents(self.documents)
return docs
def recursive_splitter(self):
"""
Splits a document into chunks of characters
using the recursive character text splitter
"""
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
)
docs = text_splitter.split_documents(self.documents)
return docs
def token_splitter(self):
"""
Splits a document into chunks of tokens using the token text splitter
"""
text_splitter = TokenTextSplitter(
chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
)
docs = text_splitter.split_text(self.documents)
return docs
def spacy_splitter(self):
"""
Splits a document into chunks of tokens using the spacy text splitter
"""
text_splitter = SpacyTextSplitter(chunk_size=self.chunk_size)
docs = text_splitter.split_text(self.documents)
return docs
def nltk_splitter(self):
"""
Splits a document into chunks of tokens using the nltk text splitter
"""
text_splitter = NLTKTextSplitter(chunk_size=self.chunk_size)
docs = text_splitter.split_text(self.documents)
return docs
def huggingface_splitter(self):
"""
Splits a document into chunks of tokens using the huggingface text splitter
"""
try:
from transformers import GPT2TokenizerFast
except ImportError:
raise ImportError(
"transformers package not found, please install it with "
"`pip install transformers`"
)
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
text_splitter = CharacterTextSplitter.from_huggingface_tokenizer(
tokenizer, chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
)
docs = text_splitter.split_text(self.documents)
return docs
| [] |
2024-01-10 | dmnkss/gpt3-ipython-bot | ipython_commander.py | import datetime
import re
from dataclasses import dataclass
import openai as openai
import pexpect.fdpexpect
from settings import settings
from utils import splited_print
@dataclass
class Response:
final_prompt: str
answer: str
iterations: int
total_tokens: int
execution_total_cost: float
ansi_escape = re.compile(r'''
\x1B # ESC
(?: # 7-bit C1 Fe (except CSI)
[@-Z\\-_]
| # or [ for CSI, followed by a control sequence
\[
[0-?]* # Parameter bytes
[ -/]* # Intermediate bytes
[@-~] # Final byte
)
''', re.VERBOSE)
def find_max_input_number(prompt):
"""
Parse all numbers in format In [${number}] and return the max number
"""
numbers = [int(x) for x in re.findall(r"In \[(\d+)\]", prompt)]
return max(numbers) if numbers else 0
def request_prompt(prompt):
return openai.Completion.create(
model="text-davinci-002",
prompt=prompt,
temperature=0,
max_tokens=90,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["\n\n", "Out[", "Collecting", "Requirement alread"]
)
def sanitize_command(value):
value = value.replace(
'!pip install',
'%pip --quiet --disable-pip-version-check --no-python-version-warning install --root-user-action=ignore'
)
value = value.replace(
'%pip install',
'%pip --quiet --disable-pip-version-check --no-python-version-warning install --root-user-action=ignore'
)
value = value.strip()
value = value.lstrip('\r')
value = value.rstrip('\r')
value = value.lstrip('\n')
value = value.rstrip('\n')
return value
def sanitize_stdout(value):
return ansi_escape.sub('', value.decode('utf-8')).rstrip('\n')
def sanitize_gpt3_response(value):
value = value['choices'][0]['text']
value = value.lstrip('\n')
value = value.rstrip('\n')
value = value.lstrip(' ')
return value
def calc_execution_costs(total_tokens, cost_per_1000_tokens):
return cost_per_1000_tokens * (total_tokens / 1000)
def process_question(
question,
max_iterations=5,
apikey=None,
cost_per_1000_tokens=None
) -> Response:
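    """Drive a GPT-3 / IPython loop: send the prompt, execute the generated
    code in an IPython subprocess, feed the output back, and stop once the
    model emits an "Answer:" line or max_iterations is reached."""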
openai.api_key = apikey or settings.OPENAI_API_KEY
cost_per_1000_tokens = cost_per_1000_tokens or settings.MODEL_COST_DOLLARS
total_tokens = 0
now = datetime.datetime.now()
today = now.strftime('%d %B %Y %A %H:%M:%S')
prompt = settings.PROMPT.replace('{{today}}', today)
prompt = prompt.replace('{{question}}', question)
# open subprocess to run ipython
# Create ipython process
c = pexpect.spawn('ipython --colors=NoColor --nosep --simple-prompt')
execution_number = 1
c.expect_exact(f'In [{execution_number}]:')
# got_answer indicates what response contains "Answer:"
iteration = 0
while True:
if iteration > max_iterations:
prompt += f"""
```
"""
print('GPT-3 REQUEST')
response = request_prompt(prompt)
total_tokens += response['usage']['total_tokens']
response = sanitize_gpt3_response(response)
# If response contains "Answer:", then we are done
if 'Answer:' in response:
answer = response.split('Answer:')[1]
prompt += response
splited_print(prompt)
return Response(
final_prompt=prompt,
answer=answer,
iterations=iteration,
execution_total_cost=calc_execution_costs(
total_tokens,
cost_per_1000_tokens
),
total_tokens=total_tokens,
)
# Get result from response (remove all text before) and execute it in ipython
else:
result = response
# Split result into separate commands by "In [${number}]: " using regex
commands = re.split(r"In \[\d+\]: ", result)
for command in commands:
is_pip_install = "pip install" in command
command = sanitize_command(command)
print(f'IPYTHON REQUEST: {command}')
c.sendline(command)
execution_number += 1
try:
c.expect_exact(f'In [{execution_number}]:')
except pexpect.exceptions.TIMEOUT:
return Response(
final_prompt=prompt,
answer='Failed to execute command. Timeout',
iterations=iteration,
execution_total_cost=calc_execution_costs(
total_tokens,
cost_per_1000_tokens
),
total_tokens=total_tokens,
)
execute_result = sanitize_stdout(c.before)
if is_pip_install:
execute_result = execute_result.replace(
'Note: you may need to '
'restart the kernel to use '
'updated packages.\r',
''
)
prompt += execute_result
if 'Out[' not in prompt.split('\n')[-1]:
prompt += sanitize_stdout(c.after)
else:
break
# Break execution sequence result contains
# "Traceback (most recent call last)"
if 'Traceback (most recent call last)' in execute_result:
break
if iteration > max_iterations:
break
iteration += 1
return Response(
final_prompt=prompt,
answer='Can not find answer. Please, try again with another question',
iterations=iteration,
execution_total_cost=calc_execution_costs(
total_tokens,
cost_per_1000_tokens
),
total_tokens=total_tokens,
)
| [
"{{question}}",
"\n ```\n "
] |
2024-01-10 | carolius/Multi-Objective-Reinforcement-Learning-from-AI-Feedback | get_feedback_from_GPT-3.5.py | import openai
import os
import random
import json
# Read the API key from a file outside repo
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'openai_key.txt'), 'r') as key_file:
openai.api_key = key_file.read().strip()
principle_folder_path="Principles/"
def evaluate_responses(question, responseA, responseB, principle):
"""
Asks GPT-3.5 which response is better based on a given principle using logits.
Args:
- question (str): The user input which the model is responding to.
- responseA (str): The first response.
- responseB (str): The second response.
- principle (str): The principle to judge the responses.
Returns:
- logits_for_A the logits for response A
- logits_for_B the logits for response B
"""
prompt = f"You will be given a conversation between a human and an AI assistant along "\
"with a principle and two responses. Your task is to choose the response which "\
"best follows the principle. \n"\
"Conversation: {question} \n Given the principle '{principle}', "\
"which of the following responses is better?\n" \
f"A. {responseA}\n" \
f"B. {responseB}\n" \
f"Respond only with A or B.\n\n"
response = openai.Completion.create(
model="gpt-3.5-turbo-instruct",
prompt=prompt,
max_tokens=1,
logprobs=5,
n=1,
)
# Extracting the logits for the last tokens (which should correspond to "A" or "B")
choices = response.choices[0]
logprobs = choices['logprobs']['top_logprobs'][0]
print(logprobs)
logits_for_A = logprobs.get('A', None)
logits_for_B = logprobs.get('B', None)
return logits_for_A,logits_for_B
def get_principles_from_folder(principle_folder_path):
"""
Reads all the .txt files in the given folder and returns their content as principles.
Returns:
- dict: Dictionary where keys are filenames (without .txt) and values are lists containing rewordings of the principle.
"""
principles = {}
for filename in os.listdir(principle_folder_path):
if filename.endswith('.txt'):
with open(os.path.join(principle_folder_path, filename), 'r') as file:
principle_name = filename[:-4] # Removing .txt extension
# Initialize an empty list for storing the rewordings
rewordings = []
# Iterate through each line in the file, stripping it and appending to the list
for line in file:
rewordings.append(line.strip())
# Store the list of rewordings as the value corresponding to the principle_name key
principles[principle_name] = rewordings
return principles
def process_file_with_principles(input_filename, output_filename,principle_folder_path):
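    """Score each prompt/response pair in the input JSONL against every
    principle (sampling one rewording per principle) and write the records,
    augmented with the (logit_A, logit_B) pairs, to the output JSONL."""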
principles = get_principles_from_folder(principle_folder_path)
with open(input_filename, 'r', encoding='utf-8') as infile, open(output_filename, 'w', encoding='utf-8') as outfile:
for line in infile:
input_dict = json.loads(line.strip())
question = input_dict["Prompt"]
responseA = input_dict["ResponseA"]
responseB = input_dict["ResponseB"]
result_dict = {
"Prompt": question,
"ResponseA": responseA,
"ResponseB": responseB
}
for principle_name, rewordings in principles.items():
sampled_principle = random.choice(rewordings)
logits_for_A, logits_for_B = evaluate_responses(question, responseA, responseB, sampled_principle)
result_dict[principle_name] = (logits_for_A, logits_for_B)
result_json_str = json.dumps(result_dict)
outfile.write(f"{result_json_str}\n")
process_file_with_principles('Data/testing-s.jsonl', 'Data/testing-s-rated.jsonl',principle_folder_path)
| [
"You will be given a conversation between a human and an AI assistant along with a principle and two responses. Your task is to choose the response which best follows the principle. \nConversation: {question} \n Given the principle '{principle}', which of the following responses is better?\nA. PLACEHOLDER\nB. PLACEHOLDER\nRespond only with A or B.\n\n"
] |
2024-01-10 | reagannchaupa/chatBot | chat0.py | # Demonstrates a simple interaction with a chatbot
import openai
def main():
# Setup the API and start chat
openai.api_key = "OPENAI_API_KEY"
chat()
# Define how to get a response from chatbot
def ask_chatbot(prompt):
response = openai.Completion.create(
engine="gpt-3.5-turbo",
prompt=prompt,
max_tokens=150,
)
return response["choices"][0]["text"].strip()
# Use that function to chat with the chatbot
def chat():
# Continuously try to get and print a response from chatbot until user stops.
try:
while True:
user_input = input("You: ")
if user_input.lower() in ["exit", "quit", "bye"]:
print("Bot: Goodbye!")
break
bot_response = ask_chatbot("You: " + user_input + "\nBot:")
print(f"Bot: {bot_response}")
    except Exception as e:
        print(f"Something went wrong: {e}")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | willdphan/voice-assistant | genius.py | import openai
from dotenv import load_dotenv
import speech_recognition as sr
import pyttsx3
import time
import os
# Initialize OpenAI API
openai.api_key = os.getenv("OPENAI_API_KEY")
# Initialize the text to speech engine stored in engine var
engine = pyttsx3.init()
# speech recognition lib from audio as input and transcribes it to text
# takes in arg 'filename' which specifies the audio file we want
# to transcribe
def transcribe_audio_to_test(filename):
# create instance of recognizer class from sr module
recogizer = sr.Recognizer()
# with statement to open audio file using audio file
# class from sr module
with sr.AudioFile(filename) as source:
# record the audio using the record method of the recognizer object
audio = recogizer.record(source)
try:
# transcribes the audio into text using the recognize_google method
return recogizer.recognize_google(audio)
        # if an error occurs, a message will be displayed
        except Exception:
            print("skipping unknown error")
# generates response from gpt3 api
# takes single argument prompt - which represents input text
# as starting point for generating a response using gpt3 api
def generate_response(prompt):
# pass several arguments to specify parameters of response
    response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=4000,
n=1,
stop=None,
temperature=0.5,
)
# return gen response from gpt3 api
return response ["Choices"][0]["text"]
# takes text arg that converts it to speech using pyttsx3 lib
def speak_text(text):
# specifies the text to be spoken
engine.say(text)
# plays the speech
engine.runAndWait()
# shows how we want python to run the script
def main():
# runs continuously until program is stopped
while True:
# Wait for user say "genius"
print("Say 'Genius' to start recording your question")
# sr microphone class to access mic and audio
with sr.Microphone() as source:
# creates the instance of sr recognizer class
recognizer = sr.Recognizer()
# records the audio using the listen method of recognizer object
audio = recognizer.listen(source)
            # transcribes recorded audio to text using the recognize google method
try:
# next two lines check if the transcribed text is genius and
# the lower method converts it to lower case to make it
# case insensitive
transcription = recognizer.recognize_google(audio)
# if it is "genius" then record more audio
if transcription.lower() == "genius":
# record audio and save it to file input.wav
filename ="input.wav"
# used to display message to say the question
print("Say your question")
with sr.Microphone() as source:
                        recognizer = sr.Recognizer()
                        recognizer.pause_threshold = 1
audio=recognizer.listen(source, phrase_time_limit = None, timeout = None)
with open(filename,"wb") as f:
f.write(audio.get_wav_data())
                        # transcribes audio to text
text = transcribe_audio_to_test(filename)
# if the transcription was successful, the text var will contain
# the transcribed text and print it
if text:
print(f"you said {text}")
# Generates the response and prints the response
response = generate_response(text)
print(f"chat gpt 3 say {response}")
# Reads the response using text to speech
speak_text(response)
# handles error with printed response
except Exception as e:
print("An error ocurred : {}".format(e))
# code that runs the main func
if __name__=="__main__":
main()
| [] |
2024-01-10 | americium-241/Omnitool_UI | Omnitool_UI.py | import langchain
langchain.debug = False
import os
from datetime import datetime
import streamlit as st
st.set_option('deprecation.showPyplotGlobalUse', False)
from streamlit_option_menu import option_menu
from langchain.agents import AgentType
from langchain.memory import ConversationBufferMemory
from langchain.prompts import MessagesPlaceholder
from tools.tool_manager import ToolManager
from storage.storage import PersistentStorage
from storage.document import DocumentManager
from storage.logger_config import logger
from ui.sidebar_ui import sidebar
from ui.chat_ui import chat_page
from ui.settings_ui import settings_page
from ui.tools_ui import tools_page
from ui.info_ui import info_page
from ui.settings_ui import list_custom_Agent_names
import config  # You can hard-code your API keys there
from PIL import Image
import random
if os.environ["OPENAI_API_KEY"] != '':
st.session_state.api_keys = True
BASE_DIR= os.path.dirname(os.path.realpath(__file__))
logger.debug('BASE_DIR :'+BASE_DIR)
im = Image.open(BASE_DIR+'/assets/appicon.ico')
st.set_page_config(
page_title="OmniTool",
page_icon=im,
initial_sidebar_state="expanded",
menu_items={
'Get Help': 'https://github.com/americium-241/Omnitool_UI/tree/master',
'Report a bug': "https://github.com/americium-241/Omnitool_UI/tree/master",
'About': "Prototype for highly interactive and customizable chatbot "
}
)
#Session_state
def ensure_session_state():
logger.debug('Ensure sessions states')
# Ensure there are defaults for the session state
if "session_id" not in st.session_state:
st.session_state.session_id = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
if "model" not in st.session_state:
st.session_state.model = "gpt-3.5-turbo"
if "agent" not in st.session_state:
        st.session_state.agent = AgentType.OPENAI_FUNCTIONS  # ZERO_SHOT_REACT_DESCRIPTION
if 'tool_manager' not in st.session_state:
st.session_state.tool_manager = ToolManager()
st.session_state.tool_list = st.session_state.tool_manager.structured_tools
if "initial_tools" not in st.session_state :
#Enter a tool title here to make it the initial selected tool, most agents need at least one tool
st.session_state.initial_tools=['Testtool']
if "selected_tools" not in st.session_state :
st.session_state.selected_tools = st.session_state.initial_tools
if "tools" not in st.session_state:
st.session_state.tools= st.session_state.tool_manager.get_selected_tools(st.session_state.initial_tools)
if "clicked_cards" not in st.session_state:
st.session_state.clicked_cards = {tool_name: True for tool_name in st.session_state.initial_tools}
if "chat_history" not in st.session_state:
st.session_state.chat_history = MessagesPlaceholder(variable_name="chat_history")
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# Initialize DocumentManager
if "doc_manager" not in st.session_state:
st.session_state.doc_manager = DocumentManager(os.environ["OPENAI_API_KEY"])
if "documents" not in st.session_state:
st.session_state.documents = st.session_state.doc_manager.list_documents()
if "database" not in st.session_state:
st.session_state.database = st.session_state.doc_manager.database
if "selected_page" not in st.session_state :
st.session_state.selected_page = "Settings"
if "autorun_state" not in st.session_state:
st.session_state.autorun_state=False
if "all_tokens" not in st.session_state:
st.session_state.all_tokens=''
if "prefix" not in st.session_state:
st.session_state.prefix = ''
if "suffix" not in st.session_state:
st.session_state.suffix = ''
if "session_name" not in st.session_state:
st.session_state.session_name = {}
if "token_count" not in st.session_state:
st.session_state.token_count = 0
if "executed_code" not in st.session_state:
st.session_state.executed_code=[]
if "listen" not in st.session_state:
st.session_state.listen = False
if "plan_execute" not in st.session_state:
st.session_state.plan_execute = False
if "customAgentList" not in st.session_state:
st.session_state.customAgentList = list_custom_Agent_names
# menu callback
def option_menu_cb(cb):
# For some reason this callback sends a parameter
st.session_state.selected_page=st.session_state.menu_opt
#@st.cache_resource
def init_storage(db_url='sqlite:///'+BASE_DIR+'//storage//app_session_history.db'):
logger.info('Building storage and doc_manager')
# Create or connect to db and initialise document manager
storage = PersistentStorage(db_url)
doc_Manager= DocumentManager(os.environ["OPENAI_API_KEY"])
return storage,doc_Manager
# Option Menu
def menusetup():
list_menu=["Chat", "Tools", "Settings","Info"]
list_pages=[chat_page,tools_page,settings_page,info_page]
st.session_state.dictpages = dict(zip(list_menu, list_pages))
list_icons=['cloud','cloud-upload', 'gear','info-circle']
st.session_state.selected_page = option_menu("",list_menu,
icons=list_icons, menu_icon="", orientation="horizontal",
on_change = option_menu_cb,key='menu_opt',
default_index=list_menu.index(st.session_state.selected_page))
def pageselection():
st.session_state.dictpages[st.session_state.selected_page]()
# Main
def main():
ensure_session_state()
menusetup()
st.session_state.storage, st.session_state.doc_manager = init_storage()
sidebar()
pageselection()
if __name__ == "__main__":
main()
| [] |
2024-01-10 | americium-241/Omnitool_UI | ui~callbacks_ui.py | import streamlit as st
from langchain.callbacks.base import BaseCallbackHandler
from storage.logger_config import logger
class Custom_chat_callback(BaseCallbackHandler):
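    """Streams token counts and the accumulated token text into the Streamlit
    session state while the LLM is generating."""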
def on_llm_new_token(self, token: str, **kwargs) -> None:
st.session_state.token_count=st.session_state.token_count+1
st.session_state.all_tokens=st.session_state.all_tokens+str(token)
def on_llm_start(self, serialized,prompts, **kwargs):
"""Run when LLM starts running."""
def on_chat_model_start(self, serialized, messages, **kwargs):
"""Run when Chat Model starts running."""
#st.sidebar.info('Chat begins')
def on_llm_end(self, response, **kwargs):
"""Run when LLM ends running."""
logger.info('-- llm tokens -- :'+str(st.session_state.token_count ))
with st.session_state.token_count_placeholder:
st.info('Tokens count : '+str(st.session_state.token_count ))
def on_llm_error( self, errors, **kwargs):
"""Run when LLM errors."""
logger.debug('on_llm_error'+str(errors))
class ToolCallback(BaseCallbackHandler):
def should_check(self,serialized_obj: dict) -> bool:
# Define condition and call it on dedicated callback
return serialized_obj.get("name") == "Code_sender"
def on_tool_start(self,serialized,input_str, **kwargs) -> None:
"""Run when tool starts running."""
if self.should_check(serialized) :
logger.info('Tool started')
#Add a custom handling of the tool like a human confirmation for instance
def on_tool_end(self,output,**kwargs) -> None:
"""Run when tool ends running."""
logger.info('Tool ended')
def on_tool_error(self,error,**kwargs) -> None:
"""Run when tool errors."""
logger.debug('Tool failed and broke into pieces')
| [] |
2024-01-10 | americium-241/Omnitool_UI | tools~tools_list~tools_list.py | import streamlit as st
from streamlit_elements import elements, mui, html
import socket
import os
from typing import Dict, Union
import sys
import io
import autogen
from langchain.tools import WikipediaQueryRun
from langchain.utilities import WikipediaAPIWrapper
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import HuggingFaceHub
from tools.base_tools import Ui_Tool
from storage.logger_config import logger
from tools.utils import evaluate_function_string
from tools.tool_manager import ToolManager
Local_dir=dir_path = os.path.dirname(os.path.realpath(__file__))
#!!! Careful in this file: it is only used to define tools as functions
# (with a docstring) and tools as base_tool child classes to link UI
# or internal app logic
class Code_sender(Ui_Tool):
name = 'Code_sender'
icon= '💻'
title= 'Code Sender'
description="Allow you to send python script code to complete the task and get the result of the code"
    def _run(self, code):  # callback not working, is still inputted by the run(... callbacks)
try:
logger.info("CODE executed : %s",code)
st.session_state.executed_code.append(code)
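# Note: exec() returns None, so the string below mainly signals success; the executed code
# should surface its own results (e.g. via st.write or prints).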
return 'Success, returns : ' + str(exec(code, globals(), locals()))
except Exception as e:
return f"An error occurred while executing the code: {e}"
def _ui(self):
def checkstate(value):
st.session_state.autorun_state=value['target']['checked']
with mui.Accordion():
with mui.AccordionSummary(expandIcon=mui.icon.ExpandMore):
mui.Typography("Options")
with mui.AccordionDetails():
mui.FormControlLabel(
control=mui.Checkbox(onChange=checkstate,checked= st.session_state.autorun_state),
label="Auto run")
class stable_diffusion(Ui_Tool):
name = 'stable_diffusion'
title='Stable diffusion'
icon= '🖼️'
description= 'This tool allow for the creation of an image from a text input : question'
def _run(self,question):
import torch
from diffusers import StableDiffusionPipeline
torch_dtype=torch.float16
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",force_download=True,resume_download= False)
pipe = pipe.to(st.session_state.diffusion_option[0]) # e.g. pipe.to('cuda') if an NVIDIA GPU is available
prompt = question
image = pipe(prompt).images[0]
st.image(image)
return 'Success '
def _ui(self):
with mui.Accordion():
with mui.AccordionSummary(expandIcon=mui.icon.ExpandMore):
mui.Typography("Options")
with mui.AccordionDetails():
# Initialize a session state variable to store the selected items
if "diffusion_option" not in st.session_state:
st.session_state.diffusion_option = ['cpu'] # Initialized as an array
def handle_selection_change(state,value):
st.session_state.diffusion_option = [value['props']['value']]
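# torch only accepts device strings like 'cpu' or 'cuda', so the options below map directly to torch devices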
options = ["cpu", "cuda"]
with elements("multi_select_element"):
# Creating a label for the select component
mui.Typography("Select running option")
# Creating the multi-choice select box
with mui.Select(multiple=True, value=st.session_state.diffusion_option, onChange=handle_selection_change):
for option in options:
mui.MenuItem(value=option, children=option)
class hugging_call(Ui_Tool):
name = 'hugging_call'
title='Hugging call'
icon= '🤗'
description= 'This tool allow the call for a hugging_face NLP that returns the answer, input: question'
def _run(self,question):
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
# "openai-gpt"#"google/flan-t5-xxl" # See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options
llm = HuggingFaceHub(
repo_id=st.session_state.hugging_option[0], model_kwargs={"temperature": 0.1, "max_length": st.session_state.hugging_length_option }
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
return (llm_chain.run(question))
def _ui(self):
with mui.Accordion():
with mui.AccordionSummary(expandIcon=mui.icon.ExpandMore):
mui.Typography("Options")
with mui.AccordionDetails():
# Initialize a session state variable to store the selected items
if "hugging_option" not in st.session_state:
st.session_state.hugging_option = ['google/flan-t5-xxl'] # Initialized as an array
if "hugging_length_option" not in st.session_state:
st.session_state.hugging_length_option = 500 # Initialized as an array
def handle_selection_change(state,value):
st.session_state.hugging_option = [value['props']['value']]
def handle_selection_length_change(value):
st.session_state.hugging_length_option = value['target']['value']
options =["databricks/dolly-v2-3b","Writer/camel-5b-hf","Salesforce/xgen-7b-8k-base","tiiuae/falcon-40b","openai-gpt","google/flan-t5-xxl"]
with elements("multi_select_element"):
# Creating a label for the select component
mui.Typography("Select your options:")
# Creating the multi-choice select box
with mui.Select(multiple=True, value=st.session_state.hugging_option, onChange=handle_selection_change):
for option in options:
mui.MenuItem(value=option, children=option)
with elements("value_input_element"):
# Creating a label for the numeric input
mui.Typography("Enter max_length")
# Numeric input
mui.TextField(
label="",
variant="outlined",
type="number", # This makes it a numeric input
fullWidth=True,
defaultValue=st.session_state.hugging_length_option,
onChange=handle_selection_length_change
)
class db_query(Ui_Tool):
name = 'db_query'
title='Db Query'
icon= '💽'
description= 'Send the user question to an agent that will explore the database. query: question NOT a SQL query'
def _run(self,query):
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import OpenAI
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
db = SQLDatabase.from_uri(st.session_state.db_option)
toolkit = SQLDatabaseToolkit(db=db, llm=ChatOpenAI(temperature=0))
agent_executor = create_sql_agent(
llm=ChatOpenAI(temperature=0),
toolkit=toolkit,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
answer =agent_executor.run(query)
return 'Tool run, returns : '+answer
def _ui(self):
with mui.Accordion():
with mui.AccordionSummary(expandIcon=mui.icon.ExpandMore):
mui.Typography("Options")
with mui.AccordionDetails():
# Initialize a session state variable to store the selected items
if "db_option" not in st.session_state:
st.session_state.db_option = 'sqlite:///'+Local_dir+'\\..\\..\\storage\\app_session_history.db'
def handle_selection_change(value):
st.session_state.db_option = value['target']['value']
with elements("value_input_element"):
# Creating a label for the numeric input
mui.Typography("Enter database path")
# Numeric input
mui.TextField(
label="",
variant="outlined",
fullWidth=True,
defaultValue= st.session_state.db_option,
onChange=handle_selection_change
)
class socket_com(Ui_Tool):
name = 'socket_com'
title='Socket Com'
icon= '📨'
description= 'Send a message to the client. Input: message'
def _run(self,question):
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname() # get name of local machine
port = st.session_state.socket_option
def connect_to_host(host, port):
'''Connect to a specified host and port.'''
try:
client_socket.connect((host, port))
st.info(f"Connected to {host}:{port}")
except socket.error as e:
st.error(f"Failed to connect to {host}:{port}. Error: {e}")
raise
def close_connection():
'''Close the client socket connection.'''
try:
client_socket.close()
st.info("Connection closed successfully")
except socket.error as e:
st.error(f"Error while closing connection: {e}")
raise
def send_message(msg):
'''Send a message to the connected host.'''
try:
client_socket.sendall(msg.encode()) # Use sendall to ensure complete data is sent
st.info(f"Sent message: {msg}")
except socket.error as e:
st.error(f"Failed to send message: {msg}. Error: {e}")
raise
def receive_message():
'''Receive a message from the connected host.'''
try:
data = client_socket.recv(1024).decode()
st.info(f"Received message: {data}")
return data
except socket.error as e:
st.error(f"Failed to receive message. Error: {e}")
raise
try:
connect_to_host(host, port)
send_message(question)
response = receive_message()
return f'Received response: {response}'
finally:
close_connection()
def _ui(self):
with mui.Accordion():
with mui.AccordionSummary(expandIcon=mui.icon.ExpandMore):
mui.Typography("Options")
with mui.AccordionDetails():
# Initialize a session state variable to store the selected items
if "socket_option" not in st.session_state:
st.session_state.socket_option = 55555
def handle_selection_change(value):
st.session_state.socket_option = value['target']['value']
print('Socket ;',st.session_state.socket_option)
with elements("value_input_element"):
# Creating a label for the numeric input
mui.Typography("Enter port number")
# Numeric input
mui.TextField(
label="",
variant="outlined",
type="number", # This makes it a numeric input
fullWidth=True,
defaultValue=st.session_state.socket_option,
onChange=handle_selection_change
)
def autogen_code_writer(question):
'''This tool gets the input from autogen_plan and writes python code that has to be sent to the code_exec tool; question is a single string'''
prompt_writer ="""You should create a python code that precisely solves the problem asked. Always make one single python snippet and assume that exemples should be made with randomly generated data rather than loaded ones.
format : The python code should be formated as ```python \n ... \n ```
ALWAYS finish your answer by \n TERMINATE"""
# create an AssistantAgent named "assistant"
code_writer = autogen.AssistantAgent(
name="code_writer",
human_input_mode="NEVER",
llm_config=st.session_state.autogen_llm_config,
system_message=prompt_writer,
is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
)
user_proxy = autogen.UserProxyAgent(
name="user_proxy",
max_consecutive_auto_reply=0, # terminate without auto-reply
human_input_mode="NEVER",
)
# the assistant receives a message from the user_proxy, which contains the task description
message = user_proxy.initiate_chat(
code_writer,
message=question,
)
return user_proxy.last_message()["content"]
def autogen_code_exec(question):
'''This tool extracts the code from question when formatted as ``` \n python code \n ``` and executes it'''
class ExecUserProxyAgent(autogen.UserProxyAgent):
def __init__(self, name: str, **kwargs):
super().__init__(name, **kwargs)
self._locals = {}
def generate_init_message(self, *args, **kwargs) -> Union[str, Dict]:
return super().generate_init_message(*args, **kwargs)
def run_code(self, code, **kwargs):
stdout_capture = io.StringIO()
stderr_capture = io.StringIO()
# Redirecting stdout and stderr
sys.stdout = stdout_capture
sys.stderr = stderr_capture
exitcode = 0
result = None
try:
exec(code, self._locals)
except Exception as e:
exitcode = 1
stderr_capture.write(str(e))
#bpy.ops.object.select_all(action='SELECT') # Select all objects in the scene
#bpy.ops.object.delete()
# Reset stdout and stderr
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
log = stdout_capture.getvalue() + stderr_capture.getvalue()
return exitcode, log, None
# create a UserProxyAgent instance named "user_proxy"
code_executor = ExecUserProxyAgent(
name="code_executor",
human_input_mode="NEVER",
system_message="""You simply receive a message with code that will be executed, you can discuss ways to improve this code and return a better version if needed
ALWAYS finish your answer by \n TERMINATE""",
is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
code_execution_config={
"work_dir": "coding",
"use_docker": False, # set to True or image name like "python:3" to use docker
},
)
user_proxy = autogen.UserProxyAgent(
name="user_proxy",
max_consecutive_auto_reply=0, # terminate without auto-reply
human_input_mode="NEVER",
)
message = user_proxy.initiate_chat(
code_executor,
message=question,
)
return user_proxy.last_message()["content"]
def autogen_plan(question):
'''This tool takes as input the fully detailed context of the user question in order to construct a plan of action; always call it first or when confused'''
autogen_planner = autogen.AssistantAgent(
name="autogen_plan",
system_message="""NEVER WRITE PYTHON CODE. Your job is to improve the question you receive by turning it into a clear, step-by-step problem-solving plan. Never write code, only explanations.
Be precise and take into account that an LLM is reading your output to follow your instructions. You should remind in your answer that your message is intended for the code_writer.
ALWAYS finish your answer by \n TERMINATE""",
llm_config=st.session_state.autogen_llm_config,
human_input_mode="NEVER",
is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
)
user_proxy = autogen.UserProxyAgent(
name="user_proxy",
max_consecutive_auto_reply=0, # terminate without auto-reply
human_input_mode="NEVER",
)
# the assistant receives a message from the user_proxy, which contains the task description
message = user_proxy.initiate_chat(
autogen_planner,
message=question,
)
return user_proxy.last_message()["content"]
def powershell_terminal(command):
'''Send PowerShell commands to be executed, separated by ; (each call runs as an independent subprocess)'''
import subprocess
process = subprocess.Popen(["powershell", command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd='./workspace')
result, error = process.communicate()
# Decode result and error
try:
result_str = result.decode('utf-8')
except UnicodeDecodeError:
result_str = result.decode('cp1252', errors='replace')
try:
error_str = error.decode('utf-8')
except UnicodeDecodeError:
error_str = error.decode('cp1252', errors='replace')
# Check return code
if process.returncode != 0:
return 'Error (code: {}): {}'.format(process.returncode, error_str)
else:
# You might still want to return any "info" or "progress" messages from stderr even if the operation succeeded.
# Thus, you can check if error_str is not empty and append it to the success message.
additional_info = '\nInfo from stderr: ' + error_str if error_str.strip() else ''
return 'Success, code returns: ' + result_str + additional_info
def dataframe_query(query):
"""Allows you to query the available dataframes in the workspace"""
import pandas as pd
from langchain.llms import OpenAI
import os
workspace = Local_dir+'\\..\\..\\workspace'
#create list of dataframes
df_list = []
for file in os.listdir(workspace):
if file.endswith(".csv"):
df_list.append(pd.read_csv(os.path.join(workspace, file)))
elif file.endswith(".xlsx"):
df_list.append(pd.read_excel(os.path.join(workspace, file)))
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df_list, verbose=True)
r=agent.run(query)
return r
def browser_search(query):
"""Allows you to navigate using the browser; provide a URL or keyword and instructions"""
import subprocess
response = str(subprocess.run(['python', Local_dir+'\..\browser_tool.py', query], text=True, capture_output=True))
return response
def wiki_search(query):
"""Allows you to query the Wikipedia API to get information about your query"""
wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
return wikipedia.run(query)
def make_tool(toolcode):
"""
Send your toolcode, which is a string function with a docstring inside formatted as
def toolname(input):
\''' MANDATORY docstring to describe tool execution \'''
tool execution code
return "Success"
"""
runs_without_error, has_doc, tool_name = evaluate_function_string(toolcode)
if not runs_without_error:
return "Error: the provided code raised an error when evaluated."
if not has_doc:
return "Error: Function must have a docstring."
if not tool_name:
return "Error: Could not extract tool name from the provided code."
tools_path = os.path.join(Local_dir, f"{tool_name}.py")
with open(tools_path, "w") as f:
f.write('\n' + toolcode)
st.session_state.tool_manager = ToolManager()
st.session_state.tool_list = st.session_state.tool_manager.structured_tools
return 'Success'
| [
"This tool allow for the creation of an image from a text input : question",
"Question: {question}\n\n Answer: Let's think step by step.",
"question",
"Allow you to send python script code to complete the task and get the result of the code",
"Send the user question to an agent that will explore the database. query: question NOT a SQL query",
"This tool allow the call for a hugging_face NLP that returns the answer, input: question",
"Send a message to the client. Input: message",
"You should create a python code that precisely solves the problem asked. Always make one single python snippet and assume that exemples should be made with randomly generated data rather than loaded ones.\n format : The python code should be formated as ```python \n ... \n ``` \n ALWAYS finish your answer by \n TERMINATE"
] |
2024-01-10 | americium-241/Omnitool_UI | ui~chat_ui.py | import streamlit as st
from langchain.callbacks import StreamlitCallbackHandler
from agents.agent import AgentConfig
from storage.logger_config import logger
from .callbacks_ui import Custom_chat_callback,ToolCallback
from .settings_ui import list_custom_Agent
from config import SIMILARITY_MAX_DOC,TIMEOUT_AUDIO,PHRASE_TIME_LIMIT
from tools.utils import executecode
class StreamlitUI:
def display_message(self, role, message):
st.chat_message(role).write(message)
def get_prompt(self):
return st.chat_input()
def make_callback(self):
self.st_callbacks = [StreamlitCallbackHandler(st.container()),Custom_chat_callback(),ToolCallback()]
# would be better to cache this but makes the session selection more complicated
def initialize_chat_memory(session_id):
# Rebuild memory at each call in case of changed session
st.session_state.memory.clear()
if "session_id" in st.session_state :
st.session_state.messages = st.session_state.storage.get_chat_history(st.session_state.session_id)
for msg in st.session_state.messages:
if msg["role"] == 'user':
st.session_state.memory.chat_memory.add_user_message(msg["content"])
else :
st.session_state.memory.chat_memory.add_ai_message(msg["content"])
# AGENT CONFIGURATION
@st.cache_data # should be cached when using a local model, but it messes up tool synchronisation when on (when you go back and forth).
def configure_agent(_model,_agent,_tools,_chat_history,_memory,session_id,selected_tools_names):
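# session_id and selected_tools_names are unused in the body but act as cache keys, so the agent is
# rebuilt whenever the session or the tool selection changes (underscore-prefixed args are excluded from hashing).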
logger.info(f'Agent config for session {session_id} with model : {_model}, agent : {_agent}, tools : {_tools}')
logger.debug(f'Agent config for session {session_id} with memory : {_memory}')
if _agent in st.session_state.customAgentList :
agt=list(filter(lambda x : x[0] == _agent,list_custom_Agent))[0]
agent_config = agt[1]()
else :
agent_config = AgentConfig(_model,_agent, _tools ,_chat_history,_memory)
st.session_state.agent_instance = agent_config.initialize_agent()
def chat_page():
#Create chat
st.session_state.chat_ui = StreamlitUI()
s = ', '.join(st.session_state.selected_tools)
st.info('Selected tools : '+s)
st.markdown("---")
st.markdown("### 💭 Chat")
st.markdown("---") # Settings header
initialize_chat_memory(st.session_state.session_id)
configure_agent(st.session_state.model,st.session_state.agent, st.session_state.tools ,st.session_state.chat_history,st.session_state.memory,st.session_state.session_id,st.session_state.selected_tools)# Configure the agent
if "session_id" in st.session_state :
st.session_state.messages = st.session_state.storage.get_chat_history(st.session_state.session_id)
for msg in st.session_state.messages:
st.session_state.chat_ui.display_message(msg["role"], msg["content"])
prompt=''
if st.session_state.listen == True:
import speech_recognition as sr
import pyttsx3
endb=st.button('End listening')
engine = pyttsx3.init()
r = sr.Recognizer()
st.write("Calibrating...")
while st.session_state.listen == True and prompt == '':
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source, duration=1)
with st.spinner("Listening now..."):
if endb :
st.session_state.listen = False
st.rerun()
try:
audio = r.listen(source, timeout=TIMEOUT_AUDIO, phrase_time_limit=PHRASE_TIME_LIMIT)
with st.spinner("Recognizing..."):
text_ok = r.recognize_google(audio) # Using Google's Speech Recognition API
prompt = text_ok
except Exception as e:
unrecognized_speech_text = f"Sorry, I didn't catch that. Exception was: {e}"
text = unrecognized_speech_text
continue
if text_ok == "":
continue
if st.session_state.listen == False:
break
if prompt == '':
if u_prompt := st.session_state.chat_ui.get_prompt():
prompt = u_prompt
if prompt != '':
original_prompt=prompt
if st.session_state.database:
# Do a similarity search in the loaded documents with the user's input
similar_docs = st.session_state.database.similarity_search(prompt,k=SIMILARITY_MAX_DOC)
# Insert the content of the most similar document into the prompt
if similar_docs:
logger.info("Documents found : \n"+str(similar_docs))
prompt = '\n Relevant documentation : \n'+similar_docs[0].page_content+'\n' +'User prompt : \n '+prompt
for p in similar_docs :
logger.info(p.page_content)
prompt = st.session_state.prefix+prompt+st.session_state.suffix# SHOULD BE REMOVED AND WORKING WITH prefix
st.session_state.chat_ui.display_message("user", original_prompt)
logger.info('Input prompt : '+ prompt)
with st.chat_message("assistant"):
st.session_state.chat_ui.make_callback()
response = st.session_state.agent_instance.run(input = prompt,callbacks=st.session_state.chat_ui.st_callbacks)
logger.info('Chat response : '+ response)
st.session_state.chat_ui.display_message("assistant", response)
session_name = st.session_state.session_name.get(st.session_state.session_id, st.session_state.session_id)
st.session_state.storage.save_chat_message(st.session_state.session_id, "user", original_prompt,session_name)
st.session_state.storage.save_chat_message(st.session_state.session_id, "assistant", response,session_name)
st.session_state.storage.save_session_name(session_name,st.session_state.session_id)
if st.session_state.listen == True:
voices = engine.getProperty('voices')
# Initialize text-to-speech engine
engine.setProperty('rate', 210) # setting up new voice rate
engine.setProperty('voice', voices[1].id)
engine.say(response)
engine.runAndWait() # blocks execution while the engine is speaking
prompt=''
st.rerun()
if 'Code_sender' in st.session_state.selected_tools :
try : # executed code can be empty
with st.empty().container():
with st.expander('Code'):
st.markdown('''```python \n '''+st.session_state.executed_code[-1]+'''```''')
except Exception as e:
pass
if st.session_state.autorun_state == True:
# Useful to have graph updates for interactive plotting from chatbot
try :
executecode(st.session_state.executed_code[-1])
except Exception as e:
pass
#logger.debug('Code auto exec error: '+str(e))
| [
"\n Relevant documentation : \n",
"\n",
"User prompt : \n "
] |
2024-01-10 | americium-241/Omnitool_UI | config.py | #Models, agent, key are in ui.settings_ui
import logging
from langchain.agents import AgentType
import os
#Define api_keys session_state if you want to hardcode them
os.environ["OPENAI_API_KEY"] =''
#os.environ["HUGGINGFACEHUB_API_TOKEN"] =''
# Logger level
LOGGER_LEVEL=logging.INFO
# Add a keys in KEYS list and a text_input will be available in settings tab UI
KEYS=["OPENAI_API_KEY","HUGGINGFACEHUB_API_TOKEN"]
# Add a model name to the list
MODELS=['gpt-3.5-turbo', 'gpt-4', 'gpt-3.5-turbo-0613']
gguf_models={'llama':{'model':'TheBloke/Llama-2-7b-Chat-GGUF','file':'llama-2-7b-chat.Q2_K.gguf'},
'mistral':{'model':'TheBloke/Mistral-7B-v0.1-GGUF','file':'mistral-7b-v0.1.Q2_K.gguf'}}
MODELS.extend(list(gguf_models.keys()))
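# To register another local GGUF model, add an entry to gguf_models above, e.g. (hypothetical):
# 'zephyr':{'model':'TheBloke/zephyr-7B-beta-GGUF','file':'zephyr-7b-beta.Q2_K.gguf'}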
#Monitoring langchain.agents and new_agents folder to add to agents list
#agents=[AgentType.OPENAI_MULTI_FUNCTIONS,AgentType.OPENAI_FUNCTIONS,AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, AgentType.ZERO_SHOT_REACT_DESCRIPTION,AgentType.CONVERSATIONAL_REACT_DESCRIPTION]
AGENTS=[getattr(AgentType, a) for a in dir(AgentType) if a.isupper()]
#LLM temperature
TEMPERATURE=0
# Maximum thoughts iteration per query
MAX_ITERATIONS=20
#Document embedding chunk size
CHUNK_SIZE=500
#Similarity document search
SIMILARITY_MAX_DOC=5
#Audio recognition
TIMEOUT_AUDIO=10
PHRASE_TIME_LIMIT=50 | [] |
2024-01-10 | americium-241/Omnitool_UI | tools~tool_manager.py | import os
from langchain.tools import StructuredTool
from .base_tools import Pre_Structured_Tool
from .utils import get_class_func_from_module, monitorFolder
class ToolManager:
def __init__(self):
self.structured_tools = make_tools_list()
self.tools_ui={}
self.tools_description=self.make_tools_description()
def make_tools_description(self):
tools_description = {}
for t in self.structured_tools :
tools_description.update({t.name : t.description})
return tools_description
def get_tools(self):
return self.structured_tools
def get_tool_names(self):
return [tool.name for tool in self.structured_tools]
def get_selected_tools(self, selected_tool_names):
return [tool for tool in self.structured_tools if tool.name in selected_tool_names]
def make_tools_list():
#Define the path of the monitored folder for tool auto listing
monitored_files=monitorFolder(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tools_list'))
# Pre_Structured_Tool already provides the list of pre-built structured tools
base_tool_list=[]
for mod in monitored_files:
#Get the lists of tools to construct from the tools_list modules
listClassTool,listFunctionTool=get_class_func_from_module(mod)
# ListClassTool and ListFunctionTool are created on the fly from monitored files
for b_tool in listClassTool:
base_tool_list.append(b_tool[1]())
for func in listFunctionTool :
functool=StructuredTool.from_function(func[1])
base_tool_list.append(functool)
for pre_tool in Pre_Structured_Tool :
base_tool_list.append(pre_tool)
return base_tool_list
| [] |
2024-01-10 | americium-241/Omnitool_UI | agents~agents_list~new_agent.py | import re
import streamlit as st
from typing import List, Union, Callable
from langchain.prompts.base import StringPromptTemplate
from langchain.schema import AgentAction, AgentFinish
from langchain.agents import AgentOutputParser, AgentExecutor, LLMSingleActionAgent
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from config import MAX_ITERATIONS
class Custom_agent_exemple():
'''This example can be found at https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval'''
# Any other class you define in this folder will be treated as an agent class with an initialize_agent method
def __init__(self):
#Most of the init is useless here
self.name='My custom Agent'
self.llm = OpenAI(temperature=0,callbacks=[StreamingStdOutCallbackHandler()])
self.tools = st.session_state.tools
self.chat_history = st.session_state.chat_history
self.memory = st.session_state.memory
self.prefix=st.session_state.prefix
self.suffix=st.session_state.suffix
self.make_prompt()
self.make_parser()
def make_prompt(self):
# Set up the base template
template = self.prefix+ """
Answer the question with very long and detailed explanations, be very precise and make clear points. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, can be one of {tool_names} if you need to use a tool
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Here are your memories : {memory}
Question: {input}
{agent_scratchpad}
""" + self.suffix
self.prompt = self.CustomPromptTemplate(
template=template,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=["input", "intermediate_steps"],
)
def make_parser(self):
self.output_parser = self.CustomOutputParser()
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(
tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output
)
class CustomPromptTemplate(StringPromptTemplate):
template : str
tools_getter : Callable
def get_tools(self,query):
query_list_tool=st.session_state.tools
return query_list_tool
def __init__(self,template='',**kwargs):
super().__init__(template=template,tools_getter=self.get_tools,**kwargs)
self.template=template
self.tools_getter=self.get_tools
def format(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
kwargs["memory"] = st.session_state.memory
tools = self.tools_getter(kwargs["input"])
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
)
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in tools])
return self.template.format(**kwargs)
def initialize_agent(self):
llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=self.output_parser,
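# Note: the stock LangChain custom-agent example uses stop=["\nObservation:"] so the model does not hallucinate tool results; adjust if needed.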
stop=["\nFinal answer:"],
allowed_tools=[t.name for t in self.tools],
)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=self.tools, verbose=True,memory=self.memory,
handle_parsing_errors=True,
max_iterations=MAX_ITERATIONS,
streaming=True,
)
return agent_executor
| [
"\n \n Answer the question with very long and detailed explanations, be very precise and make clear points. You have access to the following tools:\n\n {tools}\n\n Use the following format:\n\n Question: the input question you must answer\n Thought: you should always think about what to do\n Action: the action to take, can be one of {tool_names} is you need to use tool\n Action Input: the input to the action\n Observation: the result of the action\n ... (this Thought/Action/Action Input/Observation can repeat N times)\n Thought: I now know the final answer\n Final Answer: the final answer to the original input question\n\n Here are your memories : {memory}\n\n Question: {input}\n {agent_scratchpad}\n "
] |
2024-01-10 | americium-241/Omnitool_UI | ui~info_ui.py | import streamlit as st
def info_page():
st.info('This project is currently in development and bugs can be reported here : [Github issue](https://github.com/americium-241/Omnitool_UI/tree/master)')
st.markdown("""
# Usage guidelines
### API keys :
- Get an OpenAI key at : [OpenAI](https://platform.openai.com/)
- The Hugging Face key can be omitted if related tools are not used, or can be found at : [HuggingFace](https://huggingface.co/docs/hub/security-tokens)
- Keys can easily be added to the app (see Custom)
## Set-up the chatbot in the settings page :
- Enter your API keys
- Choose a model : OpenAI or Llama are available (see custom)
- Pick an agent : How is the chatbot supposed to behave ?
All agents are very different, try and explore, but so far the OpenAI one handles tools the most reliably (see Custom).
Be careful when using gpt-4, as token counts and price can escalate rather quickly.
- Define prefix and suffix depending on the type of session you want to initiate
These are added at the beginning and end of the user input
- Load pdf or txt files to the vector database for similarity search in documents.
Relevant document chunks are added to the chatbot prompt before the user input
- Load any document to the workspace to facilitate future use of tools for in chat data manipulation
- Try the voice control; it is held together with string so it may crack, but never miss
a chance to say hello to Jarvis.
## Select tools in the tool page :
- Filter tools by name and description
- Select tool cards and options
- In app add tool at the end of cards list :
* Name the python file to be created
* Write a function (single arguments works best for all agents)
* add a docstring
* add a relevant return that is sent to the chatbot
* submit and use
## Discuss with chatbot in the chat page:
- Start the session and ask a question, or select a previous session and continue it
- The bot can usually handle the tool calls by itself, but results are more reliable with an explicit usage description. For complex actions you should specify the tool execution order
- Change tools and settings, come back and explore multiple configurations within one session
## Custom
### Tools
You can make custom tools from multiple ways :
1. Make a new python file at Omnitool_UI/tools/tools_list :
- make a single function with a docstring to describe tool usage and a return relevant for the chatbot thought process""")
function_exemple = """
import streamlit as st
def streamlit_info(message):
''' This function displays the message as a streamlit info card'''
st.info(message)
return 'Success '
"""
st.code(function_exemple,language="python")
st.markdown("""
- make a single class that inherits from Ui_Tool with a _run method and a _ui method for option management
The TestTool option can guide you to the absolute path of the folder
""")
tool_exemple ="""
import streamlit as st
from streamlit_elements import elements, mui, html
import os
from storage.logger_config import logger
from tools.base_tools import Ui_Tool
Local_dir=dir_path = os.path.dirname(os.path.realpath(__file__))
class Testtool(Ui_Tool):
name = 'Testtool'
icon = '🌍'
title = 'Test tool'
description = 'This function is used so the human can make test, thank you to proceed, input : anything'
def _run(self, a):
# This function is executed by the chatbot when using tool
st.success(a)
logger.debug('During the test tool execution and with input : ' + a)
return 'Success'
def _ui(self):
# This function is executed at the creation of the tool card in the tool page
if "test_state" not in st.session_state:
st.session_state.test_state = False
def checkstate(value):
st.session_state.test_state = value['target']['checked']
if st.session_state.test_state is True :
st.success('Find me at '+ Local_dir)
# Expander placed outside (below) the card
with mui.Accordion():
with mui.AccordionSummary(expandIcon=mui.icon.ExpandMore):
mui.Typography("Options")
with mui.AccordionDetails():
mui.FormControlLabel(
control=mui.Checkbox(onChange=checkstate,checked= st.session_state.test_state),
label="Try to change me !")"""
st.code(tool_exemple, language="python")
st.markdown("""
2. Use the in-app add tool form. Only supports function tool creation
3. Use the chatbot make_tool tool. Only supports function tool creation
Any tool created by the form or by the make_tool tool creates a new tool file (Omnitool_UI/tools/tools_list)
### Agents
You can make custom agents by creating a new python file at Omnitool_UI/agents/agents_list :
- Write a single class with an initialize_agent method that returns an object with a run method. The output of the run is expected to be the answer to the user input
- The custom agent example, taken from the LangChain how-to guides, gives a minimal template to begin with
### API keys
API keys are accessible in the config file. New text inputs can be added to the app simply by extending the KEYS list.
This is useful to set up the environment necessary for the execution of your tools
### Config file
- Other parameters can be modified in the config file :
- Models list
- Agents list
- Vector DB embedding chunk size and number of documents retrieved per similarity search
- Voice command timeouts
- Maximum intermediate thought iterations
- Logging level
- Thanks to Streamlit's interactivity, all files can be modified while the app is running; it will keep working and run the new code at the next trigger
## Troubleshooting
This project is in development and bugs are to be expected. The flexibility of Streamlit can lead to dead ends when combined with cached data (at our stage at least); sometimes a simple refresh is your best call.
Bugs can be reported at : [Github issue](https://github.com/americium-241/Omnitool_UI/tree/master)
This link is also available in the right side menu
""") | [] |
2024-01-10 | americium-241/Omnitool_UI | tools~browser_tool.py |
from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.agents.agent_toolkits import PlayWrightBrowserToolkit
from langchain.tools.playwright.utils import create_sync_playwright_browser
#create_async_playwright_browser,
import sys
"""Allows you to navigate using the browser; provide a URL or keyword and instructions"""
sync_browser = create_sync_playwright_browser()
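# This script is launched as a subprocess by the browser_search tool in tools_list.py, which passes the instructions as sys.argv[1].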
toolkit = PlayWrightBrowserToolkit.from_browser(sync_browser=sync_browser)
tools = toolkit.get_tools()
llm = ChatOpenAI(temperature=0, model="gpt-4")# or any other LLM, e.g., ChatOpenAI(), OpenAI()
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True,
streaming=True,
)
response = agent_chain.run(sys.argv[1])
| [] |
2024-01-10 | americium-241/Omnitool_UI | tools~base_tools.py | import inspect
from storage.logger_config import logger
from langchain.tools import BaseTool
import langchain.tools as ltools
from langchain.agents.agent_toolkits import FileManagementToolkit
from langchain.agents import load_tools
import streamlit as st
import os
Local_dir = os.path.dirname(os.path.realpath(__file__))
class Ui_Tool(BaseTool):
name = 'Base_tool'
link = r'https://github.com/americium-241/Omnitool_UI/tree/master'
icon = '🔧 '
description = 'Description'
def _run(self,a):
"""This function should be overwritten when creating a tool, and a docstring has to be given"""
logger.debug('You are in the Base tool execution with input : %s', a)
return 'Success'
def _ui(self):
# Overwrite this function to add options to the tool, use streamlit_extra mui components
pass
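# Subclasses of Ui_Tool placed in tools/tools_list are discovered automatically by ToolManager.make_tools_list().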
# Your existing function to check if a class has required attributes
def has_required_attributes(cls):
"""Check that class possesses a name and description attribute"""
required_attributes = ['name', 'description']
try:
instance = cls(**{attr: "default_value" for attr in required_attributes})
return True
except Exception as e:
#print(f"Failed to instantiate {cls.__name__} due to: {e}")
return False
def make_pre_structured_tools():
"""Monitor langchain.tools and keep only the tools that have no mandatory arguments for initialisation"""
module = ltools
tool_class_names = [member for name, member in inspect.getmembers(module) if isinstance(member, list)][0]
# Retrieve each class using its name and check if it has the required attributes
classes = [getattr(module, class_name) for class_name in tool_class_names]
p_tools = [cl() for cl in classes if has_required_attributes(cl)]
pre_tools= []
toolkit_file = FileManagementToolkit(root_dir=Local_dir+"\\..\\workspace")
pre_tools.extend(toolkit_file.get_tools())
tools_name=[t.name for t in pre_tools]
for t in p_tools :
if t not in pre_tools and t.name != 'DuckDuckGo Results JSON' and t.name not in tools_name:
pre_tools.append(t)
requests_tools = load_tools(["requests_all"])
pre_tools.extend(requests_tools)
return pre_tools
Pre_Structured_Tool=make_pre_structured_tools()
| [
"Description"
] |
2024-01-10 | americium-241/Omnitool_UI | ui~settings_ui.py | import streamlit as st
import os
from langchain.document_loaders import PyMuPDFLoader
from langchain.document_loaders import TextLoader
import autogen
from tools.utils import get_class_func_from_module,monitorFolder
from config import KEYS,MODELS,AGENTS
Local_DIR= os.path.dirname(os.path.realpath(__file__))
monitored_files=monitorFolder(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..', 'agents','agents_list'))
list_custom_Agent=[]
for f in monitored_files:
class_agent,_=get_class_func_from_module(f)
list_custom_Agent.extend(class_agent)
list_custom_Agent_names=[a[0] for a in list_custom_Agent]
AGENTS.extend(list_custom_Agent_names)
def api_keys_init():
with st.expander(label='API keys',expanded = True):
if "api_keys" not in st.session_state :
for i,k in enumerate(KEYS):
os.environ[k] = st.text_input(k, type="password")
st.session_state.api_keys = True
st.info("Please add your OpenAI API key to continue.")
st.stop()
else :
for k in KEYS:
os.environ[k] = st.text_input(k, type="password",value=os.environ[k])
#Necessary to have callbacks to keep states updated
def llm_menu_cb():
st.session_state.model=st.session_state.menu_model
def agent_menu_cb():
st.session_state.agent=st.session_state.menu_agent
def prefixprompt_menu_cb():
st.session_state.prefix=st.session_state.menu_prefixprompt
def suffixprompt_menu_cb():
st.session_state.suffix=st.session_state.menu_suffixprompt
def listen_menu_cb():
st.session_state.listen=st.session_state.menu_listen
def planexec_menu_cb():
st.session_state.plan_execute=st.session_state.menu_plan_exec
def empty_vdb():
keys=list(st.session_state.doc_manager.documents.keys())
for i,doc in enumerate(keys):
st.session_state.doc_manager.remove_document(doc)
st.session_state.doc_manager.database.delete([st.session_state.doc_manager.database.index_to_docstore_id[i]])
# Subpage functions
def file_upload():
with st.expander(label='Load documents',expanded = False):
col1,col2=st.columns([100,92])
uploaded_files = col1.file_uploader("Embbedings Files", type=['txt','pdf'], accept_multiple_files=True)
uploaded_files_workspace = col2.file_uploader("Workspace Files", type=['txt','pdf','csv','png','jpg'], accept_multiple_files=True)
if uploaded_files:
load_files(uploaded_files)
if uploaded_files_workspace:
load_files_workspace(uploaded_files_workspace)
col1.button('Empty document db',on_click=empty_vdb)
if len(list(st.session_state.doc_manager.documents.keys()))>0 :
col1.write('Documents loaded : \n'+str(list(st.session_state.doc_manager.documents.keys()))[1:-1])
@st.cache_data()
def load_files_workspace(uploaded_files):
workspace_dir=Local_DIR+'\\..\\workspace\\'
for file in uploaded_files:
load_file_workspace(file,workspace_dir)
@st.cache_data()
def load_file_workspace(file,workspace_dir):
file_value=file.getvalue()
with open(workspace_dir+str(file.name), 'wb') as f:
f.write(file_value)
@st.cache_data()
def load_files(uploaded_files):
for uploaded_file in uploaded_files:
load_file(uploaded_file)
st.session_state.doc_manager.create_embeddings_and_database()
st.session_state.documents = st.session_state.doc_manager.list_documents()
st.session_state.database = st.session_state.doc_manager.database
st.success("Documents loaded and embeddings created.")
@st.cache_data()
def load_file(uploaded_file):
if uploaded_file.type == 'application/pdf':
temp_file_path = f"./temp_{uploaded_file.name}"
with open(temp_file_path, mode='wb') as w:
w.write(uploaded_file.getvalue())
loader = PyMuPDFLoader(temp_file_path)
doc_content = loader.load()
st.session_state.doc_manager.add_document(uploaded_file.name, doc_content)
os.remove(temp_file_path) # remove temporary file after loading
if uploaded_file.type == 'text/plain' :
# Write uploaded file to a temporary file and load with TextLoader
temp_file_path = f"./temp_{uploaded_file.name}"
with open(temp_file_path, 'w') as f:
f.write(uploaded_file.read().decode('utf-8'))
loader = TextLoader(temp_file_path)
doc_content = loader.load()
st.session_state.doc_manager.add_document(uploaded_file.name, doc_content)
os.remove(temp_file_path) # remove temporary file after loading
def settings_page():
api_keys_init()
with st.expander(label='Settings',expanded = True):
col1,col2=st.columns(2)
st.session_state.agent = col1.selectbox("Select agent", options=AGENTS,key='menu_agent',on_change=agent_menu_cb,index=AGENTS.index(st.session_state.agent),help=Agent_Description[str(st.session_state.agent)])
st.session_state.model = col2.selectbox("Select a model", options=MODELS,key='menu_model',on_change=llm_menu_cb ,index=MODELS.index(st.session_state.model ))
st.session_state.prefix=col1.text_area('Prefix',key='menu_prefixprompt',on_change=prefixprompt_menu_cb,value= st.session_state.prefix,placeholder='First input for initial prompt')
st.session_state.suffix=col2.text_area('Suffix',key='menu_suffixprompt',on_change=suffixprompt_menu_cb,value= st.session_state.suffix,placeholder='Last input for initial prompt')
file_upload()
with st.expander(label='Experimental',expanded = False):
col1,col2,col3=st.columns(3)
st.session_state.listen= col1.checkbox('Start Listening',key='menu_listen',on_change=listen_menu_cb,value=st.session_state.listen )
st.session_state.plan_execute= col2.checkbox('Plan and execute',key='menu_plan_exec',on_change=planexec_menu_cb,value=st.session_state.plan_execute )
make_autogen_config()
def make_autogen_config():
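# Builds the OAI_CONFIG_LIST JSON string expected by autogen.config_list_from_json; note that the API key ends up in the environment as plain text.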
param=""" "model": "{model}",
"api_key": "{key}" """.format(model=st.session_state.model,key=os.environ["OPENAI_API_KEY"])
# Make autogen llm_config
json_string = """[
{\n"""+str(param)+"""
}
]"""
# Store the JSON string in an environment variable
os.environ['OAI_CONFIG_LIST'] = json_string
config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")
llm_config={
#"seed": 42, # seed for caching and reproducibility
"config_list": config_list, # a list of OpenAI API configurations
"temperature": 0, # temperature for sampling
}
st.session_state.autogen_llm_config=llm_config
Agent_Description={
'AgentType.OPENAI_FUNCTIONS':
"Certain OpenAI models (like gpt-3.5-turbo-0613 and gpt-4-0613) have been explicitly fine-tuned to detect when a function should be called and respond with the inputs that should be passed to the function. The OpenAI Functions Agent is designed to work with these models.",
'AgentType.ZERO_SHOT_REACT_DESCRIPTION':
"This agent uses the ReAct framework to determine which tool to use based solely on the tool's description. Any number of tools can be provided. This agent requires that a description is provided for each tool.",
'AgentType.CONVERSATIONAL_REACT_DESCRIPTION':
"This agent is designed to be used in conversational settings. The prompt is designed to make the agent helpful and conversational. It uses the ReAct framework to decide which tool to use, and uses memory to remember the previous conversation interactions.",
'AgentType.SELF_ASK_WITH_SEARCH':
"This agent utilizes a single tool that should be named Intermediate Answer. This tool should be able to lookup factual answers to questions. This agent is equivalent to the original self ask with search paper, where a Google search API was provided as the tool.",
'AgentType.REACT_DOCSTORE':
"This agent uses the ReAct framework to interact with a docstore. Two tools must be provided: a Search tool and a Lookup tool (they must be named exactly as so). The Search tool should search for a document, while the Lookup tool should lookup a term in the most recently found document. This agent is equivalent to the original ReAct paper, specifically the Wikipedia example.",
'AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION':
"The structured tool chat agent is capable of using multi-input tools.",
}
for ag in AGENTS:
if str(ag) not in Agent_Description.keys():
Agent_Description.update({str(ag):'No description'})
| [] |