date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | selab-gatech/RESTGPT | src~restgpt.py | import openai.error
from specification_builder import SpecificationBuilder
from report_builder import ReportBuilder
import os
import json
import csv
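# Entry points for RESTGPT: each helper below feeds a source OpenAPI spec (YAML/JSON) into a
# SpecificationBuilder or ReportBuilder and writes the enhanced spec or report to an output file
# (mostly under outputs/).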
def build_all_reports():
original_files = ["rest-countries", "omdb", "language-tool", "spotify", "youtube", "genome-nexus", "ohsome", "fdic", "ocvn"]
files = ["genome-nexus"]
for file in files:
print("Buidling specification for " + file)
report_builder = ReportBuilder(f'specifications/openapi_yaml/{file}.yaml', f'{file}_results.json')
report_builder.path_builder()
report_builder.save_report_to_file()
def build_one_report(file_path, file_name):
output_name = f"{file_name}_results.json"
#report_builder = ReportBuilder(file_path, output_name)
#report_builder.path_builder()
#report_builder.save_report_to_file()
spec_build = SpecificationBuilder(file_path, output_name)
spec_build.build_specification()
def build_all_specs(file_type):
original_files = ["rest-countries", "omdb", "language-tool", "spotify", "youtube", "genome-nexus", "ohsome", "fdic", "ocvn"]
files = ["rest-countries", "omdb", "language-tool", "spotify", "youtube", "genome-nexus", "ohsome", "fdic", "ocvn"]
for file in files:
print("Buidling specification for " + file)
spec_builder = SpecificationBuilder(f'specifications/openapi_yaml/{file}.yaml', f'outputs/{file}_results.{file_type}')
spec_builder.build_specification()
def build_one_spec(file_path, file_name):
output_name = f"outputs/{file_name}_results.json"
spec_build = SpecificationBuilder(file_path, output_name)
spec_build.build_specification()
def build_all_specs_eval(file_type):
try:
files = os.listdir("specifications/inputs")
for file in files:
print("Buidling specification for " + file + ".")
try:
spec_builder = SpecificationBuilder(f'specifications/inputs/{file}', f'outputs/{file}_results.{file_type}')
spec_builder.build_specification()
except ImportError:
print("The config file containing your API key does not exist.")
return
except (KeyError, openai.error.AuthenticationError):
print("The API key used to access GPT-3.5 Turbo is invalid.")
return
except Exception:
print(f"Failed to build specification for {file}.")
except Exception:
print("Failed to access input specifications.")
def build_one_spec_with_report(file_path, file_name):
# language tool to test requestBody
# omdb to test general
output_path = f"outputs/{file_name}_results.json"
spec_build = SpecificationBuilder(file_path, output_path)
spec_build.build_specification_with_report(f"results/reports_update_512/{file_name}_results.json")
def build_all_specs_with_reports(output_type): # run this for RESTGPT paper enhanced specification generation
if output_type != "yaml" or output_type != "json":
output_type = "json" # default json
original_files = ["rest-countries", "omdb", "language-tool", "spotify", "youtube", "genome-nexus", "ohsome", "fdic",
"ocvn"]
files = ["rest-countries", "omdb", "language-tool", "spotify", "youtube", "genome-nexus", "ohsome", "fdic", "ocvn"]
for file in files:
print("Building enhanced specifictaion for " + file)
spec_builder = SpecificationBuilder(f'specifications/openapi_{output_type}/{file}.{output_type}', f'outputs/openapi_{output_type}/{file}_results.{output_type}')
spec_builder.build_specification_with_report(f"results/reports_update_512/{file}_results.json")
def build_openai_csv():
original_files = ["rest-countries", "omdb", "language-tool", "spotify", "youtube", "genome-nexus", "ohsome", "fdic",
"ocvn"]
files = ["rest-countries", "omdb", "language-tool", "spotify", "youtube", "genome-nexus", "ohsome", "fdic",
"ocvn"]
for file in files:
spec_builder = SpecificationBuilder(f'specifications/openapi_yaml/{file}.yaml')
file_path = f"results/reports_tokens256_temp0.2/{file}_results.json"
responses = spec_builder.generate_llm_query_objects(file_path)
file_name = f"csv/{file}.csv"
directory = os.path.dirname(file_name)
if not os.path.exists(directory):
os.makedirs(directory)
with open(file_name, mode='w', newline='', encoding='utf-8') as csv_file:
writer = csv.writer(csv_file)
# add the header
writer.writerow(['parameter', 'temperature', 'token_limit', 'prompt', 'response'])
for response in responses:
writer.writerow([response.parameter, response.temperature, response.token_limit, response.prompt,
response.response])
def docker_execute():
# use Docker environment variable when running the container
file_type = os.getenv("FILETYPE", "json") # default json
file_type = "yaml" if file_type == "yaml" else "json"
build_all_specs_eval(file_type) # uses the eval folder for the Docker containers
def predefined_inputs(file_type):
build_all_specs(file_type)
def custom_inputs(file_type):
build_all_specs_eval(file_type)
if __name__ == '__main__':
#docker_execute()
# predefined_inputs("yaml")
# custom_inputs("yaml") # change the file_type to your choosing
build_openai_csv()
| [] |
2024-01-10 | c2194/taskbook | flask~wx.py |
from uiautomation import WindowControl,MenuControl
import uiautomation as automation
import openai
import getapi
import mate
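# WeChat group bot driven through UI automation: it watches the message list of the
# '深圳内部小分队' window, answers posts that @-mention the bot via the input box, and routes
# recognised commands to getapi/mate (the GPT-3 reply path is currently stubbed out).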
openai.api_key = "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"
wx = WindowControl( Name='深圳内部小分队')
print(wx)
# Switch focus to the WeChat window
wx.SwitchToThisWindow()
def call_back(result,json_obj,state):
outstr = result["outstr"]
# Replace every \n in outstr with {Shift}{Enter} so newlines do not send the message early
outstr = outstr.replace("\n","{Shift}{Enter}")
wx.EditControl(Name="输入").SendKeys(outstr)
wx.ButtonControl(Name="发送(S)").Click()
def ask(question, context=None):
if context is None:
prompt = question
else:
prompt = f"{context}\n{question}"
try:
response = openai.Completion.create(
#engine="davinci",
engine="text-davinci-003",
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
frequency_penalty=0.5,
presence_penalty=0.5,
)
if context is None:
return response.choices[0].text.strip()
else:
return response.choices[0].text.replace(context, "").strip()
except:
pass
context = None
def chat(question,context=None):
answer = ask(question, context=context)
context = answer
return answer
#hw = wx.ListControl(Name='会话')
context = None
lodmess = []
bot_name = "机器人-小哆"
def get_message(chobj):  # extract [sender name, message text] from a chat-list item
ilist = chobj.GetFirstChildControl().GetChildren()
if len(ilist) ==0:
return [0]
else:
return [chobj.GetFirstChildControl().GetChildren()[0].Name,chobj.Name]
context = "你好"
get_api = getapi.get_api(call_back)
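# Main polling loop: read the newest entry of the message list, handle only posts that
# @-mention the bot, and use lodmess to avoid replying to the same message twice.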
while True:
try:
last_msg = wx.ListControl(Name = "消息").GetChildren()
sendName = get_message(last_msg[-1])[0]
sendMsg = get_message(last_msg[-1])[1]
# Check whether sendMsg starts with "@" + bot_name
if sendMsg.startswith("@"+bot_name):
print("bot_name")
# Strip the "@" + bot_name mention
sendMsg = sendMsg.replace("@"+bot_name,"")
# Skip the message if it has already been handled (present in lodmess)
if sendMsg in lodmess:
print("已经回复过了")
continue  # already handled, skip
else:
lodmess.append(sendMsg)
# Keep lodmess at most 10 entries long by dropping the oldest
if len(lodmess) > 10:
lodmess.pop(0)
# If so, call GPT
# Get the reply
reply = "亲爱的"+sendName+"。。。"
# Send the reply message
wx.EditControl(Name="输入").SendKeys(reply)
wx.ButtonControl(Name="发送(S)").Click()
print("回复成功")
#sendMsg = '\u2005你们好'
# Strip the \u2005 separator WeChat inserts after an @mention
sendMsg = sendMsg.replace('\u2005','')
tcommand = mate.get_mate(sendMsg)
if tcommand != False:
get_api.add_get_api(tcommand[0])
else:
#getchat = chat(sendMsg,context)
#wx.EditControl(Name="输入").SendKeys(getchat)
wx.EditControl(Name="输入").SendKeys("今天墙太高,翻的超时了!")
wx.ButtonControl(Name="发送(S)").Click()
pass
except:
pass | [
"你好\nPLACEHOLDER"
] |
2024-01-10 | c2194/taskbook | flask~testgpt2.py | import openai
import os
openai.api_key = os.getenv("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")
model_name = 'gpt-3.5-turbo-0613'
# Load the prompt file and read the prompt
#with open('./sum.prompt', 'r', encoding='utf-8') as f:
# prompt = f.read()
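# Toy prompting demo: gpt_sum() asks gpt-3.5-turbo to add two integers by sending them as
# "\input: a, b" and parsing the "\output: ..." prefix from the reply.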
def gpt_sum(val1: int, val2: int):
# First give GPT the task prompt as the system role
messages = [{'role': 'system', 'content': ""}]
# Simulate GPT's acknowledgement so later turns can ask questions directly as the user role
messages.append({'role': 'assistant', "content": 'yes'})
# Ask the question as the user role
user_input = f"\\input: {val1}, {val2}"
messages.append({'role': 'user', 'content': user_input})
gpt_resp = openai.ChatCompletion.create(
model=model_name,
messages=messages,
timeout=30
)
if gpt_resp.choices and gpt_resp.choices[0]:
resp_str: str = gpt_resp.choices[0].message.content
if resp_str and resp_str.startswith('\\output: '):
return int(resp_str[len('\\output: '):].strip())
raise Exception(
f'Failed to get available response from gpt, resp_str={resp_str}')
if __name__ == '__main__':
terminal_input = input("Please give two integers, split by comma: ")
inputs: list[str] = terminal_input.split(',')
if len(inputs) < 2:
raise Exception("Invalid input, Please give two integers, split by comma")
val1 = int(inputs[0].strip())
val2 = int(inputs[1].strip())
print(f"result = {gpt_sum(val1, val2)}")
| [
"yes",
"\\input: PLACEHOLDER, PLACEHOLDER"
] |
2024-01-10 | kalvin807/pigeonGPT | pigeongpt~labeler.py | import os
import re
import openai
import structlog
import tiktoken
from langchain import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from tenacity import retry, stop_after_attempt, wait_random_exponential
from pigeongpt.provider.gmail import Email
log: structlog.stdlib.BoundLogger = structlog.get_logger()
openai.api_key = os.getenv("OPENAI_API_KEY")
enc = tiktoken.get_encoding("cl100k_base")
LABELS = ["Alert", "Bill", "Booking", "Newsletter", "Promotion"]
MAX_EMAIL_TOKEN_USAGE = 4000
CHAT_GPT_MODEL = "gpt-3.5-turbo"
llm = ChatOpenAI(temperature=0, model_name=CHAT_GPT_MODEL) # type: ignore
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=2000, chunk_overlap=50
)
summary_prompt = PromptTemplate(
template="Please read the following text and provide a concise summary that captures the main ideas, while retaining important context, warnings, and other key details that would be relevant for classifying the email: \n\n{text}.",
input_variables=["text"],
)
combine_summary_prompt = PromptTemplate(
template="Write summary based on the data below, start with the type of article: \n\n{text}",
input_variables=["text"],
)
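# The two prompts above drive a map-reduce summarisation: each chunk is summarised with
# summary_prompt, then the partial summaries are merged with combine_summary_prompt
# (see summarise_content below).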
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(3))
def get_gpt_response(prompt, temperature=0.0):
return openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt},
],
temperature=temperature,
)
def estimate_token_size(prompt):
return len(enc.encode(prompt))
def split_by_size(content_string, size=1000):
return [content_string[i : i + size] for i in range(0, len(content_string), size)]
def summarise_content(content_string: str):
splitted_texts = text_splitter.split_text(content_string)
docs = [Document(page_content=t) for t in splitted_texts]
chain = load_summarize_chain(
llm,
map_prompt=summary_prompt,
combine_prompt=combine_summary_prompt,
chain_type="map_reduce",
verbose=True,
)
return chain.run(docs)
def make_label_prompt(email: Email) -> str:
return f"""
Given the following email:
Subject: {email.subject}
Sender: {email.sender}
Content: {email.content}
Determine the nature of this email.
The possible labels are {",".join(f"'{label}'" for label in LABELS)} or 'unknown'.
Only reply the label and wrap the label in '###'.
"""
def preprocess_email(email: Email):
prompt = make_label_prompt(email)
if estimate_token_size(prompt) > MAX_EMAIL_TOKEN_USAGE:
email.content = summarise_content(email.content)
return email
def label_email(email: Email):
processed_email = preprocess_email(email)
prompt = make_label_prompt(processed_email)
response = get_gpt_response(prompt)
log.debug("label email", prompt=prompt, response=response)
response_message = response.choices[0].message.content.strip()
match = re.search(r"###(.*)###", response_message)
if match:
label = match.group(1)
if label not in LABELS:
return "unknown"
return label
else:
raise ValueError("No match found")
| [
"Write summary based on the data below, start with the type of article: \n\n{text}",
"Please read the following text and provide a concise summary that captures the main ideas, while retaining important context, warnings, and other key details that would be relevant for classifying the email: \n\n{text}."
] |
2024-01-10 | bharath-1311/chat_bot | pdf_chatbot.py | from langchain.document_loaders import PyPDFLoader, OnlinePDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
import pickle
import json
import os
import pypdf
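# PDF Q&A chatbot: split the PDF into 500-character chunks, embed them with OpenAI embeddings,
# index them in Pinecone, and answer questions with a conversational retrieval chain over the
# top-2 matching chunks.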
loader = PyPDFLoader("gao-21-26sp.pdf")
data = loader.load()
text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs=text_splitter.split_documents(data)
from langchain.embeddings.openai import OpenAIEmbeddings
with open('config.json') as f:
config = json.load(f)['embedding']
embeddings = OpenAIEmbeddings(openai_api_key=config['openai'])
from langchain.vectorstores import Pinecone
import pinecone
with open('config.json') as f:
config = json.load(f)['vectordb']
PINECONE_API_KEY = os.environ.get('PINECONE_API_KEY', config["pinecone_key"])
PINECONE_API_ENV = os.environ.get('PINECONE_API_ENV', config["pinecone_env"])
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_API_ENV
)
index_name = config['pinecone_index']
docsearch=Pinecone.from_texts([t.page_content for t in docs], embeddings, index_name=index_name)
db = Pinecone.from_existing_index(index_name, embeddings)
# Load the pickled local Llama model; it is assumed here to be the LLM used by the retrieval chain below
with open('Llama-13B-GGUF.pkl', 'rb') as f:
llm = pickle.load(f)
docs=text_splitter.split_documents(data)
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
from langchain.chains import ConversationalRetrievalChain
qa_chain = ConversationalRetrievalChain.from_llm(
llm,
db.as_retriever(search_kwargs={'k': 2}),
return_source_documents=True)
import sys
chat_history = []
while True:
query = input('Prompt: ')
if query.lower() in ["exit", "quit", "q"]:
print('Exiting')
sys.exit()
result = qa_chain({'question': query, 'chat_history': chat_history})
print('Answer: ' + result['answer'])
chat_history.append((query, result['answer']))
| [] |
2024-01-10 | luotao21/nas-tools | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
彩虹岛签到
如果填写openai key则调用chatgpt获取答案
否则随机
"""
# Site URL to match; every handler class sets this to its own site URL
site_url = "chdbits.co"
# Marker meaning we have already signed in today
_sign_regex = ['今天已经签过到了']
# Markers meaning the sign-in succeeded (to be extended)
_success_regex = ['\\d+点魔力值']
# Cache of known correct answers for later lookup
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
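# Flow: fetch the bakatest page, parse the question and its options, try a locally cached
# answer first, otherwise ask ChatGPT (falling back to a random option), submit the form,
# and cache the answer on success.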
@classmethod
def match(cls, url):
"""
根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可
:param url: 站点Url
:return: 是否匹配,如匹配则会调用该类的signin方法
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
执行签到操作
:param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息
:return: 签到结果信息
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
# Create the directory for cached correct answers
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
# Check whether we have already signed in today
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://chdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
# Not signed in yet: parse the HTML
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
# Extract the question and the answer options from the page
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
# Extract the question text with a regex
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
# Look up previously stored answers
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
# Look up the locally cached answer for this question
question_answer = exits_answers[question_str]
# question_answer is stored as a list
if not isinstance(question_answer, list):
question_answer = [question_answer]
# A cached correct answer exists locally; map it onto the current options
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
# Submit the sign-in
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
# Default to a random answer; if GPT returns one, submit that instead
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
# Assemble the question for GPT
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
# Ask ChatGPT for the answer
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatgpt返回结果 {answer}")
# Process the answer returned by ChatGPT
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
# Extract the numbers from the reply with a regex
answer_nums = list(map(int, re.findall(r"\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
# If the returned number is a valid option id, use it directly as the answer
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
# Submit the sign-in
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
签到请求
questionid: 450
choice[]: 8
choice[]: 4
usercomment: 此刻心情:无
submit: 提交
多选会有多个choice[]....
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://chdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
# Check whether the sign-in succeeded
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"签到成功")
if exits_answers and question:
# On success, write the answer to the local cache
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
签到成功写入本地文件
"""
try:
exits_answers[question] = answer
# Serialise the answer data
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
2024-01-10 | ziliangpeng/naoshima | ml~nlp~wiki-llama.py | import os
import pickle
from llama_index import SimpleDirectoryReader, Document, VectorStoreIndex, TreeIndex
from loguru import logger
from llama_index import ServiceContext
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms.base import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.llms.openai import OpenAI
from llama_index.llms.mock import MockLLM
from typing import (
Any,
Sequence,
)
from llama_index.llms.base import (
ChatMessage,
ChatResponse,
)
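# Index locally crawled Wikipedia pages with llama_index using a local embedding model and a
# stubbed LLM (MockLLM / ZLLM), so no real OpenAI completions are needed while building or
# querying the index.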
def load_documents():
naoshima_dir = os.environ["NAOSHIMA"]
wiki_pages_url = os.path.join(naoshima_dir, "crawler/wiki-pages/en")
documents = SimpleDirectoryReader(wiki_pages_url).load_data()
logger.info(f"Loaded {len(documents)} documents")
return documents
class O:
pass
class ZLLM(OpenAI):
# @llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
logger.info("Chatting...")
# logger.info(messages)
logger.info(kwargs)
resp = O()
resp.message = O()
resp.message.content = "Hello"
return resp
def main():
logger.info("Starting...")
cached_openai_key = os.environ.get("OPENAI_API_KEY")
# invalidate the key so that the service context will use the local model
os.environ["OPENAI_API_KEY"] = "false"
docs = load_documents()
if False and os.path.isfile("wiki-llama.pkl"):
new_index = pickle.load(open("wiki-llama.pkl", "rb"))
logger.info("Index loaded")
else:
# Can use either MockLLM or ZLLM
llm = MockLLM()
llm = ZLLM()
service_context = ServiceContext.from_defaults(embed_model="local", llm=llm)
# idx = VectorStoreIndex
idx = TreeIndex
new_index = idx.from_documents(docs, service_context=service_context)
logger.info("Index created")
# pickle.dump(new_index, open("wiki-llama.pkl", "wb"))
logger.info("Index saved")
# set Logging to DEBUG for more detailed outputs
query_engine = new_index.as_query_engine(service_context=service_context)
# print(cached_openai_key)
# os.environ["OPENAI_API_KEY"] = cached_openai_key
while True:
# query = input("Enter your question: ")
query = "What is Python?"
if query == "exit":
break
response = query_engine.query(query)
print(response)
break
if __name__ == "__main__":
main()
| [] |
2024-01-10 | Gamma-Software/AppifyAi | generative_app~core~chains~conversational_retrieval_over_code.py | """Chain for chatting with a vector database."""
from __future__ import annotations
import inspect
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union
from pydantic import Extra
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseRetriever, Document
from langchain.chains.conversational_retrieval.base import (
CHAT_TURN_TYPE,
_get_chat_history,
)
from chains.prompt import (
CONDENSE_QUESTION_CODE_PROMPT,
PROMPT,
prompt_missing_imports_check,
)
from utils.security import analyze_security
from chains.parser import parse_code
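# Variant of LangChain's conversational retrieval chain specialised for code generation: it
# condenses the user request, retrieves relevant docs, asks the LLM for code plus an explanation,
# strips any __main__ entrypoint, runs a security check, and patches missing imports with a
# dedicated LLM chain.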
def remove_entrypoint(code):
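# Remove an "if __name__ == '__main__':" guard and dedent the lines under it so the body runs at top level.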
lines = code.split("\n")
modified_lines = []
entrypoint_found = False
for line in lines:
if (
line.strip() == 'if __name__ == "__main__":'
or line.strip() == "if __name__ == '__main__':"
):
entrypoint_found = True
elif entrypoint_found:
modified_lines.append(line.lstrip())
else:
modified_lines.append(line)
modified_code = "\n".join(modified_lines)
modified_code = modified_code.rstrip()
return modified_code
class BaseConversationalRetrievalCodeChain(Chain):
"""Chain for chatting with an index. Given the chat history,
the current code and a question, return the answer."""
combine_docs_chain: BaseCombineDocumentsChain
question_generator: LLMChain
missing_imports_chain: LLMChain
output_key: List[str] = ["code", "explanation"]
return_source_documents: bool = False
return_generated_question: bool = False
return_revision_request: bool = False
get_chat_history: Optional[Callable[[CHAT_TURN_TYPE], str]] = None
"""Return the source documents."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
allow_population_by_field_name = True
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return ["question"]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = ["code", "explanation"]
if self.return_source_documents:
_output_keys = _output_keys + ["source_documents"]
if self.return_generated_question:
_output_keys = _output_keys + ["generated_question"]
return _output_keys
@abstractmethod
def _get_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
request = inputs["question"]
new_request = self.question_generator.run(
question=request, callbacks=_run_manager.get_child()
)
new_request = None if "None" in new_request else new_request
accepts_run_manager = (
"run_manager" in inspect.signature(self._get_docs).parameters
)
if new_request is not None:
if accepts_run_manager:
docs = self._get_docs(new_request, inputs, run_manager=_run_manager)
else:
docs = self._get_docs(new_request, inputs) # type: ignore[call-arg]
else:
docs = []
new_inputs = inputs.copy()
# Remove any mentions of streamlit or python from the question
if new_request is not None:
new_request = new_request.replace("streamlit", "").replace("python", "")
get_chat_history = self.get_chat_history or _get_chat_history
chat_history_str = get_chat_history(inputs["chat_history"])
new_inputs["chat_history"] = chat_history_str
answer = self.combine_docs_chain.run(
input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
)
code, expl = parse_code(answer)
is_code_not_safe = False
if code is not None:
# Run check code
is_code_not_safe = analyze_security(code)
if not is_code_not_safe:
code = remove_entrypoint(code)
# Check if imports are missing
code_checked = self.missing_imports_chain.run(code=code)
code_checked = None if code_checked == "None" else code_checked
if code_checked is not None:
code = code_checked
output: Dict[str, Any] = {self.output_key[0]: code, self.output_key[1]: expl}
if self.return_source_documents:
output["source_documents"] = docs
if self.return_generated_question:
output["generated_question"] = new_request
if self.return_revision_request:
output["revision_request"] = is_code_not_safe
return output
@abstractmethod
async def _aget_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
request = inputs["question"]
new_request = await self.question_generator.arun(
question=request, callbacks=_run_manager.get_child()
)
new_request = None if "None" in new_request else new_request
accepts_run_manager = (
"run_manager" in inspect.signature(self._aget_docs).parameters
)
if new_request is not None:
if accepts_run_manager:
docs = await self._aget_docs(
new_request, inputs, run_manager=_run_manager
)
else:
docs = await self._aget_docs(new_request, inputs) # type: ignore[call-arg]
else:
docs = []
new_inputs = inputs.copy()
# Remove any mentions of streamlit or python from the question
if new_request is not None:
new_request = new_request.replace("streamlit", "").replace("python", "")
get_chat_history = self.get_chat_history or _get_chat_history
chat_history_str = get_chat_history(inputs["chat_history"])
new_inputs["chat_history"] = chat_history_str
answer = await self.combine_docs_chain.arun(
input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
)
code, expl = parse_code(answer)
is_code_not_safe = True
if code is not None:
# Run check code
is_code_not_safe = analyze_security(code)
# Check if imports are missing
if not is_code_not_safe:
code = remove_entrypoint(code)
# Check if imports are missing
code_checked = self.missing_imports_chain.run(code=code)
code_checked = None if code_checked == "None" else code_checked
if code_checked is not None:
code = code_checked
output: Dict[str, Any] = {self.output_key[0]: code, self.output_key[1]: expl}
if self.return_source_documents:
output["source_documents"] = docs
if self.return_generated_question:
output["generated_question"] = new_request
if self.return_revision_request:
output["revision_request"] = is_code_not_safe
return output
def save(self, file_path: Union[Path, str]) -> None:
if self.get_chat_history:
raise ValueError("Chain not savable when `get_chat_history` is not None.")
super().save(file_path)
class ConversationalRetrievalCodeChain(BaseConversationalRetrievalCodeChain):
"""Chain for chatting with an index."""
retriever: BaseRetriever
"""Index to connect to."""
max_tokens_limit: Optional[int] = None
"""If set, restricts the docs to return from store based on tokens, enforced only
for StuffDocumentChain"""
def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
num_docs = len(docs)
if self.max_tokens_limit and isinstance(
self.combine_docs_chain, StuffDocumentsChain
):
tokens = [
self.combine_docs_chain.llm_chain.llm.get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
docs = self.retriever.get_relevant_documents(
question, callbacks=run_manager.get_child()
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
docs = await self.retriever.aget_relevant_documents(
question, callbacks=run_manager.get_child()
)
return self._reduce_tokens_below_limit(docs)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
retriever: BaseRetriever,
condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_CODE_PROMPT,
chain_type: str = "stuff",
verbose: bool = False,
condense_question_llm: Optional[BaseLanguageModel] = None,
missing_imports_llm: Optional[BaseLanguageModel] = None,
combine_docs_chain_kwargs: Optional[Dict] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseConversationalRetrievalCodeChain:
"""Load chain from LLM."""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {"prompt": PROMPT}
doc_chain = load_qa_chain(
llm,
chain_type=chain_type,
verbose=verbose,
callbacks=callbacks,
**combine_docs_chain_kwargs,
)
_llm = condense_question_llm or llm
condense_question_chain = LLMChain(
llm=_llm,
prompt=condense_question_prompt,
verbose=verbose,
callbacks=callbacks,
)
_llm_3 = missing_imports_llm or llm
missing_imports_chain = LLMChain(
llm=_llm_3, prompt=prompt_missing_imports_check
)
return cls(
retriever=retriever,
combine_docs_chain=doc_chain,
question_generator=condense_question_chain,
missing_imports_chain=missing_imports_chain,
callbacks=callbacks,
**kwargs,
)
| [] |
2024-01-10 | Gamma-Software/AppifyAi | generative_app~core~chains~doc_retriever.py | import shutil
import os
import sys
from typing import List
import streamlit as st
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import Language
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers.language import LanguageParser
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from chromadb.utils import embedding_functions
from langchain.document_loaders import DirectoryLoader
from langchain.document_loaders import UnstructuredMarkdownLoader
import chromadb
import uuid
from chromadb.config import Settings
import tiktoken
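# Build (or load) a Chroma vector store over the Streamlit documentation: markdown pages and API
# example scripts are cloned from streamlit/docs, chunked, embedded with OpenAI embeddings, and
# stored either on disk ("local" mode) or in a Chroma server ("docker" mode).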
def num_tokens_from_string(string: str, encoding_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def num_tokens_from_string_list(docs: List[Document], encoding_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = 0
max_tokens = 0
for doc in docs:
if len(encoding.encode(doc.page_content)) > max_tokens:
max_tokens = len(encoding.encode(doc.page_content))
num_tokens += len(encoding.encode(doc.page_content))
return num_tokens, max_tokens
def load_streamlit_doc_retriever(
openai_api_key: str,
chroma_server_host="localhost",
chroma_server_port="8000",
mode="docker",
) -> VectorStoreRetriever:
if openai_api_key is None:
raise Exception("Please provide an OpenAI API key.")
# Check if the Chroma database exists
if mode == "local":
if not os.path.exists(".doc_db/streamlit_chroma_db"):
raise Exception(
"The Chroma database for Streamlit does not exist. "
"Please run the script `doc_retriever.py` to create it."
)
# load from disk
if mode == "local":
retriever = Chroma(
persist_directory=".doc_db/streamlit_chroma_db",
embedding_function=OpenAIEmbeddings(openai_api_key=openai_api_key),
).as_retriever()
if mode == "docker":
client = chromadb.Client(
Settings(
chroma_api_impl="rest",
chroma_server_host=chroma_server_host,
chroma_server_http_port=chroma_server_port,
)
)
# tell LangChain to use our client and collection name
retriever = Chroma(
client=client,
collection_name="streamlit_doc",
embedding_function=OpenAIEmbeddings(openai_api_key=openai_api_key),
).as_retriever()
retriever.search_kwargs["distance_metric"] = "cos"
retriever.search_kwargs["fetch_k"] = 4
retriever.search_kwargs["maximal_marginal_relevance"] = True
retriever.search_kwargs["k"] = 4
return retriever
def is_docker_container_running(container_name: str) -> bool:
"""Returns True if the Docker container is running, False otherwise."""
if os.system(f"docker ps | grep {container_name} > /dev/null") == 0:
return True
print(
f"The Docker container {container_name} is not running."
"Please run `docker-compose up -d` to start it."
)
return False
def generate_retriever(
openai_api_key: str,
chroma_server_host="localhost",
chroma_server_port="8000",
mode="docker",
):
if mode == "local":
# Check if the Chroma database exists
if os.path.exists(".doc_db/streamlit_chroma_db"):
try:
choice = input(
"The Chroma database for Streamlit already exists. "
"Press Yes to delete it and create a new one. "
"Press Ctrl+C or enter anything else to cancel."
)
if choice in ["y", "Y", "yes", "Yes", "YES"]:
shutil.rmtree(".doc_db/streamlit_chroma_db")
else:
raise KeyboardInterrupt
except KeyboardInterrupt:
print("Cancelled.")
exit(0)
print("=== Clone the latest version of the Streamlit Doc repo...")
if os.path.exists("docs"):
shutil.rmtree("docs")
os.system(
"git clone https://github.com/streamlit/docs.git --branch main --depth 1 docs"
)
print(
"=== Create documents containing the Streamlit code base and split them into chunks..."
)
steamlit_doc_loader = DirectoryLoader(
"docs",
glob="content/**/*.md",
loader_cls=UnstructuredMarkdownLoader,
show_progress=True,
use_multithreading=True,
)
steamlit_doc = steamlit_doc_loader.load()
text_splitter = RecursiveCharacterTextSplitter(
# Set a really small chunk size, just to show.
chunk_size=2000,
chunk_overlap=50,
)
steamlit_doc_splitted = text_splitter.split_documents(steamlit_doc)
steamlit_doc_splitted
# Load python exemple source code
loader = GenericLoader.from_filesystem(
"docs/",
glob="python/api-examples-source/**/*.py",
suffixes=[".py"],
parser=LanguageParser(language=Language.PYTHON),
show_progress=True,
)
streamlit_code_example_doc = loader.load()
python_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.PYTHON, chunk_size=500, chunk_overlap=50
)
streamlit_code_example_doc_splitted = python_splitter.split_documents(
streamlit_code_example_doc
)
token, max_token = num_tokens_from_string_list(steamlit_doc_splitted, "cl100k_base")
print(
f"Number of tokens in source code: {token}, max "
f"tokens in source code: {max_token}, price: {token * 0.0001/1000}"
)
token, max_token = num_tokens_from_string(
streamlit_code_example_doc_splitted, "cl100k_base"
)
print(
f"Number of tokens in source code: {token}, max "
f"tokens in source code: {max_token}, price: {token * 0.0001/1000}"
)
print("=== Create embeddings and save them into Chroma Database for later use...")
embeddings_model = OpenAIEmbeddings(openai_api_key=openai_api_key)
docs = steamlit_doc_splitted + streamlit_code_example_doc_splitted
if mode == "local":
vectorstore = Chroma.from_documents(
documents=docs,
embedding=embeddings_model,
persist_directory=".doc_db/streamlit_chroma_db",
)
vectorstore.persist()
if mode == "docker":
openai_ef = embedding_functions.OpenAIEmbeddingFunction(
api_key=openai_api_key, # Replace with your own OpenAI API key
model_name="text-embedding-ada-002",
)
client = chromadb.Client(
Settings(
chroma_api_impl="rest",
chroma_server_host=chroma_server_host,
chroma_server_http_port=chroma_server_port,
)
)
print(client.heartbeat())
client.reset()  # resets the database
collection = client.create_collection(
"streamlit_doc", embedding_function=openai_ef
)
for doc in docs:
collection.add(
ids=[str(uuid.uuid1())],
metadatas=doc.metadata,
documents=doc.page_content,
)
print("=== Remove Streamlit codesource.")
if os.path.exists("streamlit"):
shutil.rmtree("streamlit")
if __name__ == "__main__":
if len(sys.argv) < 3:
print("OpenAI API key is missing! Please add it in argument.")
print("Or you are missing the mode 'docker' or 'local'.")
exit(1)
openai_api_key = sys.argv[1]
generate_retriever(
openai_api_key,
mode=sys.argv[2],
chroma_server_host=st.secrets["chroma"]["host"],
chroma_server_port=st.secrets["chroma"]["port"],
)
| [] |
2024-01-10 | jingjingxinshang/dify | api~tasks~enable_segment_to_index_task.py | import datetime
import logging
import time
import click
from celery import shared_task
from langchain.schema import Document
from werkzeug.exceptions import NotFound
from core.index.index import IndexBuilder
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment
@shared_task(queue='dataset')
def enable_segment_to_index_task(segment_id: str):
"""
Async enable segment to index
:param segment_id:
Usage: enable_segment_to_index_task.delay(segment_id)
"""
logging.info(click.style('Start enable segment to index: {}'.format(segment_id), fg='green'))
start_at = time.perf_counter()
segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
if not segment:
raise NotFound('Segment not found')
if segment.status != 'completed':
return
indexing_cache_key = 'segment_{}_indexing'.format(segment.id)
try:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
dataset = segment.dataset
if not dataset:
logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
return
dataset_document = segment.document
if not dataset_document:
logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
return
# save vector index
index = IndexBuilder.get_index(dataset, 'high_quality')
if index:
index.add_texts([document], duplicate_check=True)
# save keyword index
index = IndexBuilder.get_index(dataset, 'economy')
if index:
index.add_texts([document])
end_at = time.perf_counter()
logging.info(click.style('Segment enabled to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
except Exception as e:
logging.exception("enable segment to index failed")
segment.enabled = False
segment.disabled_at = datetime.datetime.utcnow()
segment.status = 'error'
segment.error = str(e)
db.session.commit()
finally:
redis_client.delete(indexing_cache_key)
| [] |
2024-01-10 | ezzye/bananabread | notes~function_calls~examples~vanilla.py | #!/usr/bin/env python
import langchain
import openai
import json
from requests_html import HTMLSession
import requests
# Environment Variables
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY', 'YourAPIKeyIfNotSet')
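# Weather lookup via OpenAI function calling: the model extracts a structured location from the
# user's question, Open-Meteo's geocoding API turns it into latitude/longitude, and a second
# function call drives the Open-Meteo forecast request.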
def get_lat_long_url(loc, language="en", form="json"):
place = loc.replace(" ", "+")
return f"https://geocoding-api.open-meteo.com/v1/search?name={place}&count=10&language={language}&format={form}"
def get_weather_url(lat, long):
return f"https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={long}&daily=temperature_2m_max,temperature_2m_min,sunrise,sunset,uv_index_max,precipitation_sum,rain_sum,showers_sum,precipitation_hours,precipitation_probability_max,windspeed_10m_max,windgusts_10m_max,winddirection_10m_dominant&windspeed_unit=mph&timezone=Europe%2FLondon"
def get_request(url, sesh=False):
headers = {
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'Upgrade-Insecure-Requests': '1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Language': 'en-US,en;q=0.9',
'Accept-Encoding': 'gzip, deflate',
}
web_response = None # Initialize web_response to None
try:
if sesh:
session = HTMLSession()
web_response = session.get(url, headers=headers)
session.close()
else:
web_response = requests.get(url, headers=headers)
except requests.exceptions.RequestException as e:
print(e)
return web_response
def get_latitude_longitude(location):
"""Get the current lat and longitude for a given location name"""
url = get_lat_long_url(location)
lat_json = get_request(url).json()
return lat_json
def get_weather(lati, longi, sesh=False):
url = get_weather_url(lati, longi)
request_result = get_request(url)
return request_result.json()
def make_location_dict():
keys = ["name", "country", "admin3", "admin2", "admin1"]
args = eval(ai_response_message['function_call']['arguments'])
return {key: args.get(key, None) for key in keys}
def make_location_name_string(location_dict):
return ", ".join([v for k, v in location_dict.items() if v is not None])
# APIs for weather see https://open-meteo.com/en/docs eg
# https://api.open-meteo.com/v1/forecast?latitude=51.56246&longitude=-0.07401&daily=temperature_2m_max,temperature_2m_min,sunrise,sunset,uv_index_max,precipitation_sum,rain_sum,showers_sum,precipitation_hours,precipitation_probability_max,windspeed_10m_max,windgusts_10m_max,winddirection_10m_dominant&windspeed_unit=mph&timezone=Europe%2FLondon
# {
# "latitude": 51.56,
# "longitude": -0.08000016,
# "generationtime_ms": 0.9769201278686523,
# "utc_offset_seconds": 3600,
# "timezone": "Europe/London",
# "timezone_abbreviation": "BST",
# "elevation": 30.0,
# "daily_units": {
# "time": "iso8601",
# "temperature_2m_max": "°C",
# "temperature_2m_min": "°C",
# "sunrise": "iso8601",
# "sunset": "iso8601",
# "uv_index_max": "",
# "precipitation_sum": "mm",
# "rain_sum": "mm",
# "showers_sum": "mm",
# "precipitation_hours": "h",
# "precipitation_probability_max": "%",
# "windspeed_10m_max": "mp/h",
# "windgusts_10m_max": "mp/h",
# "winddirection_10m_dominant": "°"
# }
# for geo location see https://open-meteo.com/en/docs/geocoding-api
# eg https://geocoding-api.open-meteo.com/v1/search?name=Stoke+Newington&count=10&language=en&format=json
# {
# "results": [
# {
# "id": 2636843,
# "name": "Stoke Newington",
# "latitude": 51.56246,
# "longitude": -0.07401,
# "elevation": 28.0,
# "feature_code": "PPLX",
# "country_code": "GB",
# "admin1_id": 6269131,
# "admin2_id": 2648110,
# "admin3_id": 3333148,
# "timezone": "Europe/London",
# "country_id": 2635167,
# "country": "United Kingdom",
# "admin1": "England",
# "admin2": "Greater London",
# "admin3": "Hackney"
# },
# {
# "id": 2636844,
# ...
# }
# ],
# "generationtime_ms": 0.6699562
# }
def find_most_matching_dict(checking_dict, dict_list):
max_matches = 0
most_matching_dict = None
for d in dict_list:
matches = sum(k in checking_dict and checking_dict[k] == v for k, v in d.items())
if matches > max_matches:
max_matches = matches
most_matching_dict = d
if max_matches < 2:
print(f"max_matches {max_matches} not sure if this the location you want")
most_matching_dict = None
return most_matching_dict
function_descriptions = [
{
"name": "get_current_latitude_and_longitude",
"description": "Get the current latitude and longitude for a given location name",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "The name of the location to get the latitude and longitude for. Eg 'Aston'",
},
"language": {
"type": "string",
"description": "ISO 639-1 code for the language to use. Eg 'en'",
},
"format": {
"type": "string",
"description": "format file extension Eg 'json' or 'csv",
},
"country": {
"type": "string",
"description": "name of the country in full NOT code to search in. Eg 'United Kingdom'. This can be inferred from country code e.g. 'GB' or 'UK'",
},
"admin1": {
"type": "string",
"description": "name of administration level 1 Eg 'England'. This can be inferred from admin2 e.g. 'West Midlands' OR can be inferred from admin3 e.g. 'Birmingham' OR can be inferred from country e.g. 'United Kingdom' as 'Birmingham' in 'England' in'United Kingdom'",
},
"admin2": {
"type": "string",
"description": "name of administration level 2 Eg 'West Midlands' This can be inferred from admin3 e.g. 'Birmingham' OR can be inferred from country e.g. 'United Kingdom' as 'Birmingham', in 'West Midlands', in 'United Kingdom'",
},
"admin3": {
"type": "string",
"description": "name of administration level 3 Eg 'Birmingham' This can be inferred from admin2 e.g. 'West Midlands' OR can be inferred from country e.g. 'United Kingdom' as 'Birmingham', in 'West Midlands', in 'United Kingdom'",
},
},
},
},
{
"name": "get_current_weather",
"description": "Get the current weather for a latitude and longitude.",
"parameters": {
"type": "object",
"properties": {
"latitude": {
"type": "number",
"description": "The latitude of the location to get the weather for. Eg 51.56246",
},
"longitude": {
"type": "number",
"description": "The longitude of the location to get the weather for. Eg -0.07401",
},
},
},
},
]
user_query = "What's the weather like in Dalston, Hackney, London UK?"
response = openai.ChatCompletion.create(
model="gpt-4-0613",
# This is the chat message from the user
messages=[{"role": "user", "content": user_query}],
functions=function_descriptions,
function_call="auto",
)
ai_response_message = response["choices"][0]["message"]
location_details_dict = make_location_dict()
latitude_longitude_json = get_latitude_longitude(location_details_dict['name'])
most_matching_result = find_most_matching_dict(location_details_dict, latitude_longitude_json['results'])
latitude = most_matching_result['latitude']
longitude = most_matching_result['longitude']
user_query = f"What's the weather like at lattitude {latitude} and longitude {longitude}?"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
# This is the chat message from the user
messages=[{"role": "user", "content": user_query}],
functions=function_descriptions,
function_call="auto",
)
ai_response_message2 = response["choices"][0]["message"]
latitude_num = eval(ai_response_message2['function_call']['arguments']).get("latitude")
longitude_num = eval(ai_response_message2['function_call']['arguments']).get("longitude")
weather_json = get_weather(latitude_num, longitude_num)
temperature_2m_max = weather_json['daily']['temperature_2m_max'][0]
temperature_2m_min = weather_json['daily']['temperature_2m_min'][0]
sunrise = weather_json['daily']['sunrise'][0]
sunset = weather_json['daily']['sunset'][0]
uv_index_max = weather_json['daily']['uv_index_max'][0]
precipitation_sum = weather_json['daily']['precipitation_sum'][0]
rain_sum = weather_json['daily']['rain_sum'][0]
showers_sum = weather_json['daily']['showers_sum'][0]
precipitation_hours = weather_json['daily']['precipitation_hours'][0]
precipitation_probability_max = weather_json['daily']['precipitation_probability_max'][0]
windspeed_10m_max = weather_json['daily']['windspeed_10m_max'][0]
print()
print()
print(f"The weather for: {make_location_name_string(location_details_dict)}")
print(f"latitude {latitude_num} longitude {longitude_num}")
print("-----------------------------------------------------------------------")
print(f"temperature_2m_max {temperature_2m_max}°C \ntemperature_2m_min {temperature_2m_min}°C \n\nsunrise {sunrise} \nsunset {sunset} \n\nuv_index_max {uv_index_max} \n\nprecipitation_sum {precipitation_sum}mm \nrain_sum {rain_sum}mm \nshowers_sum {showers_sum}mm \nprecipitation_hours {precipitation_hours}h \nprecipitation_probability_max {precipitation_probability_max}% \n\nwindspeed_10m_max {windspeed_10m_max} mp/h")
| [] |
2024-01-10 | faisal-saddique/LawyerAI | utilities~create_index.py | # Import required libraries
from langchain.document_loaders import (
PyMuPDFLoader, # For loading PDF files
DirectoryLoader, # For loading files from a directory
TextLoader, # For loading plain text files
Docx2txtLoader, # For loading DOCX files
UnstructuredPowerPointLoader, # For loading PPTX files
UnstructuredExcelLoader # For loading XLSX files
)
from langchain.document_loaders.csv_loader import CSVLoader # For loading CSV files
import os
from langchain.text_splitter import RecursiveCharacterTextSplitter # For splitting text into smaller chunks
from utils import (update_vectorstore_FAISS, update_vectorstore_PINECONE, convert_filename_to_key)
from dotenv import load_dotenv # For loading environment variables from .env file
load_dotenv()
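# Ingestion script: load PDF/text/CSV/DOCX/PPTX/XLSX files from the data directory, split them
# into ~2300-character chunks, tag each chunk with a filename key, and push everything into the
# Pinecone vector store.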
# Replace with the name of the directory carrying your data
data_directory = "E:\\DESKTOP\\FreeLanceProjects\\muhammad_thaqib\\LawyerAI\\data"
# Load your documents from different sources
def get_documents():
# Create loaders for PDF, text, CSV, DOCX, PPTX, XLSX files in the specified directory
pdf_loader = DirectoryLoader(f"./{data_directory}", glob="**/*.pdf", loader_cls=PyMuPDFLoader)
txt_loader = DirectoryLoader(f"./{data_directory}", glob="**/*.txt", loader_cls=TextLoader)
csv_loader = DirectoryLoader(f"./{data_directory}", glob="**/*.csv", loader_cls=CSVLoader)
docx_loader = DirectoryLoader(f"./{data_directory}", glob="**/*.docx", loader_cls=Docx2txtLoader)
pptx_loader = DirectoryLoader(f"./{data_directory}", glob="**/*.pptx", loader_cls=UnstructuredPowerPointLoader)
xlsx_loader = DirectoryLoader(f"./{data_directory}", glob="**/*.xlsx", loader_cls=UnstructuredExcelLoader)
# Initialize documents variable
docs = None
# Load files using the respective loaders
pdf_data = pdf_loader.load()
txt_data = txt_loader.load()
csv_data = csv_loader.load()
docx_data = docx_loader.load()
pptx_data = pptx_loader.load()
xlsx_data = xlsx_loader.load()
# Combine all loaded data into a single list
docs = pdf_data + txt_data + csv_data + docx_data + pptx_data + xlsx_data
# Return all loaded data
return docs
# Get the raw documents from different sources
raw_docs = get_documents()
# Split the documents into smaller chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2300, chunk_overlap=10)
docs = text_splitter.split_documents(raw_docs)
# Print the number of documents and characters in the first document
print(f'You have {len(docs)} document(s) in your data')
print(f'There are {len(docs[0].page_content)} characters in your first document')
for object in docs:
try:
object.metadata["filename_key"] = convert_filename_to_key(os.path.split(object.metadata['source'])[-1])
except Exception as oops:
print(f"Object causing error is: {object}")
update_vectorstore_PINECONE(docs=docs)
""" # Start the interactive loop to take user queries
while True:
query = input("Ask your query: ") # Take user input
# Perform similarity search in the vector database and get the most similar documents
docs = db.similarity_search(query, k=3)
ans = utils.get_answer(query=query,context=docs)
print(f"Answer: {ans}") """ | [] |
2024-01-10 | stakewalker/ClimaCast | clima.py | import requests
import json
from datetime import datetime
import openai
import env
def call_tele(msg):
requests.get('https://api.telegram.org/bot' + env.tl_key + '/sendMessage?chat_id=' + env.chat_id + '&parse_mode=Markdown&text=' + msg)
openai.api_key = env.openai_key
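# Fetch current conditions plus a 168-hour temperature forecast from Climatempo, sample a few of
# the coming hours into a small table, let GPT-3.5 write the forecast text, and send the result
# to a Telegram chat.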
now = requests.get(f"https://apiadvisor.climatempo.com.br/api/v1/weather/locale/{env.city_id}/current?token={env.clima}").json()
all_temps = requests.get(f"http://apiadvisor.climatempo.com.br/api/v2/forecast/temperature/locale/{env.city_id}/hours/168?token={env.clima}").json()['temperatures']
# Select a few hours and parse data
temps = [[i['date'].split(" ")[1][:5], i['value']] for i in all_temps]
temps_table = [temps[i] for i in range(0,len(temps[:20]),4)]
prompt = f"""
WRITE YOUR PROMPT HERE
INSERT DATA {now} AND {temps_table}
TIP: USE CHATGPT TO GENERATE IT
"""
gpt_msg = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": prompt
}
],
temperature=1,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
message = gpt_msg['choices'][0]['message']['content']
#print(message) # Debug
call_tele(message) # Send message | [
"\nWRITE YOUR PROMPT HERE\nINSERT DATA PLACEHOLDER AND PLACEHOLDER\nTIP: USE CHATGPT TO GENERATE IT\n"
] |
2024-01-10 | ezzcodeezzlife/streamlit-cohere-boilerplate | myapp.py | import streamlit as st
import cohere
from dotenv import load_dotenv
import os
load_dotenv()
co = cohere.Client(os.getenv('COHERE_API_KEY'))
# Initialization
if 'output' not in st.session_state:
st.session_state['output'] = 'Output:'
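# Few-shot prompt: three example post -> hashtag pairs precede the user's caption, and generation
# stops at the "--" separator.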
def generate_hashtags(input):
if len(input) == 0:
return None
response = co.generate(
model='large',
prompt='Given a post, this program will generate relevant hashtags.\n\nPost: Why are there no country songs about software engineering\nHashtag: #softwareengineering #code \n--\nPost: Your soulmate is in the WeWork you decided not to go to\nHashtag: #wework #work \n--\nPost: If shes talking to you once a day im sorry bro thats not flirting that standup\nHashtag: #standup #funny \n--\nPost: {}\nHashtags:'.format(input),
max_tokens=20,
temperature=0.5,
k=0,
p=1,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=["--"],
return_likelihoods='NONE')
st.session_state['output'] = response.generations[0].text
st.balloons()
st.title('Hashtag Generator')
st.subheader('Boilerplate for Co:here, Streamlit, Streamlit Cloud')
st.write('''This is a simple **Streamlit** app that generates hashtags from a small Post title caption.''')
input = st.text_area('Enter your post title caption here', height=100)
st.button('Generate Hashtags', on_click=generate_hashtags, args=(input,))
st.write(st.session_state.output) | [] |
2024-01-10 | ddkang/aidb | config~legal_analysis.py | from aidb.config.config_types import AIDBListType
from aidb.inference.examples.detectron_local_ocr import DetectronLocalOCR
from aidb.inference.examples.openai_inference_service import OpenAIText
DB_URL = 'sqlite+aiosqlite://'
DB_NAME = 'aidb_test_legal.sqlite'
ocr = DetectronLocalOCR(
name="ocr",
model_path="tests/data/model_final.pth")
openai_gpt = OpenAIText(
name="openai_gpt",
token=None, # leave it None if you want AIDB to read token from env variable OPENAI_API_KEY. Otherwise replace None with your own token in str.
columns_to_input_keys=[('messages', AIDBListType(), 'content')],
response_keys_to_columns=[('choices', AIDBListType(), 'message', 'content')],
input_columns_types=[str],
output_columns_types=[str],
default_args={"model": "gpt-4-1106-preview",
('messages', AIDBListType(), 'role'): "user"},
prompt_prefix='"',
prompt_suffix='''"
The paragraph above is from a law document.
Classify whether or not the reasoning of the given paragraph in an opinion is based on the legal framework of textualism.
Only answer True or False. Do not provide any reason. Do not add \'.\' in the end.')''')
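# Pipeline: local Detectron-based OCR extracts text from each PDF blob, then GPT-4 labels whether
# the opinion's reasoning is textualist; outputs land in the ocr and textualism tables defined below.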
inference_engines = [
{
"service": ocr,
"input_col": ("pdf.path", "pdf.id"),
"output_col": ("ocr.text", "ocr.id")
},
{
"service": openai_gpt,
"input_col": ("ocr.text", "ocr.id"),
"output_col": ("textualism.label", "textualism.id")
}
]
blobs_csv_file = "tests/data/law_pdf.csv"
blob_table_name = "pdf"
blobs_keys_columns = ["id"]
"""
dictionary of table names to list of columns
"""
tables = {
"ocr": [
{"name": "id", "is_primary_key": True, "refers_to": ("pdf", "id"), "dtype": int},
{"name": "text", "dtype": str}
],
"textualism": [
{"name": "id", "is_primary_key": True, "refers_to": ("ocr", "id"), "dtype": int},
{"name": "label", "dtype": str}
]
}
| [] |
2024-01-10 | bkane2/habitual-response-generation | src~gpt_interface.py | import backoff
import openai
from openai.error import RateLimitError, Timeout, ServiceUnavailableError, APIConnectionError, APIError
from transformers import GPT2Tokenizer
from util import *
openai.api_key = read_file('_keys/openai.txt')
TOKENIZER = GPT2Tokenizer.from_pretrained("gpt2")
MODEL_COSTS = {
'text-davinci-003' : 0.02,
'gpt-3.5-turbo' : 0.002
}
AVG_TOKENS_PER_CHAR = 0.25
@backoff.on_exception(backoff.expo, (RateLimitError, Timeout, ServiceUnavailableError, APIConnectionError, APIError))
def generate_instruct_gpt(model, prompt, suffix=None, stop=None, max_tokens=256):
response = openai.Completion.create(
model=model,
prompt=prompt,
suffix=suffix,
max_tokens=max_tokens,
stop=stop
)
return response
def cost_instruct_gpt(model, prompt, avg_resp_len, suffix=None, stop=None, max_tokens=256, tokenizer=TOKENIZER):
tokens = tokenizer(prompt)['input_ids']
n_tokens = len(tokens)
if suffix:
n_tokens += len(tokenizer(suffix)['input_ids'])
n_tokens += AVG_TOKENS_PER_CHAR * min(avg_resp_len, max_tokens)
return (MODEL_COSTS[model] / 1000) * n_tokens
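# Worked example of the estimate above (illustrative numbers, no suffix): for
# 'text-davinci-003' at 0.02 USD per 1K tokens, a 1,000-token prompt with
# avg_resp_len=400 and the default max_tokens=256 adds 0.25 * min(400, 256) = 64
# estimated response tokens, giving (0.02 / 1000) * (1000 + 64) ~= 0.021 USD.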
@backoff.on_exception(backoff.expo, (RateLimitError, Timeout, ServiceUnavailableError, APIConnectionError, APIError))
def generate_chat_gpt(model, prompt, preamble=None, examples=[], stop=None, max_tokens=2048):
messages=[]
if preamble:
messages.append({"role": "system", "content": preamble})
for example in examples:
messages.append({"role": "user", "content": example[0]})
messages.append({"role": "assistant", "content": example[1]})
messages.append({"role": "user", "content": prompt})
response = openai.ChatCompletion.create(
model=model,
messages=messages,
stop=stop,
max_tokens=max_tokens
)
return response
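# Illustrative call (hypothetical prompt and example texts):
#   generate_chat_gpt("gpt-3.5-turbo", "Translate 'bonjour' into English",
#                     preamble="You are a concise translator.",
#                     examples=[("Translate 'merci' into English", "Thank you.")])
# builds one system message, a user/assistant pair per example, then the prompt.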
def cost_chat_gpt(model, prompt, avg_resp_len, preamble=None, examples=[], stop=None, max_tokens=1024, tokenizer=TOKENIZER):
n_tokens = 0
if preamble:
n_tokens += len(tokenizer(preamble)['input_ids'])
for example in examples:
n_tokens += len(tokenizer(example[0])['input_ids'])
n_tokens += len(tokenizer(example[1])['input_ids'])
n_tokens += len(tokenizer(prompt)['input_ids'])
n_tokens += AVG_TOKENS_PER_CHAR * min(avg_resp_len, max_tokens)
return (MODEL_COSTS[model] / 1000) * n_tokens | [] |
2024-01-10 | colbynoel/auto-dubber | backend~code~auto-dubber-openai.py | import audio_video_script as avs
from datetime import datetime
# import google.cloud.translate
import elevenlabs
from google.cloud import translate
import html
import openai
import os
import subprocess
TODAYS_DATE = datetime.now().date()
GOOGLES_MAX_TOKENS = 500000
def get_video_name(video_file: str):
video_name = video_file.split("/")[-1].split(".")[0]
print(f"get_video_name[video_name: {video_name}]")
return video_name
def update_request_limit():
request_limit_tracker = "./code/rate_limits_openai.txt"
requests_made_str = ""
requests_made = 0
try:
with open(request_limit_tracker, "r") as file:
requests_made_str = file.read().strip()
    except FileNotFoundError:
        print("File Not Found. Cannot update request limit")
        requests_made_str = "0"  # fall back to zero so the counter can still be written
    requests_made = int(requests_made_str) + 1
with open(request_limit_tracker, "w") as file:
file.write(str(requests_made))
return requests_made
def check_request_limit(requests_made: int):
return True if requests_made < 200 else False
class AudioFileTooLong(Exception):
def __init__(self, message="File too large to transcribe. File size must be less than 25 MB"):
self.message = message
super().__init__(self.message)
def transcribe_audio_whisperai(audio_file_path: str):
with open(audio_file_path, "rb") as audio_file:
transcription = openai.Audio.transcribe(model="whisper-1", file=audio_file, response_format="srt")
return transcription
def write_transcription_to_file(srt_transcription: str, video_name: str):
file_path = f"./transcriptions/{TODAYS_DATE}_{video_name}_transcription.srt"
with open(file_path, "w") as file:
file.write(srt_transcription)
return file_path
def turn_srt_file_to_paragraph(srt_file):
try:
with open(srt_file, "r", encoding="utf-8") as file:
lines = file.readlines()
paragraph = ""
is_subtitle_block = False
for line in lines:
line = line.strip() # Check for blank line
if not line:
continue
elif line.isdigit(): # Check if line number
continue
elif "-->" in line: # Check if time sequence
continue
else: # Subtitle line
paragraph += f"{line} | "
return paragraph.strip()
except FileNotFoundError:
return "SRT file not found."
def update_token_tracker(transcribed_paragraph: str):
tokens_transcribed = len(transcribed_paragraph)
google_translate_api_tracker = "./token_tracker/google_translation_api.txt"
with open(google_translate_api_tracker, "r") as file:
tokens_transcribed_so_far = file.read().strip()
total_tokens_transcribed = int(tokens_transcribed_so_far) + tokens_transcribed
with open(google_translate_api_tracker, "w") as file:
file.write(str(total_tokens_transcribed))
return total_tokens_transcribed
class GoogleTokenLimitReached(Exception):
def __init__(self, message="Google's Translate API has reached maxed token. Please try again later."):
self.message = message
super().__init__(self.message)
def write_translation_to_file(translated_text: str, video_name: str):
file_path = f"./translated_text/{video_name}_translation.txt"
with open(file_path, 'w', encoding="utf-8") as file:
file.write(translated_text)
return file_path
def google_translate_basic(text: str, target_language_code: str):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "./credentials/autodubber-f5336cc011ff.json"
from google.cloud import translate_v2 as translate
translate_client = translate.Client()
if isinstance(text, bytes):
text = text.decode("utf-8")
split_text = text.split("|")
translated_segments = []
for line in split_text:
translation = translate_client.translate(line, target_language=target_language_code)
translated_segments.append(translation["translatedText"].strip())
# result = translate_client.translate(text, target_language=target_language_code)
# translation = result["translatedText"]
translated_text = " | ".join(translated_segments)
    # Unescape HTML entities so text like "Pacífico" does not end up in the translation
decoded_text = html.unescape(translated_text)
return decoded_text
def get_dubbed_audio(translated_text: str, video_name: str):
filename = f"./translated_audio/{video_name}.wav"
elevenlabs_api_key = avs.get_api_credentials("./credentials/elevenlabs_creds.txt")
elevenlabs.set_api_key(elevenlabs_api_key)
audio = elevenlabs.generate(
text=translated_text,
voice="Ryan", # Need to changle later to allow user to pick voice
model="eleven_multilingual_v2"
)
elevenlabs.save(audio=audio, filename=filename)
def make_srt_translation(srt_file: str, translated_text: str):
srt_file_list = srt_file.split("/")
filename_split = srt_file.split("_")
filename_split[-1] = "translation.srt"
out_file = "_".join(filename_split)
print(f"out_file: {out_file}")
try:
with open(srt_file, "r", encoding="utf-8") as file:
lines = file.readlines()
translated_text_split = translated_text.split("|")
translated_text_split = [line.strip() for line in translated_text_split]
print(f"translated_text_split: {translated_text_split}")
with open(out_file, "w", encoding="utf-8") as file:
index = 0
for line in lines:
line = line.strip()
if not line:
continue
elif line.isdigit():
file.write(f"{line}\n")
elif "-->" in line:
file.write(f"{line}\n")
else:
file.write(f"{translated_text_split[index]}\n\n")
index += 1
# if index < len(translated_text_split):
# file.write(f"{translated_text_split[index]}\n\n")
# index += 1
# else:
# print(f"Warning: No corresponding translation found for subtitle line {index + 1}")
return out_file
except FileNotFoundError:
return "SRT file not found."
def add_subtitles_to_video(video_file, translated_srt_file, output_file):
command = [
"ffmpeg", # Runs FFmpeg software
"-i", video_file, # Input file
"-vf", f"subtitles={translated_srt_file}", # Apply video filter w/ specified SRT file
output_file
]
subprocess.run(command)
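# Illustrative call (hypothetical paths), equivalent to running
#   ffmpeg -i ./video/clip.mp4 -vf subtitles=./transcriptions/clip_translation.srt ./video/clip_subtitled.mp4
# from a shell:
#   add_subtitles_to_video("./video/clip.mp4",
#                          "./transcriptions/clip_translation.srt",
#                          "./video/clip_subtitled.mp4")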
def main():
API_KEY_OPENAI = avs.get_api_credentials("./credentials/openai_credentials.txt")
print(f"API_KEY: {API_KEY_OPENAI}")
openai.api_key = API_KEY_OPENAI
MAX_MB_FOR_VIDEO = 25
video_file = "./video/BadFriendsPod.mp4"
video_name = get_video_name(video_file)
print(f"main[video_name: {video_name}]")
todays_date = avs.TODAYS_DATE
requests_made_so_far = update_request_limit()
is_within_limits = check_request_limit(requests_made_so_far)
if is_within_limits:
audio_file = avs.video_to_audio(video_file)
# audio_file = F"./audio/output_audio_{TODAYS_DATE}.wav"
# print(f"main[audio_file: {audio_file}]")
file_size = os.path.getsize(audio_file) / (1024 * 1024)
print(f"audio_file size: {file_size} MB\n\n")
if file_size > MAX_MB_FOR_VIDEO:
raise AudioFileTooLong
transcription = transcribe_audio_whisperai(audio_file) # UNCOMMENT LATER
srt_file = write_transcription_to_file(transcription, video_name)
srt_paragraph = turn_srt_file_to_paragraph(srt_file)
print(f"srt_paragraph: {srt_paragraph}\n\n")
total_tokens_transcribed = update_token_tracker(srt_paragraph)
print(f"total_tokens_transcribed: {total_tokens_transcribed}\n\n")
if total_tokens_transcribed >= GOOGLES_MAX_TOKENS:
raise GoogleTokenLimitReached
translation = google_translate_basic(srt_paragraph, "es-419") # es-419 translates to Latin American Spanish (Not Spain)
print(f"translation: {translation}")
translated_file = write_translation_to_file(translation, video_name)
# get_dubbed_audio(translation, video_name)
# srt_file_path = "./transcriptions/2023-09-25_CoachPrime_transcription.srt"
with open(translated_file, "r", encoding="utf-8") as file:
translated_text = file.read()
# print(f"translated_text: {translated_text}")
translated_srt_file = make_srt_translation(srt_file, translated_text)
subtitled_video = f"./video/{video_name}_translated_subtitles.mp4"
add_subtitles_to_video(video_file, translated_srt_file, subtitled_video)
if __name__ == "__main__":
main()
# Issue: Google translate is moving the pipe around, which then throws off the translation and the
# subtitles. Thinking about using open AI to get the translation. Might be the easiest move.
| [] |
2024-01-10 | john-adeojo/ai_travel_agent | src~run_chains.py | """
This module provides functionality to get arguments for flight search from user query and
to find flights using a language model and a SQL database toolkit.
"""
import json
import openai
from langchain.tools import tool
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.llms.openai import OpenAI
from langchain.agents.agent_types import AgentType
def get_args(query_user, OPENAI_KEY):
"""
Extracts necessary parameters for flight search from user query using OpenAI API.
This function takes a user query and an OpenAI key as inputs, sends the user query to
OpenAI API to extract necessary parameters including the number of adults, departure date,
return date, destination location code, and origin location code for flight search.
Parameters:
query_user (str): User query to be sent to OpenAI API.
    OPENAI_KEY (str): OpenAI key to authenticate with the API.
Returns:
num_adults (int): Number of adults for the flight.
departureDate (str): Departure date in the format YYYY-MM-DD.
returnDate (str): Return date in the format YYYY-MM-DD.
destinationLocationCode (str): IATA code for the destination location.
    originLocationCode (str): IATA code for the origin location.
    TypeofflightReuqest (str): Type of flight requested (e.g. cheapest, shortest, fewest stops).
    """
function_call = [
{
"name": "search_for_flights",
"description": "Requests flight data from Amadeus API and writes to SQLite database",
"parameters": {
"type": "object",
"properties": {
"num_adults":{
"type":"integer",
"description": '''Based on the query, respond with the number of adults'''
},
"departureDate": {
"type":"string",
"description": '''Based on the query, respond with the Departure Date. Dates are specified in the ISO 8601 YYYY-MM-DD format. '''
},
"returnDate": {
"type":"string",
"description": '''Based on the query, respond with the Return Date. Dates are specified in the ISO 8601 YYYY-MM-DD format. '''
},
"destinationLocationCode":{
"type":"string",
"description": '''Based on the query, respond with an airport IATA code from the city which the traveler is going. E.g CDG for Charles de Gaulle Airport'''
},
"originLocationCode": {
"type": "string",
"description": '''Based on the query, respond with an airport IATA code from the city which the traveler will depart from. E.g CDG for Charles de Gaulle Airport'''
},
"TypeofflightReuqest": {
"type": "string",
"description": '''Based on the query, respond with the type of flight the user is requesting E.g cheapest, shortest, fastest, least stops etc.'''
},
},
"required": ["destinationLocationCode", "originLocationCode", "departureDate", "returnDate", "num_adults", "TypeofflightReuqest"]
}
}
]
openai.api_key = OPENAI_KEY
message = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=[{"role": "user", "content": query_user}],
functions = function_call,
function_call = 'auto',
temperature=0
)
response_message = message["choices"][0]["message"]["function_call"]["arguments"]
parsed_data = json.loads(response_message)
# Accessing variables
num_adults = parsed_data['num_adults']
departureDate = parsed_data['departureDate']
returnDate = parsed_data['returnDate']
destinationLocationCode = parsed_data['destinationLocationCode']
originLocationCode = parsed_data['originLocationCode']
TypeofflightReuqest = parsed_data['TypeofflightReuqest']
print("Number of Adults: ", num_adults)
print("Departure Date: ", departureDate)
print("Return Date: ", returnDate)
print("Destination Location Code: ", destinationLocationCode)
print("Origin Location Code: ", originLocationCode)
print("Origin Location Code: ", TypeofflightReuqest)
return num_adults, departureDate, returnDate, destinationLocationCode, originLocationCode, TypeofflightReuqest
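# Illustrative call (hypothetical query text and key):
#   num_adults, dep, ret, dest, origin, flight_type = get_args(
#       "Find the cheapest return flight from London to Paris for 2 adults, "
#       "leaving 2024-03-01 and returning 2024-03-08", OPENAI_KEY)
# would be expected to return num_adults=2, dest="CDG", origin="LHR" and
# flight_type="cheapest", assuming the model fills the function-call arguments.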
# run SQLDatabase chain
def find_flights(query, llm, db):
"""
Executes a search for flights using a language model and a SQL database toolkit.
Parameters:
query (str): The query to be executed, typically a natural language description of the flights to find.
llm (LanguageModel): The language model used to process the query and generate SQL commands.
db (Database): The database object where the flight data is stored and from which data will be retrieved.
Returns:
Response: The response from the agent executor's run method, typically containing the search results or an error message.
"""
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent_executor = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
return agent_executor.run(query) | [] |
2024-01-10 | john-adeojo/ai_travel_agent | src~front_end.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from run_chains import get_args, find_flights
from search_flights import pull_flights
from utils import SingletonToken, query_template
st.markdown(
"""
#### Prototype Built by [Data-Centric Solutions](https://www.data-centric-solutions.com/)
""",
unsafe_allow_html=True,
)
# Side panel for OpenAI token input
st.sidebar.title("Configuration")
OPENAI_KEY = st.sidebar.text_input("Enter OpenAI Key", type="password")
# Initialize an empty placeholder
placeholder = st.empty()
if OPENAI_KEY:
SingletonToken.set_token(OPENAI_KEY)
OPENAI_KEY = SingletonToken.get_token()
    # If the OpenAI key is set, enable the chat interface
st.title("Find my flights🛫 ")
query_user = placeholder.text_input("Search for flights...")
if st.button("Submit"):
try:
num_adults, departureDate, returnDate, destinationLocationCode, originLocationCode, TypeofflightReuqest = get_args(query_user, OPENAI_KEY)
        except Exception:
            st.write("Please make sure you tell us the origin, destination, departure and return dates, and number of adults")
            st.stop()
db = pull_flights(originLocationCode, destinationLocationCode, departureDate, returnDate, num_adults)
llm = ChatOpenAI(temperature=0, model="gpt-4-0613", openai_api_key=OPENAI_KEY)
query = query_template(num_adults, departureDate, returnDate, destinationLocationCode, originLocationCode, TypeofflightReuqest)
response = find_flights(query, llm, db)
st.markdown(f"Here's your suggested Journey: : {response}")
else:
    # If the OpenAI key is not set, show a message
placeholder.markdown(
"""
        **Please enter your OpenAI key in the sidebar.**
Follow this [link](https://www.howtogeek.com/885918/how-to-get-an-openai-api-key/) to get your OpenAI API key.
""",
unsafe_allow_html=True,
) | [] |
2024-01-10 | john-adeojo/ai_travel_agent | src~create_database.py | """
This module provides functionality to load data into a
sqlite database stored in memory
"""
from sqlalchemy import create_engine
import pandas as pd
from langchain import SQLDatabase
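# Illustrative usage of load_data below (hypothetical DataFrames):
#   flights = pd.DataFrame({"id": [1], "price": [120.0]})
#   journey_pricing = pd.DataFrame({"id": [1], "total_price": [240.0]})
#   db = load_data(journey_pricing, flights)  # SQLDatabase over in-memory SQLite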
def load_data(journey_pricing, flights):
"""
Loads flight and journey pricing data into an in-memory SQLite database.
This function creates an in-memory SQLite database, loads flight and journey pricing data
into this database, and returns an SQLDatabase object that serves as the interface to this database.
Parameters:
journey_pricing (pd.DataFrame): A DataFrame containing journey pricing data.
flights (pd.DataFrame): A DataFrame containing flight data.
Returns:
db (SQLDatabase): An SQLDatabase object that serves as the interface to the SQLite database.
"""
engine = create_engine('sqlite:///:memory:')
# Write the data to the SQLite database
flights.to_sql('flights', engine, if_exists='replace', index=False)
journey_pricing.to_sql('journey_pricing', engine, if_exists='replace', index=False)
# Check if the data was loaded correctly
df_loaded = pd.read_sql('SELECT * FROM flights', engine)
db = SQLDatabase(engine)
return db | [] |
2024-01-10 | Wangyibo321/GPTuner | src~knowledge_handler~knowledge_transformation.py | import openai
import re
import psutil
import textwrap
import json
import os
import random
import tiktoken
from collections import Counter
import time
class KGTrans:
def __init__(self, api_base, api_key, db="postgres", model="gpt-4"):
self.db = db
self.api_base = api_base
self.api_key = api_key
self.model = model
self.knob_path = f"./knowledge_collection/{self.db}"
self.knob_num = 0
self.money = 0
self.token = 0
self.total_time = 0
self.cur_token = 0
self.cur_money = 0
self.cur_time = time.time()
self.__connect()
self._define_path()
def __connect(self):
openai.api_base = self.api_base
openai.api_key = self.api_key
def _define_path(self):
self.knob_info_path = os.path.join(self.knob_path, "knob_info/system_view.json")
self.summary_path = os.path.join(self.knob_path, "tuning_lake")
self.skill_json_path = os.path.join(self.knob_path, "structured_knowledge/normal/")
self.max_path = os.path.join(self.knob_path, "structured_knowledge/max/")
self.official_path = os.path.join(self.knob_path, "/knob_info/official_document.json")
self.special_path = os.path.join(self.knob_path, "structured_knowledge/special/")
def get_answer(self, prompt):
response = openai.ChatCompletion.create(
model=self.model,
messages = [{
"role": "user",
"content": prompt
}],
n=1,
stop=None,
temperature=0
)
return response.choices[0].message["content"].strip()
def calc_token(self, in_text, out_text=""):
enc = tiktoken.encoding_for_model("gpt-4")
return len(enc.encode(out_text+in_text))
def calc_money(self, in_text, out_text):
"""money for gpt4"""
return (self.calc_token(in_text) * 0.03 + self.calc_token(out_text) * 0.06) / 1000
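    # Worked example of the GPT-4 estimate above (illustrative token counts):
    # a 2,000-token prompt and a 500-token answer cost roughly
    # (2000 * 0.03 + 500 * 0.06) / 1000 = 0.09 USD at the rates assumed here.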
def remove_html_tags(self, text):
clean = re.compile('<.*?>')
return re.sub(clean, '', text)
def extract_json_from_text(self, text):
json_pattern = r'\{[^{}]*\}'
match = re.search(json_pattern, text)
if match:
try:
json_data = json.loads(match.group())
return json_data
except json.JSONDecodeError:
return None
else:
return None
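    # Illustrative behaviour (hypothetical answer text): given
    #   'Result: {"suggested_values": ["1GB"], "min_value": null, "max_value": "4GB"}'
    # the first {...} span is parsed and returned as a dict; None is returned
    # when no syntactically valid JSON object can be found.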
def get_hardware_info(self):
available_cpu_cores = psutil.cpu_count(logical=False)
memory = psutil.virtual_memory()
total_memory = memory.total
total_memory = total_memory / (1024 * 1024 * 1024)
root_disk = psutil.disk_usage('/')
total_disk_space = root_disk.total
total_disk_space = total_disk_space / (1024 * 1024 * 1024)
return available_cpu_cores, int(total_memory), int(total_disk_space)
def get_disk_type(self, device="sda"):
rotational_path = f'/sys/block/{device}/queue/rotational'
if os.path.exists(rotational_path):
with open(rotational_path, 'r') as file:
rotational_value = file.read().strip()
if rotational_value == '0':
return 'SSD'
elif rotational_value == '1':
return 'HDD'
else:
return 'Unknown'
else:
return 'Unknown'
def get_examples(self):
example_path = f"./example_pool/"
file_list = os.listdir(example_path)
        # Use random.sample() to randomly pick three example files from the list
random_examples_name = random.sample(file_list, 3)
random_examples = []
for i in range(3):
with open(os.path.join(example_path, random_examples_name[i]), "r") as file:
example = f"<example>\n{file.read()}\n<\example>"
random_examples.append(example)
return '\n'.join(random_examples)
def get_skill(self, knob):
cpu_cores, ram_size, disk_size = self.get_hardware_info()
disk_type = self.get_disk_type()
try:
with open(os.path.join(self.summary_path, knob+".txt"), 'r') as file:
summary = file.read()
except:
print(f"The tuning pool of {knob} is empty, generate the tuning pool first.")
raise
prompt = textwrap.dedent(f"""
Suppose you are an experienced DBA, and you are required to tune a knob of {self.db}.
TASK DESCRIPTION:
Given the knob name along with its suggestion and hardware information, your job is to offer three values that may lead to the best performance of the system and meet the hardware resource constraints. The three values you need to provide are 'suggested_values', 'min_values', and 'max_values'. If you can identify one or more exact discrete suggested values, treat them as 'suggested_values'. If the suggested values fall within a continuous interval, provide the 'min_value' and 'max_value' for that interval.
Note that the result you provide should be derived or inferred from the information provided. The result values should be numerical, and if a unit is needed, you can only choose from [KB, MB, GB, ms, s, min]; other units are not permitted.
The question you need to solve will be given in the HTML tag <question>, the suggested steps to follow to finish the job are in <step>, and some examples will be given in the <example> tag.
<step>
Step 1: Check if the suggestion provides values for the knob; if so, identify the relevant sentences and move to Step 2. If not, move to Step 2. Note that there may be several sentences you should try to find them all.
Step 2: Check if the suggestion recommends some values related to hardware information. If so, proceed to Step 3; if not, proceed to Step 4.
Step 3: Read the hardware information to figure out the hardware-relevent value(s); some easy computation may be required.
Step 4: Check whether the suggestion offers a specific recommended value or a recommended range for good performance or both of them. Note that sometimes the default value or the permitted value range of the knob is given, but these are not the recommended values for optimal DBMS performance, so ignore these values.
Step 5: If discrete suggested values are given, list them under 'suggested_values'.
Step 6: If a suggested range is given, set the upper and lower bounds of the range as the 'max_value' and 'min_value', respectively.
Step 7: Return the result in JSON format.
</step>
<EXAMPLES>
{self.get_examples()}
</EXAMPLES>
<question>
KNOB: {knob}
SUGGESTION: {summary}
HARDWARE INFORMATION: The machine running the dbms has a RAM of {ram_size} GB, a CPU of {cpu_cores} cores, and a {disk_size} GB {disk_type} drive.
JSON RESULT TEMPLATE:
{{
"suggested_values": [], // these should be exact values with a unit if needed (allowable units: KB, MB, GB, ms, s, min)
"min_value": null, // change it if there is a hint about the minimum value in SUGGESTIONS
"max_value": null // change it if there is a hint about the maximum value in SUGGESTIONS, it should be larger than min_value
}}
</question>
Let us think step by step and finally provide me with the result in JSON format. If no related information is provided in suggestions, just keep the result values at their default.
""")
answer = self.get_answer(prompt)
self.token += self.calc_token(prompt, answer)
self.money += self.calc_money(prompt, answer)
return answer
def vote(self, knob):
skill_json_files = os.listdir(self.skill_json_path)
if knob + ".txt" not in skill_json_files:
min_l = []
max_l = []
suggested_l = []
for i in range(5):
print(f"vote for {knob}, round {i}")
result_txt = self.get_skill(knob)
result_json = self.extract_json_from_text(result_txt)
suggested_values = result_json["suggested_values"]
min_value = result_json["min_value"]
max_value = result_json["max_value"]
min_l.append(min_value)
max_l.append(max_value)
suggested_l = suggested_l + suggested_values
skill_json = {}
min_counts = Counter(min_l)
max_counts = Counter(max_l)
suggested_counts = Counter(suggested_l)
sorted_min = sorted(min_counts.items(), key=lambda x: x[1], reverse=True)
sorted_max = sorted(max_counts.items(), key=lambda x: x[1], reverse=True)
sorted_suggested = sorted(suggested_counts.items(), key=lambda x: x[1], reverse=True)
print(f"Vote result for {knob}:")
print(sorted_min, sorted_max, sorted_suggested)
if len(sorted_min)!=0:
most_common_min, _ = sorted_min[0]
skill_json["min_value"] = most_common_min
else:
skill_json["min_value"] = None
if len(sorted_max)!=0:
most_common_max, _ = sorted_max[0]
skill_json["max_value"] = most_common_max
else:
skill_json["min_value"] = None
if len(sorted_suggested) != 0:
most_common_suggested_count = sorted_suggested[0][1]
most_common_suggested = [item[0] for item in sorted_suggested if item[1] == most_common_suggested_count]
skill_json["suggested_values"] = most_common_suggested
else:
skill_json["suggested_values"] = []
with open(os.path.join(self.skill_json_path, knob+".json"), 'w') as file:
json.dump(skill_json, file)
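    # Sketch of the voting step above (illustrative values): if the five rounds
    # return min_value entries ["1MB", "1MB", "2MB", None, "1MB"], the most
    # common entry ("1MB") is written out; suggested values are pooled across
    # rounds and every value tied for the highest count is kept.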
def classify_special_knob(self, knob_name):
if os.path.exists(self.official_path):
with open(self.official_path, 'r') as json_file:
data = json.load(json_file)
knob_list = data["params"]
description = None
for knob in knob_list:
if knob["name"] == knob_name:
description = self.remove_html_tags(knob["description"])
if description is None:
return None
prompt = textwrap.dedent(f"""
Database Management Systems (DBMS) have settings referred to as 'knobs'. Numerical knobs typically have a natural order. However, some 'special' numerical knobs have special values, such as -1 or 0, that break this natural order. When set to a special value, such knob performs a very different function compared to its regular operation, such as disabling a feature. Otherwise, it behaves like a regular numerical knob. Let us think step by step, please classify a knob as a 'special knob' based on its DESCRIPTION and provide the RESULT in JSON format.
KNOB:
{knob_name}
DESCRIPTION:
{description}
RESULT:
{{
"think_procedure": {{procedure}} // fill 'procedure' with your 'think step by step procedure'
"special_knob”: {{bool}}, // fill 'bool' with 'true' or 'false'
"special_value: {{value}} // fill 'value' with its special value if it is a special knob
}}
""")
else:
prompt = textwrap.dedent(f"""
Database Management Systems (DBMS) have settings referred to as 'knobs'. Numerical knobs typically have a natural order. However, some 'special' numerical knobs have special values, such as -1 or 0, that break this natural order. When set to a special value, such knob performs a very different function compared to its regular operation, such as disabling a feature. Otherwise, it behaves like a regular numerical knob. Let us think step by step, please classify a knob of {self.db}as a 'special knob' and provide the RESULT in JSON format.
KNOB:
{knob_name}
RESULT:
{{
"think_procedure": {{procedure}} // fill 'procedure' with your 'think step by step procedure'
"special_knob”: {{bool}}, // fill 'bool' with 'true' or 'false'
"special_value: {{value}} // fill 'value' with its special value if it is a special knob
}}
""")
answer = self.get_answer(prompt)
self.token += self.calc_token(prompt, answer)
self.money += self.calc_money(prompt, answer)
print(f"prepare special skill for {knob_name}")
return answer
def prepare_special_skill(self, knob):
file_name = f"{knob}.json"
if file_name not in os.listdir(self.special_path):
result = self.classify_special_knob(knob)
if result is not None:
json_result = self.extract_json_from_text(result)
with open(f"{self.special_path}{file_name}", 'w') as file:
json.dump(json_result, file)
def mysql_provide_max(self, knob):
if os.path.exists(os.path.join(self.max_path, knob+".txt")):
return None
cpu_cores, ram_size, disk_size = self.get_hardware_info()
disk_type = self.get_disk_type()
with open(self.knob_info_path, 'r') as file:
knob_info = json.load(file)[knob]
upper_bound = knob_info.get("max_val")
prompt = textwrap.dedent(f"""
Database Management Systems (DBMS) have settings referred to as 'knobs'. There is always a legitimate range for a numerical knob. But for some knobs, the upper bound is too large, so that it is impossible to set such a large value in practice. Given a knob of mysql, your job is to judge whether the upper bound of this knob is too large, if so, offer your suggested upper bound according to your experience and the hardware information I provide. Your suggested upper bound cannot be larger than the upper bound of the knob and cannot be larger than '9,223,372,036,854,775,807'. If the knob is not numerical, return null.
KNOB:
{knob}
UPPER_BOUND:
{upper_bound}
HARDWARE INFORMATION: The machine running the dbms has a RAM of {ram_size} GB, a CPU of {cpu_cores} cores, and a {disk_size} GB {disk_type} drive.
Now think step by step and give me the suggested upper bound. The answer should either be a number or null. Just return the answer, do not provide other information.
""")
answer = self.get_answer(prompt)
self.token += self.calc_token(prompt, answer)
self.money += self.calc_money(prompt, answer)
with open(os.path.join(self.max_path, knob+".txt"), 'w') as file:
file.write(answer)
return answer
def pipeline(self, knob):
print(f"begin to prepare structured knowledge for {knob}")
self.cur_time = time.time()
skill_json_files = os.listdir(self.skill_json_path)
if knob + ".json" not in skill_json_files:
self.vote(knob)
# Special
self.prepare_special_skill(knob)
# Since the upper bound of some knob in mysql is too big, ask gpt to propose an upper bound.
if self.db == "mysql":
self.mysql_provide_max(knob)
self.cur_time = time.time() - self.cur_time
self.total_time = self.total_time + self.cur_time
self.knob_num += 1
print(f"Finished to prepare structured knowledge for {knob}")
print(f"total token:{self.token}, total money:{self.money}, total time: {self.total_time}, knob num: {self.knob_num}")
print(f"ave token: {self.token/self.knob_num}, ave money:{self.money/self.knob_num}, ave time:{self.total_time/self.knob_num},")
| [
"\n Database Management Systems (DBMS) have settings referred to as 'knobs'. There is always a legitimate range for a numerical knob. But for some knobs, the upper bound is too large, so that it is impossible to set such a large value in practice. Given a knob of mysql, your job is to judge whether the upper bound of this knob is too large, if so, offer your suggested upper bound according to your experience and the hardware information I provide. Your suggested upper bound cannot be larger than the upper bound of the knob and cannot be larger than '9,223,372,036,854,775,807'. If the knob is not numerical, return null. \n \n KNOB: \n PLACEHOLDER\n UPPER_BOUND:\n PLACEHOLDER\n HARDWARE INFORMATION: The machine running the dbms has a RAM of PLACEHOLDER GB, a CPU of PLACEHOLDER cores, and a PLACEHOLDER GB PLACEHOLDER drive.\n\n Now think step by step and give me the suggested upper bound. The answer should either be a number or null. Just return the answer, do not provide other information.\n ",
"\n Database Management Systems (DBMS) have settings referred to as 'knobs'. Numerical knobs typically have a natural order. However, some 'special' numerical knobs have special values, such as -1 or 0, that break this natural order. When set to a special value, such knob performs a very different function compared to its regular operation, such as disabling a feature. Otherwise, it behaves like a regular numerical knob. Let us think step by step, please classify a knob as a 'special knob' based on its DESCRIPTION and provide the RESULT in JSON format. \n KNOB: \n PLACEHOLDER\n DESCRIPTION: \n PLACEHOLDER\n RESULT: \n {\n \"think_procedure\": {procedure} // fill 'procedure' with your 'think step by step procedure'\n \"special_knob”: {bool}, // fill 'bool' with 'true' or 'false' \n \"special_value: {value} // fill 'value' with its special value if it is a special knob\n }\n "
] |
2024-01-10 | Wangyibo321/GPTuner | src~knowledge_handler~knowledge_preparation.py | import openai
import re
import psutil
import textwrap
import json
import os
import tiktoken
import time
class KGPre:
def __init__(self, api_base, api_key, db="postgres", model="gpt-4"):
self.db = db
self.api_base = api_base
self.api_key = api_key
self.model = model
self.knob_path = f"./knowledge_collection/{self.db}"
self.knob_num = 0
self.money = 0
self.token = 0
self.total_time = 0
self.cur_token = 0
self.cur_money = 0
self.cur_time = time.time()
self.__connect()
self._define_path()
def __connect(self):
openai.api_base = self.api_base
openai.api_key = self.api_key
def _define_path(self):
self.knob_info_path = f"./knowledge_collection/{self.db}/knob_info/system_view.json"
self.gpt_path = f"./knowledge_collection/{self.db}/knowledge_sources/gpt"
self.web_path = f"./knowledge_collection/{self.db}/knowledge_sources/web"
self.manual_path = f"./knowledge_collection/{self.db}/knowledge_sources/manual"
self.summary_path = f"./knowledge_collection/{self.db}/tuning_lake"
self.official_path = f"./knowledge_collection/{self.db}/knob_info/official_document.json"
def get_answer(self, prompt):
response = openai.ChatCompletion.create(
model=self.model,
messages = [{
"role": "user",
"content": prompt
}],
n=1,
stop=None,
temperature=0
)
return response.choices[0].message["content"].strip()
def calc_token(self, in_text, out_text=""):
enc = tiktoken.encoding_for_model("gpt-4")
return len(enc.encode(out_text+in_text))
def calc_money(self, in_text, out_text):
"""money for gpt4"""
return (self.calc_token(in_text) * 0.03 + self.calc_token(out_text) * 0.06) / 1000
def remove_html_tags(self, text):
clean = re.compile('<.*?>')
return re.sub(clean, '', text)
def extract_json_from_text(self, text):
json_pattern = r'\{[^{}]*\}'
match = re.search(json_pattern, text)
if match:
try:
json_data = json.loads(match.group())
return json_data
except json.JSONDecodeError:
return None
else:
return None
def get_hardware_info(self):
available_cpu_cores = psutil.cpu_count(logical=False)
memory = psutil.virtual_memory()
total_memory = memory.total
total_memory = total_memory / (1024 * 1024 * 1024)
root_disk = psutil.disk_usage('/')
total_disk_space = root_disk.total
total_disk_space = total_disk_space / (1024 * 1024 * 1024)
return available_cpu_cores, int(total_memory), int(total_disk_space)
def get_disk_type(self, device="sda"):
rotational_path = f'/sys/block/{device}/queue/rotational'
if os.path.exists(rotational_path):
with open(rotational_path, 'r') as file:
rotational_value = file.read().strip()
if rotational_value == '0':
return 'SSD'
elif rotational_value == '1':
return 'HDD'
else:
return 'Unknown'
else:
return 'Unknown'
def get_suggestions_from_gpt(self, knob_name):
suggestions_prompt = textwrap.dedent(f"""
There are many useful manuals to guide the knob tuning process. For knob '{knob_name}' in {self.db}, summerize the way to set the value for it in a sentence. This sentence should be associated with concrete numbers as more detailed information if needed.
""")
suggestions = self.get_answer(suggestions_prompt)
self.token += self.calc_token(suggestions_prompt, suggestions)
self.money += self.calc_money(suggestions_prompt, suggestions)
return suggestions
def get_suggestions_from_manual(self, knob_name):
if not os.path.exists(f"./knowledge_collection/{self.db}/knob_info/official_document.json"):
return None
with open(self.official_path, 'r') as json_file:
data = json.load(json_file)
knob_list = data["params"]
description = None
for knob in knob_list:
if knob["name"] == knob_name:
description = self.remove_html_tags(knob["description"])
if description:
summerize_prompt = textwrap.dedent(f"""
Summerize the description for knob '{knob_name}' in a sentence. This sentence should be associated with concrete numbers as more detailed information if needed.
DESCRIPTION:
{description}
SENTECNCE:
""")
answer = self.get_answer(summerize_prompt)
self.token += self.calc_token(summerize_prompt, answer)
self.money += self.calc_money(summerize_prompt, answer)
return answer
else:
return None
def prepare_knowledge(self, knob_name):
knowledge_path = os.path.join(self.knob_path, "knowledge_sources")
file_name = f"{knob_name}.txt"
if file_name not in os.listdir(os.path.join(knowledge_path, "gpt")):
print(f"Preparing knowledge from gpt for knob: {knob_name}")
gpt_suggestions = self.get_suggestions_from_gpt(knob_name)
with open(os.path.join(knowledge_path, "gpt", file_name), "w") as file:
file.write(gpt_suggestions)
if file_name not in os.listdir(os.path.join(knowledge_path, "manual")):
print(f"Preparing knowledge from manual for knob: {knob_name}")
manual_suggestions = self.get_suggestions_from_manual(knob_name)
if manual_suggestions:
with open(os.path.join(knowledge_path, "gpt", file_name), "w") as file:
file.write(manual_suggestions)
def prune_suggestion(self, official_doc, gpt_suggestion, web_suggestion):
prompt = textwrap.dedent(f"""
I first give you information of a knob of {self.db} which is extracted from the official document in json format, this offers the constraints of the value of each knob. Then I offer you two suggestions for this knob from GPT and WEB, judge whether each suggestion satisfies the constraints of the offcial document. If there is a contradiction between certain suggestion and the official document, remove the contradictory part. If there is not a contradiction, return the original suggestion.
Step 1: Read the OFFICIAL_DOC especially the "max_val", "min_val" and "unit". Figure out the actual min_value and max_value. Note that sometimes "min_val and "max_val" are not the actual min_value and max_value, they need to be computed considering "unit" which is the actual unit of the "max_val", "min_val", "reset_val".
Step 2: Figure out if the suggestions contain any numerical value that is illegal according to the OFFICIAL_DOC, unit conversion may be required in the process. If so, remove the illegal values and the relevant information, rewrite the corresponding suggestion.
Step 3: Return your answer in json format.
OFFICIAL_DOC:
{official_doc}
GPT_SUGGESTION:
{gpt_suggestion}
WEB_SUGGESTION:
{web_suggestion}
Now think step by step, and give me the result in json format.:
{{
"gpt_suggestion": null , // if there is a contradiction, remove the contradictory part, else return the corresponding original suggestion.
"web_suggestion": null // if there is a contradiction, remove the contradictory part, else return the corresponding original suggestion.
}}
"""
)
answer = self.get_answer(prompt)
self.token += self.calc_token(prompt, answer)
self.money += self.calc_money(prompt, answer)
return self.extract_json_from_text(answer)
def prune_contradiction(self, suggestions_json):
prompt = textwrap.dedent(f"""
I will give you three suggestions for tuning a knob of {self.db}. Your job is to find contradictions between the given suggestions. If there is contradictory information between certain suggestions, especially the contradictions of values, keep the information provided by the higher-priority suggestion and only remove the contradictory information provided by the lower-priority suggestion. Do not remove the other information. The priority is defined in sequence as "manual_suggestion, web_suggestion, gpt_suggestion" from higher to lower. So manual_suggestion should not be changed. If there is contradiction within the same suggestion, keep it. Try to make your summary encapsulates information from the three suggestions as much as possible except from the contradictory parts.
THREE SUGGESTIONS:
{suggestions_json}
Now let's think step by step, and give me the result in legal json format.:
{{
"gpt_suggestion": null, // if the original provided suggestion is empty, return null, else return the corresponding answer.
"web_suggestion": null, // if the original provided suggestion is empty, return null, else return the corresponding answer.
"manual_suggestion": null // if the original provided suggestion is empty, return null, else return the origional manual_suggestion.
}}
"""
)
answer = self.get_answer(prompt)
self.token += self.calc_token(prompt, answer)
self.money += self.calc_money(prompt, answer)
return self.extract_json_from_text(answer)
def prune_default(self, official_doc, suggestions_json):
prompt = textwrap.dedent(f"""
I offer you three suggestions for tuning a knob of {self.db} derived from GPT, web and manual. Your job is to identify whether each suggestion contains information which state the legal range of the knob witch is the same as the OFFICIAL_DOC and remove it. If you find this kind of information, rewrite the suggestion so that it does not include this information about "min_val" and "max_val" in the OFFICIAL_DOC, but it should contain all the other information included in the corresponding original information especially some suggested values or ranges. You need to read the OFFICIAL_DOC to figure out if the suggestion includes these values which exists in the official document implicitly, unit conversion may be considered in this process.
I need you to return the three suggestions in the same json format.
Step 1: Read the OFFICIAL_DOC especially the "max_val", "min_val" and "unit". Figure out the actual min_value, max_value. Note that sometimes "min_val and "max_val" are not the actual min_value and max_value, they need to be computed considering "unit" which is the actual unit of the "max_val", "min_val".
Step 2: Figure out if the suggestions contain any numerical value that is the same as one of your computed min_value and max_value in Step 2. If so, remove them.
Step 3: Rewrite the suggestion so that it does not include any information about "min_val" and "max_val", but it should contain all the other information included in the corresponding original information especially some suggested values or ranges.
Step 4: Return your three suggestions in the same json format.
OFFICIAL_DOC:
{official_doc}
THREE SUGGESTIONS:
{suggestions_json}
Now let's think step by step and give me the result in legal json format:
{{
"gpt_suggestion": null , // if the original suggestion is empty, return null, else return the corresponding answer.
"web_suggestion": null, // if the original suggestion is empty, return null, else return the corresponding answer.
"manual_suggestion": null // if the original suggestion is empty, return null, else return the corresponding answer.
}}
"""
)
answer = self.get_answer(prompt)
self.token += self.calc_token(prompt, answer)
self.money += self.calc_money(prompt, answer)
return self.extract_json_from_text(answer)
def greedy_summarize(self, suggestions_json):
prompt = textwrap.dedent(f"""
Summarize the three suggestions provided in the JSON format below into a single comprehensive suggestion. Try to make your summary encapsulates information from the three suggestions as much as possible. If there is contradictory information between certain suggestions, keep the information provided by the higher-priority suggestion and remove the information provided by the lower-priority suggestion. The priority is defined in sequence as "manual_suggestion, web_suggestion, gpt_suggestion" from higher to lower. Your response should also be structured as a suggestion. Now let's think step by step and give me the answer.
THREE SUGGESTIONS:
{suggestions_json}
"""
)
answer = self.get_answer(prompt)
self.token += self.calc_token(prompt, answer)
self.money += self.calc_money(prompt, answer)
return answer
def check_summary(self, summary, suggestions_json):
prompt = textwrap.dedent(f"""
Decide if the following summary is consistent with corresponding suggestions which are provided in json format. Note that consistency means all information in the summary is supported by the suggestions. There should not be any contradiction in the summary, especially the contradictions of values. Your answer should either be "No" or "Yes".
Suggestions:{suggestions_json}
Summary:{summary}
"""
)
answer = self.get_answer(prompt)
self.token += self.calc_token(prompt, answer)
self.money += self.calc_money(prompt, answer)
return answer
def revise_summarize(self, suggestions_json, summary):
prompt = textwrap.dedent(f"""
Given three suggestions provided in the JSON format below, you should summarize them into a single comprehensive suggestion. I will also provide you a improper summary suggestion which may be inconsistent with the three suggestions.You should identify the problem in the improper summary and resummarize the three suggestions into a single comprehensive suggestion which encapsulates all the information from the three suggestions. If there is conflicting information between certain suggestions, keep the information provided by the higher-priority suggestion and ignore the information provided by the lower-priority suggestion. The priority is defined in sequence as "manual_suggestion, web_suggestion, gpt_suggestion" from higher to lower. Your response should also be structured as a suggestion. Now let's think step by step and give me the answer.
Note that you should just give me your summarized suggestion only. Do not provide me other information.
THREE SUGGESTIONS: {suggestions_json}
IMPROPER SUMMARY SUGGESTION: {summary}
"""
)
answer = self.get_answer(prompt)
self.token += self.calc_token(prompt, answer)
self.money += self.calc_money(prompt, answer)
return answer
def pipeline(self, knob):
print(f"begin to prepare the tuning pool for {knob}")
self.cur_time = time.time()
with open(self.knob_info_path) as json_file:
knob_info = json.load(json_file)
summary_files = os.listdir(self.summary_path)
self.prepare_knowledge(knob)
gpt_suggestion, web_suggestion, manual_suggestion = None, None, None
try:
with open(os.path.join(self.gpt_path, knob+".txt"), 'r') as file:
gpt_suggestion = file.read()
except:
pass
try:
with open(os.path.join(self.web_path, knob+".txt"), 'r') as file:
web_suggestion = file.readline()
except:
pass
try:
with open(os.path.join(self.manual_path, knob+".txt"), 'r') as file:
manual_suggestion = file.readline()
except:
            pass
if knob + ".txt" not in summary_files:
sources_json = self.prune_suggestion(knob_info[knob], gpt_suggestion, web_suggestion)
sources_json["manual_suggestion"] = manual_suggestion
sources_json = self.prune_contradiction(sources_json)
sources_json = self.prune_default(knob_info[knob], sources_json)
summary = self.greedy_summarize(sources_json)
print(f"SUMMARY:{summary}")
check = self.check_summary(summary, sources_json)
            i = 1  # guard against an infinite revision loop
while check=="No":
summary = self.revise_summarize(sources_json, summary)
check = self.check_summary(summary, sources_json)
print(f"RESUMMARY:{summary}")
i += 1
if i >= 3:
break
with open(os.path.join(self.summary_path, knob+".txt"), 'w') as file:
file.write(summary)
self.cur_time = time.time() - self.cur_time
self.total_time = self.total_time + self.cur_time
self.knob_num += 1
print(f"Finished to prepare knowledge source for {knob}")
print(f"accumulated token:{self.token}, accumulated money:{self.money}, accumulated time: {self.total_time}, accumulated knob num: {self.knob_num}")
print(f"ave token: {self.token/self.knob_num}, ave money:{self.money/self.knob_num}, ave time:{self.total_time/self.knob_num},") | [
"\n Given three suggestions provided in the JSON format below, you should summarize them into a single comprehensive suggestion. I will also provide you a improper summary suggestion which may be inconsistent with the three suggestions.You should identify the problem in the improper summary and resummarize the three suggestions into a single comprehensive suggestion which encapsulates all the information from the three suggestions. If there is conflicting information between certain suggestions, keep the information provided by the higher-priority suggestion and ignore the information provided by the lower-priority suggestion. The priority is defined in sequence as \"manual_suggestion, web_suggestion, gpt_suggestion\" from higher to lower. Your response should also be structured as a suggestion. Now let's think step by step and give me the answer.\n Note that you should just give me your summarized suggestion only. Do not provide me other information.\n THREE SUGGESTIONS: PLACEHOLDER\n IMPROPER SUMMARY SUGGESTION: PLACEHOLDER\n ",
"\n Decide if the following summary is consistent with corresponding suggestions which are provided in json format. Note that consistency means all information in the summary is supported by the suggestions. There should not be any contradiction in the summary, especially the contradictions of values. Your answer should either be \"No\" or \"Yes\".\n Suggestions:PLACEHOLDER\n Summary:PLACEHOLDER\n ",
"\n Summerize the description for knob 'PLACEHOLDER' in a sentence. This sentence should be associated with concrete numbers as more detailed information if needed.\n DESCRIPTION:\n PLACEHOLDER\n SENTECNCE:\n ",
"\n Summarize the three suggestions provided in the JSON format below into a single comprehensive suggestion. Try to make your summary encapsulates information from the three suggestions as much as possible. If there is contradictory information between certain suggestions, keep the information provided by the higher-priority suggestion and remove the information provided by the lower-priority suggestion. The priority is defined in sequence as \"manual_suggestion, web_suggestion, gpt_suggestion\" from higher to lower. Your response should also be structured as a suggestion. Now let's think step by step and give me the answer.\n THREE SUGGESTIONS:\n PLACEHOLDER\n "
] |
2024-01-10 | cpankow/excesspower-utils | bin~ligolw_inj_snr | #!/usr/bin/env python
import sys
import os
import bisect
from collections import defaultdict
from optparse import OptionParser
import itertools
import hashlib
import numpy
import scipy.optimize
from glue.ligolw import lsctables, table, utils, ligolw, ilwd
table.use_in(ligolw.LIGOLWContentHandler)
lsctables.use_in(ligolw.LIGOLWContentHandler)
from glue import lal as gluelal
from glue.ligolw.utils import process
from pylal import antenna
#from pylal import coherent_inspiral_metric as metric
#from pylal import coherent_inspiral_metric_detector_details as details
from pylal.series import read_psd_xmldoc, LIGOLWContentHandler
import lal
import lalburst
import lalsimulation
import lalmetaio
import lalinspiral
#
# Utility functions
#
def copy_sim_inspiral( row ):
"""
Turn a lsctables.SimInspiral into a SWIG wrapped lalburst.SimInspiral
"""
swigrow = lalmetaio.SimInspiralTable()
for simattr in lsctables.SimInspiralTable.validcolumns.keys():
if simattr in ["waveform", "source", "numrel_data", "taper"]:
# unicode -> char* doesn't work
setattr( swigrow, simattr, str(getattr(row, simattr)) )
else:
setattr( swigrow, simattr, getattr(row, simattr) )
# FIXME: This doesn't get copied properly, and so is done manually here.
swigrow.geocent_end_time = lal.LIGOTimeGPS(row.geocent_end_time, row.geocent_end_time_ns)
return swigrow
def copy_sim_inspiral_swig( swigrow, table ):
"""
Turn a SWIG wrapped lalburst.SimInspiral into a lsctables.SimInspiral
"""
row = table.RowType()
for simattr in lsctables.SimInspiralTable.validcolumns.keys():
if simattr in ["waveform", "source", "numrel_data", "taper"]:
# unicode -> char* doesn't work
setattr( row, simattr, str(getattr(swigrow, simattr)) )
else:
setattr( row, simattr, getattr(swigrow, simattr) )
return row
def copy_sim_burst_swig( swigrow, table ):
"""
Turn a SWIG wrapped lalburst.SimBurst into a lsctables.SimBurst
"""
row = table.RowType()
row.hrss = swigrow.hrss
row.time_geocent_gps = gluelal.LIGOTimeGPS(float(swigrow.time_geocent_gps)).seconds
row.time_geocent_gps_ns = gluelal.LIGOTimeGPS(float(swigrow.time_geocent_gps)).nanoseconds
row.psi = swigrow.psi
row.amplitude = swigrow.amplitude or 0
row.egw_over_rsquared = swigrow.egw_over_rsquared or 0
row.waveform_number = swigrow.waveform_number or 0
row.pol_ellipse_angle = swigrow.pol_ellipse_angle or 0.0
row.simulation_id = ilwd.ilwdchar("sim_burst:simulation_id:%d" % swigrow.simulation_id)
row.q = swigrow.q
row.waveform = swigrow.waveform
row.bandwidth = swigrow.bandwidth
row.process_id = ilwd.ilwdchar("process:process_id:%d" % swigrow.process_id)
row.time_slide_id = ilwd.ilwdchar("time_slide:time_slide_id:0")
row.frequency = swigrow.frequency
row.ra = swigrow.ra
row.time_geocent_gmst = swigrow.time_geocent_gmst
row.pol_ellipse_e = swigrow.pol_ellipse_e
row.duration = swigrow.duration
row.dec = swigrow.dec
return row
def copy_sim_burst( row ):
"""
Turn a lsctables.SimBurst into a SWIG wrapped lalburst.SimBurst
"""
swigrow = lalburst.CreateSimBurst()
swigrow.hrss = row.hrss or 0
time = lal.LIGOTimeGPS( row.time_geocent_gps, row.time_geocent_gps_ns )
swigrow.time_geocent_gps = time
swigrow.psi = row.psi
swigrow.amplitude = row.amplitude or 0
swigrow.egw_over_rsquared = row.egw_over_rsquared or 0
swigrow.waveform_number = row.waveform_number or 0
swigrow.pol_ellipse_angle = row.pol_ellipse_angle or 0.0
swigrow.simulation_id = int(row.simulation_id)
swigrow.q = row.q or 0.0
swigrow.waveform = str(row.waveform)
swigrow.bandwidth = row.bandwidth or 0.0
swigrow.process_id = int(row.process_id)
swigrow.frequency = row.frequency or 0.0
swigrow.ra = row.ra
swigrow.time_geocent_gmst = row.time_geocent_gmst or 0
swigrow.pol_ellipse_e = row.pol_ellipse_e or 0.0
swigrow.duration = row.duration or 0.0
swigrow.dec = row.dec
return swigrow
def parse_psd_file( filestr ):
"""
Map the user-provided PSD file string into a function to be called as PSD(f).
"""
try:
xmldoc = utils.load_filename(filestr, contenthandler=LIGOLWContentHandler)
psd = read_psd_xmldoc(xmldoc).values()[0]
f = numpy.arange(0, len(psd.data)*psd.deltaF, psd.deltaF)
psd = psd.data
except:
f, psd = numpy.loadtxt( filestr, unpack=True )
def anon_interp( fvals ):
return numpy.interp( fvals, f, psd )
return anon_interp
def parse_psd_func( funcstr ):
"""
Map the user-provided PSD function string to its equivalent function in lalsimulation.
"""
try:
return getattr( lalsimulation, funcstr )
except AttributeError:
raise AttributeError( "Could not find PSD function %s in lalsimulation" % funcstr )
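# Illustrative usage (hypothetical file name): parse_psd_file("my_psd.txt")
# accepts either a two-column ASCII file of frequency/PSD samples or an XML PSD
# document and returns an interpolator, so parse_psd_file("my_psd.txt")(100.0)
# gives the PSD at 100 Hz; parse_psd_func("SimNoisePSDaLIGOZeroDetHighPower")
# instead returns the analytic PSD function of that name from lalsimulation.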
def sim_to_sngl_table(sim_type, table_type):
column_over = list(set(table_type.validcolumns) & set(sim_type.validcolumns))
if sim_type == lsctables.SimInspiralTable:
return lsctables.New(table_type, column_over + ["search", "channel", "eff_distance", "ifo", "event_id", "end_time", "end_time_ns", "snr"])
elif sim_type == lsctables.SimBurstTable:
return lsctables.New(table_type, column_over + ["search", "channel", "central_freq", "ifo", "event_id", "start_time", "start_time_ns", "peak_time", "peak_time_ns", "snr"])
def sim_to_sngl_insp(sim, ifo, sngl_table):
"""
Take the information in sim and transfer overlapping information to a SnglInspiral row, thus appended to sngl_table.
"""
if "SimInspiral" in str(sim): # Don't ask
column_over = list(set(sngl_table.validcolumns) & set(lsctables.SimInspiralTable.validcolumns))
else:
column_over = list(set(sngl_table.validcolumns) & set(lsctables.SimBurstTable.validcolumns))
sngl_row = sngl_table.RowType()
for col in column_over:
setattr(sngl_row, col, getattr(sim, col))
sngl_row.set_end( gluelal.LIGOTimeGPS(
int(getattr(sim, "%s_end_time" % ifo[0].lower())),
getattr(sim, "%s_end_time_ns" % ifo[0].lower()) ) )
sngl_row.ifo = ifo
sngl_row.search = "fake"
sngl_row.channel = "fake"
sngl_row.process_id = ilwd.ilwdchar("process:process_id:0")
if isinstance(sim, lsctables.SimInspiral):
        sngl_row.eff_distance = getattr(sim, "eff_dist_%s" % ifo[0].lower())
sngl_table.append(sngl_row)
return sngl_row
def sim_to_sngl_burst(sim, ifo, sngl_table):
"""
Take the information in sim, transfer the overlapping columns to a SnglBurst row, and append that row to sngl_table.
"""
if "SimInspiral" in str(sim): # Don't ask
column_over = list(set(sngl_table.validcolumns) & set(lsctables.SimInspiralTable.validcolumns))
else:
column_over = list(set(sngl_table.validcolumns) & set(lsctables.SimBurstTable.validcolumns))
sngl_row = sngl_table.RowType()
for col in column_over:
setattr(sngl_row, col, getattr(sim, col))
sngl_row.set_peak(gluelal.LIGOTimeGPS( sim.time_geocent_gps.gpsSeconds, sim.time_geocent_gps.gpsNanoSeconds))
if sim.duration is not None:
# Likely BTLWNB
sngl_row.set_start(sngl_row.get_peak() - sim.duration/2.0)
else:
# Likely CSG
sngl_row.set_start(sngl_row.get_peak() - sim.q/(numpy.sqrt(2)*numpy.pi*sim.frequency)/2.0)
if sim.bandwidth is not None:
sngl_row.central_freq = sim.frequency + sim.bandwidth/2.0
else:
sngl_row.central_freq = sim.frequency
sngl_row.ifo = ifo
sngl_row.search = "fake"
sngl_row.channel = "fake"
sngl_row.process_id = ilwd.ilwdchar("process:process_id:0")
sngl_table.append(sngl_row)
return sngl_row
def hoff_single_ifo_snr(hf, psd=None, f_low=0, f_high=None):
"""
Determine the single-IFO SNR of a frequency-domain waveform hf for a given psd. If no psd is supplied (None), return the hrss instead. Integration runs from f_low (0 by default) to f_high (likely the Nyquist frequency).
"""
hrss = hf.data.data.conj() * hf.data.data
if psd is None:
return sqrt(hrss.sum()*hf.deltaF)
# Determine limits of integration
#print "Low frequency of integration %f" % int_f_low_bin
int_f_low_bin = int(numpy.round(f_low/hf.deltaF))
#print "High frequency of integration %f" % int_f_high_bin
if f_high is None:
# FIXME: Assumes the hf, psd have the same deltaF and f0
int_f_high_bin = min(len(hf.data.data), len(psd.data.data))
else:
int_f_high_bin = int(numpy.round(f_high/hf.deltaF))
# divide and sum for SNR
nw_ip = ( hrss / psd )[int_f_low_bin:int_f_high_bin]
power = 4 * numpy.real(sum(nw_ip)) * hf.deltaF
return numpy.sqrt(power)
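# The routine above implements the standard matched-filter norm
# rho^2 = 4 * sum_k |h(f_k)|^2 / S(f_k) * deltaF, and with psd=None it returns
# the hrss, sqrt(sum_k |h(f_k)|^2 * deltaF). A small optional self-check with a
# flat (white) PSD, where rho = 2*hrss/sqrt(S0) over the same band (psd_level
# and f_high are illustrative and must suit the input series):
def _check_flat_psd_snr(hf, psd_level=1e-46, f_high=2048.0):
    flat_psd = numpy.ones(len(hf.data.data)) * psd_level
    return hoff_single_ifo_snr(hf, flat_psd, f_low=0, f_high=f_high)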
def rescale_injection_uniform_snr(inj_row, snr, snr_scale):
"""
Reassign the injection SNR to be a random variate in the interval (tuple) provided in snr_scale. See rescale_injection_snr for details on how this is accomplished. Returns the new SNR chosen.
"""
scale_abs = abs(snr_scale[1] - snr_scale[0])
new_snr = numpy.random.random() * scale_abs + snr_scale[0]
rescale_injection_snr(inj_row, snr, new_snr)
return new_snr
def rescale_injection_normal_snr(inj_row, snr, mean, std):
"""
Reassign the injection SNR to be a random variate drawn from a normal distribution centered on mean with a given variance. See rescale_injection_snr for details on how this is accomplished. Returns the new SNR chosen.
"""
new_snr = numpy.random.normal(mean, std)
rescale_injection_snr(inj_row, snr, new_snr)
return new_snr
def rescale_injection_snr(inj_row, snr, new_snr):
"""
Rescale the appropriate strength parameter of inj_row so that the injection's optimal SNR changes from snr to new_snr.
"""
snr_ratio = new_snr / snr
if isinstance(inj_row, lsctables.SimInspiral) or isinstance(inj_row, lalmetaio.SimInspiralTable):
inj_row.distance /= snr_ratio
elif inj_row.waveform == "BTLWNB":
inj_row.egw_over_rsquared *= snr_ratio**2
inj_row.hrss *= snr_ratio
elif inj_row.waveform == "SineGaussian":
inj_row.hrss *= snr_ratio
elif inj_row.waveform == "StringCusp":
# CHECK: Is a string cusp injection SNR directly proportional to its
# amplitude?
inj_row.amplitude *= snr_ratio
elif inj_row.waveform == "Impulse":
inj_row.amplitude *= snr_ratio
inj_row.hrss *= snr_ratio
else:
if hasattr(inj_row, "waveform"):
raise ValueError("Don't know how to scale burst waveform %s" % inj_row.waveform)
else:
raise ValueError("Don't know how to scale %s" % str(type(inj_row)))
#
# Efficiency calculation routines
#
def loglogistic( x, alpha, beta ):
"""
The log-logistic CDF distribution with parameters alpha (scale), beta (dispersion).
"""
return 1.0/(1+(x/alpha)**(-beta))
def loglogisticinv( y, alpha, beta ):
"""
The inverse of the log-logistic CDF distribution.
"""
return alpha*(1.0/y - 1)**(-1.0/beta)
def get_eff_params( xbins, ybins, alpha0=5e-24, beta0=1 ):
"""
Fit the data to a loglogistic CDF curve to get the parameters for efficiency values.
"""
params, corr = scipy.optimize.curve_fit( loglogistic, xbins, ybins, (alpha0, beta0) )
return params
def determine_10_50_90_eff( params ):
"""
Use fit parameters to invert the efficiency function and figure out the statistic values at 10%, 50%, and 90%.
"""
a, b = params
return (loglogisticinv(0.1, a, b), loglogisticinv(0.5, a, b), loglogisticinv(0.9, a, b))
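# Note that the 50% efficiency point is just the scale parameter:
# loglogisticinv(0.5, a, b) == a for any dispersion b, since (1/0.5 - 1)**(-1.0/b) == 1.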
def rescale_number( x, orig_intv, new_intv ):
"""
Rescale a number x which lies within the original interval orig_intv such that it occupies the same ``rank'' within the new interval new_intv.
TODO: Allow for a two-sided interval such that the rescaling is about the mean.
"""
oint = orig_intv[1] - orig_intv[0]
nint = new_intv[1] - new_intv[0]
return ( (x - orig_intv[0])/oint * nint ) + new_intv[0]
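# Worked example: rescale_number(5.0, (0.0, 10.0), (1e-23, 3e-23)) == 2e-23,
# i.e. the midpoint of the old interval maps to the midpoint of the new one.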
optp = OptionParser()
optp.add_option("--det-psd-func", action="append", help="Set a detector to use in the network with the corresponding PSD function from lalsimulation. Example: --det-psd-func H1=SimNoisePSDaLIGOZeroDetHighPower. Can be set multiple times to add more detectors to the network.")
optp.add_option("--waveform-length", type=float, default=32.0, help="Length of waveform in seconds. Note that this affects the binning of the PSD. Default is 32 s")
optp.add_option("--nyquist-frequency", type=float, default=4096.0, help="Nyquist frequency of the PSD to generate. Default is 4096 Hz.")
optp.add_option("--low-frequency-cutoff", type=float, default=0.0, help="Frequency at which to start the integration. The max of the waveform f_lower or this option is used. Default is 0.")
optp.add_option("--high-frequency-cutoff", type=float, help="Frequency at which to end the integration. The min of the nyquist frequency or this option is used. Default is 0.")
optp.add_option("--snr-thresh", type=float, default=8.0, help="SNR threshold (network) to use for efficiency measurement. Default is 8.")
optp.add_option("--eff-attribute", default="hrss", help="SimBurst attribute against which to measure efficiency. Default is 'hrss', valid choices are any SimBurst attribute.")
optp.add_option("--eff-bins", default=20, type=int, help="Number of bins over which to measure efficiency. Default is 20.")
optp.add_option("--eff-rank", default="netSNR", help="How to rank injections for efficiency purposes. Valid choices are ``netSNR'' (network SNR), ``cohSNR'' (coherent SNR), and ``eta'' (cWB network amplitude). Default is netSNR.")
optp.add_option("--calculate-only", action="store_true", help="Do not perform any injection cutting or squeezing, calculate values only.")
optp.add_option("--machine-parse", action="store_true", help="Print in a machine parseable way.")
optp.add_option("--no-print-single-ifo", action="store_true", default=False, help="Don't print individual IFO SNRs.")
optp.add_option("--no-print-network", action="store_true", default=False, help="Don't print network SNRs.")
optp.add_option("--no-print-coherent", action="store_true", default=False, help="Don't print coherent SNRs.")
optp.add_option("--no-print-cwb", action="store_true", default=False, help="Don't print CWB ampltiudes.")
optp.add_option("--verbose", action="store_true", help="Be verbose.")
optp.add_option("--single-snr-cut-thresh", type=float, help="Cut any injection with a single detector SNR less than this.")
optp.add_option("--coherent-snr-cut-thresh", type=float, help="Cut any injection with a coherent SNR less than this.")
optp.add_option("--coherent-amplitude-cut-thresh", type=float, help="Cut any injection with a cWB coherent ampltiude (eta) less than this.")
optp.add_option("--sim-id", type=int, help="Only calculate for simulation_id N")
optp.add_option("--store-sngl", help="Store the values of the simulated event and SNR calculation in a sngl_* event row. Valid choices are sngl_inspiral, sngl_burst.")
optp.add_option("--store-sim", action="store_true", help="Store the value of the SNR calculation in a sim_burst event row in the column ``amplitude''. This is only valid for sim_bursts.")
optp.add_option("--squeeze-attribute", default="hrss", help="Squeeze this attribute for injections so as to make the more detectable. Default is 'hrss'. You might try 'distance' for SimInspirals.")
optp.add_option("--rescale-snr-uniform-min", type=float, help="Rescale all SNRs to be uniform deviates within range specified by this and --rescale-snr-uniform-max")
optp.add_option("--rescale-snr-uniform-max", type=float, help="Rescale all SNRs to be uniform deviates within range specified by this and --rescale-snr-uniform-min")
optp.add_option("--rescale-snr-normal-mean", type=float, help="Rescale all SNRs to be normal deviates with parameters specified by this and --rescale-snr-normal-stddev")
optp.add_option("--rescale-snr-normal-stddev", type=float, help="Rescale all SNRs to be normal deviates with parameters specified by this and --rescale-snr-normal-mean")
optp.add_option("--rescale-snr-network", action="store_true", help="Use network SNR to rescale the injection strength.")
opts, args = optp.parse_args()
#
# Sanity checks
#
uniform_scale, uniform_scale_network, norm_params, norm_params_network = None, None, None, None
if opts.rescale_snr_uniform_min and not opts.rescale_snr_uniform_max:
sys.exit("Uniform rescaling requested, but bounds not completely specified.")
elif opts.rescale_snr_network and opts.rescale_snr_uniform_min and opts.rescale_snr_uniform_max:
uniform_scale_network = (opts.rescale_snr_uniform_min, opts.rescale_snr_uniform_max)
elif opts.rescale_snr_uniform_min and opts.rescale_snr_uniform_max:
uniform_scale = (opts.rescale_snr_uniform_min, opts.rescale_snr_uniform_max)
if opts.rescale_snr_normal_mean and not opts.rescale_snr_normal_stddev:
sys.exit("Normal rescaling requested, but parameters not completely specified.")
elif opts.rescale_snr_network and opts.rescale_snr_normal_mean and opts.rescale_snr_normal_stddev:
norm_params_network = (opts.rescale_snr_normal_mean, opts.rescale_snr_normal_stddev)
elif opts.rescale_snr_normal_mean and opts.rescale_snr_normal_stddev:
norm_params = (opts.rescale_snr_normal_mean, opts.rescale_snr_normal_stddev)
if uniform_scale is not None and norm_params is not None:
sys.exit("Two rescaling strategy parameter sets requested, please choose either --rescale-snr-normal-* or --rescale-snr-uniform-*")
#
# Detector, PSD setup
#
detectors = {"H1": lal.CachedDetectors[lal.LHO_4K_DETECTOR],
"L1": lal.CachedDetectors[lal.LLO_4K_DETECTOR],
"V1": lal.CachedDetectors[lal.VIRGO_DETECTOR]}
uniq_det_comb = list( itertools.combinations( detectors.keys(), 2 ) )
# Default is R=8192, and data with a length of 32 seconds. Sorta excessive.
fnyq = opts.nyquist_frequency
deltaF = 1.0/opts.waveform_length
eff_attr = opts.eff_attribute
f = numpy.linspace(0, int(fnyq/deltaF), int(fnyq/deltaF)+1)*deltaF
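# With the defaults (32 s waveform length, 4096 Hz Nyquist) this grid has
# int(fnyq/deltaF) + 1 = 4096*32 + 1 = 131073 bins spaced deltaF = 1/32 Hz apart.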
detector_psds = {}
if opts.det_psd_func is None:
raise ArgumentError("Need at least one detector / PSD specified.")
for dspec in opts.det_psd_func:
det, psdfunc = dspec.split("=")
# Secondary mapping of psdfuncs to actual numerical arrays
if os.path.isfile( psdfunc ):
if opts.verbose:
print "Detector %s will use %s as the PSD" % (det, psdfunc)
detector_psds[det] = numpy.array(parse_psd_file( psdfunc )(f))
else:
if opts.verbose:
print "Looking up function %s in lalsimulation for use with %s as the PSD" % (psdfunc, det)
detector_psds[det] = numpy.array(map(parse_psd_func( psdfunc ), f))
# Set DC sensitivity to something sensible, since it often comes out nan
detector_psds[det][0] = detector_psds[det][1]
if len(detector_psds) == 0:
raise ArgumentError("Need at least one detector / PSD specified.")
for d in detectors.keys():
if d not in detector_psds.keys():
del detectors[d]
#
# Generate required FFT plans
#
# samples = rate * length
fwdlen = int(2.0*fnyq/deltaF)
fwdplan = lal.CreateForwardREAL8FFTPlan( fwdlen, 0 )
#
# Get XML document contents
#
xmldoc = utils.load_filename(args[0], contenthandler=ligolw.LIGOLWContentHandler)
#
# Initialize random seed
# This is done so as to ensure the same random SNRs are drawn each time for any
# given file
#
with open(args[0]) as xmlfile:
h = hashlib.new("md5", xmlfile.read())
numpy.random.seed(int(h.hexdigest()[:8], 16))
#
# Get sim tables
#
sims = []
sim_burst = []
try:
sim_burst = table.get_table( xmldoc, lsctables.SimBurstTable.tableName )
sims.extend( map(copy_sim_burst, sim_burst) )
except ValueError:
if opts.verbose:
print >>sys.stderr, "No SimBurst table found, skipping..."
sim_insp = []
try:
sim_insp = table.get_table( xmldoc, lsctables.SimInspiralTable.tableName )
sims.extend( map(copy_sim_inspiral, sim_insp) )
except ValueError:
if opts.verbose:
print >>sys.stderr, "No SimInspiral table found, skipping..."
snr_thresh = opts.snr_thresh
injected, efficiencies, count = defaultdict(list), defaultdict(list), defaultdict(lambda: 0)
# Store injection waveform temporarily
injection_waveforms = {}
# Store sims to cut
cut_sims = []
# Store realized SNRs in Sngl* rows
if opts.store_sngl == "sngl_inspiral":
sngl_table = sim_to_sngl_table(lsctables.SimInspiralTable, lsctables.SnglInspiralTable)
elif opts.store_sngl == "sngl_burst":
sngl_table = sim_to_sngl_table(lsctables.SimBurstTable, lsctables.SnglBurstTable)
sim_snr_table = lsctables.New(lsctables.SimBurstTable)
i = 0
for row in sims:
i += 1
snr = {}
if opts.sim_id is not None and opts.sim_id != int(row.simulation_id):
continue
for d, det in detectors.iteritems():
psd = detector_psds[d]
# inspiral case
if type(row) == lalmetaio.SimInspiralTable:
time = row.geocent_end_time
hp, hx = lalinspiral.SimInspiralChooseWaveformFromSimInspiral( row, 0.5/fnyq )
hp.epoch += time
hx.epoch += time
h = lalsimulation.SimDetectorStrainREAL8TimeSeries( hp, hx,
row.longitude, row.latitude, row.polarization, det )
ra, dec, psi = row.longitude, row.latitude, row.polarization
f_low_wave = max(opts.low_frequency_cutoff, row.f_lower)
# burst case
elif type(row) == lalmetaio.SimBurst:
time = row.time_geocent_gps
hp, hx = lalburst.GenerateSimBurst( row, 0.5/fnyq )
h = lalsimulation.SimDetectorStrainREAL8TimeSeries( hp, hx,
row.ra, row.dec, row.psi, det )
ra, dec, psi = row.ra, row.dec, row.psi
# FIXME: What's the f_lower for a burst? central_freq - 2*band?
# more over, that might not make sense for things like cosmic
# strings
f_low_wave = max(opts.low_frequency_cutoff, 0)
else:
raise TypeError("%s: It's not a burst, and it's not an inspiral... what do you want me to do here?" % type(row))
# zero pad
needed_samps = int(2.0*fnyq/deltaF)
prevlen = h.data.length
if h.data.length < needed_samps:
h = lal.ResizeREAL8TimeSeries( h, 0, needed_samps )
elif h.data.length > needed_samps:
h = lal.ResizeREAL8TimeSeries( h, h.data.length-needed_samps, needed_samps )
# Forward FFT
# adjust heterodyne frequency to match flow
hf = lal.CreateCOMPLEX16FrequencySeries(
name = "FD signal",
epoch = h.epoch,
f0 = 0,
deltaF = deltaF,
sampleUnits = lal.DimensionlessUnit,
length = h.data.length/2 + 1
)
lal.REAL8TimeFreqFFT(hf, h, fwdplan)
injection_waveforms[d] = hf
snr[d] = hoff_single_ifo_snr(hf, psd, f_low_wave, min(opts.high_frequency_cutoff or float("inf"), opts.nyquist_frequency))
#
# Reassign SNRs to be within a certain range
#
old_snr = snr[d]
if norm_params is not None:
snr[d] = rescale_injection_normal_snr(row, snr[d], norm_params[0], norm_params[1])
elif uniform_scale is not None:
snr[d] = rescale_injection_uniform_snr(row, snr[d], uniform_scale)
hf.data.data *= snr[d]/old_snr
#
# Print out some information
#
if opts.machine_parse and not opts.no_print_single_ifo:
print snr[d]
elif not opts.no_print_single_ifo:
print "Waveform %s at %10.3f has SNR in %s of %f" % (row.waveform, time, d, snr[d])
#
# Make 'sngl_' rows with the optimal SNR information
#
if opts.store_sngl == "sngl_inspiral":
sngl_row = sim_to_sngl_insp(row, det.frDetector.prefix, sngl_table)
sngl_row.snr = snr[d]
sngl_row.event_id = sngl_table.get_next_id()
elif opts.store_sngl == "sngl_burst":
sngl_row = sim_to_sngl_burst(row, det.frDetector.prefix, sngl_table)
sngl_row.snr = snr[d]**2 # SNR column is energy, really
sngl_row.event_id = sngl_table.get_next_id()
#
# Network SNR
#
net_snr = numpy.sqrt(sum([ s**2 for s in snr.values()]))
if opts.store_sim:
row.amplitude = net_snr
sim_snr_table.append(row)
if opts.machine_parse and not opts.no_print_network:
print "%f" % net_snr
elif not opts.no_print_network:
print "Network SNR for %s at %10.3f is %f" % (row.waveform, time, net_snr)
#
# Coherent SNR
#
time_delays = {}
responses = {}
d0 = "H1"
for d in detectors.keys():
time_delays[(d0,d)] = antenna.timeDelay( float(time), ra, dec, 'radians', d0, d )
responses = {}
# TODO: move this up to the PSD section, since it's only a function of frequency
S_f_coh = numpy.zeros( (2, len(f)) )
hp_coh = numpy.zeros( len(f), dtype=numpy.complex128 )
hx_coh = numpy.zeros( len(f), dtype=numpy.complex128 )
hp_coh_det = {}
hx_coh_det = {}
for d in detectors.keys():
responses[d] = antenna.response( float(time), ra, dec, 0, psi, 'radians', d )[:2]
# Creighton and Anderson eq. 7.137
S_f_coh += numpy.array([ numpy.conj(r)*r/detector_psds[d] for r in responses[d] ])
# Creighton and Anderson eq. 7.136a (numerator)
hp_coh_det[d] = injection_waveforms[d].data.data * numpy.exp( 2*numpy.pi * 1j * f * time_delays[(d0,d)] ) * responses[d][0] / detector_psds[d]
hp_coh += hp_coh_det[d]
# Creighton and Anderson eq. 7.136b (numerator)
hx_coh_det[d] = injection_waveforms[d].data.data * numpy.exp( 2*numpy.pi * 1j * f * time_delays[(d0,d)] ) * responses[d][1] / detector_psds[d]
hx_coh += hx_coh_det[d]
df = injection_waveforms[d].deltaF
# Creighton and Anderson eq. 7.136a,b (full)
hp_coh_f = hp_coh / S_f_coh[0]
hx_coh_f = hx_coh / S_f_coh[1]
# Creighton and Anderson eq. 7.138
coh_snr = 4 * numpy.real(sum(hp_coh_f.conj() * hp_coh_f * S_f_coh[0])) * df
coh_snr += 4 * numpy.real(sum(hx_coh_f.conj() * hx_coh_f * S_f_coh[1])) * df
if opts.machine_parse and not opts.no_print_coherent:
print "%g" % numpy.sqrt(coh_snr)
elif not opts.no_print_coherent:
print "Coherent SNR: %g" % numpy.sqrt(coh_snr)
coh_energy = 0
coh_energy_mat = {}
for ((d1, hf_d1), (d2, hf_d2)) in itertools.combinations( tuple(hp_coh_det.iteritems()), 2 ):
coh_energy_mat[(d1,d2)] = sum(hf_d1.conj() * hf_d2 / S_f_coh[0])*df
coh_energy += coh_energy_mat[(d1,d2)]
eta_est = numpy.sqrt(abs(coh_energy)/len(detectors.values()))
if opts.machine_parse and not opts.no_print_cwb:
print "%g" % eta_est
elif not opts.no_print_cwb:
print "Estimated cWB ranking statistic (1G) %g" % eta_est
#
# Reassign SNRs to be within a certain range -- network version
#
if norm_params_network is not None:
rescale_injection_normal_snr(row, net_snr, norm_params_network[0], norm_params_network[1])
elif uniform_scale_network is not None:
rescale_injection_uniform_snr(row, net_snr, uniform_scale_network)
if opts.single_snr_cut_thresh is not None and any(map(lambda s: s < opts.single_snr_cut_thresh, snr.values())):
cut_sims.append( (type(row), row.simulation_id ) )
continue
if opts.coherent_snr_cut_thresh is not None and numpy.sqrt(coh_snr) < opts.coherent_snr_cut_thresh:
cut_sims.append( (type(row), row.simulation_id ) )
continue
if opts.coherent_amplitude_cut_thresh is not None and eta_est < opts.coherent_amplitude_cut_thresh:
cut_sims.append( (type(row), row.simulation_id ) )
continue
# TODO: Use different ranking statistic for efficiency
if net_snr > snr_thresh:
efficiencies[row.waveform].append( row )
injected[row.waveform].append( row )
count[row.waveform] += 1
#
# Output rescaled SNRs
#
if norm_params_network is not None or norm_params is not None or uniform_scale is not None:
xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
if sim_burst:
simb_snr_table = lsctables.New(lsctables.SimBurstTable)
if sim_insp:
simi_snr_table = lsctables.New(lsctables.SimInspiralTable)
for sim in sims:
if isinstance(sim, lsctables.SimInspiral):
simi_snr_table.append(sim)
elif isinstance(sim, lalmetaio.SimInspiralTable):
simi_snr_table.append(copy_sim_inspiral_swig(sim, simi_snr_table))
elif isinstance(sim, lsctables.SimBurst):
simb_snr_table.append(sim)
elif isinstance(sim, lalmetaio.SimBurst):
simb_snr_table.append(copy_sim_burst_swig(sim, simb_snr_table))
if sim_burst and simb_snr_table:
xmldoc.childNodes[0].appendChild(simb_snr_table)
if sim_insp and simi_snr_table:
xmldoc.childNodes[0].appendChild(simi_snr_table)
utils.write_filename(xmldoc, "snr_rescaled.xml.gz", gz=True)
#
# Identify injections not passing the SNR thresholds
#
try:
sim_burst = table.get_table(xmldoc, lsctables.SimBurstTable.tableName)
sb_ids = map(lambda sb: sb[1], filter(lambda cut: cut[0] == lalmetaio.SimBurst, cut_sims))
print "%d burst injections would be cut from SNR thresholds." % len(sb_ids)
n = len(sim_burst)-1
for i, sb in enumerate(sim_burst[::-1]):
if int(sb.simulation_id) in sb_ids:
del sim_burst[n-i]
except ValueError:
pass
try:
sim_insp = table.get_table(xmldoc, lsctables.SimInspiralTable.tableName)
si_ids = map(lambda sb: sb[1], filter(lambda cut: cut[0] == lalmetaio.SimInspiralTable, cut_sims))
print "%d inspiral injections would be cut from SNR thresholds." % len(si_ids)
n = len(sim_insp)-1
for i, si in enumerate(sim_insp[::-1]):
if int(si.simulation_id) in si_ids:
del sim_insp[n-i]
except ValueError:
pass
#
# Record "triggers" with optimal information
#
if opts.store_sngl is not None or opts.store_sim:
xmldoc = ligolw.Document()
xmldoc.childNodes.append(ligolw.LIGO_LW())
if opts.store_sngl is not None:
xmldoc.childNodes[0].appendChild(sngl_table)
ifos = reduce(str.__add__, [ifo[0] for ifo in detectors.keys()])
if opts.store_sim:
for i, row in enumerate(sim_snr_table):
sim_snr_table[i] = copy_sim_burst_swig(row, sim_snr_table)
xmldoc.childNodes[0].appendChild(sim_snr_table)
ifos = reduce(str.__add__, [ifo[0] for ifo in detector_psds.keys()])
process.register_to_xmldoc(xmldoc, sys.argv[0], opts.__dict__)
utils.write_filename(xmldoc, "%s-FAKE_SEARCH.xml.gz" % ifos, gz=True)
if opts.calculate_only:
exit(0)
if len(cut_sims) > 0:
print "Writing cut XML file."
utils.write_filename( xmldoc, "cut_tmp.xml" )
if opts.sim_id is not None:
exit()
from pylal import imr_utils
from pylal import rate
for wave, eff in efficiencies.iteritems():
ndet = len(eff)
if opts.verbose:
print "Report for waveform family %s" % wave
print "Efficiency: %d / %d = %f" % (ndet, count[wave], float(ndet)/count[wave])
# Number of injections for this type
inj = injected[wave]
#
# Measure efficiency
#
effbins = imr_utils.guess_nd_bins( inj, {eff_attr: (opts.eff_bins, rate.LogarithmicBins)} )
eff, err = imr_utils.compute_search_efficiency_in_bins( eff, inj, effbins, lambda sim: (getattr(sim, eff_attr),) )
#
# Fit efficiency to loglogistic curve
#
cents = eff.centres()[0]
for c, ef, er in zip(eff.centres()[0], eff.array, err.array):
if opts.verbose:
print "%2.1g %2.1f +/- %0.2f" % (c, ef, er)
try:
attr_scale = numpy.average(map(lambda sim: getattr(sim, eff_attr), inj))
a, b = get_eff_params( cents, eff.array, attr_scale )
except RuntimeError:
print >>sys.stderr, "Warning, couldn't fit efficiency, check bins. Skipping..."
continue
ten, fifty, ninety = determine_10_50_90_eff( (a,b) )
if opts.verbose:
print "10%% / 50%% / 90%% efficiency estimate: %2.2g %2.2g %2.2g" % (ten, fifty, ninety)
if opts.squeeze_attribute is not None:
#new_range = ( max(0, ninety - (ninety-ten)), ten + (ninety-ten) )
new_range = ( ten, ninety )
orig_range = []
for sim in (sim_insp or []) + (sim_burst or []):
if sim.waveform != wave:
continue
try:
orig_range.append( getattr( sim, opts.squeeze_attribute ) )
except:
# TODO: Warn?
pass
orig_range = sorted( orig_range )
orig_range = (orig_range[0], orig_range[-1])
if opts.verbose:
print "Squeeze distribution %s according to efficiency values. Will map %s -> %s" % (opts.squeeze_attribute, str(orig_range), str(new_range))
for sim in sim_burst:
if sim.waveform != wave:
continue
try:
setattr( sim, opts.squeeze_attribute, rescale_number( getattr( sim, opts.squeeze_attribute ), orig_range, new_range ) )
except AttributeError:
# TODO: Warn?
pass
for sim in sim_insp:
if sim.waveform != wave:
continue
try:
setattr( sim, opts.squeeze_attribute, rescale_number( getattr( sim, opts.squeeze_attribute ), orig_range, new_range ) )
except AttributeError:
# TODO: Warn?
pass
utils.write_filename( xmldoc, "squeeze_tmp.xml" )
| [] |
2024-01-10 | open2ai/dify | api~core~completion.py | from typing import Optional, List, Union
from langchain.callbacks import CallbackManager
from langchain.chat_models.base import BaseChatModel
from langchain.llms import BaseLLM
from langchain.schema import BaseMessage, BaseLanguageModel, HumanMessage
from core.constant import llm_constant
from core.callback_handler.llm_callback_handler import LLMCallbackHandler
from core.callback_handler.std_out_callback_handler import DifyStreamingStdOutCallbackHandler, \
DifyStdOutCallbackHandler
from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException
from core.llm.error import LLMBadRequestError
from core.llm.llm_builder import LLMBuilder
from core.chain.main_chain_builder import MainChainBuilder
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI
from core.memory.read_only_conversation_token_db_buffer_shared_memory import \
ReadOnlyConversationTokenDBBufferSharedMemory
from core.memory.read_only_conversation_token_db_string_buffer_shared_memory import \
ReadOnlyConversationTokenDBStringBufferSharedMemory
from core.prompt.prompt_builder import PromptBuilder
from core.prompt.prompt_template import OutLinePromptTemplate
from core.prompt.prompts import MORE_LIKE_THIS_GENERATE_PROMPT
from models.model import App, AppModelConfig, Account, Conversation, Message
class Completion:
@classmethod
def generate(cls, task_id: str, app: App, app_model_config: AppModelConfig, query: str, inputs: dict,
user: Account, conversation: Optional[Conversation], streaming: bool, is_override: bool = False):
"""
errors: ProviderTokenNotInitError
"""
cls.validate_query_tokens(app.tenant_id, app_model_config, query)
memory = None
if conversation:
# get memory of conversation (read-only)
memory = cls.get_memory_from_conversation(
tenant_id=app.tenant_id,
app_model_config=app_model_config,
conversation=conversation
)
inputs = conversation.inputs
conversation_message_task = ConversationMessageTask(
task_id=task_id,
app=app,
app_model_config=app_model_config,
user=user,
conversation=conversation,
is_override=is_override,
inputs=inputs,
query=query,
streaming=streaming
)
# build main chain include agent
main_chain = MainChainBuilder.to_langchain_components(
tenant_id=app.tenant_id,
agent_mode=app_model_config.agent_mode_dict,
memory=ReadOnlyConversationTokenDBStringBufferSharedMemory(memory=memory) if memory else None,
conversation_message_task=conversation_message_task
)
chain_output = ''
if main_chain:
chain_output = main_chain.run(query)
# run the final llm
try:
cls.run_final_llm(
tenant_id=app.tenant_id,
mode=app.mode,
app_model_config=app_model_config,
query=query,
inputs=inputs,
chain_output=chain_output,
conversation_message_task=conversation_message_task,
memory=memory,
streaming=streaming
)
except ConversationTaskStoppedException:
return
@classmethod
def run_final_llm(cls, tenant_id: str, mode: str, app_model_config: AppModelConfig, query: str, inputs: dict,
chain_output: str,
conversation_message_task: ConversationMessageTask,
memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory], streaming: bool):
final_llm = LLMBuilder.to_llm_from_model(
tenant_id=tenant_id,
model=app_model_config.model_dict,
streaming=streaming
)
# get llm prompt
prompt = cls.get_main_llm_prompt(
mode=mode,
llm=final_llm,
pre_prompt=app_model_config.pre_prompt,
query=query,
inputs=inputs,
chain_output=chain_output,
memory=memory
)
final_llm.callback_manager = cls.get_llm_callback_manager(final_llm, streaming, conversation_message_task)
cls.recale_llm_max_tokens(
final_llm=final_llm,
prompt=prompt,
mode=mode
)
response = final_llm.generate([prompt])
return response
@classmethod
def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict, chain_output: Optional[str],
memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \
Union[str, List[BaseMessage]]:
pre_prompt = PromptBuilder.process_template(pre_prompt) if pre_prompt else pre_prompt
if mode == 'completion':
prompt_template = OutLinePromptTemplate.from_template(
template=("""Use the following CONTEXT as your learned knowledge:
[CONTEXT]
{context}
[END CONTEXT]
When answer to user:
- If you don't know, just say that you don't know.
- If you don't know when you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.
""" if chain_output else "")
+ (pre_prompt + "\n" if pre_prompt else "")
+ "{query}\n"
)
if chain_output:
inputs['context'] = chain_output
prompt_inputs = {k: inputs[k] for k in prompt_template.input_variables if k in inputs}
prompt_content = prompt_template.format(
query=query,
**prompt_inputs
)
if isinstance(llm, BaseChatModel):
# use chat llm as completion model
return [HumanMessage(content=prompt_content)]
else:
return prompt_content
else:
messages: List[BaseMessage] = []
human_inputs = {
"query": query
}
human_message_prompt = "{query}"
if chain_output:
human_inputs['context'] = chain_output
human_message_instruction = """Use the following CONTEXT as your learned knowledge.
[CONTEXT]
{context}
[END CONTEXT]
When answer to user:
- If you don't know, just say that you don't know.
- If you don't know when you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.
"""
if pre_prompt:
extra_inputs = {k: inputs[k] for k in
OutLinePromptTemplate.from_template(template=pre_prompt).input_variables
if k in inputs}
if extra_inputs:
human_inputs.update(extra_inputs)
human_message_instruction += pre_prompt + "\n"
human_message_prompt = human_message_instruction + "Q:{query}\nA:"
else:
if pre_prompt:
extra_inputs = {k: inputs[k] for k in
OutLinePromptTemplate.from_template(template=pre_prompt).input_variables
if k in inputs}
if extra_inputs:
human_inputs.update(extra_inputs)
human_message_prompt = pre_prompt + "\n" + human_message_prompt
# construct main prompt
human_message = PromptBuilder.to_human_message(
prompt_content=human_message_prompt,
inputs=human_inputs
)
if memory:
# append chat histories
tmp_messages = messages.copy() + [human_message]
curr_message_tokens = memory.llm.get_messages_tokens(tmp_messages)
rest_tokens = llm_constant.max_context_token_length[
memory.llm.model_name] - memory.llm.max_tokens - curr_message_tokens
rest_tokens = max(rest_tokens, 0)
history_messages = cls.get_history_messages_from_memory(memory, rest_tokens)
messages += history_messages
messages.append(human_message)
return messages
@classmethod
def get_llm_callback_manager(cls, llm: Union[StreamableOpenAI, StreamableChatOpenAI],
streaming: bool, conversation_message_task: ConversationMessageTask) -> CallbackManager:
llm_callback_handler = LLMCallbackHandler(llm, conversation_message_task)
if streaming:
callback_handlers = [llm_callback_handler, DifyStreamingStdOutCallbackHandler()]
else:
callback_handlers = [llm_callback_handler, DifyStdOutCallbackHandler()]
return CallbackManager(callback_handlers)
@classmethod
def get_history_messages_from_memory(cls, memory: ReadOnlyConversationTokenDBBufferSharedMemory,
max_token_limit: int) -> \
List[BaseMessage]:
"""Get memory messages."""
memory.max_token_limit = max_token_limit
memory_key = memory.memory_variables[0]
external_context = memory.load_memory_variables({})
return external_context[memory_key]
@classmethod
def get_memory_from_conversation(cls, tenant_id: str, app_model_config: AppModelConfig,
conversation: Conversation,
**kwargs) -> ReadOnlyConversationTokenDBBufferSharedMemory:
# only for calc token in memory
memory_llm = LLMBuilder.to_llm_from_model(
tenant_id=tenant_id,
model=app_model_config.model_dict
)
# use llm config from conversation
memory = ReadOnlyConversationTokenDBBufferSharedMemory(
conversation=conversation,
llm=memory_llm,
max_token_limit=kwargs.get("max_token_limit", 2048),
memory_key=kwargs.get("memory_key", "chat_history"),
return_messages=kwargs.get("return_messages", True),
input_key=kwargs.get("input_key", "input"),
output_key=kwargs.get("output_key", "output"),
message_limit=kwargs.get("message_limit", 10),
)
return memory
@classmethod
def validate_query_tokens(cls, tenant_id: str, app_model_config: AppModelConfig, query: str):
llm = LLMBuilder.to_llm_from_model(
tenant_id=tenant_id,
model=app_model_config.model_dict
)
model_limited_tokens = llm_constant.max_context_token_length[llm.model_name]
max_tokens = llm.max_tokens
if model_limited_tokens - max_tokens - llm.get_num_tokens(query) < 0:
raise LLMBadRequestError("Query is too long")
@classmethod
def recale_llm_max_tokens(cls, final_llm: Union[StreamableOpenAI, StreamableChatOpenAI],
prompt: Union[str, List[BaseMessage]], mode: str):
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
model_limited_tokens = llm_constant.max_context_token_length[final_llm.model_name]
max_tokens = final_llm.max_tokens
if mode == 'completion' and isinstance(final_llm, BaseLLM):
prompt_tokens = final_llm.get_num_tokens(prompt)
else:
prompt_tokens = final_llm.get_messages_tokens(prompt)
if prompt_tokens + max_tokens > model_limited_tokens:
max_tokens = max(model_limited_tokens - prompt_tokens, 16)
final_llm.max_tokens = max_tokens
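# Worked example (figures illustrative): with a 4096-token context model, a
# 4000-token prompt and a configured max_tokens of 1000, the completion budget
# is clipped to max(4096 - 4000, 16) = 96 tokens.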
@classmethod
def generate_more_like_this(cls, task_id: str, app: App, message: Message, pre_prompt: str,
app_model_config: AppModelConfig, user: Account, streaming: bool):
llm: StreamableOpenAI = LLMBuilder.to_llm(
tenant_id=app.tenant_id,
model_name='gpt-3.5-turbo',
streaming=streaming
)
# get llm prompt
original_prompt = cls.get_main_llm_prompt(
mode="completion",
llm=llm,
pre_prompt=pre_prompt,
query=message.query,
inputs=message.inputs,
chain_output=None,
memory=None
)
original_completion = message.answer.strip()
prompt = MORE_LIKE_THIS_GENERATE_PROMPT
prompt = prompt.format(prompt=original_prompt, original_completion=original_completion)
if isinstance(llm, BaseChatModel):
prompt = [HumanMessage(content=prompt)]
conversation_message_task = ConversationMessageTask(
task_id=task_id,
app=app,
app_model_config=app_model_config,
user=user,
inputs=message.inputs,
query=message.query,
is_override=True if message.override_model_configs else False,
streaming=streaming
)
llm.callback_manager = cls.get_llm_callback_manager(llm, streaming, conversation_message_task)
cls.recale_llm_max_tokens(
final_llm=llm,
prompt=prompt,
mode='completion'
)
llm.generate([prompt])
| [
"\n",
"Use the following CONTEXT as your learned knowledge:\n[CONTEXT]\n{context}\n[END CONTEXT]\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification. \nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n",
"PLACEHOLDER\nPLACEHOLDER",
"completion",
"PLACEHOLDERQ:{query}\nA:",
"Use the following CONTEXT as your learned knowledge:[CONTEXT]{context}[END CONTEXT]When answer to user:- If you don't know, just say that you don't know.- If you don't know when you are not sure, ask for clarification. Avoid mentioning that you obtained the information from the context.And answer according to the language of the user's question.PLACEHOLDER\n{query}\n",
"{query}\n",
"{query}"
] |
2024-01-10 | khalilmokni/course_plan_suggestion | app~pdf_to_text.py | from PyPDF2 import PdfReader
from fastapi import HTTPException
import requests as r
import openai
from app.config import CHAT_MODEL, MAX_TOKENS, MODEL_TOKEN_LIMIT, OPENAI_API_KEY
import tiktoken
import uuid
import requests
import os
openai.api_key = OPENAI_API_KEY
def url_ok(url):
return requests.head(url, allow_redirects=True).status_code == 200
def is_pdf(url):
#check if the url is valid or not
if not url_ok(url):
raise HTTPException(status_code=400, detail="this is not a valid url")
response = requests.head(url)
content_type = response.headers.get('Content-Type', '')
if content_type.lower() == 'application/pdf':
return True
else:
return False
def get_pdf_file_content(pdf_url: str) -> str:
#check if the url contains a PDF file
if not is_pdf(pdf_url):
raise HTTPException(status_code=400, detail="this is not a pdf file")
response = r.get(pdf_url)
#generate random names for pdf files
pdf_temp_name = str(uuid.uuid4())+'.pdf'
with open(pdf_temp_name, 'wb') as pdf_file:
pdf_file.write(response.content)
try:
reader = PdfReader(pdf_temp_name)
except:
raise HTTPException(status_code=400, detail="this is not a valid pdf file")
text = ''
for page in reader.pages:
text += page.extract_text()
os.remove(pdf_temp_name)
return text
def openaiCompletionApi(url):
pdfContent = get_pdf_file_content(url)
if not pdfContent:
raise HTTPException(status_code=400, detail="PDF does not contain any text")
prompt = "generate a lesson plan associated with the content of the PDF below. The plan must be as the same language as the PDF language"
# Initialize the tokenizer
tokenizer = tiktoken.encoding_for_model(CHAT_MODEL)
# Encode the text_data into token integers
token_integers = tokenizer.encode(pdfContent)
# Split the token integers into chunks based on max_tokens
chunk_size = MAX_TOKENS - len(tokenizer.encode(prompt))
chunks = [
token_integers[i : i + chunk_size]
for i in range(0, len(token_integers), chunk_size)
]
# Decode token chunks back to strings
chunks = [tokenizer.decode(chunk) for chunk in chunks]
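# For example, if MAX_TOKENS were 4000 and the prompt encoded to 60 tokens, each
# chunk would carry at most 3940 PDF tokens (figures illustrative; the real
# limits come from app.config).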
messages = [
{"role": "user", "content": prompt},
{
"role": "user",
"content": "To provide the context for the above prompt, I will send you PDF content in parts. When I am finished, I will tell you 'ALL PARTS SENT'. Do not answer until you have received all the parts.",
},
]
for chunk in chunks:
messages.append({"role": "user", "content": chunk})
# Check if total tokens exceed the model's limit
if sum(len(tokenizer.encode(msg["content"])) for msg in messages) > MODEL_TOKEN_LIMIT:
raise HTTPException(status_code=400, detail="PDF file is too large")
# Add the final "ALL PARTS SENT" message
messages.append({"role": "user", "content": "ALL PARTS SENT"})
messages.append({"role": "user", "content": "The lesson plan must summarize and address the major topics mentioned in the input content but must in no way be a complete paraphrase of the content. The result must be a document Valid Markdown."})
messages.append({"role": "user", "content": "Do not mention anything about timing of each part or short summarys or anything else.Just give the lesson plan with a valid markdown"})
response = openai.ChatCompletion.create(model=CHAT_MODEL, messages=messages)
res = response.choices[0].message["content"].strip()
md_temp_name = str(uuid.uuid4())+'.md'
with open(md_temp_name, 'wb') as plan:
plan.write(res.encode())
return md_temp_name
'''
part 2 (bonus): generate supplementary link resources
prompt2 = "give me link resources from this text"
msg = [
{"role": "user", "content": prompt2},
{
"role": "user",
"content": res
},
]
response2 = openai.ChatCompletion.create(model=CHAT_MODEL, messages=msg)
final_response_2 = response2.choices[0].message["content"].strip()
print(final_response_2)
'''
| [
"The lesson plan must summarize and address the major topics mentioned in the input content but must in no way be a complete paraphrase of the content. The result must be a document Valid Markdown.",
"generate a lesson plan associated with the content of the PDF below. The plan must be as the same language as the PDF language",
"ALL PARTS SENT",
"To provide the context for the above prompt, I will send you PDF content in parts. When I am finished, I will tell you 'ALL PARTS SENT'. Do not answer until you have received all the parts.",
"Do not mention anything about timing of each part or short summarys or anything else.Just give the lesson plan with a valid markdown"
] |
2024-01-10 | josca42/trustpilotGPT | assistant~llm.py | import tiktoken
import openai
from typing import Union, Optional
import wandb
from wandb.sdk.data_types.trace_tree import Trace
from datetime import datetime
import cohere
from assistant.config import config
import streamlit
import os
os.environ[
"WANDB_MODE"
] = "disabled" # FIXME: Uncomment this line to disable wandb during development
wandb.login(
key=config["WANDB_API_KEY"],
)
run = wandb.init(
project="trustpilot-chat",
)
LLM_cohere = cohere.Client(config["COHERE_API_KEY"])
def embed(texts: Union[list[str], str], model="cohere"):
if isinstance(texts, str):
texts = [texts]
texts = [text.replace("\n", " ") for text in texts]
if model == "cohere":
response = LLM_cohere.embed(
texts=texts,
model="embed-multilingual-v2.0",
)
embeddings = response.embeddings
else:
response = openai.Embedding.create(
input=texts,
model="text-embedding-ada-002",
)
embeddings = [data.get("embedding") for data in response.data]
return embeddings
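# Minimal usage sketch (input strings illustrative):
# review_vecs = embed(["Great service", "Terrible support"])  # Cohere multilingual model by default
# query_vec = embed("How fast is delivery?", model="openai")[0]  # OpenAI ada-002 instead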
class GPT:
def __init__(self, log: bool, question: str, st: streamlit = None) -> None:
self.question = question
self.log = log
self.root_span = None
self.st = st
if log:
self.root_span = Trace(
name="Chat",
kind="agent",
start_time_ms=timestamp(),
metadata={"user": "josca"},
inputs={"user": question},
)
def completion(
self,
messages,
model="gpt-4", # "gpt-3.5-turbo-0613",
temperature=0,
functions=[],
stop=None,
name="",
kind="",
write_to_streamlit=True,
) -> str:
start = timestamp()
stream = True if self.st and write_to_streamlit else False
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
stop=stop,
stream=stream,
)
if stream:
with self.st.chat_message("assistant", avatar="🤖"):
message_placeholder = self.st.empty()
full_response = ""
for chunk in response:
full_response += chunk.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
else:
full_response = response.choices[0].message.content
if self.log:
self.root_span.add_child(
Trace(
name=name,
kind=kind if kind else None,
start_time_ms=start,
end_time_ms=timestamp(),
inputs=wandb_format_msgs(messages),
outputs={"assistant": full_response},
model_dict={"model": model},
)
)
return full_response
def finish(self):
self.root_span.end_time_ms = timestamp()
self.root_span.log("chat_test")
def timestamp():
return round(datetime.now().timestamp() * 1000)
def wandb_format_msgs(msgs):
return {msg["role"]: msg["content"] for msg in msgs}
def num_tokens_from_string(string: str, model: str = "gpt-3.5-turbo") -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(model)
num_tokens = len(encoding.encode(string))
return num_tokens
| [] |
2024-01-10 | josca42/trustpilotGPT | frontend.py | import streamlit as st
import openai
from assistant.chat import chat
from assistant.config import config
st.title("ChatGPT-like clone")
openai.api_key = config["OPENAI_API_KEY"]
with st.sidebar:
new_chat = st.button(
":heavy_plus_sign: New chat", key="new_chat", use_container_width=True
)
st.sidebar.button(":exclamation: Stop generating", use_container_width=True)
st.caption("Today")
st.button("Enter some text", key="1", use_container_width=True)
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
if message["role"] == "user":
with st.chat_message("user", avatar="🧑💻"):
st.markdown(message["content"])
elif message["role"] == "assistant":
with st.chat_message("assistant", avatar="🤖"):
st.markdown(message["content"])
elif message["role"] == "plot":
st.plotly_chart(message["content"], use_container_width=True)
else:
pass
if prompt := st.chat_input("What is up?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user", avatar="🧑💻"):
st.markdown(prompt)
msgs_chat_input = [m for m in st.session_state.messages if m["role"] != "plot"][-3:]
msgs_chat_output = chat(messages=msgs_chat_input, st=st)
st.session_state.messages.extend(msgs_chat_output)
| [] |
2024-01-10 | uber/piranha | experimental~piranha_playground~rule_inference~piranha_chat.py | # Copyright (c) 2023 Uber Technologies, Inc.
#
# <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
# <p>http://www.apache.org/licenses/LICENSE-2.0
#
# <p>Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
from pathlib import Path
from typing import List, Optional, Tuple
import attr
import openai
from piranha_playground.rule_inference.utils.logger_formatter import \
CustomFormatter
logger = logging.getLogger("PiranhaAgent")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
class PiranhaChatException(Exception):
pass
@attr.s
class PiranhaGPTChat:
"""
A class to manage and interact with OpenAI ChatModels to generate and improve Piranha rule graphs.
"""
explanation = '''
Your task is to improve refactoring rules for Polyglot Piranha, a tool that uses tree-sitter for parsing and refactoring code.
The rules are expressed in a domain-specific language (DSL) specific to Polyglot Piranha. Examples and explanations of the DSL will be provided below.
You will be provided with the original and refactored code snippets, along with statically inferred rules verified by an algorithm.
However, these rules may appear unnatural since they were automatically generated. Your goal is to make them resemble rules written by humans.
Key requirements:
- The semantics of the rules should remain unchanged unless a specific request to alter them is made.
- Strive to simplify the rules as much as possible. Always simplify lengthy s-expressions.
- Explain each rule individually. Explain the rule in a way that a human can understand it.
Please structure your response using toml and markdown format. Refer to the expected output format below, as it will be parsed automatically.
========================= Piranha Rule Graph =========================
Piranha is language to express cascading program transformation.
Each node in graph represents a transformation rule that identifies and modify specific code snippets.
The edges between rules specify when, and where each rule should be applied.
========================= Piranha Rule Explanation =========================
Rules are represented in TOML. Each rule should contain at least one rule with the following properties:
- "query": Tree-sitter query to find the code pattern to refactor
- "replace_node": The captured node in the query that will be replaced
- "replace_string": Replacement string or pattern for the refactored code
- "holes": Placeholders in your queries that will be instantiated at runtime
- "is_seed_rule": Specifies whether this rule is an entry point for the rule graph.
Additionally, the rule can have optional filters. Filters can have properties like "enclosing_node",
"not_contains", "contains", "at_least", "at_most". The filters are used to specify conditions for the rule to be applied.
========================= Rule Format =========================
```toml
# Define your rule within this section
[[rules]]
# Provide a unique name for your rule
name = "your_rule_name"
# Write a Tree-sitter query to identify the code pattern for refactoring. The outer most node should always be captured.
# The tree-sitter query depends on the language. The nodes you see here are for Java. You need to only use nodes in the TASK!
query = """(
(method_invocation name: (_) @name
arguments: (argument_list) @args) @invk
(#eq? @name @hole1))
"""
# Specify the captured node from the query that will be replaced
# Never add @ before the node name! Otherwise it will NOT compile!
replace_node = "invk"
# Replacement string that will substitute `replace_node`
replace = "X.other_string @args"
# Specify any placeholders in your queries that will be filled in at runtime
# In our case hole1 is used in the query, but not defined. Therefore it is a hole.
holes = ["hole1"]
# Specify if this rule should be triggered first. If it depends on other rules, set to false
is_seed_rule = true
# If necessary, define filters for your rule
[[rules.filters]]
# This pattern should match any ancestor of the captured node (optional)
enclosing_node = "(your_enclosing_node_pattern) @your_capture_name"
# Define patterns that should not be present within the enclosing_node (optional)
# Always use a list, even if you only have one pattern.
not_contains = [
"""(
(identifier) @id
(#eq? @id "x"))
""",
]
# Define a pattern that should be present within the enclosing_node (optional)
contains =
"""(
(identifier) @other_id
(#eq? @other_id "y"))
"""
# Define the minimum and maximum number of children that should match the 'contains' pattern (optional)
at_least = 1
at_most = 5
```
========================= Edge Explanation =========================
Edges allow rules to depend on each other, thus establishing a hierarchy or sequence of application among rules.
For instance, if a rule is defined to match a method invocation, another rule could be drafted to match a method declaration.
In this case, the method name identified from the declaration could be utilized within the invocation.
An edge essentially describes the direction of dependency between two or more rules. It signifies that a particular rule
('from') is based on, or derives information from, one or more other rules ('to').
Edges are also represented in the TOML format, and their structure is typically not modified unless there's a need to
change the dependencies between rules. Your main task, unless otherwise specified, is to ensure that the 'from' and 'to'
components of the edge correctly correspond to the names of your improved rules.
========================= Edge Format =========================
[[edges]]
# Scope of the rule - usually "Global"
scope = "Global"
# Name of the rule that depends on other rules (your rule name)
from = "your_rule_name"
# List of rules that your rule depends on (could be one or multiple)
to = ["other_rule_name", "another_rule_name"]
========================= Expected output format =========================
Your output should be a single TOML file containing the improved rules and edges, as well as an explanation in Markdown format.
Rule Graph
```toml
[[rules]] # For each rule
...
[[edges]] # For each edge
...
```
Explanation
```md
#### `<your_rule_name1>`\n
- <Your detailed explanation>
- <Include multiple bullet points if necessary>
#### `<your_rule_name2>`\n
- <Your detailed explanation>
- <Include multiple bullet points if necessary>
```
========================= Rule Examples =========================
'''
input_template = """
========================= Task =========================
=== Source code ===
{source_code}
=== Tree-sitter representation (source code) ===
{source_tree}
=== Tree-sitter representation (target code) ===
{target_tree}
=== Diff ===
{diff}
=== Rules and edges to improve ===
{rules}
=== Additional requirements ===
{hints}
========================= Please simplify the rules and edges =========================
Remember, the goal is to simplify the rules and edges as much as possible while still achieving the same result.
You should only use nodes you see in the tree-sitter representation of the source code!!
"""
add_filter_prompt = '''
Can you to further refine the following rule? Here is the request:
{desc}
========================= Current rule =========================
{rule}
========================= Task =========================
Improve the rule by incorporating a filter. You are permitted to add only two types of filters: enclosing_node and contains.
You should also include an explanation for the new rule.
You're allowed to add any number of filters and further restrict the nodes using #eq, #not-eq, and #match.
Key requirements:
- Structure your response using TOML and Markdown formatting for automatic parsing.
- You can ONLY chose filters from the list below. You may refine them but they should not deviate from the list.
- Be sure to use unique names for your capture groups to avoid overlapping with existing ones from the query!
- Make sure all the nodes are named. Every captured node should have a unique name, including the outermost node.
- Always surround enclosing_node and contains with parenthesis (...), including the #eq, #not-eq, and #match operators.
========================= Expected output format =========================
Rules
```toml
[[rules]]
....
[[rules.filters]] # filter 1
enclosing_node = """(class_declaration) @class"""
contains = """(
(identifier) @id (#eq? @id "x")
)"""
at_least = 1
at_most = 1
[[rules.filters]] # filter 2
enclosing_node = """(method_invocation) @invk"""
[[rules.filters]] # filter 3
enclosing_node = """(class_declaration) @class"""
contains = """(
(method_declaration
(modifiers) @modifiers
name: (identifier) @name) @decl
(#eq? @name "x")
)
```
Explanation
```md
#### `<your_rule_name1>`\n
- <Your detailed explanation>
- <Include multiple bullet points if necessary>
```
========================= List of Filters =========================
=== Potential filters for enclosing node ===
{enclosing_node_filters}
"""]
========================= Errors to avoid =========================
Not surrounding the query with parenthesis (...):
enclosing_node = "(identifier) @name) (#eq? @name \"x\")"
is wrong!! it should be:
enclosing_node = """((identifier) @name) (#eq? @name "x"))"""
```
'''
holes = attr.ib(type=dict)
messages = attr.ib(type=list, default=attr.Factory(list))
temperature = attr.ib(
type=float,
default=0.3,
validator=[
attr.validators.ge(0),
attr.validators.le(1),
],
)
model = attr.ib(
default="gpt-4-32k",
validator=attr.validators.in_(["gpt-4", "gpt-4-32k", "gpt-3.5-turbo-16k"]),
)
def __attrs_post_init__(self):
"""
Automatically called after the initialization of the instance. It gathers example rules and edge
files from a specified path, formats the content and adds it to the internal message list.
:param None
:return None
"""
examples = self._get_examples("../../src/cleanup_rules/java")
formatted = (
PiranhaGPTChat.explanation
+ "\n"
+ examples
+ "\n"
+ PiranhaGPTChat.input_template.format(**self.holes)
)
self.messages.append({"role": "user", "content": formatted})
def append_system_message(self, system_message: str):
"""
Appends a message from the GPT model to the internal message list.
:param system_message: str: The message content to be added to the message list.
:return None
"""
self.messages.append({"role": "assistant", "content": system_message})
def append_user_followup(self, followup_message: str):
"""
Appends a follow-up message from the user to the internal message list.
:param followup_message: str: The message content to be added to the message list.
:return None
"""
self.messages.append({"role": "user", "content": followup_message})
def get_model_response(self) -> str:
"""
Fetches the latest message from the GPT model. If the latest message is from the user, it will trigger
a new GPT model prediction and append the response to the internal message list.
:param None
:return str: The latest message content from the GPT model.
"""
latest_message = self.messages[-1]
if latest_message["role"] == "assistant":
return latest_message["content"]
else:
completions = self.get_completion(n_samples=1)
content = completions[0]
self.append_system_message(content)
return content
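# Typical call pattern (hole values illustrative; the keys mirror input_template):
# chat = PiranhaGPTChat(holes={"source_code": src, "source_tree": src_sexp,
#                              "target_tree": tgt_sexp, "diff": diff,
#                              "rules": inferred_rules, "hints": ""})
# improved = chat.get_model_response()  # returns TOML rules plus a Markdown explanation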
def append_improve_request(self, desc, rule, enclosing_nodes):
"""
Appends a request to improve the rule to the internal message list.
:param desc: str: Description of the request.
:param rule: str: The rule to be improved.
:param enclosing_nodes: str: The enclosing nodes to be included in the rule.
:return None
"""
self.messages.append(
{
"role": "user",
"content": PiranhaGPTChat.add_filter_prompt.format(
desc=desc,
rule=rule,
enclosing_node_filters=enclosing_nodes,
),
}
)
def get_completion(self, n_samples: int = 1) -> Optional[List[str]]:
"""
Attempts to generate a new GPT model prediction based on the internal message list. It handles
common OpenAI API exceptions such as rate limiting and API errors.
:param n_samples: int: Number of samples to generate from the model.
:return List[str]: A list of generated messages. None if an API exception occurs.
:raises PiranhaChatException: If it fails to generate a completion from the GPT model after three attempts.
"""
for _ in range(3):
try:
logger.debug("Attempting to get completion from GPT.")
response = openai.ChatCompletion.create(
model=self.model,
messages=self.messages,
temperature=self.temperature, # this is the degree of randomness of the model's output
n=n_samples,
)
return [
response.choices[i].message.content
for i in range(len(response.choices))
]
except (
openai.error.RateLimitError,
openai.error.Timeout,
openai.error.APIError,
) as e:
logger.error(e)
sleep_time = 0.5
logger.error(f"Rate limit reached. Sleeping for {sleep_time}s.")
time.sleep(sleep_time)
raise PiranhaChatException("Failed to get completion from GPT.")
@staticmethod
def _get_examples(path_to_examples_rules):
"""
Walks through a specified directory to gather and format the content of example rule and edge files.
The formatted content is then returned as a single string.
:param path_to_examples_rules: str: Path to the directory containing example rule and edge files.
:return str: Formatted content of example rule and edge files.
"""
task_examples = ""
for root, dirs, files in os.walk(path_to_examples_rules):
for file in files:
if file.endswith("rules.toml") or file.endswith("edges.toml"):
file_name = os.path.join(root, file)
file_contents = Path(file_name).read_text()
file_contents = "\n".join(
[
line
for line in file_contents.split("\n")
if not line.startswith("#")
]
)
task_examples += f"<file_name_start> {file_name} <file_name_end>\n"
task_examples += f"```toml {file_contents}```\n"
return task_examples
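# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hypothetical driver for PiranhaGPTChat; the hole names passed below
# ("source_code", "diff") are assumptions and may differ from what input_template expects.
def _example_chat_session():
    chat = PiranhaGPTChat(holes={"source_code": "...", "diff": "..."})
    first_answer = chat.get_model_response()        # triggers one GPT completion
    chat.append_user_followup("Please simplify the rules further.")
    return first_answer, chat.get_model_response()  # second round-trip with the follow-up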
| [
"\nCan you to further refine the following rule? Here is the request:\n \n{desc}\n \n========================= Current rule =========================\n \n{rule}\n\n========================= Task =========================\n\nImprove the rule by incorporating a filter. You are permitted to add only two types of filters: enclosing_node and contains. \nYou should also include an explanation for the new rule.\n\nYou're allowed to add any number of filters and further restrict the nodes using #eq, #not-eq, and #match. \n\n\nKey requirements:\n - Structure your response using TOML and Markdown formatting for automatic parsing.\n - You can ONLY chose filters from the list below. You may refine them but they should not deviate from the list.\n - Be sure to use unique names for your capture groups to avoid overlapping with existing ones from the query!\n - Make sure all the nodes are named. Every captured node should have a unique name, including the outermost node.\n - Always surround enclosing_node and contains with parenthesis (...), including the #eq, #not-eq, and #match operators.\n\n\n========================= Expected output format =========================\n\nRules\n\n```toml\n\n[[rules]]\n....\n\n[[rules.filters]] # filter 1\nenclosing_node = \"\"\"(class_declaration) @class\"\"\"\ncontains = \"\"\"(\n (identifier) @id (#eq? @id \"x\")\n)\"\"\"\nat_least = 1\nat_most = 1\n\n[[rules.filters]] # filter 2\nenclosing_node = \"\"\"(method_invocation) @invk\"\"\"\n\n[[rules.filters]] # filter 3\nenclosing_node = \"\"\"(class_declaration) @class\"\"\"\ncontains = \"\"\"(\n(method_declaration\n (modifiers) @modifiers\n name: (identifier) @name) @decl\n (#eq? @name \"x\")\n)\n```\n\nExplanation\n\n```md\n#### `<your_rule_name1>`\n\n- <Your detailed explanation>\n- <Include multiple bullet points if necessary>\n```\n\n========================= List of Filters =========================\n\n=== Potential filters for enclosing node ===\n\n{enclosing_node_filters}\n\n\n\"\"\"]\n\n========================= Errors to avoid =========================\n\nNot surrounding the query with parenthesis (...):\nenclosing_node = \"(identifier) @name) (#eq? @name \"x\")\"\n\nis wrong!! it should be:\nenclosing_node = \"\"\"((identifier) @name) (#eq? @name \"x\"))\"\"\"\n\n```\n",
"\n========================= Task =========================\n\n=== Source code === \n\n{source_code}\n\n=== Tree-sitter representation (source code) ===\n\n{source_tree}\n\n=== Tree-sitter representation (target code) ===\n\n{target_tree}\n\n=== Diff === \n\n{diff}\n\n=== Rules and edges to improve === \n\n{rules}\n\n=== Additional requirements === \n\n{hints}\n========================= Please simplify the rules and edges =========================\n\nRemember, the goal is to simplify the rules and edges as much as possible while still achieving the same result.\nYou should only use nodes you see in the tree-sitter representation of the source code!!\n\n "
] |
2024-01-10 | ymcui/Chinese-LLaMA-Alpaca-2 | scripts~privategpt~privateGPT_refine.py | #!/usr/bin/env python3
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All, LlamaCpp
import os
import argparse
import time
load_dotenv()
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME")
persist_directory = os.environ.get('PERSIST_DIRECTORY')
model_type = os.environ.get('MODEL_TYPE')
model_path = os.environ.get('MODEL_PATH')
model_n_ctx = os.environ.get('MODEL_N_CTX')
model_n_batch = int(os.environ.get('MODEL_N_BATCH', 8))
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS', 4))
from constants import CHROMA_SETTINGS
def main():
# Parse the command line arguments
args = parse_arguments()
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
# activate/deactivate the streaming StdOut callback for LLMs
callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]
# Prepare the LLM
match model_type:
case "LlamaCpp":
llm = LlamaCpp(model_path=model_path, max_tokens=model_n_ctx, n_ctx=model_n_ctx,
n_gpu_layers=1, n_batch=model_n_batch, callbacks=callbacks, n_threads=8, verbose=False)
case "GPT4All":
llm = GPT4All(model=model_path, max_tokens=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=False)
        case _:
# raise exception if model_type is not supported
raise Exception(f"Model type {model_type} is not supported. Please choose one of the following: LlamaCpp, GPT4All")
    # The following prompt templates are designed specifically for Chinese-Alpaca-2
# For detailed usage: https://github.com/ymcui/Chinese-LLaMA-Alpaca-2/wiki/privategpt_en
alpaca2_refine_prompt_template = (
"[INST] <<SYS>>\n"
"You are a helpful assistant. 你是一个乐于助人的助手。\n"
"<</SYS>>\n\n"
"这是原始问题:{question}\n"
"已有的回答: {existing_answer}\n"
"现在还有一些文字,(如果有需要)你可以根据它们完善现有的回答。"
"\n\n{context_str}\n\n"
"请根据新的文段,进一步完善你的回答。 [/INST]"
)
alpaca2_initial_prompt_template = (
"[INST] <<SYS>>\n"
"You are a helpful assistant. 你是一个乐于助人的助手。\n"
"<</SYS>>\n\n"
"以下为背景知识:\n{context_str}\n"
"请根据以上背景知识,回答这个问题:{question} [/INST]"
)
from langchain import PromptTemplate
refine_prompt = PromptTemplate(
input_variables=["question", "existing_answer", "context_str"],
template=alpaca2_refine_prompt_template,
)
initial_qa_prompt = PromptTemplate(
input_variables=["context_str", "question"],
template=alpaca2_initial_prompt_template,
)
chain_type_kwargs = {"question_prompt": initial_qa_prompt, "refine_prompt": refine_prompt}
qa = RetrievalQA.from_chain_type(
llm=llm, chain_type="refine",
retriever=retriever, return_source_documents= not args.hide_source,
chain_type_kwargs=chain_type_kwargs)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
if query.strip() == "":
continue
# Get the answer from the chain
start = time.time()
res = qa(query)
answer, docs = res['result'], [] if args.hide_source else res['source_documents']
end = time.time()
# Print the result
print("\n\n> Question:")
print(query)
print(f"\n> Answer (took {round(end - start, 2)} s.):")
print(answer)
# Print the relevant sources used for the answer
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
def parse_arguments():
parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, '
'using the power of LLMs.')
parser.add_argument("--hide-source", "-S", action='store_true',
help='Use this flag to disable printing of source documents used for answers.')
parser.add_argument("--mute-stream", "-M",
action='store_true',
help='Use this flag to disable the streaming StdOut callback for LLMs.')
return parser.parse_args()
if __name__ == "__main__":
main()
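# --- Illustrative .env sketch assumed by this script (values are placeholders, see the wiki link above) ---
# EMBEDDINGS_MODEL_NAME=sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
# PERSIST_DIRECTORY=db
# MODEL_TYPE=LlamaCpp
# MODEL_PATH=models/chinese-alpaca-2-7b/ggml-model-q4_0.gguf
# MODEL_N_CTX=4096
# MODEL_N_BATCH=512
# TARGET_SOURCE_CHUNKS=4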
| [
"existing_answer",
"context_str",
"question",
"[INST] <<SYS>>\nYou are a helpful assistant. 你是一个乐于助人的助手。\n<</SYS>>\n\n这是原始问题:{question}\n已有的回答: {existing_answer}\n现在还有一些文字,(如果有需要)你可以根据它们完善现有的回答。\n\n{context_str}\n\n请根据新的文段,进一步完善你的回答。 [/INST]",
"[INST] <<SYS>>\nYou are a helpful assistant. 你是一个乐于助人的助手。\n<</SYS>>\n\n以下为背景知识:\n{context_str}\n请根据以上背景知识,回答这个问题:{question} [/INST]"
] |
2024-01-10 | ymcui/Chinese-LLaMA-Alpaca-2 | scripts~privategpt~privateGPT.py | #!/usr/bin/env python3
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All, LlamaCpp
import os
import argparse
import time
load_dotenv()
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME")
persist_directory = os.environ.get('PERSIST_DIRECTORY')
model_type = os.environ.get('MODEL_TYPE')
model_path = os.environ.get('MODEL_PATH')
model_n_ctx = os.environ.get('MODEL_N_CTX')
model_n_batch = int(os.environ.get('MODEL_N_BATCH', 8))
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS', 4))
from constants import CHROMA_SETTINGS
def main():
# Parse the command line arguments
args = parse_arguments()
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
# activate/deactivate the streaming StdOut callback for LLMs
callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]
# Prepare the LLM
match model_type:
case "LlamaCpp":
llm = LlamaCpp(model_path=model_path, max_tokens=model_n_ctx, n_ctx=model_n_ctx,
n_gpu_layers=1, n_batch=model_n_batch, callbacks=callbacks, n_threads=8, verbose=False)
case "GPT4All":
llm = GPT4All(model=model_path, max_tokens=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=False)
        case _:
# raise exception if model_type is not supported
raise Exception(f"Model type {model_type} is not supported. Please choose one of the following: LlamaCpp, GPT4All")
    # The following prompt templates are designed specifically for Chinese-Alpaca-2
# For detailed usage: https://github.com/ymcui/Chinese-LLaMA-Alpaca-2/wiki/privategpt_en
alpaca2_prompt_template = (
"[INST] <<SYS>>\n"
"You are a helpful assistant. 你是一个乐于助人的助手。\n"
"<</SYS>>\n\n"
"{context}\n\n{question} [/INST]"
)
from langchain import PromptTemplate
input_with_prompt = PromptTemplate(template=alpaca2_prompt_template, input_variables=["context", "question"])
qa = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=retriever,
return_source_documents= not args.hide_source,
chain_type_kwargs={"prompt": input_with_prompt})
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
if query.strip() == "":
continue
# Get the answer from the chain
start = time.time()
res = qa(query)
answer, docs = res['result'], [] if args.hide_source else res['source_documents']
end = time.time()
# Print the result
print("\n\n> Question:")
print(query)
print(f"\n> Answer (took {round(end - start, 2)} s.):")
print(answer)
# Print the relevant sources used for the answer
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
def parse_arguments():
parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, '
'using the power of LLMs.')
parser.add_argument("--hide-source", "-S", action='store_true',
help='Use this flag to disable printing of source documents used for answers.')
parser.add_argument("--mute-stream", "-M",
action='store_true',
help='Use this flag to disable the streaming StdOut callback for LLMs.')
return parser.parse_args()
if __name__ == "__main__":
main()
| [
"question",
"[INST] <<SYS>>\nYou are a helpful assistant. 你是一个乐于助人的助手。\n<</SYS>>\n\n{context}\n\n{question} [/INST]",
"context"
] |
2024-01-10 | ymcui/Chinese-LLaMA-Alpaca-2 | scripts~langchain~langchain_sum.py | import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', required=True, type=str)
parser.add_argument('--model_path', required=True, type=str)
parser.add_argument('--gpu_id', default="0", type=str)
parser.add_argument('--chain_type', default="refine", type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
file_path = args.file_path
model_path = args.model_path
import torch
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
prompt_template = (
"[INST] <<SYS>>\n"
"You are a helpful assistant. 你是一个乐于助人的助手。\n"
"<</SYS>>\n\n"
"请为以下文字写一段摘要:\n{text} [/INST]"
)
refine_template = (
"[INST] <<SYS>>\n"
"You are a helpful assistant. 你是一个乐于助人的助手。\n"
"<</SYS>>\n\n"
"已有一段摘要:{existing_answer}\n"
"现在还有一些文字,(如果有需要)你可以根据它们完善现有的摘要。"
"\n"
"{text}\n"
"\n"
"如果这段文字没有用,返回原来的摘要即可。请你生成一个最终的摘要。"
" [/INST]"
)
if __name__ == '__main__':
load_type = torch.float16
if not torch.cuda.is_available():
raise RuntimeError("No CUDA GPUs are available.")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=100, length_function=len)
with open(file_path) as f:
text = f.read()
docs = text_splitter.create_documents([text])
print("loading LLM...")
model = HuggingFacePipeline.from_model_id(model_id=model_path,
task="text-generation",
device=0,
pipeline_kwargs={
"max_new_tokens": 400,
"do_sample": True,
"temperature": 0.2,
"top_k": 40,
"top_p": 0.9,
"repetition_penalty": 1.1},
model_kwargs={
"torch_dtype" : load_type,
"low_cpu_mem_usage" : True,
"trust_remote_code": True}
)
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
REFINE_PROMPT = PromptTemplate(
template=refine_template,input_variables=["existing_answer", "text"],
)
if args.chain_type == "stuff":
chain = load_summarize_chain(model, chain_type="stuff", prompt=PROMPT)
elif args.chain_type == "refine":
chain = load_summarize_chain(model, chain_type="refine", question_prompt=PROMPT, refine_prompt=REFINE_PROMPT)
print(chain.run(docs))
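    # --- Illustrative invocation (paths are placeholders, not taken from the repo) ---
    #   python langchain_sum.py --file_path sample.txt \
    #       --model_path /path/to/chinese-alpaca-2-7b-hf --gpu_id 0 --chain_type refine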
| [
"existing_answer",
"[INST] <<SYS>>\nYou are a helpful assistant. 你是一个乐于助人的助手。\n<</SYS>>\n\n请为以下文字写一段摘要:\n{text} [/INST]",
"[INST] <<SYS>>\nYou are a helpful assistant. 你是一个乐于助人的助手。\n<</SYS>>\n\n已有一段摘要:{existing_answer}\n现在还有一些文字,(如果有需要)你可以根据它们完善现有的摘要。\n{text}\n\n如果这段文字没有用,返回原来的摘要即可。请你生成一个最终的摘要。 [/INST]"
] |
2024-01-10 | usama1998-pro/hanna | vectors.py | from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from dotenv import load_dotenv
import os
import io
load_dotenv()
# print(len(string))
# Data for AgilityModelInfoVectorStore -> corpus_info.txt
# Data for AgilityModelVectorStore -> corpus.txt
corpus_file = "corpus.txt"
vec_name = "AgilityModelVectorStore"
print(f"Reading File [{corpus_file}]...")
corpus = io.open(corpus_file, "r", encoding="utf-8").read()
# Chunk size for AgilityModelInfoVectorStore -> 2000
# Chunk size for AgilityModelVectorStore -> 700
size = 700
print(f"Splitting Text [chunk-size={size}]...")
# Splitting document in to Text
text_splitter = CharacterTextSplitter(chunk_overlap=0, chunk_size=size, length_function=len)
document = text_splitter.split_text(corpus)
print("Calling OpenAI Embeddings...")
embed = OpenAIEmbeddings(openai_api_key=os.environ.get("OPENAI_API_KEY"))
print("Converting to vectors..", end="\n")
vecdb = Chroma.from_texts(document, embed, persist_directory=vec_name)
print("Done!")
| [] |
2024-01-10 | bumandpunk/ntchat-wx | examples~echo_bot_on.py | # -*- coding: utf-8 -*-
import urllib.error as error
import urllib.request as request
import urllib
import threading
import random
import json
import requests
import ntchat
import sys
import time
import os
import openai
os.environ['NTCHAT_LOG'] = "ERROR"
wechat = ntchat.WeChat()
# Open the PC WeChat client; smart: whether to attach to an already logged-in instance
wechat.open(smart=True)
with open("data.json", 'r', encoding='utf-8') as fw:
injson = json.load(fw)
with open("four.json", 'r', encoding='utf-8') as fw2:
injson2 = json.load(fw2)
with open("zhiling.json", 'r', encoding='utf-8') as main:
allZhiling = json.load(main)
chatData = []
def chatgptai(val):
    # The user's message for this turn
    data = {"role": "user", "content": val}
    # Append the user message to the shared conversation history
    chatData.append(data)
    # Fill in your OpenAI organization ID and API key below
openai.organization = "xxxxxxxxxxxx"
openai.api_key = "apikey"
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=chatData
)
    # Also store the assistant reply so it is reused as context on the next turn
chatData.append(completion.choices[0].message)
return completion.choices[0].message.content.replace('\n', '')
def chatgptimg(val):
openai.organization = "xxxxx"
openai.api_key = "xxxxxxxx"
completion = openai.Image.create(
prompt=val,
n=1,
size="512x512"
)
image_url= completion.data[0].url
headers ={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'
}
r = requests.get(image_url,headers=headers)
imgName = val + str(random.randint(1,999))
bendi_url = './resources/imgList/'+ imgName +'.jpg'
with open(bendi_url ,mode = "wb") as f:
        f.write(r.content)  # write the downloaded image bytes to the file
return_url = '本地文件夹路径' + imgName + '.jpg'
return return_url
global t
def fun_timer():
    # Send a fixed message at the scheduled time
print(time.strftime('%H:%M'))
if (time.strftime('%H:%M') == "11:11"):
wechat.send_text(to_wxid="xxxx", content="xxxx")
        # Send a scheduled group-chat "pat" (tickle)
wechat.send_pat(room_wxid='xxxx', patted_wxid='xxxx')
        # Send a scheduled group-chat @mention
wechat.send_room_at_msg(to_wxid='xxxx',
content='{$@}怎么不说话',
at_list=['xxx'])
    # Re-run this timer every 20 seconds
t = threading.Timer(20.0, fun_timer)
t.start()
fun_timer()
def minguo():
api_url = 'https://apis.tianapi.com/mgjuzi/index'
params_dict = {
"key": "xxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
return (result['result']['content'])
# print()
else:
return ("交一下钱谢谢")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return (" 阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def qinghua():
api_url = 'https://apis.tianapi.com/saylove/index'
params_dict = {
"key": "xxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
return (result['result']['content'])
# print()
else:
return ("交一下钱谢谢")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return (" 阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def shi():
api_url = 'https://apis.tianapi.com/qingshi/index'
params_dict = {
"key": "xxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
return (result['result']['content'])
# print()
else:
return ("交一下钱谢谢")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return (" 阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def emo():
api_url = 'https://apis.tianapi.com/hsjz/index'
params_dict = {
"key": "xxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
return (result['result']['content'])
# print()
else:
return ("交一下钱谢谢")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return (" 阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def tiangou():
api_url = 'https://apis.tianapi.com/tiangou/index'
params_dict = {
"key": "xxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
return (result['result']['content'])
# print()
else:
return ("交一下钱谢谢")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return (" 阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def aini():
api_url = 'https://apis.tianapi.com/caihongpi/index'
params_dict = {
"key": "xxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
return (result['result']['content'])
# print()
else:
return ("交一下钱谢谢")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return (" 阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def wenan():
api_url = 'https://apis.tianapi.com/pyqwenan/index'
params_dict = {
"key": "xxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
return (result['result']['content'])
# print()
else:
return ("交一下钱谢谢")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return (" 阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def taici():
api_url = 'https://apis.tianapi.com/dialogue/index'
params_dict = {
"key": "xxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
return (result['result']['dialogue']+'—'+result['result']['source'])
# print(result['result']['dialogue']+'—'+result['result']['source'])
else:
return ("交一下钱谢谢")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return (" 阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def penren():
text_url = "https://v.api.aa1.cn/api/api-wenan-ktff/index.php?type=" + \
str(random.randint(1, 5))
response = requests.get(text_url)
res = json.loads(response.text, strict=False)
text = res['text'].replace('\n', '')
return text
def saohua():
text_url = "https://v.api.aa1.cn/api/api-saohua/index.php?type=json"
response = requests.get(text_url)
res = json.loads(response.text, strict=False)
text = res['saohua'].replace('\n', '')
return text
# Weather forecast query example
def tianqi(val):
api_url = 'https://api.tianapi.com/tianqi/index'
params_dict = {
"city": val, # 查询天气的城市名称,如:北京、苏州、上海
"key": "xxxxxxxxxxxxxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
area = result['newslist'][0]['area']
date = result['newslist'][0]['date']
temperature = result['newslist'][0]['lowest'] + \
'—' + result['newslist'][0]['highest']
sunrise = result['newslist'][0]['sunrise']
sunset = result['newslist'][0]['sunset']
weather = result['newslist'][0]['weather']
wind = result['newslist'][0]['wind']
windsc = result['newslist'][0]['windsc']
tips = result['newslist'][0]['tips']
return (("城市:%s\n日期:%s\n温度:%s\n日出:%s\n日落:%s\n天气:%s\n风向:%s\n风力:%s\n提示:%s" % (area, date,
temperature, sunrise, sunset, weather, wind, windsc, tips)))
# print()
else:
return ("nono查不到")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return ("阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def jiqi(val):
api_url = 'https://apis.tianapi.com/robot/index'
params_dict = {
"key": "xxxxxxx", # 您申请的接口API接口请求Key
"question": val
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['code']
if (error_code == 200):
return (result['result']['reply'])
# print()
else:
return ("我卡啦卡啦")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return ("我卡啦卡啦")
except error.HTTPError as err:
return ('我卡啦卡啦')
except error.URLError as err:
        # Other exceptions
return (err)
def four():
    # random.choice avoids the off-by-one IndexError that randint(0, len(...)) can raise
    return random.choice(injson2)['text']
def kou6():
    return random.choice(injson)['text']
def zhiling_text():
    # @property {String} triggerMode   matching mode ("contain" / "match" / "at")
    # @property {String} trigger       keyword (or wxid) that triggers the command
    # @property {String} functionName  expression evaluated to build the reply
    # @property {String} sendType      type of message to send ("text" / "img")
    # @property {Boolean} needPower    whether the command requires admin permission
return '1.6\n2.舔\n3.emo\n4.爱你\n5.文案\n6.情诗\n7.情话\n8.句子\n9.台词\n10.开喷\n11.发烧\n12.星期四\n13.天气-xxx\n15.转图-xxx'
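# --- Illustrative zhiling.json entry (hypothetical values; the real file is not shown) ---
# on_recv_text_msg/sendMsg below dispatch on these fields, and `functionName` is passed to
# eval(), so it must be a Python expression such as "tiangou()".
_EXAMPLE_ZHILING_RULE = {
    "triggerMode": "match",       # "contain" / "match" / "at"
    "trigger": "舔",              # keyword (or wxid for "at") that triggers the rule
    "functionName": "tiangou()",  # expression evaluated to build the reply
    "sendType": "text",           # "text" or "img"
    "needPower": False,           # True if only admins may use the command
}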
def yiqing(val):
api_url = 'https://api.inews.qq.com/newsqa/v1/query/inner/publish/modules/list?modules=statisGradeCityDetail,diseaseh5Shelf'
params_dict = {
# "key": "xxxxxxx", # 您申请的接口API接口请求Key
}
params = urllib.parse.urlencode(params_dict)
try:
req = request.Request(api_url, params.encode())
response = request.urlopen(req)
content = response.read()
if content:
try:
result = json.loads(content)
error_code = result['ret']
if (error_code == 0):
# return(result['result']['content'])
china_data = result['data']['diseaseh5Shelf']['areaTree'][0]['children']
for item in china_data:
if (val in item['name']):
# return(item['today'],item['name'],item['date'])
return (("城市:%s\n今日新增确诊:%s\n今日本地确诊:%s\n今日无症状新增:%s\n中风险地区数量:%s\n高风险地区数量:%s\n数据更新时间:%s" % (item['name'],
item['today']['local_confirm_add'], item['today']['confirm'], item['today'][
'wzz_add'], item['total']['mediumRiskAreaNum'], item['total']['highRiskAreaNum'],
item['total']['mtime']
)))
if (item['children']):
for it in item['children']:
if (val in it['name']):
# return(it['today'],it['name'],it['date'])
return (("城市:%s\n今日新增确诊:%s\n今日本地确诊:%s\n今日无症状新增:%s\n中风险地区数量:%s\n高风险地区数量:%s\n数据更新时间:%s" % (it['name'],
it['today']['local_confirm_add'], it['today']['confirm'], it['today'][
'wzz_add'], it['total']['mediumRiskAreaNum'], it['total']['highRiskAreaNum'],
it['total']['mtime']
)))
# return it['today']
else:
return ("交一下钱谢谢")
except Exception as e:
return ("解析结果异常:%s" % e)
else:
            # Possibly a network problem; the response body could not be fetched
return (" 阿,别查了")
except error.HTTPError as err:
return ('HTTPError等会再查')
except error.URLError as err:
        # Other exceptions
return (err)
def texttoimg(val):
if len(val) > 100:
return '替换为本地图片路径既可'
else:
image_url = "https://v.api.aa1.cn/api/api-jupai/index.php?msg=" + \
str(val)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'
}
r = requests.get(image_url, headers=headers)
bendi_url = './resources/imgList/' + val + '.jpg'
with open(bendi_url, mode="wb") as f:
            f.write(r.content)  # write the image bytes to the file
return_url = '替换为本地图片路径既可/' + val + '.jpg'
return return_url
def load_json(file):
with open(file, 'r', encoding='utf-8') as f:
return json.load(f)
def save_json(file, data):
with open(file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=4)
def modify_user_list(wechat_instance, room_wxid, filename, user, revoke_msg, grant_msg):
data = load_json(filename)
if user in data['list']:
data['list'].remove(user)
wechat_instance.send_text(to_wxid=room_wxid, content=revoke_msg)
else:
data['list'].append(user)
wechat_instance.send_text(to_wxid=room_wxid, content=grant_msg)
save_json(filename, data)
def on_recv_text_msg(wechat_instance: ntchat.WeChat, message):
allRoom = load_json("room.json")
allPower = load_json("powerlist.json")
allZhiling = load_json("zhiling.json")
data = message["data"]
from_wxid = data["from_wxid"]
    # The bot controller account can be swapped in here
self_wxid = wechat_instance.get_login_info()["wxid"]
room_wxid = data["room_wxid"]
for item in allRoom['list']:
if room_wxid in item:
for it in allZhiling:
should_send = False
if it['triggerMode'] == 'contain' and data['msg'].startswith(it['trigger']) and it['sendType'] in ['text', 'img']:
should_send = True
elif it['triggerMode'] == 'match' and data['msg'] == it['trigger'] and it['sendType'] == 'text':
should_send = True
elif it['triggerMode'] == 'at' and it['trigger'] in data['at_user_list']:
should_send = True
if should_send:
sendMsg(wechat_instance, room_wxid, it['functionName'], it['sendType'], from_wxid, data, it)
if from_wxid == self_wxid and data['at_user_list']:
if '授权' in data['msg']:
modify_user_list(wechat_instance, room_wxid, "powerlist.json", data['at_user_list'][0],
'微信:'+data['at_user_list'][0]+'已取消管理员',
'微信:'+data['at_user_list'][0]+'添加管理员成功\n可指令控制开关机')
else:
modify_user_list(wechat_instance, room_wxid, "blacklist.json", data['at_user_list'][0],
'微信:'+data['at_user_list'][0]+'已解除',
'微信:'+data['at_user_list'][0]+'已被拉黑')
if from_wxid in allPower['list']:
allList = load_json("room.json")
if data['msg'] == '开机':
wechat_instance.send_text(to_wxid=room_wxid, content='已启动--发送[指令]查看全部指令')
if room_wxid not in allList['list']:
allList['list'].append(room_wxid)
save_json("room.json", allList)
elif data['msg'] == '关机':
wechat_instance.send_text(to_wxid=room_wxid, content='已关闭--see you ')
if room_wxid in allList['list']:
allList['list'].remove(room_wxid)
save_json("room.json", allList)
def sendMsg(wechat_instance, room_wxid, funname, type, from_wxid, data, it):
    # Before sending, check whether this user is blacklisted
blist = load_json("blacklist.json")
pdata = load_json("powerlist.json")
if from_wxid in blist['list']:
wechat_instance.send_text(to_wxid=room_wxid, content='您已被拉黑 别发了[微笑]')
else:
        if it['needPower'] == True: # the command requires admin permission
            if from_wxid in pdata['list']: # sender has permission
if (type == 'text'):
wechat_instance.send_text(
to_wxid=room_wxid, content=eval(funname))
else:
wechat_instance.send_image(
to_wxid=room_wxid, file_path=eval(funname))
            else: # sender lacks permission
wechat_instance.send_text(
to_wxid=room_wxid, content='您没有该指令权限[微笑]')
        else: # the command does not require permission
if (type == 'text'):
wechat_instance.send_text(
to_wxid=room_wxid, content=eval(funname))
else:
wechat_instance.send_image(
to_wxid=room_wxid, file_path=eval(funname))
# Listen for incoming text messages
wechat.on(ntchat.MT_RECV_TEXT_MSG, on_recv_text_msg)
# The loop below keeps the program alive; remove it if you embed this in a framework with its own main loop (e.g. PyQt)
try:
while True:
time.sleep(0.5)
except KeyboardInterrupt:
ntchat.exit_()
sys.exit()
| [] |
2024-01-10 | shyanukant/AI_projects | openai~prompt_pipeline.py | import os
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains import SequentialChain
llm = OpenAI(temperature=.7, openai_api_key=os.environ.get('OPENAI_KEY'))
template = """You are the dedicated content creator and skilled social media marketer for our company.
In this dynamic role, your responsibility encompasses crafting top-notch content within the realm of topic,
all while maintaining an ingenious, professional, and captivating tone. Your role includes creating a compelling content strategy,
engaging with our audience, leveraging trends, analyzing insights, and staying at the forefront of industry trends to ensure our brand's online presence flourishes.
Your content will not only resonate deeply with our target audience but also drive impactful results across diverse platforms.
So create content on this topic `{topic}` with `{tone}` tone and your goal is `{goal}` for target Audience `{Audience}`.
"""
prompt_template = PromptTemplate(input_variables=["topic", "tone", "goal","Audience" ], template=template, validate_template=True)
chain1 = LLMChain(llm=llm, prompt=prompt_template, output_key="contents")
template2 = """As the primary content creator for our organization,
your role revolves around curating compelling content for social media platforms while identifying relevant keywords and hashtags.
Your next task is to craft engaging posts tailored for the following social platforms: `{platforms}` on below content.
content : `{contents}`
Your expertise in content creation will play a pivotal role in enhancing our brand's online presence and engagement across these diverse platforms.
"""
prompt_template2 = PromptTemplate(input_variables=["platforms", "contents"], template=template2)
chain2 = LLMChain(llm=llm, prompt=prompt_template2, output_key="social")
# This is the overall chain where we run these two chains in sequence.
overall_chain = SequentialChain(
chains=[ chain1, chain2],
input_variables=["topic", "tone", "goal", "Audience", "platforms"],
# Here we return multiple variables
output_variables=["contents", 'social'],
verbose=True)
output = overall_chain({"topic":"top 10 youtube for learning digital marketing", "tone": "Educational", "goal": "encourages interaction", "Audience":"india", "platforms": "Twitter, Instagram, and LinkedIn" })
print(output) | [
"As the primary content creator for our organization, \n your role revolves around curating compelling content for social media platforms while identifying relevant keywords and hashtags. \n Your next task is to craft engaging posts tailored for the following social platforms: `{platforms}` on below content.\n content : `{contents}` \n Your expertise in content creation will play a pivotal role in enhancing our brand's online presence and engagement across these diverse platforms.\n ",
"True",
"contents",
"tone",
"Audience",
"You are the dedicated content creator and skilled social media marketer for our company. \n In this dynamic role, your responsibility encompasses crafting top-notch content within the realm of topic, \n all while maintaining an ingenious, professional, and captivating tone. Your role includes creating a compelling content strategy, \n engaging with our audience, leveraging trends, analyzing insights, and staying at the forefront of industry trends to ensure our brand's online presence flourishes. \n Your content will not only resonate deeply with our target audience but also drive impactful results across diverse platforms.\n So create content on this topic `{topic}` with `{tone}` tone and your goal is `{goal}` for target Audience `{Audience}`.\n \n "
] |
2024-01-10 | shyanukant/AI_projects | openai~content_generate.py | import os
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.chains import LLMChain
def generate_content(topic, tone):
# open ai api key
OPENAI_KEY = os.environ.get('OPENAI_KEY')
# crete llm model
chat_llm = ChatOpenAI(temperature=0.7, openai_api_key=OPENAI_KEY)
# response schema (key, value pair)
heading_schema = ResponseSchema(name="heading", description="This is a heading")
body_schema = ResponseSchema(name="body", description="This is a body.")
response_schema = [ heading_schema, body_schema ]
# generate output parser (json structure)
output_parser = StructuredOutputParser.from_response_schemas(response_schemas=response_schema)
# generate json formate instruction for template string
format_instructions = output_parser.get_format_instructions()
# template string
template = """You are the dedicated content creator and skilled social media marketer for our company.
In this dynamic role, your responsibility encompasses crafting top-notch content within the realm of topic,
all while maintaining an ingenious, professional, and captivating tone. Your role includes creating a compelling content strategy,
engaging with our audience, leveraging trends, analyzing insights, and staying at the forefront of industry trends to ensure our brand's online presence flourishes.
Your content will not only resonate deeply with our target audience but also drive impactful results across diverse platforms.
So create content on this topic `{topic}` with `{tone}` tone and your goal is for target Audience .
{format_instructions}
"""
# create prompt template
prompt = ChatPromptTemplate(
messages= HumanMessagePromptTemplate.from_template(template),
input_variables=['topic', 'tone'],
partial_variables={ 'format_instructions' : format_instructions},
output_parser=output_parser
)
# create chain
chain = LLMChain(llm=chat_llm, prompt=prompt)
# final response
response = chain.predict_and_parse(topic=topic, tone=tone)
return response
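# --- Illustrative call (argument values are arbitrary examples, not from the repo) ---
# result = generate_content(topic="10 habits of productive developers", tone="Educational")
# print(result["heading"], result["body"])  # keys come from the ResponseSchema definitions above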
| [
"You are the dedicated content creator and skilled social media marketer for our company. \n In this dynamic role, your responsibility encompasses crafting top-notch content within the realm of topic, \n all while maintaining an ingenious, professional, and captivating tone. Your role includes creating a compelling content strategy, \n engaging with our audience, leveraging trends, analyzing insights, and staying at the forefront of industry trends to ensure our brand's online presence flourishes. \n Your content will not only resonate deeply with our target audience but also drive impactful results across diverse platforms.\n So create content on this topic `{topic}` with `{tone}` tone and your goal is for target Audience .\n\n {format_instructions}\n ",
"tone",
"format_instructions"
] |
2024-01-10 | shyanukant/AI_projects | huggingface~text-generation.py | from langchain import HuggingFacePipeline, PromptTemplate, LLMChain
llm = HuggingFacePipeline.from_model_id(
model_id="bigscience/bloom-1b7",
task="text-generation",
model_kwargs={"temperature": 0, "max_length": 64},
)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is electroencephalography?"
print(llm_chain.run(question)) | [
"question",
"Question: {question}\n\nAnswer: Let's think step by step."
] |
2024-01-10 | shyanukant/AI_projects | content_generate~text_generation.py | ## Creating a LLM using LangChain + HuggingFace
import os
from langchain import HuggingFaceHub, LLMChain
from langchain.prompts import PromptTemplate
hf_key = os.environ.get("SQL_MODEL_KEY")
hub_llm = HuggingFaceHub(
repo_id="gpt2-xl",
model_kwargs = {'temperature' : 0.7, 'max_length':250} ,
huggingfacehub_api_token=hf_key)
prompt = PromptTemplate(
input_variables= ['topic'],
template= "write a 250 word caption for social media post on this topic \n `{topic}`."
)
chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)
print(chain.run("list comprehesion in python")) | [
"write a 250 word caption for social media post on this topic \n `{topic}`."
] |
2024-01-10 | shyanukant/AI_projects | content_generate~tweet.py | ## Creating a LLM using LangChain + HuggingFace
import os
from langchain import HuggingFaceHub, LLMChain
from langchain.prompts import PromptTemplate
hf_key = os.environ.get("SQL_MODEL_KEY")
# model -> https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5 = use for hashtag generator
# model -> https://huggingface.co/tiiuae/falcon-7b-instruct
hub_llm = HuggingFaceHub(
repo_id="tiiuae/falcon-7b-instruct",
model_kwargs = {'temperature' : 0.8, 'max_length':250} ,
huggingfacehub_api_token=hf_key)
prompt = PromptTemplate(
input_variables= ['topic'],
# template= "<|prompter|>Can you write a tweet on {topic} and encourages engagement from followers.<|endoftext|><|assistant|>"
template= "Can you write a tweet on {topic} and encourages engagement from followers. Use vibrant visuals and witty captions to create excitement around the {topic} and give followers a reason to share and tag their friends."
)
chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)
print(chain.run("how to write clean code")) | [
"Can you write a tweet on {topic} and encourages engagement from followers. Use vibrant visuals and witty captions to create excitement around the {topic} and give followers a reason to share and tag their friends."
] |
2024-01-10 | shyanukant/AI_projects | content_generate~text2image.py | ## Creating a LLM using LangChain + HuggingFace
import os
from langchain import HuggingFaceHub, LLMChain
from langchain.prompts import PromptTemplate
hf_key = os.environ.get("SQL_MODEL_KEY")
hub_llm = HuggingFaceHub(
repo_id="succinctly/text2image-prompt-generator",
model_kwargs = {'temperature' : 0.7, 'max_length':250} ,
huggingfacehub_api_token=hf_key)
prompt = PromptTemplate(
input_variables= ['topic'],
template= "Genearate a image \n `{topic}`."
)
chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)
print(chain.run("cyber girl with painting ")) | [
"Genearate a image \n `{topic}`."
] |
2024-01-10 | Vasilije1990/architecture | translate_util.py |
from langchain import PromptTemplate, LLMChain
from langchain.chains.openai_functions import create_structured_output_chain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
import weaviate
import os
import json
import argparse
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate
from langchain.retrievers import WeaviateHybridSearchRetriever
from langchain.schema import Document, SystemMessage, HumanMessage
from langchain.vectorstores import Weaviate
import uuid
from dotenv import load_dotenv
load_dotenv()
from pathlib import Path
from langchain import OpenAI, LLMMathChain
import os
embeddings = OpenAIEmbeddings()
from deep_translator import (GoogleTranslator)
def _convert_pdf_to_document(path: str = None):
"""Convert a PDF document to a Document object"""
if path is None:
raise ValueError("A valid path to the document must be provided.")
loader = PyPDFLoader(path)
pages = loader.load_and_split()
print("PAGES", pages[0])
# Parse metadata from the folder path
path_parts = Path(path).parts
arch_docs = path_parts.index("arch_docs")
metadata_parts = path_parts[arch_docs+1:]
documents = []
for page in pages:
translation = GoogleTranslator(source='auto', target='en').translate(text=page.page_content)
documents.append(
Document(
metadata={
"title": "Personal Receipt",
"country": metadata_parts[1],
"year": metadata_parts[0],
"author": str(uuid.uuid4()),
"source": "/".join(metadata_parts),
},
page_content=translation,
)
)
print(documents)
return documents
def _init_weaviate():
"""Initialize weaviate client and retriever"""
auth_config = weaviate.auth.AuthApiKey(api_key=os.environ.get('WEAVIATE_API_KEY'))
client = weaviate.Client(
url='https://my-vev-index-o4qitptw.weaviate.network',
auth_client_secret=auth_config,
additional_headers={
"X-OpenAI-Api-Key": os.environ.get('OPENAI_API_KEY')
}
)
retriever = WeaviateHybridSearchRetriever(
client=client,
index_name="Architecture",
text_key="text",
attributes=[],
embedding=embeddings,
create_schema_if_missing=True,
)
return retriever
def load_to_weaviate(document_path=None):
"""Load documents to weaviate"""
retriever =_init_weaviate()
docs = _convert_pdf_to_document(document_path)
return retriever.add_documents(docs) | [] |
2024-01-10 | urcdma/--2 | gpt3_analysis.py | ## gpt3_analysis.py
import openai
class GPT3Analysis:
def __init__(self, api_key: str):
self.api_key = api_key
openai.api_key = self.api_key
def analyze_code(self, code: str) -> str:
"""
Analyze the given code using GPT-3 and return the analysis result.
"""
# Implement the code analysis using GPT-3
response = openai.Completion.create(engine="text-davinci-002", prompt=code, max_tokens=60)
return response.choices[0].text.strip()
def organize_code(self, code: str) -> str:
"""
Organize the given code using GPT-3 and return the organized code.
"""
# Implement the code organization using GPT-3
response = openai.Completion.create(engine="text-davinci-002", prompt=code, max_tokens=60)
return response.choices[0].text.strip()
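# --- Illustrative usage (the API key and code snippet are placeholders) ---
# analyzer = GPT3Analysis(api_key="sk-...")
# print(analyzer.analyze_code("def add(a, b):\n    return a + b"))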
| [] |
2024-01-10 | CoruNethron/rulm-txt | self_instruct~src~benchmarks~eval_zs_rsg.py | from typing import Tuple, Callable
import re
import copy
from pathlib import Path
from tqdm import tqdm
import fire
from datasets import load_dataset
from nltk import edit_distance
from sklearn.metrics import accuracy_score
from sklearn.metrics import matthews_corrcoef
from src.util.io import write_jsonl
from src.util.chat import Conversation
from src.util.dl import gen_batch
from src.util.load import load_saiga
from src.util.openai import openai_batch_completion, OpenAIDecodingArguments
HF_DATASET = "RussianNLP/russian_super_glue"
def generate(
model,
tokenizer,
prompts,
generation_config,
debug: bool = True
):
data = tokenizer(
prompts,
return_tensors="pt",
truncation=True,
padding=True,
)
data = {k: v.to(model.device) for k, v in data.items()}
output_ids = model.generate(
**data,
generation_config=generation_config
)
outputs = []
for sample_output_ids, sample_input_ids in zip(output_ids, data["input_ids"]):
sample_output_ids = sample_output_ids[len(sample_input_ids):]
sample_output = tokenizer.decode(sample_output_ids, skip_special_tokens=True)
sample_output = sample_output.replace("</s>", "").strip()
if debug:
print(tokenizer.decode(sample_input_ids, skip_special_tokens=True))
print(sample_output)
print()
outputs.append(sample_output)
return outputs
def predict_saiga_zero_shot(
model,
tokenizer,
generation_config,
template_path,
prompts,
max_prompt_tokens: int = None,
debug: bool = False
):
default_conversation = Conversation.from_template(template_path)
clean_prompts = []
for prompt in prompts:
conversation = copy.deepcopy(default_conversation)
conversation.add_user_message(prompt)
prompt = conversation.get_prompt(tokenizer, max_tokens=max_prompt_tokens)
clean_prompts.append(prompt)
return generate(
model=model,
tokenizer=tokenizer,
prompts=clean_prompts,
generation_config=generation_config,
debug=debug
)
def find_lcs(s1, s2):
    """Return the longest common substring of s1 and s2."""
    max_lcs = ""
    for i in range(len(s1)):
        # range end is len(s1) + 1 so substrings ending at the last character are included
        for j in range(i + 1, len(s1) + 1):
            ss1 = s1[i:j]
            if ss1 in s2 and len(ss1) > len(max_lcs):
                max_lcs = ss1
    return max_lcs
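# Quick sanity example (illustrative only): the longest common substring is returned,
# e.g. find_lcs("столица", "стол") == "стол".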
# DaNetQA
DANETQA_PROMPT = '''Контекст: {passage}
Используя контекст, ответь одним словом на вопрос: {question}'''
DANETQA_YES_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(да|верно|правда|может)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
DANETQA_NO_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(нет|неверно|неправда|не|ложь|редко)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
def clean_danetqa_response(response):
result = True
if bool(DANETQA_YES_RE.match(response)):
result = True
elif bool(DANETQA_NO_RE.match(response)):
result = False
else:
print("ERROR! Не удалось найти Да/Нет в ответе модели и преобразовать его в bool:", response)
return result
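# Illustrative behaviour (examples only):
#   clean_danetqa_response("Да, это так.")   -> True
#   clean_danetqa_response("Нет, неверно.")  -> False
# Unmatched responses fall back to True after logging the error message above.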
def predict_danetqa(
split,
predict_func,
output_path,
batch_size: int = 4,
nrows: int = None,
template: str = DANETQA_PROMPT,
clean_func: Callable = clean_danetqa_response
):
records = list(load_dataset(HF_DATASET, "danetqa", split=split))
if nrows:
records = records[:nrows]
prompts = []
for record in records:
prompt = template.format(passage=record["passage"], question=record["question"])
prompts.append(prompt)
responses = []
for batch in tqdm(gen_batch(prompts, batch_size), total=len(prompts) // batch_size + 1):
responses.extend(predict_func(batch))
labels, predictions = [], []
for record, response in zip(records, responses):
prediction = clean_func(response)
record["prediction"] = prediction
label = record["label"]
if label != -1:
labels.append(label)
predictions.append(prediction)
if labels:
print("danetqa accuracy:", accuracy_score(labels, predictions))
outputs = []
for record in records:
label = str(record["prediction"]).lower()
outputs.append({"idx": record["idx"], "label": label})
write_jsonl(outputs, output_path)
return records
# TERRA
TERRA_PROMPT = '''Текст: {premise} Утверждение: {hypothesis}
Используя текст, ответь одним словом на вопрос: Вероятно ли утверждение при условии остального текста?'''
TERRA_ENTAILMENT_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(да|верно|правда|может|являются|вероятно)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
TERRA_NOT_ENTAILMENT_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(нет|неверно|неверное|невероятно|не вероятно|не)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
def terra_to_bool(response):
return response == "entailment"
def clean_terra_response(response):
result = "not_entailment"
if bool(TERRA_ENTAILMENT_RE.match(response)):
result = "entailment"
elif bool(TERRA_NOT_ENTAILMENT_RE.match(response)):
result = "not_entailment"
else:
print("ERROR! Не удалось найти Да/Нет в ответе модели и преобразовать его в bool", response)
return result
def predict_terra(
split,
predict_func,
output_path,
batch_size: int = 8,
nrows: int = None,
template: str = TERRA_PROMPT,
clean_func=clean_terra_response
):
records = list(load_dataset(HF_DATASET, "terra", split=split))
if nrows:
records = records[:nrows]
prompts = []
for record in records:
prompts.append(template.format(
premise=record["premise"],
hypothesis=record["hypothesis"]
))
responses = []
for batch in tqdm(gen_batch(prompts, batch_size), total=len(prompts) // batch_size + 1):
responses.extend(predict_func(batch))
labels, predictions = [], []
for record, response in zip(records, responses):
prediction = clean_func(response)
record["prediction"] = prediction
label = record["label"]
if label != -1:
labels.append(1 - label)
predictions.append(terra_to_bool(prediction))
if labels:
print("terra accuracy:", accuracy_score(labels, predictions))
outputs = [{"idx": r["idx"], "label": r["prediction"]} for r in records]
write_jsonl(outputs, output_path)
return records
# RWSD
RWSD_PROMPT = 'Текст: "{text}"\nНа основе текста одним словом ответь на вопрос: К кому или к чему относится местоимение во фразе "{span2}"?'
def clean_rwsd_response(response, span1):
lcs = find_lcs(span1.lower(), response.lower())
return len(lcs) >= 3
def predict_rwsd(
split,
predict_func,
output_path,
batch_size: int = 4,
nrows: int = None,
template: str = RWSD_PROMPT,
clean_func: Callable = clean_rwsd_response
):
records = list(load_dataset(HF_DATASET, "rwsd", split=split))
if nrows:
records = records[:nrows]
prompts = []
for record in records:
prompts.append(template.format(
text=record["text"],
span2=record["span2_text"],
span1=record["span1_text"],
))
responses = []
for batch in tqdm(gen_batch(prompts, batch_size), total=len(prompts) // batch_size + 1):
responses.extend(predict_func(batch))
labels, predictions = [], []
for record, response in zip(records, responses):
prediction = clean_func(response, record["span1_text"])
record["prediction"] = prediction
label = record["label"]
if label != -1:
labels.append(label)
predictions.append(prediction)
if labels:
print("rwsd accuracy:", accuracy_score(labels, predictions))
outputs = [{"idx": r["idx"], "label": str(r["prediction"])} for r in records]
write_jsonl(outputs, output_path)
return records
# MUSERC
MUSERC_SINGLE_PROMPT = """Текст: {text}
Вопрос: {question}
Является ли "{answer}" правильным ответом на этот вопрос? Основываясь на тексте, ответь только "да" или "нет"."""
MUSERC_SINGLE_YES_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(да|является)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
MUSERC_SINGLE_NO_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(нет|не)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
def clean_muserc_single_response(response):
result = False
if bool(MUSERC_SINGLE_YES_RE.match(response)):
result = True
elif bool(MUSERC_SINGLE_NO_RE.match(response)):
result = False
else:
print("ERROR! Не удалось найти Да/Нет в ответе модели и преобразовать его в bool:", response)
return result
def predict_muserc(
split,
predict_func,
output_path,
batch_size: int = 2,
nrows: int = None,
template: str = MUSERC_SINGLE_PROMPT,
clean_func: Callable = clean_muserc_single_response
):
records = list(load_dataset(HF_DATASET, "muserc", split=split))
if nrows:
records = records[:nrows]
prompts = list()
for record in records:
text, question, answer = record["paragraph"], record["question"], record["answer"]
answer = answer.rstrip(".")
prompts.append(template.format(
text=text,
question=question,
answer=answer
))
responses = []
for batch in tqdm(gen_batch(prompts, batch_size), total=len(prompts) // batch_size + 1):
responses.extend(predict_func(batch))
labels, predictions = [], []
for record, response in zip(records, responses):
record["prediction"] = clean_func(response)
if record["label"] != -1:
labels.append(record["label"])
predictions.append(record["prediction"])
if labels:
print("muserc accuracy:", accuracy_score(labels, predictions))
outputs = []
prev_idx = None
for record in records:
idx = record["idx"]
pidx, qidx, aidx = idx["paragraph"], idx["question"], idx["answer"]
ppidx, pqidx = None, None
if prev_idx:
ppidx, pqidx = prev_idx["paragraph"], prev_idx["question"]
if ppidx != pidx:
outputs.append({"idx": pidx, "passage": {"questions": []}})
assert len(outputs) - 1 == pidx
paragraph = outputs[-1]
if pqidx != qidx:
paragraph["passage"]["questions"].append({"idx": qidx, "answers": []})
question = paragraph["passage"]["questions"][-1]
answer = {"idx": aidx, "label": int(record["prediction"])}
question["answers"].append(answer)
prev_idx = idx
write_jsonl(outputs, output_path)
return records
# RUCOS
def rucos_clean_text(text):
text = " ".join([s.strip().rstrip(".") + "." for s in text.split("@header")]).strip()
text = " ".join([s.strip().rstrip(".") + "." for s in text.split("@context")]).strip()
text = " ".join([s.strip().rstrip(".") + "." for s in text.split("@highlight")]).strip()
text = " ".join([s.strip() for s in text.split("\n") if s.strip()])
return text
RUCOS_MASK = "[entity]"
RUCOS_PROMPT = """Контекст: {text}
Запрос: {query}
Какое имя человека или название организации или название места должно быть вместо {mask} в запросе? Ответь не более чем 3 словами в соответствии с контекстом."""
def clean_rucos_response(response, entities):
answers = []
for answer in entities:
lcs = find_lcs(response.strip(), answer.strip())
answers.append((len(lcs), answer))
return max(answers)[1]
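# Illustrative behaviour (example values only): the candidate entity that shares the
# longest common substring with the model response is returned, e.g.
#   clean_rucos_response("Ответ: Владимир Путин", ["Владимир Путин", "Борис Ельцин"]) -> "Владимир Путин"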
def predict_rucos(
split,
predict_func,
output_path,
batch_size: int = 4,
nrows: int = None,
debug: bool = False,
template: str = RUCOS_PROMPT,
clean_func: Callable = clean_rucos_response
):
records = list(load_dataset(HF_DATASET, "rucos", split=split))
if nrows:
records = records[:nrows]
prompts = list()
for record in records:
entities = record["entities"]
query = record["query"]
text = rucos_clean_text(record["passage"])
entities = [e.strip().strip(",") for e in entities]
query = query.replace("@placeholder", RUCOS_MASK)
prompts.append(template.format(
text=text,
query=query,
mask=RUCOS_MASK
))
responses = []
for batch in tqdm(gen_batch(prompts, batch_size), total=len(prompts) // batch_size + 1):
responses.extend(predict_func(batch))
correct_count, all_count = 0, 0
for response, record in zip(responses, records):
final_response = clean_func(response, record["entities"])
record["prediction"] = final_response
answers = record["answers"]
if answers:
all_count += 1
prediction = record["prediction"].strip().lower()
for answer in answers:
answer = answer.strip().lower()
if edit_distance(answer, prediction) <= 2:
correct_count += 1
break
if all_count > 0:
print("rucos accuracy:", correct_count / all_count)
outputs = [{"idx": r["idx"]["query"], "label": r["prediction"]} for r in records]
write_jsonl(outputs, output_path)
return records
# LIDIRUS
LIDIRUS_PROMPT = '''Текст: "{sentence1}"
Используя текст, можно ли сказать, что утверждение "{sentence2}" точно корректно относительно ситуации из текста? Ответь только "да" или "нет".'''
LIDIRUS_ENTAILMENT_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(да|верно|правда|может|вероятна|верная)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
LIDIRUS_NOT_ENTAILMENT_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(не|нет|неверно|неверное|невероятна|неверная)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
def lidirus_to_bool(response):
return response == "entailment"
def clean_lidirus_response(response):
result = "not_entailment"
if bool(LIDIRUS_ENTAILMENT_RE.match(response)):
result = "entailment"
elif bool(LIDIRUS_NOT_ENTAILMENT_RE.match(response)):
result = "not_entailment"
else:
print("ERROR! Не удалось найти Да/Нет в ответе модели и преобразовать его в bool", response)
return result
def predict_lidirus(
predict_func,
output_path,
batch_size: int = 4,
nrows: int = None,
template: str = LIDIRUS_PROMPT,
clean_func: Callable = clean_lidirus_response
):
records = list(load_dataset(HF_DATASET, "lidirus", split="test"))
if nrows:
records = records[:nrows]
prompts = [template.format(
sentence1=r["sentence1"],
sentence2=r["sentence2"]
) for r in records]
responses = []
for batch in tqdm(gen_batch(prompts, batch_size), total=len(prompts) // batch_size + 1):
responses.extend(predict_func(batch))
labels, predictions = [], []
for record, response in zip(records, responses):
prediction = clean_func(response)
record["prediction"] = prediction
label = record["label"]
labels.append(1 - label)
predictions.append(lidirus_to_bool(prediction))
print("lidirus accuracy:", accuracy_score(labels, predictions))
print("lidirus corr:", matthews_corrcoef(labels, predictions))
outputs = [{"idx": r["idx"], "label": r["prediction"]} for r in records]
write_jsonl(outputs, output_path)
return records
# PARUS
PARUS_CAUSE_PROMPT = """Выбери одну наиболее вероятную причину исключительно из двух предложенных вариантов.
Варианты: {choice1}; {choice2}
{premise}, так как..."""
PARUS_EFFECT_PROMPT = """Выбери одно наиболее вероятное следствие исключительно из двух предложенных вариантов.
Варианты: {choice1}; {choice2}
{premise}, поэтому..."""
def predict_parus(
split,
predict_func,
output_path,
batch_size: int = 12,
nrows: int = None,
template_cause: str = PARUS_CAUSE_PROMPT,
template_effect: str = PARUS_EFFECT_PROMPT
):
records = list(load_dataset(HF_DATASET, "parus", split=split))
if nrows:
records = records[:nrows]
prompts = []
for r in records:
idx = r["idx"]
c1 = r["choice1"].rstrip(".").lower()
c2 = r["choice2"].rstrip(".").lower()
premise = r["premise"].rstrip(".")
is_cause = r["question"] == "cause"
template = template_cause if is_cause else template_effect
prompts.append(template.format(
premise=premise,
choice1=c1,
choice2=c2
))
responses = list()
for batch in tqdm(gen_batch(prompts, batch_size), total=len(prompts) // batch_size + 1):
responses.extend(predict_func(batch))
assert len(responses) == len(records)
for idx, (response, record) in enumerate(zip(responses, records)):
response = response.lower()
c1 = record["choice1"].rstrip(".").lower()
c2 = record["choice2"].rstrip(".").lower()
c1_lcs = find_lcs(response, c1)
c2_lcs = find_lcs(response, c2)
record["prediction"] = int(len(c2_lcs) > len(c1_lcs))
if records[0]["label"] != -1:
y_true, y_pred = [], []
for r in records:
y_pred.append(r["prediction"])
y_true.append(r["label"])
score = accuracy_score(y_true, y_pred)
print("parus accuracy:", score)
outputs = [{"idx": r["idx"], "label": int(r["prediction"])} for r in records]
write_jsonl(outputs, output_path)
return records
# RCB
RCB_PROMPT = """Дан текст: "{premise}"
Ответь на вопрос по тексту "да", "нет" или "может быть": {question}"""
RCB_YES_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(да|верно|вероятно)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
RCB_NO_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(нет|неверно|неверное|невероятно|не)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
def clean_rcb_response(response):
is_contradiction = bool(RCB_NO_RE.match(response))
is_entailment = bool(RCB_YES_RE.match(response))
if is_contradiction:
return "contradiction"
if is_entailment:
return "entailment"
return "neutral"
def rcb_label2index(label):
mapping = {
"entailment": 0,
"contradiction": 1,
"neutral": 2
}
return mapping[label]
def predict_rcb(
split,
predict_func,
output_path,
batch_size: int = 8,
nrows: int = None,
template: str = RCB_PROMPT,
clean_func: Callable = clean_rcb_response
):
records = list(load_dataset(HF_DATASET, "rcb", split=split))
if nrows:
records = records[:nrows]
questions = [record["hypothesis"].rstrip(".") + "?" for record in records]
prompts = []
for record, question in zip(records, questions):
prompts.append(template.format(
premise=record["premise"],
question=question
))
responses = []
for batch in tqdm(gen_batch(prompts, batch_size), total=len(prompts) // batch_size + 1):
responses.extend(predict_func(batch))
for r, response in zip(records, responses):
r["prediction"] = clean_func(response)
if records[0]["label"] != -1:
labels = [r["label"] for r in records]
responses = [rcb_label2index(r["prediction"]) for r in records]
print("rcb accuracy:", accuracy_score(labels, responses))
outputs = [{"idx": r["idx"], "label": r["prediction"]} for r in records]
write_jsonl(outputs, output_path)
return records
# RUSSE
RUSSE_PROMPT = '''Ответь только "да" или "нет" на вопрос:
В текстовом фрагменте "{sentence1}" и текстовом фрагменте "{sentence2}" означают ли слова "{word}" разное?'''
RUSSE_YES_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(да|верно|вероятно|одно)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
RUSSE_NO_RE = re.compile(
r"^[^\w]*(Выходные данные|Выход|Ответ|Оценка)?[^\w]*(нет|не)",
re.IGNORECASE | re.MULTILINE | re.DOTALL
)
def clean_russe_response(response):
if bool(RUSSE_YES_RE.match(response)):
return 0
if bool(RUSSE_NO_RE.match(response)):
return 1
print("ERROR! Не удалось найти Да/Нет в ответе модели и преобразовать его в bool:", response)
return 1
def predict_russe(
split,
predict_func,
output_path,
batch_size: int = 8,
nrows: int = None,
template: str = RUSSE_PROMPT,
clean_func: Callable = clean_russe_response
):
records = list(load_dataset(HF_DATASET, "russe", split=split))
if nrows:
records = records[:nrows]
prompts = []
for record in records:
prompts.append(template.format(
sentence1=record["sentence1"],
sentence2=record["sentence2"],
word=record["word"]
))
responses = []
for batch in tqdm(gen_batch(prompts, batch_size), total=len(prompts) // batch_size + 1):
responses.extend(predict_func(batch))
for r, response in zip(records, responses):
r["prediction"] = clean_func(response)
if records[0]["label"] != -1:
labels = [r["label"] for r in records]
responses = [r["prediction"] for r in records]
print("russe accuracy:", accuracy_score(labels, responses))
outputs = [{
"idx": r["idx"],
"label": str(bool(r["prediction"])).lower()
} for r in records]
write_jsonl(outputs, output_path)
return records
ALL_TASKS = ("danetqa", "lidirus", "muserc", "parus", "rcb", "rucos", "russe", "rwsd", "terra")
def main(
model_name,
nrows: int = None,
template_path: str = "internal_prompts/saiga_v2.json",
split: str = "test",
predictions_dir: str = "submission",
debug: bool = False,
tasks: Tuple[str] = ALL_TASKS
):
predictions_dir = Path(predictions_dir)
predict_short = None
predict_long = None
if model_name not in ("gpt-4", "gpt-3.5-turbo"):
model, tokenizer, generation_config = load_saiga(model_name)
generation_config.no_repeat_ngram_size = 64
generation_config.temperature = 0.01
def predict_saiga_zero_shot_bound(batch):
generation_config.max_new_tokens = 256
return predict_saiga_zero_shot(
model=model,
tokenizer=tokenizer,
generation_config=generation_config,
template_path=template_path,
prompts=batch,
debug=debug
)
def predict_saiga_zero_shot_bound_short(batch):
generation_config.max_new_tokens = 8
return predict_saiga_zero_shot(
model=model,
tokenizer=tokenizer,
generation_config=generation_config,
template_path=template_path,
prompts=batch,
debug=debug
)
predict_long = predict_saiga_zero_shot_bound
predict_short = predict_saiga_zero_shot_bound_short
else:
def predict_chatgpt(batch):
messages = [[{"role": "user", "content": prompt}] for prompt in batch]
responses = openai_batch_completion(messages, model_name=model_name)
responses = [r.message.content for r in responses]
if debug:
for prompt, response in zip(batch, responses):
print(prompt)
print(response)
print()
return responses
def predict_chatgpt_short(batch):
messages = [[{"role": "user", "content": prompt}] for prompt in batch]
responses = openai_batch_completion(
messages,
decoding_args=OpenAIDecodingArguments(max_tokens=16),
model_name=model_name
)
responses = [r.message.content for r in responses]
if debug:
for prompt, response in zip(batch, responses):
print(prompt)
print(response)
print()
return responses
predict_long = predict_chatgpt
predict_short = predict_chatgpt_short
if "danetqa" in tasks:
predict_danetqa(
split=split,
predict_func=predict_short,
output_path=predictions_dir / "DaNetQA.jsonl",
nrows=nrows
)
if "terra" in tasks:
predict_terra(
split=split,
predict_func=predict_short,
output_path=predictions_dir / "TERRa.jsonl",
nrows=nrows
)
if "rwsd" in tasks:
predict_rwsd(
split=split,
predict_func=predict_long,
output_path=predictions_dir / "RWSD.jsonl",
nrows=nrows
)
if "rucos" in tasks:
predict_rucos(
split=split,
predict_func=predict_long,
output_path=predictions_dir / "RuCoS.jsonl",
nrows=nrows
)
if "lidirus" in tasks:
predict_lidirus(
predict_func=predict_short,
output_path=predictions_dir / "LiDiRus.jsonl",
nrows=nrows
)
if "parus" in tasks:
predict_parus(
split=split,
predict_func=predict_long,
output_path=predictions_dir / "PARus.jsonl",
nrows=nrows
)
if "rcb" in tasks:
predict_rcb(
split=split,
predict_func=predict_long,
output_path=predictions_dir / "RCB.jsonl",
nrows=nrows
)
if "russe" in tasks:
predict_russe(
split=split,
predict_func=predict_short,
output_path=predictions_dir / "RUSSE.jsonl",
nrows=nrows
)
if "muserc" in tasks:
predict_muserc(
split=split,
predict_func=predict_short,
output_path=predictions_dir / "MuSeRC.jsonl",
nrows=nrows
)
if __name__ == "__main__":
fire.Fire(main)
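# Example invocations (a sketch; assumes this script is saved as evaluate_rsg.py and that
# model_name is either a local Saiga checkpoint name or "gpt-3.5-turbo"/"gpt-4"; flag
# parsing is handled by python-fire):
#   python evaluate_rsg.py --model_name=gpt-3.5-turbo --split=test --predictions_dir=submission
#   python evaluate_rsg.py --model_name=gpt-3.5-turbo --nrows=50 --tasks='("rcb","parus")'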
| [
"Дан текст: \"{premise}\"\n\nОтветь на вопрос по тексту \"да\", \"нет\" или \"может быть\": {question}",
"sentence2",
"Текст: {text}\n\nВопрос: {question}\n\nЯвляется ли \"{answer}\" правильным ответом на этот вопрос? Основываясь на тексте, ответь только \"да\" или \"нет\".",
"Контекст: {passage}\n\nИспользуя контекст, ответь одним словом на вопрос: {question}",
"Выбери одно наиболее вероятное следствие исключительно из двух предложенных вариантов.\n\nВарианты: {choice1}; {choice2}\n\n{premise}, поэтому...",
"Выбери одну наиболее вероятную причину исключительно из двух предложенных вариантов.\n\nВарианты: {choice1}; {choice2}\n\n{premise}, так как...",
"Текст: \"{sentence1}\"\n\nИспользуя текст, можно ли сказать, что утверждение \"{sentence2}\" точно корректно относительно ситуации из текста? Ответь только \"да\" или \"нет\".",
"question",
"Контекст: {text}\nЗапрос: {query}\n\nКакое имя человека или название организации или название места должно быть вместо {mask} в запросе? Ответь не более чем 3 словами в соответствии с контекстом.",
"Текст: {premise} Утверждение: {hypothesis}\nИспользуя текст, ответь одним словом на вопрос: Вероятно ли утверждение при условии остального текста?",
"sentence1",
"[]",
"Текст: \"{text}\"\nНа основе текста одним словом ответь на вопрос: К кому или к чему относится местоимение во фразе \"{span2}\"?",
"Ответь только \"да\" или \"нет\" на вопрос:\nВ текстовом фрагменте \"{sentence1}\" и текстовом фрагменте \"{sentence2}\" означают ли слова \"{word}\" разное?"
] |
2024-01-10 | diego898/autolabel | src~autolabel~transforms~serp_api.py | from autolabel.cache import BaseCache
from autolabel.transforms import BaseTransform
from langchain.utilities import SerpAPIWrapper
from typing import Dict, Any
import logging
import pandas as pd
from autolabel.transforms.schema import (
TransformError,
TransformErrorType,
TransformType,
)
logger = logging.getLogger(__name__)
class SerpApi(BaseTransform):
COLUMN_NAMES = [
"result_column",
]
DEFAULT_ARGS = {
"engine": "google",
"google_domain": "google.com",
"gl": "us",
"hl": "en",
}
def __init__(
self,
cache: BaseCache,
output_columns: Dict[str, Any],
query_column: str,
serp_api_key: str,
serp_args: dict = DEFAULT_ARGS,
) -> None:
super().__init__(cache, output_columns)
self.query_column = query_column
self.serp_api_key = serp_api_key
self.serp_args = serp_args
self.serp_api_wrapper = SerpAPIWrapper(
search_engine=None, params=self.serp_args, serpapi_api_key=self.serp_api_key
)
def name(self) -> str:
return TransformType.SERP_API
async def _get_result(self, query):
"""
Makes a request to Serp API with the query
and returns the search results.
"""
try:
search_result = await self.serp_api_wrapper.arun(query=query)
except Exception as e:
raise TransformError(
TransformErrorType.SERP_API_ERROR,
f"Error while making request to Serp API: {e}",
)
return search_result
async def _apply(self, row: Dict[str, Any]) -> Dict[str, Any]:
query = row[self.query_column]
search_result = self.NULL_TRANSFORM_TOKEN
if pd.isna(query) or query == self.NULL_TRANSFORM_TOKEN:
raise TransformError(
TransformErrorType.INVALID_INPUT,
f"Empty query in row {row}",
)
else:
search_result = await self._get_result(query)
transformed_row = {self.output_columns["result_column"]: search_result}
return self._return_output_row(transformed_row)
def params(self):
return {
"query_column": self.query_column,
"output_columns": self.output_columns,
"serp_api_key": self.serp_api_key,
"serp_args": self.serp_args,
}
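if __name__ == "__main__":
    # Minimal usage sketch. Assumptions not shown in this file: a valid SerpAPI key, and
    # that passing cache=None is an acceptable way to disable caching for a one-off call.
    import asyncio

    transform = SerpApi(
        cache=None,
        output_columns={"result_column": "search_result"},
        query_column="query",
        serp_api_key="YOUR_SERP_API_KEY",
    )
    row = {"query": "Who founded SerpApi?"}
    # _apply is a coroutine, so drive it with asyncio.run for a single row
    print(asyncio.run(transform._apply(row)))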
| [] |
2024-01-10 | diego898/autolabel | src~autolabel~models~palm.py | from functools import cached_property
from typing import List, Optional
import logging
from autolabel.models import BaseModel
from autolabel.configs import AutolabelConfig
from autolabel.cache import BaseCache
from autolabel.schema import RefuelLLMResult
from langchain.schema import LLMResult, HumanMessage, Generation
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
class PaLMLLM(BaseModel):
SEP_REPLACEMENT_TOKEN = "@@"
CHAT_ENGINE_MODELS = ["chat-bison@001"]
DEFAULT_MODEL = "text-bison@001"
# Reference: https://developers.generativeai.google/guide/concepts#model_parameters for "A token is approximately 4 characters"
DEFAULT_PARAMS = {"temperature": 0, "max_output_tokens": 1000}
# Reference: https://cloud.google.com/vertex-ai/pricing
COST_PER_CHARACTER = {
"text-bison@001": 0.001 / 1000,
"chat-bison@001": 0.0005 / 1000,
"textembedding-gecko@001": 0.0001 / 1000,
}
@cached_property
def _engine(self) -> str:
if self.model_name is not None and self.model_name in self.CHAT_ENGINE_MODELS:
return "chat"
else:
return "completion"
def __init__(
self,
config: AutolabelConfig,
cache: BaseCache = None,
) -> None:
super().__init__(config, cache)
try:
from langchain.chat_models import ChatVertexAI
from langchain.llms import VertexAI
except ImportError:
raise ImportError(
"palm is required to use the Palm LLM. Please install it with the following command: pip install 'refuel-autolabel[google]'"
)
# populate model name
self.model_name = config.model_name() or self.DEFAULT_MODEL
# populate model params and initialize the LLM
model_params = config.model_params()
self.model_params = {
**self.DEFAULT_PARAMS,
**model_params,
}
if self._engine == "chat":
self.llm = ChatVertexAI(model_name=self.model_name, **self.model_params)
else:
self.llm = VertexAI(model_name=self.model_name, **self.model_params)
@retry(
reraise=True,
stop=stop_after_attempt(5),
wait=wait_exponential(multiplier=1, min=2, max=10),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def _label_with_retry(self, prompts: List[str]) -> LLMResult:
return self.llm.generate(prompts)
def _label_individually(self, prompts: List[str]) -> LLMResult:
"""Label each prompt individually. Should be used only after trying as a batch first.
Args:
prompts (List[str]): List of prompts to label
Returns:
LLMResult: LLMResult object with generations
"""
generations = []
for i, prompt in enumerate(prompts):
try:
response = self._label_with_retry([prompt])
for generation in response.generations[0]:
generation.text = generation.text.replace(
self.SEP_REPLACEMENT_TOKEN, "\n"
)
generations.append(response.generations[0])
except Exception as e:
print(f"Error generating from LLM: {e}, returning empty generation")
generations.append([Generation(text="")])
return LLMResult(generations=generations)
def _label(self, prompts: List[str]) -> RefuelLLMResult:
for prompt in prompts:
if self.SEP_REPLACEMENT_TOKEN in prompt:
logger.warning(
f"""Current prompt contains {self.SEP_REPLACEMENT_TOKEN}
which is currently used as a separator token by refuel
llm. It is highly recommended to avoid having any
occurences of this substring in the prompt.
"""
)
prompts = [
prompt.replace("\n", self.SEP_REPLACEMENT_TOKEN) for prompt in prompts
]
if self._engine == "chat":
# Need to convert list[prompts] -> list[messages]
# Currently the entire prompt is stuck into the "human message"
# We might consider breaking this up into human vs system message in future
prompts = [[HumanMessage(content=prompt)] for prompt in prompts]
try:
result = self._label_with_retry(prompts)
for generations in result.generations:
for generation in generations:
generation.text = generation.text.replace(
self.SEP_REPLACEMENT_TOKEN, "\n"
)
return RefuelLLMResult(
generations=result.generations, errors=[None] * len(result.generations)
)
except Exception as e:
return self._label_individually(prompts)
def get_cost(self, prompt: str, label: Optional[str] = "") -> float:
if self.model_name is None:
return 0.0
cost_per_char = self.COST_PER_CHARACTER.get(self.model_name, 0.0)
return cost_per_char * len(prompt) + cost_per_char * (
len(label) if label else 4 * self.model_params["max_output_tokens"]
)
def returns_token_probs(self) -> bool:
return False
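if __name__ == "__main__":
    # Back-of-the-envelope cost check: a sketch that mirrors get_cost above without
    # instantiating the class (so no Vertex AI credentials are needed); it assumes the
    # text-bison@001 pricing table defined in this file is still current.
    prompt = "Classify the sentiment of this review: the battery life is excellent"
    cost_per_char = PaLMLLM.COST_PER_CHARACTER["text-bison@001"]
    max_output_tokens = PaLMLLM.DEFAULT_PARAMS["max_output_tokens"]
    # with no label available, get_cost assumes roughly 4 characters per generated token
    estimated_cost = cost_per_char * len(prompt) + cost_per_char * 4 * max_output_tokens
    print(f"estimated cost: ${estimated_cost:.6f}")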
| [
"\n"
] |
2024-01-10 | diego898/autolabel | src~autolabel~models~hf_pipeline.py | import logging
from typing import List, Optional, Dict
from autolabel.models import BaseModel
from autolabel.configs import AutolabelConfig
from autolabel.cache import BaseCache
from autolabel.schema import RefuelLLMResult
logger = logging.getLogger(__name__)
class HFPipelineLLM(BaseModel):
DEFAULT_MODEL = "google/flan-t5-xxl"
DEFAULT_PARAMS = {"temperature": 0.0, "quantize": 8}
def __init__(self, config: AutolabelConfig, cache: BaseCache = None) -> None:
super().__init__(config, cache)
from langchain.llms import HuggingFacePipeline
try:
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoModelForCausalLM,
AutoTokenizer,
pipeline,
)
from transformers.models.auto.modeling_auto import (
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
)
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please it install it with `pip install transformers`."
)
try:
import torch
except ImportError:
raise ValueError(
"Could not import torch package. "
"Please it install it with `pip install torch`."
)
# populate model name
self.model_name = config.model_name() or self.DEFAULT_MODEL
# populate model params
model_params = config.model_params()
self.model_params = {**self.DEFAULT_PARAMS, **model_params}
if config.logit_bias() != 0:
self.model_params = {
**self._generate_sequence_bias(),
**self.model_params,
}
# initialize HF pipeline
tokenizer = AutoTokenizer.from_pretrained(
self.model_name, use_fast=False, add_prefix_space=True
)
quantize_bits = self.model_params["quantize"]
model_config = AutoConfig.from_pretrained(self.model_name)
if isinstance(model_config, tuple(MODEL_FOR_CAUSAL_LM_MAPPING)):
AutoModel = AutoModelForCausalLM
elif isinstance(model_config, tuple(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)):
AutoModel = AutoModelForSeq2SeqLM
else:
raise ValueError(
"model_name is neither a causal LM nor a seq2seq LM. Please check the model_name."
)
if not torch.cuda.is_available():
model = AutoModel.from_pretrained(self.model_name)
elif quantize_bits == 8:
model = AutoModel.from_pretrained(
self.model_name, load_in_8bit=True, device_map="auto"
)
elif quantize_bits == "16":
model = AutoModel.from_pretrained(
self.model_name, torch_dtype=torch.float16, device_map="auto"
)
else:
model = AutoModel.from_pretrained(self.model_name, device_map="auto")
model_kwargs = dict(self.model_params) # make a copy of the model params
model_kwargs.pop("quantize", None) # remove quantize from the model params
pipe = pipeline(
"text2text-generation",
model=model,
tokenizer=tokenizer,
**model_kwargs,
)
# initialize LLM
self.llm = HuggingFacePipeline(pipeline=pipe, model_kwargs=model_kwargs)
def _generate_sequence_bias(self) -> Dict:
"""Generates sequence bias dict to add to the config for the labels specified
Returns:
Dict: sequence bias, max new tokens, and num beams
"""
if len(self.config.labels_list()) == 0:
logger.warning(
"No labels specified in the config. Skipping logit bias generation."
)
return {}
try:
from transformers import AutoTokenizer
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please it install it with `pip install transformers`."
)
tokenizer = AutoTokenizer.from_pretrained(
self.model_name, use_fast=False, add_prefix_space=True
)
sequence_bias = {tuple([tokenizer.eos_token_id]): self.config.logit_bias()}
max_new_tokens = 0
for label in self.config.labels_list():
tokens = tuple(tokenizer([label], add_special_tokens=False).input_ids[0])
for token in tokens:
sequence_bias[tuple([token])] = self.config.logit_bias()
max_new_tokens = max(max_new_tokens, len(tokens))
return {
"sequence_bias": sequence_bias,
"max_new_tokens": max_new_tokens,
}
def _label(self, prompts: List[str]) -> RefuelLLMResult:
try:
result = self.llm.generate(prompts)
return RefuelLLMResult(
generations=result.generations, errors=[None] * len(result.generations)
)
except Exception as e:
return self._label_individually(prompts)
def get_cost(self, prompt: str, label: Optional[str] = "") -> float:
# Model inference for this model is being run locally
# Revisit this in the future when we support HF inference endpoints
return 0.0
def returns_token_probs(self) -> bool:
return False
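if __name__ == "__main__":
    # Illustrative shape of the dict returned by _generate_sequence_bias above. The token
    # ids below are made up; real ids come from the model's tokenizer and the configured
    # labels, and the bias value comes from config.logit_bias().
    example_bias = {
        "sequence_bias": {(1,): 100.0, (8241,): 100.0, (3782,): 100.0},
        "max_new_tokens": 1,  # length of the longest label, measured in tokens
    }
    print(example_bias)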
| [] |
2024-01-10 | diego898/autolabel | src~autolabel~tasks~multilabel_classification.py | from collections import defaultdict
from typing import List, Dict, Tuple
from langchain.prompts.prompt import PromptTemplate
from autolabel.configs import AutolabelConfig
from autolabel.schema import LLMAnnotation, MetricType, MetricResult, F1Type
from autolabel.tasks import BaseTask
from autolabel.utils import get_format_variables
import json
from autolabel.metrics import (
AccuracyMetric,
AUROCMetric,
SupportMetric,
CompletionRateMetric,
F1Metric,
BaseMetric,
)
class MultilabelClassificationTask(BaseTask):
DEFAULT_OUTPUT_GUIDELINES = 'You will return the answer as a semicolon-separated list of labels. For example: "label1;label2;label3"'
DEFAULT_TASK_GUIDELINES = "Your job is to correctly label the provided input example into one or more of the following {num_labels} categories.\nCategories:\n{labels}\n"
GENERATE_EXPLANATION_PROMPT = "You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. Your job is to provide an explanation for why the output is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: "
def __init__(self, config: AutolabelConfig) -> None:
super().__init__(config)
self.metrics = [
AccuracyMetric(),
SupportMetric(),
CompletionRateMetric(),
F1Metric(
type=F1Type.MULTI_LABEL,
labels=self.config.labels_list(),
sep=self.config.label_separator(),
average=[MetricType.F1_MACRO, MetricType.F1_WEIGHTED],
),
]
if self.config.confidence():
self.metrics.append(AUROCMetric())
def construct_prompt(self, input: Dict, examples: List) -> str:
# Copy over the input so that we can modify it
input = input.copy()
# prepare task guideline
labels_list = self.config.labels_list()
num_labels = len(labels_list)
fmt_task_guidelines = self.task_guidelines.format(
num_labels=num_labels, labels="\n".join(labels_list)
)
# prepare seed examples
example_template = self.config.example_template()
label_column = self.config.label_column()
fmt_examples = []
for eg in examples:
eg_copy = eg.copy()
# If chain of thought is enabled
if label_column and self.config.chain_of_thought():
eg_copy[label_column] = json.dumps({label_column: eg[label_column]})
fmt_examples.append(example_template.format_map(defaultdict(str, eg_copy)))
# populate the current example in the prompt
if label_column:
input[label_column] = ""
# populate the explanation column with empty string for current example
explanation_column = self.config.explanation_column()
if explanation_column:
input[explanation_column] = ""
# populate the current example in the prompt
current_example = example_template.format_map(defaultdict(str, input))
if self._is_few_shot_mode():
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=self.output_guidelines,
seed_examples="\n\n".join(fmt_examples),
current_example=current_example,
)
else:
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=self.output_guidelines,
current_example=current_example,
)
def get_explanation_prompt(self, example: Dict) -> str:
pt = PromptTemplate(
input_variables=get_format_variables(self.GENERATE_EXPLANATION_PROMPT),
template=self.GENERATE_EXPLANATION_PROMPT,
)
# prepare task guideline
labels_list = self.config.labels_list()
num_labels = len(labels_list)
fmt_task_guidelines = self.task_guidelines.format(
num_labels=num_labels, labels="\n".join(labels_list)
)
# prepare labeled example
example_template = self.config.example_template()
fmt_example = example_template.format_map(defaultdict(str, example))
return pt.format(
task_guidelines=fmt_task_guidelines,
labeled_example=fmt_example,
)
def get_generate_dataset_prompt(
self, label: str, num_rows: int, guidelines: str = None
) -> str:
raise NotImplementedError("Dataset generation not implemented for this task")
def eval(
self,
llm_labels: List[LLMAnnotation],
gt_labels: List[str],
additional_metrics: List[BaseMetric] = [],
) -> List[MetricResult]:
"""Evaluate the LLM generated labels by comparing them against ground truth
Args:
llm_labels (List[LLMAnnotation]): list of LLM generated labels
gt_labels (List[str]): list of ground truth labels
additional_metrics (List[BaseMetric], optional): list of additional metrics to compute. Defaults to [].
Returns:
List[MetricResult]: list of metrics and corresponding values
"""
eval_metrics = []
for metric in self.metrics + additional_metrics:
eval_metrics.extend(metric.compute(llm_labels, gt_labels))
return eval_metrics
| [
"You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. Your job is to provide an explanation for why the output is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: "
] |
2024-01-10 | diego898/autolabel | src~autolabel~cache~sqlalchemy_generation_cache.py | from sqlalchemy.orm import sessionmaker
from typing import Optional
from autolabel.schema import GenerationCacheEntry
from autolabel.database import create_db_engine
from autolabel.data_models import Base
from .base import BaseCache
from typing import List
from langchain.schema import Generation
from autolabel.data_models import GenerationCacheEntryModel
import logging
logger = logging.getLogger(__name__)
class SQLAlchemyGenerationCache(BaseCache):
"""A cache system implemented with SQL Alchemy"""
def __init__(self):
self.engine = None
self.base = Base
self.session = None
def initialize(self):
self.engine = create_db_engine()
self.base.metadata.create_all(self.engine)
self.session = sessionmaker(bind=self.engine)()
def lookup(self, entry: GenerationCacheEntry) -> List[Generation]:
"""Retrieves an entry from the Cache. Returns an empty list [] if not found.
Args:
entry: GenerationCacheEntry we wish to retrieve from the Cache
Returns:
result: A list of langchain Generation objects, containing the results of the labeling run for this GenerationCacheEntry. Empty list [] if not found.
"""
cache_entry = GenerationCacheEntryModel.get(self.session, entry)
if cache_entry is None:
logger.debug("Cache miss")
return []
logger.debug("Cache hit")
return cache_entry.generations
def update(self, entry: GenerationCacheEntry) -> None:
"""Inserts the provided GenerationCacheEntry into the Cache, overriding it if it already exists
Args:
entry: GenerationCacheEntry we wish to put into the Cache
"""
GenerationCacheEntryModel.insert(self.session, entry)
def clear(self) -> None:
"""Clears the entire Cache"""
GenerationCacheEntryModel.clear(self.session)
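if __name__ == "__main__":
    # Minimal usage sketch (assumes create_db_engine points at a writable local database;
    # GenerationCacheEntry and Generation are already imported at the top of this module
    # and the field values below are purely illustrative):
    cache = SQLAlchemyGenerationCache()
    cache.initialize()
    entry = GenerationCacheEntry(
        model_name="gpt-3.5-turbo",
        prompt="Label this example: ...",
        model_params="{}",
        generations=[Generation(text="positive")],
    )
    cache.update(entry)
    print(cache.lookup(entry))  # expected: [Generation(text="positive")]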
| [] |
2024-01-10 | diego898/autolabel | src~autolabel~tasks~entity_matching.py | from collections import defaultdict
from typing import List, Dict, Tuple
import json
import logging
from langchain.prompts.prompt import PromptTemplate
from sklearn.metrics import accuracy_score
from autolabel.configs import AutolabelConfig
from autolabel.schema import LLMAnnotation, MetricResult
from autolabel.tasks import BaseTask
from autolabel.utils import get_format_variables
from autolabel.tasks.utils import filter_unlabeled_examples
from autolabel.metrics import (
AccuracyMetric,
AUROCMetric,
SupportMetric,
CompletionRateMetric,
ClassificationReportMetric,
BaseMetric,
)
logger = logging.getLogger(__name__)
class EntityMatchingTask(BaseTask):
DEFAULT_OUTPUT_GUIDELINES = (
'You will return the answer with one element: "the correct option"\n'
)
DEFAULT_TASK_GUIDELINES = "Your job is to tell if the two given entities are duplicates or not. You will return the answer from one of the choices. Choices:\n{labels}\n"
GENERATE_EXPLANATION_PROMPT = "You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. Your job is to provide an explanation for why the output is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: "
GENERATE_DATASET_TEMPLATE = "{guidelines}\n\nThe inputs must be diverse, covering a wide range of scenarios. You will not generate duplicate inputs. These inputs should be organized in rows in csv format with the columns {columns}.\n\n{label_descriptions}\n\n{format_guidelines}\n\n{output_guidelines}\n\n```csv"
DEFAULT_DATASET_GENERATION_GUIDELINES = "You are an expert at generating plausible inputs for a given task.\n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION"
LABEL_DESCRIPTIONS_PROMPT = "Each input should fall into one of these {num_labels} categories. These are the only categories that the inputs can belong to."
GENERATE_DATASET_FORMAT_GUIDELINES = "Your response should be in csv format with the following columns: {columns}.\n\nHere is a template you can follow for your output:\n```csv\n{columns}\n{example_rows}\n```\n\nMake sure to replace the placeholder variables with your own values."
GENERATE_DATASET_OUTPUT_GUIDELINES = 'Now I want you to generate {num_rows} excerpts that follow the guidelines and all belong to the "{label}" category. They should not belong to any of the other categories.'
def __init__(self, config: AutolabelConfig) -> None:
super().__init__(config)
self.metrics = [
AccuracyMetric(),
SupportMetric(),
CompletionRateMetric(),
ClassificationReportMetric(),
]
if self.config.confidence():
self.metrics.append(AUROCMetric())
for label in self.config.labels_list():
if "\n" in label:
logger.warning(
"Label contains newline character. This can have output guideline issues."
)
def construct_prompt(self, input: Dict, examples: List[Dict]) -> str:
# Copy over the input so that we can modify it
input = input.copy()
# prepare task guideline
labels_list = self.config.labels_list()
num_labels = len(labels_list)
fmt_task_guidelines = self.task_guidelines.format(
num_labels=num_labels, labels="\n".join(labels_list)
)
# prepare seed examples
example_template = self.config.example_template()
label_column = self.config.label_column()
fmt_examples = []
for eg in examples:
eg_copy = eg.copy()
# If chain of thought is enabled
if label_column and self.config.chain_of_thought():
eg_copy[label_column] = json.dumps({"label": eg[label_column]})
fmt_examples.append(example_template.format_map(defaultdict(str, eg_copy)))
# populate the current example in the prompt
if label_column:
input[label_column] = ""
# populate the explanation column with empty string for current example
explanation_column = self.config.explanation_column()
if explanation_column:
input[explanation_column] = ""
# populate the current example in the prompt
current_example = example_template.format_map(defaultdict(str, input))
if self._is_few_shot_mode():
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=self.output_guidelines,
seed_examples="\n\n".join(fmt_examples),
current_example=current_example,
)
else:
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=self.output_guidelines,
current_example=current_example,
)
def get_explanation_prompt(self, example: Dict) -> str:
pt = PromptTemplate(
input_variables=get_format_variables(self.GENERATE_EXPLANATION_PROMPT),
template=self.GENERATE_EXPLANATION_PROMPT,
)
# prepare task guideline
labels_list = self.config.labels_list()
num_labels = len(labels_list)
fmt_task_guidelines = self.task_guidelines.format(
num_labels=num_labels, labels="\n".join(labels_list)
)
# prepare labeled example
example_template = self.config.example_template()
fmt_example = example_template.format_map(defaultdict(str, example))
return pt.format(
task_guidelines=fmt_task_guidelines,
labeled_example=fmt_example,
)
def get_generate_dataset_prompt(self, label: str) -> str:
pt = PromptTemplate(
input_variables=get_format_variables(self.GENERATE_DATASET_TEMPLATE),
template=self.GENERATE_DATASET_TEMPLATE,
)
# prepare task guideline
labels_list = self.config.labels_list()
num_labels = len(labels_list)
fmt_task_guidelines = self.task_guidelines.format(
num_labels=num_labels, labels="\n".join(labels_list)
)
fmt_guidelines = self.dataset_generation_guidelines.format(
task_guidelines=fmt_task_guidelines
)
# prepare columns
columns = get_format_variables(self.config.example_template())
columns.remove(self.config.label_column())
# prepare label descriptions
fmt_label_descriptions = self.LABEL_DESCRIPTIONS_PROMPT.format(
num_labels=num_labels
)
for i, l in enumerate(labels_list):
fmt_label_descriptions += f"\n{i+1}. {l}{': ' + self.config.label_descriptions()[l] if self.config.label_descriptions() is not None and l in self.config.label_descriptions() else ''}"
# prepare format
example_rows = "\n".join(
[",".join([f'"{column}_{i+1}"' for column in columns]) for i in range(3)]
)
fmt_format_guidelines = self.GENERATE_DATASET_FORMAT_GUIDELINES.format(
columns=",".join(columns), example_rows=example_rows
)
# prepare output guidelines
fmt_output_guidelines = self.GENERATE_DATASET_OUTPUT_GUIDELINES.format(
num_rows=self.config.dataset_generation_num_rows(), label=label
)
return pt.format(
guidelines=fmt_guidelines,
columns=columns,
label_descriptions=fmt_label_descriptions,
format_guidelines=fmt_format_guidelines,
output_guidelines=fmt_output_guidelines,
)
def eval(
self,
llm_labels: List[LLMAnnotation],
gt_labels: List[str],
additional_metrics: List[BaseMetric] = [],
) -> List[MetricResult]:
"""Evaluate the LLM generated labels by comparing them against ground truth
Args:
llm_labels (List[LLMAnnotation]): list of LLM generated labels
gt_labels (List[str]): list of ground truth labels
additional_metrics (List[BaseMetric], optional): List of additional metrics to run. Defaults to [].
Returns:
List[MetricResult]: list of metrics and corresponding values
"""
eval_metrics = []
for metric in self.metrics + additional_metrics:
eval_metrics.extend(metric.compute(llm_labels, gt_labels))
return eval_metrics
| [
"{guidelines}\n\nThe inputs must be diverse, covering a wide range of scenarios. You will not generate duplicate inputs. These inputs should be organized in rows in csv format with the columns {columns}.\n\n{label_descriptions}\n\n{format_guidelines}\n\n{output_guidelines}\n\n```csv",
"You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. Your job is to provide an explanation for why the output is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: ",
"Each input should fall into one of these {num_labels} categories. These are the only categories that the inputs can belong to."
] |
2024-01-10 | diego898/autolabel | tests~unit~llm_test.py | import json
from autolabel.configs import AutolabelConfig
from autolabel.models.anthropic import AnthropicLLM
from autolabel.models.openai import OpenAILLM
from autolabel.models.palm import PaLMLLM
from autolabel.models.refuel import RefuelLLM
from langchain.schema import Generation, LLMResult
from pytest import approx
################### ANTHROPIC TESTS #######################
def test_anthropic_initialization():
model = AnthropicLLM(
config=AutolabelConfig(
config="tests/assets/banking/config_banking_anthropic.json"
)
)
def test_anthropic_label(mocker):
model = AnthropicLLM(
config=AutolabelConfig(
config="tests/assets/banking/config_banking_anthropic.json"
)
)
prompts = ["test1", "test2"]
mocker.patch(
"langchain.chat_models.ChatAnthropic.generate",
return_value=LLMResult(
generations=[[Generation(text="Answers")] for _ in prompts]
),
)
x = model.label(prompts)
assert [i[0].text for i in x.generations] == ["Answers", "Answers"]
assert sum(x.costs) == approx(0.00010944, rel=1e-3)
def test_anthropic_get_cost():
model = AnthropicLLM(
config=AutolabelConfig(
config="tests/assets/banking/config_banking_anthropic.json"
)
)
example_prompt = "TestingExamplePrompt"
curr_cost = model.get_cost(example_prompt)
assert curr_cost == approx(0.03271306, rel=1e-3)
def test_anthropic_return_probs():
model = AnthropicLLM(
config=AutolabelConfig(
config="tests/assets/banking/config_banking_anthropic.json"
)
)
assert model.returns_token_probs() is False
################### ANTHROPIC TESTS #######################
################### OPENAI GPT 3.5 TESTS #######################
def test_gpt35_initialization():
model = OpenAILLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking.json")
)
def test_gpt35_label(mocker):
model = OpenAILLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking.json")
)
prompts = ["test1", "test2"]
mocker.patch(
"langchain.chat_models.ChatOpenAI.generate",
return_value=LLMResult(
generations=[[Generation(text="Answers")] for _ in prompts]
),
)
x = model.label(prompts)
assert [i[0].text for i in x.generations] == ["Answers", "Answers"]
def test_gpt35_get_cost():
model = OpenAILLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking.json")
)
example_prompt = "TestingExamplePrompt"
curr_cost = model.get_cost(example_prompt)
assert curr_cost == approx(0.002006, rel=1e-3)
def test_gpt35_return_probs():
model = OpenAILLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking.json")
)
assert model.returns_token_probs() is False
################### OPENAI GPT 3.5 TESTS #######################
################### OPENAI GPT 4 TESTS #######################
def test_gpt4_initialization():
model = OpenAILLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_gpt4.json")
)
def test_gpt4_label(mocker):
model = OpenAILLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_gpt4.json")
)
prompts = ["test1", "test2"]
mocker.patch(
"langchain.chat_models.ChatOpenAI.generate",
return_value=LLMResult(
generations=[[Generation(text="Answers")] for _ in prompts]
),
)
x = model.label(prompts)
assert [i[0].text for i in x.generations] == ["Answers", "Answers"]
assert sum(x.costs) == approx(0.00023999, rel=1e-3)
def test_gpt4_get_cost():
model = OpenAILLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_gpt4.json")
)
example_prompt = "TestingExamplePrompt"
curr_cost = model.get_cost(example_prompt)
assert curr_cost == approx(0.06009, rel=1e-3)
def test_gpt4_return_probs():
model = OpenAILLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_gpt4.json")
)
assert model.returns_token_probs() is False
################### OPENAI GPT 4 TESTS #######################
################### PALM TESTS #######################
def test_palm_initialization(mocker):
mocker.patch(
"vertexai.preview.language_models.TextGenerationModel.from_pretrained",
return_value="Test",
)
model = PaLMLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_palm.json")
)
def test_palm_label(mocker):
mocker.patch(
"vertexai.preview.language_models.TextGenerationModel.from_pretrained",
return_value="Test",
)
model = PaLMLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_palm.json")
)
prompts = ["test1", "test2"]
mocker.patch(
"langchain.llms.VertexAI.generate",
return_value=LLMResult(
generations=[[Generation(text="Answers")] for _ in prompts]
),
)
x = model.label(prompts)
assert [i[0].text for i in x.generations] == ["Answers", "Answers"]
assert sum(x.costs) == approx(2.4e-05, rel=1e-3)
def test_palm_get_cost(mocker):
mocker.patch(
"vertexai.preview.language_models.TextGenerationModel.from_pretrained",
return_value="Test",
)
model = PaLMLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_palm.json")
)
example_prompt = "TestingExamplePrompt"
curr_cost = model.get_cost(example_prompt)
assert curr_cost == approx(0.00402, rel=1e-3)
def test_palm_return_probs(mocker):
mocker.patch(
"vertexai.preview.language_models.TextGenerationModel.from_pretrained",
return_value="Test",
)
model = PaLMLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_palm.json")
)
assert model.returns_token_probs() is False
################### PALM TESTS #######################
################### REFUEL TESTS #######################
def test_refuel_initialization():
model = RefuelLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_refuel.json")
)
def test_refuel_label(mocker):
class PostRequestMockResponse:
def __init__(self, resp, status_code):
self.resp = resp
self.status_code = status_code
def json(self):
return self.resp
def raise_for_status(self):
pass
model = RefuelLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_refuel.json")
)
prompts = ["test1", "test2"]
mocker.patch(
"requests.post",
return_value=PostRequestMockResponse(
resp='{"generated_text": "Answers"}', status_code=200
),
)
x = model.label(prompts)
assert [i[0].text for i in x.generations] == ["Answers", "Answers"]
assert sum(x.costs) == 0
def test_refuel_label_non_retryable(mocker):
class PostRequestMockResponse:
def __init__(self, resp, status_code):
self.resp = resp
self.status_code = status_code
self.text = resp
def json(self):
return self.resp
def raise_for_status(self):
pass
model = RefuelLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_refuel.json")
)
prompts = ["test1", "test2"]
mocker.patch(
"requests.post",
return_value=PostRequestMockResponse(
resp='{"error_message": "Error123"}', status_code=422
),
)
x = model.label(prompts)
assert [i[0].text for i in x.generations] == ["", ""]
for error in x.errors:
assert "NonRetryable Error:" in error.error_message
assert sum(x.costs) == 0
def test_refuel_label_retryable(mocker):
class PostRequestMockResponse:
def __init__(self, resp, status_code):
self.resp = resp
self.status_code = status_code
self.text = resp
def json(self):
return self.resp
def raise_for_status(self):
pass
model = RefuelLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_refuel.json")
)
prompts = ["test1", "test2"]
mocker.patch(
"requests.post",
return_value=PostRequestMockResponse(
resp='{"error_message": "Error123"}', status_code=500
),
)
x = model.label(prompts)
assert [i[0].text for i in x.generations] == ["", ""]
for error in x.errors:
assert "NonRetryable Error:" not in error.error_message
assert sum(x.costs) == 0
def test_refuel_get_cost():
model = RefuelLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_refuel.json")
)
example_prompt = "TestingExamplePrompt"
curr_cost = model.get_cost(example_prompt)
assert curr_cost == 0
def test_refuel_return_probs():
model = RefuelLLM(
config=AutolabelConfig(config="tests/assets/banking/config_banking_refuel.json")
)
assert model.returns_token_probs() is True
################### REFUEL TESTS #######################
| [
"TestingExamplePrompt",
"['test1', 'test2']"
] |
2024-01-10 | diego898/autolabel | src~autolabel~models~refuel.py | import json
import os
import requests
import logging
from typing import List, Optional
from autolabel.models import BaseModel
from autolabel.configs import AutolabelConfig
from autolabel.cache import BaseCache
from autolabel.schema import LabelingError, ErrorType, RefuelLLMResult
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
retry_if_not_exception_type,
)
from langchain.schema import Generation
UNRETRYABLE_ERROR_CODES = [400, 422]
logger = logging.getLogger(__name__)
class UnretryableError(Exception):
"""This is an error which is unretriable from autolabel."""
class RefuelLLM(BaseModel):
DEFAULT_PARAMS = {
"max_new_tokens": 128,
}
def __init__(
self,
config: AutolabelConfig,
cache: BaseCache = None,
) -> None:
super().__init__(config, cache)
# populate model name
# the model name is interpolated into BASE_API below to
# select which hosted Refuel model endpoint is queried
self.model_name = config.model_name()
model_params = config.model_params()
self.model_params = {**self.DEFAULT_PARAMS, **model_params}
# initialize runtime
self.BASE_API = f"https://llm.refuel.ai/models/{self.model_name}/generate"
self.REFUEL_API_ENV = "REFUEL_API_KEY"
if self.REFUEL_API_ENV in os.environ and os.environ[self.REFUEL_API_ENV]:
self.REFUEL_API_KEY = os.environ[self.REFUEL_API_ENV]
else:
raise ValueError(
f"Did not find {self.REFUEL_API_ENV}, please add an environment variable"
f" `{self.REFUEL_API_ENV}` which contains it"
)
@retry(
reraise=True,
stop=stop_after_attempt(5),
wait=wait_exponential(multiplier=1, min=2, max=10),
before_sleep=before_sleep_log(logger, logging.WARNING),
retry=retry_if_not_exception_type(UnretryableError),
)
def _label_with_retry(self, prompt: str) -> requests.Response:
payload = {
"input": prompt,
"params": {**self.model_params},
"confidence": self.config.confidence(),
}
headers = {"refuel_api_key": self.REFUEL_API_KEY}
response = requests.post(self.BASE_API, json=payload, headers=headers)
# raise Exception if status != 200
if response.status_code != 200:
if response.status_code in UNRETRYABLE_ERROR_CODES:
# This is a bad request, and we should not retry
raise UnretryableError(
f"NonRetryable Error: Received status code {response.status_code} from Refuel API. Response: {response.text}"
)
logger.warning(
f"Received status code {response.status_code} from Refuel API. Response: {response.text}"
)
response.raise_for_status()
return response
def _label(self, prompts: List[str]) -> RefuelLLMResult:
generations = []
errors = []
for prompt in prompts:
try:
response = self._label_with_retry(prompt)
response = json.loads(response.json())
generations.append(
[
Generation(
text=response["generated_text"],
generation_info={
"logprobs": {"top_logprobs": response["logprobs"]}
}
if self.config.confidence()
else None,
)
]
)
errors.append(None)
except Exception as e:
# This signifies an error in generating the response using RefuelLLm
logger.error(
f"Unable to generate prediction: {e}",
)
generations.append([Generation(text="")])
errors.append(
LabelingError(
error_type=ErrorType.LLM_PROVIDER_ERROR, error_message=str(e)
)
)
return RefuelLLMResult(generations=generations, errors=errors)
def get_cost(self, prompt: str, label: Optional[str] = "") -> float:
return 0
def returns_token_probs(self) -> bool:
return True
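if __name__ == "__main__":
    # Shape of the request body built by _label_with_retry above (the input text is
    # illustrative; the real call also sends the refuel_api_key header read from the
    # environment):
    example_payload = {
        "input": "Is the following query about a lost card? ...",
        "params": dict(RefuelLLM.DEFAULT_PARAMS),
        "confidence": False,
    }
    print(json.dumps(example_payload, indent=2))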
| [] |
2024-01-10 | diego898/autolabel | src~autolabel~models~anthropic.py | from typing import List, Optional
from autolabel.configs import AutolabelConfig
from autolabel.models import BaseModel
from autolabel.cache import BaseCache
from autolabel.schema import RefuelLLMResult
from langchain.schema import HumanMessage
class AnthropicLLM(BaseModel):
DEFAULT_MODEL = "claude-instant-v1"
DEFAULT_PARAMS = {
"max_tokens_to_sample": 1000,
"temperature": 0.0,
}
# Reference: https://cdn2.assets-servd.host/anthropic-website/production/images/apr-pricing-tokens.pdf
COST_PER_PROMPT_TOKEN = {
# $11.02 per million tokens
"claude-v1": (11.02 / 1000000),
"claude-instant-v1": (1.63 / 1000000),
}
COST_PER_COMPLETION_TOKEN = {
# $32.68 per million tokens
"claude-v1": (32.68 / 1000000),
"claude-instant-v1": (5.51 / 1000000),
}
def __init__(self, config: AutolabelConfig, cache: BaseCache = None) -> None:
super().__init__(config, cache)
try:
from langchain.chat_models import ChatAnthropic
from anthropic._tokenizers import sync_get_tokenizer
except ImportError:
raise ImportError(
"anthropic is required to use the anthropic LLM. Please install it with the following command: pip install 'refuel-autolabel[anthropic]'"
)
# populate model name
self.model_name = config.model_name() or self.DEFAULT_MODEL
# populate model params
model_params = config.model_params()
self.model_params = {**self.DEFAULT_PARAMS, **model_params}
# initialize LLM
self.llm = ChatAnthropic(model=self.model_name, **self.model_params)
self.tokenizer = sync_get_tokenizer()
def _label(self, prompts: List[str]) -> RefuelLLMResult:
prompts = [[HumanMessage(content=prompt)] for prompt in prompts]
try:
result = self.llm.generate(prompts)
return RefuelLLMResult(
generations=result.generations, errors=[None] * len(result.generations)
)
except Exception as e:
return self._label_individually(prompts)
def get_cost(self, prompt: str, label: Optional[str] = "") -> float:
num_prompt_toks = len(self.tokenizer.encode(prompt).ids)
if label:
num_label_toks = len(self.tokenizer.encode(label).ids)
else:
# get an upper bound
num_label_toks = self.model_params["max_tokens_to_sample"]
cost_per_prompt_token = self.COST_PER_PROMPT_TOKEN[self.model_name]
cost_per_completion_token = self.COST_PER_COMPLETION_TOKEN[self.model_name]
return (num_prompt_toks * cost_per_prompt_token) + (
num_label_toks * cost_per_completion_token
)
def returns_token_probs(self) -> bool:
return False
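if __name__ == "__main__":
    # Rough cost sketch mirroring get_cost above without instantiating the class (so no
    # Anthropic tokenizer is needed); the 500-token prompt length is an illustrative guess.
    approx_prompt_tokens = 500
    max_completion_tokens = AnthropicLLM.DEFAULT_PARAMS["max_tokens_to_sample"]
    worst_case_cost = (
        approx_prompt_tokens * AnthropicLLM.COST_PER_PROMPT_TOKEN["claude-instant-v1"]
        + max_completion_tokens * AnthropicLLM.COST_PER_COMPLETION_TOKEN["claude-instant-v1"]
    )
    print(f"worst-case cost per prompt: ${worst_case_cost:.6f}")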
| [
"{'claude-v1': 1.102e-05, 'claude-instant-v1': 1.6299999999999999e-06}"
] |
2024-01-10 | diego898/autolabel | src~autolabel~cache~redis_cache.py | from autolabel.cache import BaseCache
from langchain.schema import Generation
from typing import Any, List
import hashlib
import json
import logging
logger = logging.getLogger(__name__)
class RedisCache(BaseCache):
"""A cache system implemented with Redis"""
def __init__(self, endpoint: str, db: int = 0):
self.endpoint = endpoint
self.db = db
def initialize(self):
try:
from redis import Redis
self.redis = Redis.from_url(self.endpoint, db=self.db)
except ImportError:
raise ImportError(
"redis is required to use the Redis Cache. Please install it with the following command: pip install redis"
)
def lookup(self, entry: Any) -> List[Generation]:
"""Retrieves an entry from the Cache. Returns an empty list [] if not found.
Args:
entry: Entry we wish to retrieve from the Cache
Returns:
result: Deserialized cache entry. None if entry not found.
"""
redis_key = entry.get_id()
if self.redis.exists(redis_key):
logger.debug("Cache hit")
generations = entry.deserialize_output(
self.redis.get(redis_key).decode("utf-8")
)
return generations
logger.debug("Cache miss")
return None
def update(self, entry: Any) -> None:
"""Inserts the provided entry into the Cache, overriding it if it already exists
Args:
entry: Entry we wish to put into the Cache
"""
redis_key = entry.get_id()
redis_value = entry.get_serialized_output()
with self.redis.pipeline() as pipe:
pipe.set(redis_key, redis_value)
pipe.expire(redis_key, entry.ttl_ms // 1000)
pipe.execute()
def clear(self) -> None:
"""Clears the entire Cache"""
self.redis.flushdb()
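if __name__ == "__main__":
    # Minimal usage sketch (assumes a Redis server listening on localhost:6379; the entry
    # type is GenerationCacheEntry from autolabel.schema, which provides the get_id() and
    # ttl_ms members used above, and the field values are illustrative):
    from autolabel.schema import GenerationCacheEntry

    cache = RedisCache("redis://localhost:6379")
    cache.initialize()
    entry = GenerationCacheEntry(
        model_name="gpt-3.5-turbo",
        prompt="Label this banking query: ...",
        model_params="{}",
        generations=[Generation(text="card_arrival")],
        ttl_ms=3_600_000,  # one hour; update() passes ttl_ms // 1000 to EXPIRE
    )
    cache.update(entry)
    print(cache.lookup(entry))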
| [] |
2024-01-10 | diego898/autolabel | src~autolabel~schema.py | from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import json
import pandas as pd
from langchain.schema import Generation
from pydantic import BaseModel
from autolabel.configs import AutolabelConfig
from autolabel.utils import calculate_md5
class ModelProvider(str, Enum):
"""Enum containing all LLM providers currently supported by autolabeler"""
OPENAI = "openai"
ANTHROPIC = "anthropic"
HUGGINGFACE_PIPELINE = "huggingface_pipeline"
REFUEL = "refuel"
GOOGLE = "google"
COHERE = "cohere"
CUSTOM = "custom"
class TaskType(str, Enum):
"""Enum containing all the types of tasks that autolabeler currently supports"""
CLASSIFICATION = "classification"
NAMED_ENTITY_RECOGNITION = "named_entity_recognition"
QUESTION_ANSWERING = "question_answering"
ENTITY_MATCHING = "entity_matching"
MULTILABEL_CLASSIFICATION = "multilabel_classification"
ATTRIBUTE_EXTRACTION = "attribute_extraction"
class FewShotAlgorithm(str, Enum):
"""Enum of supported algorithms for choosing which examples to provide the LLM in its instruction prompt"""
FIXED = "fixed"
SEMANTIC_SIMILARITY = "semantic_similarity"
MAX_MARGINAL_RELEVANCE = "max_marginal_relevance"
LABEL_DIVERSITY_RANDOM = "label_diversity_random"
LABEL_DIVERSITY_SIMILARITY = "label_diversity_similarity"
class TaskStatus(str, Enum):
ACTIVE = "active"
class MetricType(str, Enum):
"""Enum of supported performance metrics. Some metrics are always available (task agnostic), while others are only supported by certain types of tasks"""
# Task agnostic
SUPPORT = "support"
COMPLETION_RATE = "completion_rate"
# Classification metrics
ACCURACY = "accuracy"
CONFUSION_MATRIX = "confusion_matrix"
LABEL_DISTRIBUTION = "label_distribution"
F1 = "f1"
F1_MICRO = "f1_micro"
F1_MACRO = "f1_macro"
F1_WEIGHTED = "f1_weighted"
TEXT_PARTIAL_MATCH = "text_partial_match"
# Token Classification metrics
F1_EXACT = "f1_exact"
F1_STRICT = "f1_strict"
F1_PARTIAL = "f1_partial"
F1_ENT_TYPE = "f1_ent_type"
# Confidence metrics
AUROC = "auroc"
THRESHOLD = "threshold"
# Aggregate Metrics
CLASSIFICATION_REPORT = "classification_report"
class F1Type(str, Enum):
MULTI_LABEL = "multi_label"
TEXT = "text"
class MetricResult(BaseModel):
"""Contains performance metrics gathered from autolabeler runs"""
name: str
value: Any
show_running: Optional[bool] = True
class ErrorType(str, Enum):
"""Enum of supported error types"""
LLM_PROVIDER_ERROR = "llm_provider_error"
PARSING_ERROR = "parsing_error"
OUTPUT_GUIDELINES_NOT_FOLLOWED_ERROR = "output_guidelines_not_followed_error"
EMPTY_RESPONSE_ERROR = "empty_response_error"
class LabelingError(BaseModel):
"""Contains information about an error that occurred during the labeling process"""
error_type: ErrorType
error_message: str
class LLMAnnotation(BaseModel):
"""Contains label information of a given data point, including the generated label, the prompt given to the LLM, and the LLMs response. Optionally includes a confidence_score if supported by the model"""
successfully_labeled: bool
label: Any
curr_sample: Optional[bytes] = ""
confidence_score: Optional[float] = None
generation_info: Optional[Dict[str, Any]] = None
raw_response: Optional[str] = ""
explanation: Optional[str] = ""
prompt: Optional[str] = ""
error: Optional[LabelingError] = None
class Dataset(BaseModel):
"""Contains Dataset parameters, including input file path, indexes for state management (e.g. job batching and retries), and a unique ID"""
id: str
input_file: str
start_index: int
end_index: int
class Config:
orm_mode = True
@classmethod
def create_id(
self,
dataset: Union[str, pd.DataFrame],
config: AutolabelConfig,
start_index: int,
max_items: int,
) -> str:
"""
Generates a unique ID for the given Dataset configuration
Args:
dataset: either 1) input file name or 2) pandas Dataframe
config: AutolabelConfig object containing project settings
start_index: index to begin labeling job at (used for job batching, retries, state management)
max_items: number of data points to label, beginning at start_index
Returns:
            filehash: a unique ID generated from an MD5 hash of the function's parameters
"""
if isinstance(dataset, str):
filehash = calculate_md5(
[open(dataset, "rb"), config._dataset_config, start_index, max_items]
)
else:
filehash = calculate_md5(
[dataset.to_csv(), config._dataset_config, start_index, max_items]
)
return filehash
class Task(BaseModel):
id: str
task_type: TaskType
model_name: str
config: str
class Config:
orm_mode = True
@classmethod
def create_id(self, config: AutolabelConfig) -> str:
filehash = calculate_md5(config.config)
return filehash
class TaskRun(BaseModel):
id: Optional[str] = None
created_at: datetime
task_id: str
dataset_id: str
current_index: int
output_file: str
status: TaskStatus
error: Optional[str] = None
metrics: Optional[Dict[str, Any]] = None
class Config:
orm_mode = True
class Annotation(BaseModel):
id: Optional[str] = None
index: int
llm_annotation: Optional[LLMAnnotation] = None
class Config:
orm_mode = True
class GenerationCacheEntry(BaseModel):
model_name: str
prompt: str
model_params: str
generations: Optional[List[Generation]] = None
creation_time_ms: Optional[int] = -1
ttl_ms: Optional[int] = -1
class Config:
orm_mode = True
def get_id(self) -> str:
"""
Generates a unique ID for the given generation cache configuration
"""
return calculate_md5([self.model_name, self.model_params, self.prompt])
def get_serialized_output(self) -> str:
"""
Returns the serialized cache entry output
"""
return json.dumps([gen.dict() for gen in self.generations])
def deserialize_output(self, output: str) -> List[Generation]:
"""
Deserializes the cache entry output
"""
return [Generation(**gen) for gen in json.loads(output)]
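def _example_generation_cache_round_trip() -> List[Generation]:
    """A minimal usage sketch with hypothetical values: build a cache entry, derive
    its cache key, and round-trip the generations through JSON serialization.
    Assumes a Generation only needs its text field populated."""
    entry = GenerationCacheEntry(
        model_name="gpt-3.5-turbo",
        prompt="Label this example",
        model_params=json.dumps({"temperature": 0.0}),
        generations=[Generation(text="positive")],
    )
    cache_key = entry.get_id()  # md5 over model name, params and prompt (the cache lookup key)
    payload = entry.get_serialized_output()  # JSON list of generation dicts
    restored = entry.deserialize_output(payload)  # back to a List[Generation]
    return restored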
class ConfidenceCacheEntry(BaseModel):
prompt: Optional[str] = ""
raw_response: Optional[str] = ""
logprobs: Optional[list] = None
score_type: Optional[str] = "logprob_average"
creation_time_ms: Optional[int] = -1
ttl_ms: Optional[int] = -1
class Config:
orm_mode = True
def get_id(self) -> str:
"""
Generates a unique ID for the given confidence cache configuration
"""
return calculate_md5([self.prompt, self.raw_response, self.score_type])
def get_serialized_output(self) -> str:
"""
Returns the serialized cache entry output
"""
return json.dumps(self.logprobs)
def deserialize_output(self, output: str) -> Dict[str, float]:
"""
Deserializes the cache entry output
"""
return json.loads(output)
class RefuelLLMResult(BaseModel):
"""List of generated outputs. This is a List[List[]] because
each input could have multiple candidate generations."""
generations: List[List[Generation]]
"""Errors encountered while running the labeling job"""
errors: List[Optional[LabelingError]]
"""Costs incurred during the labeling job"""
costs: Optional[List[float]] = []
| [] |
2024-01-10 | diego898/autolabel | src~autolabel~tasks~classification.py | from collections import defaultdict
from typing import List, Dict, Tuple
import logging
from langchain.prompts.prompt import PromptTemplate
from sklearn.metrics import accuracy_score
from autolabel.confidence import ConfidenceCalculator
from autolabel.configs import AutolabelConfig
from autolabel.schema import LLMAnnotation, MetricType, MetricResult, ModelProvider
from autolabel.tasks import BaseTask
from autolabel.utils import get_format_variables
from autolabel.tasks.utils import filter_unlabeled_examples
from autolabel.metrics import (
AccuracyMetric,
AUROCMetric,
SupportMetric,
CompletionRateMetric,
ClassificationReportMetric,
BaseMetric,
)
import json
logger = logging.getLogger(__name__)
class ClassificationTask(BaseTask):
DEFAULT_OUTPUT_GUIDELINES = (
'You will return the answer with just one element: "the correct label"'
)
DEFAULT_TASK_GUIDELINES = "Your job is to correctly label the provided input example into one of the following {num_labels} categories.\nCategories:\n{labels}\n"
GENERATE_EXPLANATION_PROMPT = "You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. Your job is to provide an explanation for why the output is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: "
GENERATE_DATASET_TEMPLATE = "{guidelines}\n\nThe inputs must be diverse, covering a wide range of scenarios. You will not generate duplicate inputs. These inputs should be organized in rows in csv format with the columns {columns}.\n\n{label_descriptions}\n\n{format_guidelines}\n\n{output_guidelines}\n\n```csv"
DEFAULT_DATASET_GENERATION_GUIDELINES = "You are an expert at generating plausible inputs for a given task.\n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION"
LABEL_DESCRIPTIONS_PROMPT = "Each input should fall into one of these {num_labels} categories. These are the only categories that the inputs can belong to."
GENERATE_DATASET_FORMAT_GUIDELINES = "Your response should be in csv format with the following columns: {columns}.\n\nHere is a template you can follow for your output:\n```csv\n{columns}\n{example_rows}\n```\n\nMake sure to replace the placeholder variables with your own values."
GENERATE_DATASET_OUTPUT_GUIDELINES = 'Now I want you to generate {num_rows} excerpts that follow the guidelines and all belong to the "{label}" category. They should not belong to any of the other categories.'
def __init__(self, config: AutolabelConfig) -> None:
super().__init__(config)
self.metrics = [
AccuracyMetric(),
SupportMetric(),
CompletionRateMetric(),
ClassificationReportMetric(),
]
if self.config.confidence():
self.metrics.append(AUROCMetric())
for label in self.config.labels_list():
if "\n" in label:
logger.warning(
"Label contains newline character. This can have output guideline issues."
)
def construct_prompt(
self, input: Dict, examples: List, selected_labels: List[str] = None
) -> str:
# Copy over the input so that we can modify it
input = input.copy()
# prepare task guideline
labels_list = (
self.config.labels_list() if not selected_labels else selected_labels
)
num_labels = len(labels_list)
is_refuel_llm = self.config.provider() == ModelProvider.REFUEL
if is_refuel_llm:
labels = (
", ".join([f'\\"{i}\\"' for i in labels_list[:-1]])
+ " or "
+ f'\\"{labels_list[-1]}\\"'
)
else:
labels = "\n".join(labels_list)
fmt_task_guidelines = self.task_guidelines.format(
num_labels=num_labels, labels=labels
)
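        # e.g., with hypothetical labels ["positive", "negative", "neutral"], a Refuel
        # provider sees them inline as \"positive\", \"negative\" or \"neutral\", while
        # every other provider gets one label per line in the task guidelines.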
# prepare seed examples
example_template = self.config.example_template()
label_column = self.config.label_column()
fmt_examples = []
for eg in examples:
eg_copy = eg.copy()
# If chain of thought is enabled
if label_column and self.config.chain_of_thought():
eg_copy[label_column] = json.dumps({"label": eg[label_column]})
fmt_examples.append(example_template.format_map(defaultdict(str, eg_copy)))
# populate the current example in the prompt
if label_column:
input[label_column] = ""
# populate the explanation column with empty string for current example
explanation_column = self.config.explanation_column()
if explanation_column:
input[explanation_column] = ""
# populate the current example in the prompt
current_example = example_template.format_map(defaultdict(str, input))
if self._is_few_shot_mode():
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=self.output_guidelines,
seed_examples="\n".join(fmt_examples),
current_example=current_example,
)
else:
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=self.output_guidelines,
current_example=current_example,
)
def get_explanation_prompt(self, example: Dict) -> str:
pt = PromptTemplate(
input_variables=get_format_variables(self.GENERATE_EXPLANATION_PROMPT),
template=self.GENERATE_EXPLANATION_PROMPT,
)
# prepare task guideline
labels_list = self.config.labels_list()
num_labels = len(labels_list)
fmt_task_guidelines = self.task_guidelines.format(
num_labels=num_labels, labels="\n".join(labels_list)
)
# prepare labeled example
example_template = self.config.example_template()
fmt_example = example_template.format_map(defaultdict(str, example))
return pt.format(
task_guidelines=fmt_task_guidelines,
labeled_example=fmt_example,
)
def get_generate_dataset_prompt(self, label: str) -> str:
pt = PromptTemplate(
input_variables=get_format_variables(self.GENERATE_DATASET_TEMPLATE),
template=self.GENERATE_DATASET_TEMPLATE,
)
# prepare task guideline
labels_list = self.config.labels_list()
num_labels = len(labels_list)
fmt_task_guidelines = self.task_guidelines.format(
num_labels=num_labels, labels="\n".join(labels_list)
)
fmt_guidelines = self.dataset_generation_guidelines.format(
task_guidelines=fmt_task_guidelines
)
# prepare columns
columns = get_format_variables(self.config.example_template())
columns.remove(self.config.label_column())
# prepare label descriptions
fmt_label_descriptions = self.LABEL_DESCRIPTIONS_PROMPT.format(
num_labels=num_labels
)
for i, l in enumerate(labels_list):
fmt_label_descriptions += f"\n{i+1}. {l}{': ' + self.config.label_descriptions()[l] if self.config.label_descriptions() is not None and l in self.config.label_descriptions() else ''}"
# prepare format
example_rows = "\n".join(
[",".join([f'"{column}_{i+1}"' for column in columns]) for i in range(3)]
)
fmt_format_guidelines = self.GENERATE_DATASET_FORMAT_GUIDELINES.format(
columns=",".join(columns), example_rows=example_rows
)
# prepare output guidelines
fmt_output_guidelines = self.GENERATE_DATASET_OUTPUT_GUIDELINES.format(
num_rows=self.config.dataset_generation_num_rows(), label=label
)
return pt.format(
guidelines=fmt_guidelines,
columns=columns,
label_descriptions=fmt_label_descriptions,
format_guidelines=fmt_format_guidelines,
output_guidelines=fmt_output_guidelines,
)
def eval(
self,
llm_labels: List[LLMAnnotation],
gt_labels: List[str],
additional_metrics: List[BaseMetric] = [],
) -> List[MetricResult]:
"""Evaluate the LLM generated labels by comparing them against ground truth
Args:
            llm_labels (List[LLMAnnotation]): LLM-generated annotations to evaluate
            gt_labels (List[str]): ground truth labels
additional_metrics (List[BaseMetric], optional): The additional metrics to run. Defaults to [].
Returns:
List[MetricResult]: list of metrics and corresponding values
"""
eval_metrics = []
for metric in self.metrics + additional_metrics:
eval_metrics.extend(metric.compute(llm_labels, gt_labels))
return eval_metrics
| [
"Each input should fall into one of these {num_labels} categories. These are the only categories that the inputs can belong to.",
"You are an expert at providing a well reasoned explanation for the output of a given task. \n\nBEGIN TASK DESCRIPTION\n{task_guidelines}\nEND TASK DESCRIPTION\nYou will be given an input example and the corresponding output. Your job is to provide an explanation for why the output is correct for the task above.\nThink step by step and generate an explanation. The last line of the explanation should be - So, the answer is <label>.\n{labeled_example}\nExplanation: ",
"{guidelines}\n\nThe inputs must be diverse, covering a wide range of scenarios. You will not generate duplicate inputs. These inputs should be organized in rows in csv format with the columns {columns}.\n\n{label_descriptions}\n\n{format_guidelines}\n\n{output_guidelines}\n\n```csv"
] |
2024-01-10 | diego898/autolabel | src~autolabel~tasks~attribute_extraction.py | from typing import List, Dict
from collections import defaultdict
import logging
import json
import pickle
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import Generation
from autolabel.configs import AutolabelConfig
from autolabel.tasks import BaseTask
from autolabel.schema import (
LLMAnnotation,
MetricResult,
F1Type,
LabelingError,
ErrorType,
TaskType,
MetricType,
)
from autolabel.utils import get_format_variables
from autolabel.metrics import (
AccuracyMetric,
SupportMetric,
CompletionRateMetric,
AUROCMetric,
BaseMetric,
)
logger = logging.getLogger(__name__)
class AttributeExtractionTask(BaseTask):
NULL_LABEL = {}
DEFAULT_TASK_GUIDELINES = "You are an expert at extracting attributes from text. Given a piece of text, extract the required attributes."
DEFAULT_OUTPUT_GUIDELINES = "You will return the extracted attributes as a json with the following keys:\n{attribute_json}"
OUTPUT_DICT_KEY = "output_dict"
def __init__(self, config: AutolabelConfig) -> None:
super().__init__(config)
self.metrics = [
SupportMetric(),
CompletionRateMetric(),
AccuracyMetric(),
]
if self.config.confidence():
self.metrics.append(AUROCMetric())
def _construct_attribute_json(self) -> str:
"""This function is used to construct the attribute json string for the output guidelines.
        The attribute definitions are read from the task config; each needs a
        'name' and a 'description', plus an optional list of 'options'.
Returns:
str: A string containing the output attributes.
"""
output_json = {}
for attribute_dict in self.config.attributes():
if "name" not in attribute_dict or "description" not in attribute_dict:
raise ValueError(
"Attribute dictionary must contain 'name' and 'description' keys"
)
attribute_name = attribute_dict["name"]
attribute_desc = attribute_dict["description"]
if "options" in attribute_dict:
attribute_options = attribute_dict["options"]
attribute_desc += f"\nOptions:\n{','.join(attribute_options)}"
output_json[attribute_name] = attribute_desc
return json.dumps(output_json, indent=4)
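    # A worked example with a hypothetical config: attributes
    #   [{"name": "sentiment", "description": "overall tone", "options": ["positive", "negative"]}]
    # produces the output-guidelines JSON (pretty-printed with indent=4 above):
    #   {"sentiment": "overall tone\nOptions:\npositive,negative"}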
def _generate_output_dict(self, input: Dict) -> Dict:
"""Generate the output dictionary from the input
Args:
input (Dict): The input dictionary
Returns:
Dict: The output dictionary
"""
output_dict = {}
for attribute in self.config.attributes():
attribute_name = attribute["name"]
output_dict[attribute_name] = input[attribute_name]
return json.dumps(output_dict)
def construct_prompt(self, input: Dict, examples: List) -> str:
fmt_task_guidelines = self.task_guidelines
attribute_json = self._construct_attribute_json()
fmt_output_guidelines = self.output_guidelines.format(
attribute_json=attribute_json
)
# prepare seed examples
example_template = self.config.example_template()
fmt_examples = []
for eg in examples:
if self.OUTPUT_DICT_KEY not in eg:
output_dict = self._generate_output_dict(eg)
eg.update({self.OUTPUT_DICT_KEY: output_dict})
fmt_examples.append(example_template.format_map(defaultdict(str, eg)))
input[self.OUTPUT_DICT_KEY] = ""
# populate the current example in the prompt
current_example = example_template.format_map(defaultdict(str, input))
if self._is_few_shot_mode():
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=fmt_output_guidelines,
seed_examples="\n\n".join(fmt_examples),
current_example=current_example,
)
else:
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=fmt_output_guidelines,
current_example=current_example,
)
def get_explanation_prompt(self, example: Dict) -> str:
raise NotImplementedError(
"Explanation generation not implemented for this task"
)
def get_generate_dataset_prompt(
self, label: str, num_rows: int, guidelines: str = None
) -> str:
raise NotImplementedError("Dataset generation not implemented for this task")
def parse_llm_response(
self, response: Generation, curr_sample: Dict, prompt: str
) -> LLMAnnotation:
successfully_labeled = False
error = None
try:
completion_text = response.text
llm_label = {k: str(v) for k, v in json.loads(completion_text).items()}
successfully_labeled = True
except Exception as e:
logger.error(f"Error parsing LLM response: {response.text}, Error: {e}")
llm_label = self.NULL_LABEL
error = LabelingError(
error_type=ErrorType.PARSING_ERROR, error_message=str(e)
)
# TODO(rajas): Handle output guidelines not followed error (for options case)
return LLMAnnotation(
curr_sample=pickle.dumps(curr_sample),
successfully_labeled=successfully_labeled,
label=llm_label,
generation_info=response.generation_info,
raw_response=json.dumps(llm_label),
prompt=prompt,
error=error,
)
def eval(
self,
llm_labels: List[LLMAnnotation],
gt_labels: List[str],
additional_metrics: List[BaseMetric] = [],
) -> List[MetricResult]:
"""Evaluate the LLM generated labels by comparing them against ground truth"""
# Convert the llm labels into a mapping from
# name -> List[LLMAnnotation]
llm_labels_dict = defaultdict(list)
for llm_label in llm_labels:
for attribute, value in llm_label.label.items():
llm_labels_dict[attribute].append(
LLMAnnotation(
successfully_labeled=llm_label.successfully_labeled,
label=value,
raw_response=llm_label.raw_response,
curr_sample=llm_label.curr_sample,
prompt=llm_label.prompt,
error=llm_label.error,
confidence_score=llm_label.confidence_score[attribute],
)
)
eval_metrics = []
for attribute in llm_labels_dict.keys():
for metric in self.metrics + additional_metrics:
if gt_labels[attribute] is None:
continue
computed_metrics = metric.compute(
llm_labels_dict[attribute],
gt_labels[attribute],
)
for m in computed_metrics:
eval_metrics.append(
MetricResult(
name=f"{attribute}:{m.name}",
value=m.value,
)
)
return eval_metrics
| [] |
2024-01-10 | diego898/autolabel | tests~unit~transforms~test_serp_api.py | from autolabel.transforms.serp_api import SerpApi
from unittest.mock import Mock
from langchain.utilities import SerpAPIWrapper
import pytest
pytest_plugins = ("pytest_asyncio",)
@pytest.mark.asyncio
async def test_webpage_transform():
serp_api_wrapper_mock = Mock(spec=SerpAPIWrapper)
serp_api_wrapper_mock.arun.return_value = "Joe Biden"
# Initialize the transform class
transform = SerpApi(
output_columns={
"result_column": "search_result",
},
query_column="query",
serp_api_key="test_key",
cache=None,
)
transform.serp_api_wrapper = serp_api_wrapper_mock
# Create a mock row
row = {"query": "Who is the president of the United States?"}
# Transform the row
transformed_row = await transform.apply(row)
# Check the output
assert set(transformed_row.keys()) == set(["search_result"])
assert transformed_row["search_result"] == "Joe Biden"
@pytest.mark.asyncio
async def test_error_handling():
# Initialize the transform class
transform = SerpApi(
output_columns={
"result_column": "search_result",
},
query_column="query",
serp_api_key="test_key",
cache=None,
)
# Create a mock row
row = {"query": "Who is the president of the United States?"}
# Transform the row
transformed_row = await transform.apply(row)
# Check the output
assert set(transformed_row.keys()) == set(["search_result", "serp_api_error"])
assert transformed_row["search_result"] == "NO_TRANSFORM"
assert "Invalid API key" in transformed_row["serp_api_error"]
@pytest.mark.asyncio
async def test_null_query():
serp_api_wrapper_mock = Mock(spec=SerpAPIWrapper)
serp_api_wrapper_mock.arun.return_value = "Test Response"
# Initialize the transform class
transform = SerpApi(
output_columns={
"result_column": "search_result",
},
query_column="query",
serp_api_key="test_key",
cache=None,
)
transform.serp_api_wrapper = serp_api_wrapper_mock
# Create a mock row
row = {"query": transform.NULL_TRANSFORM_TOKEN}
# Transform the row
transformed_row = await transform.apply(row)
# Check the output
assert set(transformed_row.keys()) == set(["search_result", "serp_api_error"])
assert transformed_row["search_result"] == "NO_TRANSFORM"
assert (
transformed_row["serp_api_error"]
== "INVALID_INPUT: Empty query in row {'query': 'NO_TRANSFORM'}"
)
| [] |
2024-01-10 | diego898/autolabel | src~autolabel~models~cohere.py | from typing import List, Optional
import os
from autolabel.models import BaseModel
from autolabel.configs import AutolabelConfig
from autolabel.cache import BaseCache
from autolabel.schema import RefuelLLMResult
class CohereLLM(BaseModel):
    # Default parameters for CohereLLM
DEFAULT_MODEL = "command"
DEFAULT_MODEL_PARAMS = {
"max_tokens": 512,
"temperature": 0.0,
}
# Reference: https://cohere.com/pricing
COST_PER_TOKEN = 15 / 1_000_000
def __init__(self, config: AutolabelConfig, cache: BaseCache = None) -> None:
super().__init__(config, cache)
try:
import cohere
from langchain.llms import Cohere
except ImportError:
raise ImportError(
"cohere is required to use the cohere LLM. Please install it with the following command: pip install 'refuel-autolabel[cohere]'"
)
# populate model name
self.model_name = config.model_name() or self.DEFAULT_MODEL
# populate model params and initialize the LLM
model_params = config.model_params()
self.model_params = {
**self.DEFAULT_MODEL_PARAMS,
**model_params,
}
self.llm = Cohere(model=self.model_name, **self.model_params)
self.co = cohere.Client(api_key=os.environ["COHERE_API_KEY"])
def _label(self, prompts: List[str]) -> RefuelLLMResult:
try:
result = self.llm.generate(prompts)
return RefuelLLMResult(
generations=result.generations, errors=[None] * len(result.generations)
)
except Exception as e:
return self._label_individually(prompts)
def get_cost(self, prompt: str, label: Optional[str] = "") -> float:
num_prompt_toks = len(self.co.tokenize(prompt).tokens)
if label:
num_label_toks = len(self.co.tokenize(label).tokens)
else:
num_label_toks = self.model_params["max_tokens"]
return self.COST_PER_TOKEN * (num_prompt_toks + num_label_toks)
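    # e.g., a hypothetical 1,000-token prompt with no label falls back to max_tokens
    # (512), giving an estimate of (1000 + 512) * 15 / 1_000_000 ≈ $0.023.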
def returns_token_probs(self) -> bool:
return False
| [] |
2024-01-10 | diego898/autolabel | src~autolabel~tasks~named_entity_recognition.py | import json
import re
from collections import defaultdict
from typing import Dict, List, Tuple, Optional
from copy import deepcopy
from langchain.schema import Generation
from sklearn.metrics import roc_auc_score
import logging
from nervaluate import Evaluator
from autolabel.confidence import ConfidenceCalculator
from autolabel.configs import AutolabelConfig
from autolabel.schema import (
LLMAnnotation,
MetricType,
MetricResult,
LabelingError,
ErrorType,
)
from autolabel.tasks import BaseTask
from autolabel.metrics import BaseMetric
logger = logging.getLogger(__name__)
class NamedEntityRecognitionTask(BaseTask):
DEFAULT_OUTPUT_GUIDELINES = "You will return the answer in CSV format, with two columns seperated by the % character. First column is the extracted entity and second column is the category. Rows in the CSV are separated by new line character."
DEFAULT_TASK_GUIDELINES = "Your job is to extract named entities mentioned in text, and classify them into one of the following {num_labels} categories.\nCategories:\n{labels}\n "
NULL_LABEL = {}
def __init__(self, config: AutolabelConfig) -> None:
super().__init__(config)
def _json_to_llm_format(self, input_label: str) -> str:
# `label` format: {"entity type": [list of entities of this type]}
try:
labels = json.loads(input_label)
rows = []
for entity_type, detected_entites in labels.items():
for e in detected_entites:
row = "%".join([e, entity_type])
rows.append(row)
llm_formatted_label = "\n".join(rows)
return llm_formatted_label
except json.JSONDecodeError as e:
logger.error(
f"Could not parse label: {input_label}. Few-shot examples might be formatted incorrectly"
)
return input_label
def _llm_to_json_format(self, response: str):
split_response = response.split("\n")
json_output = {i: [] for i in self.config.labels_list()}
for row in split_response:
parts = row.split("%")
if len(parts) != 2 or parts[1] not in json_output.keys():
logger.debug(f"Malformed LLM response: {row}")
continue
named_entity = parts[0]
category = parts[1]
json_output[category].append(named_entity)
return json_output
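    # e.g., with hypothetical labels ["PERSON", "LOCATION"], an LLM response of
    #   "Alice%PERSON\nParis%LOCATION\nmalformed row"
    # becomes {"PERSON": ["Alice"], "LOCATION": ["Paris"]}; rows that do not split into
    # exactly two parts with a known category are logged at debug level and skipped.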
def construct_prompt(self, input: Dict, examples: List) -> str:
# prepare task guideline
labels_list = self.config.labels_list()
num_labels = len(labels_list)
fmt_task_guidelines = self.task_guidelines.format(
num_labels=num_labels, labels="\n".join(labels_list)
)
# prepare seed examples
label_column = self.config.label_column()
example_template = self.config.example_template()
fmt_examples = []
for eg in examples:
eg_copy = deepcopy(eg)
if label_column:
eg_copy[label_column] = self._json_to_llm_format(eg_copy[label_column])
fmt_examples.append(example_template.format_map(defaultdict(str, eg_copy)))
# populate the current example in the prompt
if label_column:
input[label_column] = ""
# populate the explanation column with empty string for current example
explanation_column = self.config.explanation_column()
if explanation_column:
input[explanation_column] = ""
# populate the current example in the prompt
current_example = example_template.format_map(defaultdict(str, input))
if self._is_few_shot_mode():
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=self.output_guidelines,
seed_examples="\n\n".join(fmt_examples),
current_example=current_example,
)
else:
return self.prompt_template.format(
task_guidelines=fmt_task_guidelines,
output_guidelines=self.output_guidelines,
current_example=current_example,
)
def get_explanation_prompt(self, example: Dict) -> str:
raise NotImplementedError(
"Explanation generation not implemented for this task"
)
def get_generate_dataset_prompt(
self, label: str, num_rows: int, guidelines: str = None
) -> str:
raise NotImplementedError("Dataset generation not implemented for this task")
def add_text_spans(self, raw_output: dict, input: str) -> list:
processed_output = []
for entity_type in raw_output:
for curr_entity in raw_output[entity_type]:
processed_output.append({"type": entity_type, "text": curr_entity})
# create a frequency dict of each named entity in the input to determine text spans for repeated entities
frequency_count = {label["text"]: 0 for label in processed_output}
for label in processed_output:
text = label["text"]
matches = [i.start() for i in re.finditer(text, input)]
count = frequency_count[text]
# if count of the named entity is greater than the number of matches, default to last found match
if count >= len(matches):
count = -1
            # if no occurrence of named entity in input, default text span to start: -1, end: -1
if len(matches) == 0:
label["start"] = -1
label["end"] = -1
else:
label["start"] = matches[count]
label["end"] = matches[count] + len(text)
frequency_count[text] += 1
return processed_output
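    # e.g., for a hypothetical raw_output of {"PER": ["Alice", "Alice"]} and the input
    # "Alice met Alice.", the repeated mention resolves to successive occurrences:
    #   [{"type": "PER", "text": "Alice", "start": 0, "end": 5},
    #    {"type": "PER", "text": "Alice", "start": 10, "end": 15}]
    # entities never found in the input fall back to start == end == -1.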
def parse_llm_response(
self, response: Generation, curr_sample: Dict, prompt: str
) -> LLMAnnotation:
output = {}
successfully_labeled = False
error = None
text_column = self.config.text_column()
input_str = curr_sample[text_column]
try:
completion_text = response.text
output = self._llm_to_json_format(completion_text.strip())
llm_label = self.add_text_spans(output, input_str)
except Exception as e:
logger.error(f"Error parsing LLM response: {response.text}, Error: {e}")
llm_label = self.NULL_LABEL
            error = LabelingError(error_type=ErrorType.PARSING_ERROR, error_message=str(e))
successfully_labeled = False if llm_label == self.NULL_LABEL else True
# TODO: parse generation info correctly to fetch & transform logprobs -> score
return LLMAnnotation(
curr_sample=input_str,
successfully_labeled=successfully_labeled,
label=llm_label,
generation_info=response.generation_info,
raw_response=response.text,
prompt=prompt,
error=error,
)
def auroc_score_labels(
self, gt_labels, llm_labels_with_conf
) -> Tuple[List[int], List[float]]:
labels = []
confidences = []
for index, pred_entities in enumerate(llm_labels_with_conf):
gt_entities = gt_labels[index]
pred_conf = pred_entities[0]["conf"] if len(pred_entities) > 0 else 0
for gt_entity in gt_entities:
match_found = False
pred_index = 0
while not match_found and pred_index < len(pred_entities):
curr_match = True
for key in gt_entity:
if gt_entity[key] != pred_entities[pred_index][key]:
curr_match = False
if curr_match:
match_found = True
pred_index += 1
labels.append(int(match_found))
confidences.append(pred_conf)
return labels, confidences
def get_labels_predictions_with_threshold(self, gt_labels, llm_labels, threshold):
answered_gt_labels, answered_llm_preds = [], []
for index, l in enumerate(llm_labels):
if l.successfully_labeled and (
l.confidence_score is None or l.confidence_score >= threshold
):
answered_gt_labels.append(
[{**entity, "label": entity["type"]} for entity in gt_labels[index]]
)
answered_llm_preds.append(
[
{
**entity,
"label": entity["type"],
"conf": l.confidence_score,
}
for entity in l.label
],
)
return answered_gt_labels, answered_llm_preds
def run_metrics(
self,
answered_gt_labels,
answered_llm_preds,
entity_types_set,
) -> List[MetricResult]:
eval_metrics = []
evaluator = Evaluator(
answered_gt_labels, answered_llm_preds, tags=entity_types_set
)
results, _ = evaluator.evaluate()
# f1 score for exact match
eval_metrics.append(
MetricResult(
name=MetricType.F1_EXACT,
value=results["exact"]["f1"],
)
)
# f1 score for strict match
eval_metrics.append(
MetricResult(
name=MetricType.F1_STRICT,
value=results["strict"]["f1"],
)
)
# f1 score for partial match
eval_metrics.append(
MetricResult(
name=MetricType.F1_PARTIAL,
value=results["partial"]["f1"],
)
)
# f1 score for entity type match
eval_metrics.append(
MetricResult(
name=MetricType.F1_ENT_TYPE,
value=results["ent_type"]["f1"],
)
)
# accuracy
accuracy = (
results.get("strict").get("correct")
/ (results.get("strict").get("possible"))
if results.get("strict").get("possible") > 0
else 0.0
)
eval_metrics.append(
MetricResult(
name=MetricType.ACCURACY,
value=accuracy,
)
)
if self.config.confidence():
match, confidences = self.auroc_score_labels(
answered_gt_labels, answered_llm_preds
)
auroc = roc_auc_score(match, confidences)
eval_metrics.append(
MetricResult(
name=MetricType.AUROC,
value=auroc,
)
)
return eval_metrics
def eval(
self,
llm_labels: List[LLMAnnotation],
gt_labels: List[str],
additional_metrics: Optional[List[BaseMetric]] = [],
) -> List[MetricResult]:
"""Evaluate the LLM generated labels by comparing them against ground truth
Args:
            llm_labels (List[LLMAnnotation]): LLM-generated annotations to evaluate
            gt_labels (List[str]): ground truth labels as JSON-encoded entity mappings
Returns:
List[MetricResult]: list of metrics and corresponding values
"""
gt_labels = [
self.add_text_spans(
json.loads(gt_labels[index]), llm_labels[index].curr_sample.decode()
)
for index in range(len(gt_labels))
]
(
curr_gt_labels,
curr_llm_labels,
) = self.get_labels_predictions_with_threshold(
gt_labels, llm_labels, float("-inf")
)
entity_types_set = list(
set(
[
gt_entity.get("label")
for gt_label in curr_gt_labels
for gt_entity in gt_label
]
)
)
eval_metrics = []
eval_metrics.append(
MetricResult(
name=MetricType.SUPPORT,
value=len(gt_labels),
)
)
eval_metrics.append(
MetricResult(
name=MetricType.COMPLETION_RATE,
value=len(curr_llm_labels) / float(len(gt_labels))
if len(gt_labels) > 0
else 0.0,
)
)
curr_threshold_metrics = self.run_metrics(
curr_gt_labels,
curr_llm_labels,
entity_types_set,
)
eval_metrics.extend(curr_threshold_metrics)
return eval_metrics
| [] |
2024-01-10 | cyrilvincent/DL | DL20-09-openai.py | from openai import OpenAI
with open("data/chatgpt/openai.env") as f:
key = f.read()
client = OpenAI(api_key=key)
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Tu es un formateur en informatique"},
{"role": "user", "content": "Expliques moi ce qu'est le language Python ?"}
]
)
res = completion.choices[0].message
print(res.content)
| [
"Expliques moi ce qu'est le language Python ?",
"Tu es un formateur en informatique"
] |
2024-01-10 | cyrilvincent/DL | DL20-13-openai-mp3.py | from openai import OpenAI
with open("data/chatgpt/openai.env") as f:
key = f.read()
with open("data/chatgpt/python.txt") as f:
text = f.read()
print(text)
client = OpenAI(api_key=key)
response = client.audio.speech.create(
model="tts-1",
voice="alloy",
input=text[:4096]
)
response.stream_to_file("data/chatgpt/out.mp3")
| [] |
2024-01-10 | cyrilvincent/DL | DL20-12-openai-dall-e.py | from openai import OpenAI
with open("data/chatgpt/openai.env") as f:
key = f.read()
client = OpenAI(api_key=key)
response = client.images.generate(
model="dall-e-3",
prompt="Un python autour d'un ordinateur des années 80",
size="1024x1024",
quality="standard",
n=1,
)
image_url = response.data[0].url
print(image_url)
| [] |
2024-01-10 | cyrilvincent/DL | DL20-10-openai-chatgpt.py | from openai import OpenAI
with open("data/chatgpt/openai.env") as f:
key = f.read()
with open("data/chatgpt/python.txt") as f:
text = f.read()
print(text)
client = OpenAI(api_key=key)
completion = client.chat.completions.create(model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Fais moi un résumé en 5 parties de ce text"},
{"role": "user", "content": text}
]
)
res = completion.choices[0].message
print(res.content)
| [
"Fais moi un résumé en 5 parties de ce text"
] |
2024-01-10 | cyrilvincent/DL | DL20-11-openai-code.py | from openai import OpenAI
with open("data/chatgpt/openai.env") as f:
key = f.read()
with open("DL08-06-mlp.py") as f:
text = f.read()
print(text)
client = OpenAI(api_key=key)
completion = client.chat.completions.create(model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Voici un code Python, merci de me l'expliquer de manière concise"},
{"role": "user", "content": text}
]
)
res = completion.choices[0].message
print(res.content)
| [
"Voici un code Python, merci de me l'expliquer de manière concise"
] |
2024-01-10 | satorioh/regulus-chatbot | zilliz.py | import os
from pymilvus import Collection, connections
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Milvus
from utils import (
get_abs_path,
convert_to_2d
)
from config.global_config import (
OPENAI_API_KEY,
OPENAI_API_BASE,
ZILLIZ_CLOUD_URI,
ZILLIZ_CLOUD_USERNAME,
ZILLIZ_CLOUD_PASSWORD,
ZILLIZ_CLOUD_COLLECTION_NAME
)
source_folder = get_abs_path('laws')
def add_embeddings(fold_path):
loader = DirectoryLoader(fold_path, glob='**/*.md')
docs = loader.load()
print(f"{fold_path} 已成功加载")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
    # split the loaded documents
print("start split docs...")
split_docs = text_splitter.split_documents(docs)
print("split docs finished")
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY,
openai_api_base=OPENAI_API_BASE,
show_progress_bar=True)
vector_db = Milvus(
embeddings,
collection_name=ZILLIZ_CLOUD_COLLECTION_NAME,
connection_args={
"uri": ZILLIZ_CLOUD_URI,
"user": ZILLIZ_CLOUD_USERNAME,
"password": ZILLIZ_CLOUD_PASSWORD,
"secure": True,
},
)
convert_docs = convert_to_2d(split_docs, 8)
convert_docs_len = len(convert_docs)
for index, docs in enumerate(convert_docs):
try:
vector_db.add_documents(docs)
print(f"init vector index group {index + 1} finished, remain {convert_docs_len - index - 1}")
except Exception as e:
print(f"init vector index error: {e}")
source_name = [os.path.split(doc.metadata['source'])[1] for doc in docs]
print(','.join(set(source_name)))
def get_collection_detail():
connections.connect(
uri=ZILLIZ_CLOUD_URI,
user=ZILLIZ_CLOUD_USERNAME,
password=ZILLIZ_CLOUD_PASSWORD,
port='19530'
)
collection = Collection(ZILLIZ_CLOUD_COLLECTION_NAME)
print(collection)
print(collection.num_entities)
get_collection_detail()
# add_embeddings(source_folder)
| [] |
2024-01-10 | danieldeweerd/rp-meetingmastery | analysis~inference.py | import os
import openai
import replicate
import requests
from dotenv import load_dotenv
models = ["gpt-2", "gpt-3.5-turbo", "flan-t5-xxl"]
load_dotenv()
openai.api_key = os.environ.get("OPENAI_API_KEY")
huggingface_api_key = os.environ.get("HUGGINGFACE_API_KEY")
urls = {
"flan-t5-small": "replicate/flan-t5-small:69716ad8c34274043bf4a135b7315c7c569ec931d8f23d6826e249e1c142a264",
"flan-t5-xxl": "replicate/flan-t5-xl:7a216605843d87f5426a10d2cc6940485a232336ed04d655ef86b91e020e9210",
"flan-t5-base": "https://api-inference.huggingface.co/models/google/flan-t5-base",
"gpt-2": "https://api-inference.huggingface.co/models/gpt2",
"vicuna-13b": "replicate/vicuna-13b:e6d469c2b11008bb0e446c3e9629232f9674581224536851272c54871f84076e",
"oasst-pythia-12b": "replicate/oasst-sft-1-pythia-12b:28d1875590308642710f46489b97632c0df55adb5078d43064e1dc0bf68117c3",
"stablelm-7b": "stability-ai/stablelm-tuned-alpha-7b:c49dae362cbaecd2ceabb5bd34fdb68413c4ff775111fea065d259d577757beb",
"dolly-12b": "replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5",
"dolly-gptj": "cjwbw/dolly:fe699f6290c55cb6bac0f023f5d88a8faba35e2761954e4e0fa030e2fdecafea",
"helloworld-0b": "replicate/hello-world:5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa",
"alpaca-lora-7b": "https://api-inference.huggingface.co/models/chainyo/alpaca-lora-7b"
}
openai_chat_completions = ["gpt-3.5-turbo"]
openai_completions = ["text-davinci-003", "text-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]
replicate_models = ["vicuna-13b", "flan-t5-small", "flan-t5-xxl", "oasst-pythia-12b", "stablelm-7b", "dolly-12b",
"dolly-gptj"]
headers = {"Authorization": "Bearer {}".format(huggingface_api_key)}
def query(prompt, model_name="gpt-3.5-turbo", debug=False, max_length=1000):
"""
Utility function to query models from the OpenAI and HuggingFace APIs.
:param prompt: The prompt to query the model with.
:param model_name: The name of the model to query.
:return: The response from the model.
"""
if type(prompt) == str and model_name not in replicate_models:
prompt = [prompt]
if model_name in openai_chat_completions:
messages = [{"role": "user", "content": p} for p in prompt]
response = openai.ChatCompletion.create(model=model_name, messages=messages)
if debug:
print(response)
return response['choices'][0]['message']['content']
elif model_name in openai_completions:
response = openai.Completion.create(model=model_name, prompt=prompt, max_tokens=max_length)
if debug:
print(response)
return response['choices'][0]['text']
elif model_name in replicate_models:
response = ""
iterator = replicate.run(
urls[model_name],
input={"prompt": prompt, "max_length": max_length}
)
for item in iterator:
response += item
return response
else:
url = urls[model_name]
options = {
"use_cache": False,
"wait_for_model": False
}
response = requests.post(url, headers=headers, json={"inputs": prompt, "options": options})
if debug or True:
print(response.json())
return response.json()[0][0]['generated_text']
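# A rough usage sketch with hypothetical prompts: query("Summarize this meeting", model_name="gpt-3.5-turbo")
# returns the chat completion text, while query(..., model_name="vicuna-13b") streams chunks
# from Replicate and returns their concatenation.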
def cola(phrase):
response = requests.post("https://api-inference.huggingface.co/models/textattack/roberta-base-CoLA",
headers=headers, json={"inputs": phrase})
response = response.json()
if type(response) == dict and response["error"] is not None and "Input is too long" in response["error"]:
return False
try:
return response[0][0]["label"] == "LABEL_1"
except KeyError:
print("Exited due to KeyError. Response:")
print(response)
exit()
def continuous_cola(phrase):
response = requests.post("https://api-inference.huggingface.co/models/textattack/roberta-base-CoLA",
headers=headers, json={"inputs": phrase})
response = response.json()
if type(response) == dict and response["error"] is not None and "Input is too long" in response["error"]:
return False
score_0 = response[0][0]
score_1 = response[0][1]
if score_0["label"] == "LABEL_1":
return score_0["score"]
else:
return score_1["score"]
# return response[0][0]["label"] == "LABEL_1"
def prettify(phrase):
prompt = "Add proper capitalization and capitalization to the following phrase: {}".format(phrase)
return query(prompt, model_name="gpt-3.5-turbo")
def classify_expression(thesis, expression, model_name="gpt-3.5-turbo", batch_size=10):
template = """
You are given a thesis and an expression. Output 'A' if the expression agrees with the thesis. Output 'D' if the expression disagrees with the thesis.
Output 'O' if the expression neither agrees nor disagrees with the thesis. Limit your output to the single character.
Thesis: {}
Expression: {}
"""
prompt = template.format(thesis, expression)
response = query(prompt, model_name=model_name)
return response
| [
"Add proper capitalization and capitalization to the following phrase: PLACEHOLDER",
"\n You are given a thesis and an expression. Output 'A' if the expression agrees with the thesis. Output 'D' if the expression disagrees with the thesis. \n Output 'O' if the expression neither agrees nor disagrees with the thesis. Limit your output to the single character.\n\n Thesis: PLACEHOLDER\n Expression: PLACEHOLDER\n ",
"\n You are given a thesis and an expression. Output 'A' if the expression agrees with the thesis. Output 'D' if the expression disagrees with the thesis. \n Output 'O' if the expression neither agrees nor disagrees with the thesis. Limit your output to the single character.\n\n Thesis: {}\n Expression: {}\n "
] |
2024-01-10 | danieldeweerd/rp-meetingmastery | analysis~ai_utils.py | import json
import os
import numpy as np
import openai
import requests
import tiktoken
from sentence_transformers import SentenceTransformer
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ.get("OPENAI_API_KEY")
huggingface_api_key = os.environ.get("HUGGINGFACE_API_KEY")
def get_number_of_tokens(string):
string = string.replace("\n", " ")
enc = tiktoken.get_encoding("cl100k_base")
return len(enc.encode(string))
def get_embedding(string, as_numpy=True, use_ada=False):
if use_ada:
return get_ada_embedding(string)
else:
return get_minilm_embedding(string, as_numpy=as_numpy)
def get_minilm_embedding(string, as_numpy=True):
string = string.replace("\n", " ")
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
embedding = model.encode([string])[0]
return np.array(embedding) if as_numpy else embedding
def get_minilm_embedding_batch(strings):
for i, string in enumerate(strings):
strings[i] = string.replace("\n", " ")
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
return model.encode(strings)
def get_ada_embedding(string, use_numpy=True):
string = string.replace("\n", " ")
embedding = openai.Embedding.create(input=[string], model="text-embedding-ada-002")['data'][0]['embedding']
return np.array(embedding) if use_numpy else embedding
def get_ada_embedding_batch(strings):
for i, string in enumerate(strings):
strings[i] = string.replace("\n", " ")
embeddings = openai.Embedding.create(input=strings, model="text-embedding-ada-002")
return _ada_embeddings_to_matrix(embeddings)
def _ada_embeddings_to_matrix(embeddings, m=1536):
embeddings = embeddings["data"]
n = len(embeddings)
matrix = np.zeros((n, m))
for i, embedding in enumerate(embeddings):
matrix[i] = embedding["embedding"]
return matrix
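# e.g., a batch of 3 strings comes back as a (3, 1536) numpy matrix, one
# text-embedding-ada-002 vector per row (1536 being that model's embedding size).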
def classify_expression(thesis, expression):
template = """
    You are given a thesis and an expression. Output 'A' if the expression agrees with the thesis. Output 'D' if the expression disagrees with the thesis. 
Output 'O' if the expression neither agrees nor disagrees with the thesis. Limit your output to the single character.
Thesis: {}
Expression: {}
"""
prompt = template.format(thesis, expression)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}])
return response['choices'][0]['message']['content']
| [
"\n You are given a thesis and an expression. Output 'D' if the expression agrees with the thesis. Output 'D' if the expression disagrees with the thesis. \n Output 'O' if the expression neither agrees nor disagrees with the thesis. Limit your output to the single character.\n\n Thesis: {}\n Expression: {}\n ",
"\n You are given a thesis and an expression. Output 'D' if the expression agrees with the thesis. Output 'D' if the expression disagrees with the thesis. \n Output 'O' if the expression neither agrees nor disagrees with the thesis. Limit your output to the single character.\n\n Thesis: PLACEHOLDER\n Expression: PLACEHOLDER\n "
] |
2024-01-10 | raju-mechatronics/comment_analyzer_with_dockerpython | app~youtube~caption.py | import os
import openai
from youtube_transcript_api import YouTubeTranscriptApi
# set openai api key
openai.api_key = os.getenv("chat_gpt_api_key")
def getTranscript(videoId):
# assigning srt variable with the list
# of dictionaries obtained by the get_transcript() function
srt = YouTubeTranscriptApi.get_transcript(videoId)
return "\n".join([s["text"] for s in srt])
# summerize text with chatgpt
def summarize_text(text):
try:
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"""The following is a section of the transcript of a youtube video:
{text}
Summarize this section of the transcript.""",
max_tokens=500,
temperature=0.2,
frequency_penalty=0,
)
return response.choices[0].text
except:
return None
def get_video_summary(video_id):
captions = getTranscript(video_id)
if captions:
summary = summarize_text(captions)
return summary
return "No captions found for this video."
#
#
# diagnostics = 0
# include_mentions = 0
#
#
# def get_chunks_from_youtube(video_id):
# transcript = YouTubeTranscriptApi.get_transcript(video_id)
# chunks = []
# start_timestamp = 0.0
# current_timestamp_mins = 0.0
# current_chunk = []
# for entry in transcript:
# current_timestamp_mins = entry["start"] / 60.0
# if current_timestamp_mins - start_timestamp > 10:
# chunks.append(current_chunk)
# start_timestamp = current_timestamp_mins
# current_chunk = []
#
# current_chunk.append(entry["text"])
#
# if len(current_chunk) > 0:
# chunks.append(current_chunk)
#
# print(f"Found {len(chunks)} chunks")
#
# return chunks
#
#
# def summarize_chunk(index, chunk):
# chunk_str = "\n".join(chunk)
# prompt = f"""The following is a section of the transcript of a youtube video. It is section #{index+1}:
# {chunk_str}
# Summarize this section of the transcript."""
#
# if diagnostics:
# for line in prompt.split("\n"):
# print(f"# {line}")
#
# completion = openai.Completion.create(
# engine="text-davinci-003",
# max_tokens=500,
# temperature=0.2,
# prompt=prompt,
# frequency_penalty=0,
# )
#
# msg = completion.choices[0].text
#
# if diagnostics:
# print(f"# Response: {msg}")
#
# return msg
#
#
# def summarize_the_summaries(summaries):
# summaries_str = ""
# for index, summary in enumerate(summaries):
# summaries_str += f"Summary of chunk {index+1}:\n{summary}\n\n"
#
# prompt = f"""The following are summaries of a youtube video in 10 minute chunks:"
# {summaries_str}
# Summarize the summaries."""
#
# if diagnostics:
# # print each line of the prompt with a leading # so we can see it in the output
# for line in prompt.split("\n"):
# print(f"# {line}")
#
# completion = openai.Completion.create(
# engine="text-davinci-003",
# max_tokens=500,
# temperature=0.2,
# prompt=prompt,
# frequency_penalty=0,
# )
#
# msg = completion.choices[0].text
#
# if diagnostics:
# print(f"# Response: {msg}")
#
# return msg
| [
"The following is a section of the transcript of a youtube video:\n PLACEHOLDER\n Summarize this section of the transcript."
] |
2024-01-10 | davidgonghs/NLP-assignment | temp.py | # =============================================================================
# Keep history of chat
# =============================================================================
import requests
import json
from datetime import datetime
chat_history = []
def get_msg_json(sender, msg, analysis):
return {
"sender": sender,
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"message": msg,
"analysis": analysis
}
def save_chat_history(chat_history):
if not os.path.exists("chat_history"):
os.makedirs("chat_history")
# Create a file name with timestamp
json_file_name = "chat_history.json"
file_path = os.path.join("chat_history", json_file_name)
# Write chat history and knowledge to file
with open(file_path, "w") as f:
f.write("Chat history:\n")
for line in chat_history:
f.write(json.dumps(line) + "\n")
print(f"Chat history and knowledge saved to {file_path}")
def store(text_string):
requests.get('https://script.google.com/macros/s/AKfycby_GWM_nX0lKsXkVgmGAFiSDWkWEZbC6BZ0MVUgzMghLrOfnxzYwb6cfcm1-Hv9RS2X/exec?data='+text_string)
# =============================================================================
# Prepositional Phrase-attachment resolution
# =============================================================================
from nltk import pos_tag, word_tokenize
def parse(text_string):
return(pos_tag(word_tokenize(text_string)))
# =============================================================================
# Anaphoric resolution
# =============================================================================
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.tag import StanfordNERTagger
import os
# java_path = "C:/Program Files/Java/jdk-20/bin/java.exe"
# os.environ['JAVAHOME'] = java_path
# st = StanfordNERTagger('C:/Users/USER/Desktop/NLP files/stanford-ner-2020-11-17/classifiers/english.all.3class.distsim.crf.ser.gz',
# 'C:/Users/USER/Desktop/NLP files/stanford-ner-2020-11-17/stanford-ner.jar',
# encoding='utf-8')
anaphoraStr=""
def anaphora(text_string):
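    """Heuristic anaphora resolution: walks the text sentence by sentence, collects
    PERSON / ORGANIZATION / LOCATION entities (merging multi-token names), maps
    pronouns such as 'he', 'they' or place references back to the most recent
    matching entities, and returns the referents found for the last sentence.
    Note that the tags checked below come from pos_tag, which emits Penn Treebank
    tags, so the entity branches only fire when an NER tagger (like the
    commented-out StanfordNERTagger above) supplies PERSON/ORGANIZATION/LOCATION labels."""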
tokenized_text = sent_tokenize(text_string)
tokenized_tags_list=[]
new_text_string=''
anaphora_p_sing=['he','she','his','her','He','She','His','Her']
anaphora_p_plural=['they','their','They','Their']
anaphora_l=['this place','that place','the place','there,','there.','This place','That place','The place','There']
person_list_last=[]
person_list_sentence=[]
org_list_last=[]
org_list_sentence=[]
loc_list_last=[]
loc_list_sentence=[]
coreference_list=[]
coreference_sol=[]
flag=0
flag_org=0
flag_loc=0
lastAnalysis=0
for i in range(0,len(tokenized_text)):
print(tokenized_text[i])
tokenized_text1 = word_tokenize(tokenized_text[i])
classified_text = pos_tag(tokenized_text1)
for j in range(0,len(tokenized_text1)):
a=tokenized_text1[j]
b=classified_text[j][1]
if b=='PERSON':
if flag==0:
if classified_text[j][0] in coreference_list:
for k in range(0,len(coreference_list)):
if coreference_list[k]==classified_text[j][0]:
if coreference_sol[k] in person_list_sentence:
pass
else:
person_list_sentence.append(coreference_sol[k])
break
else:
try:
if classified_text[j+1][1]=='PERSON':
flag=1
full_name=classified_text[j][0]+' '
else:
if classified_text[j][0] in person_list_sentence:
pass
else:
person_list_sentence.append(classified_text[j][0])
except:
if classified_text[j][0] in person_list_sentence:
pass
else:
person_list_sentence.append(classified_text[j][0])
else:
full_name+=classified_text[j][0]+' '
try:
if classified_text[j+1][1]=='PERSON':
flag=1
else:
flag=0
full_name=full_name[0:len(full_name)-1]
if full_name in person_list_sentence:
pass
else:
person_list_sentence.append(full_name)
if full_name not in coreference_sol:
individual=word_tokenize(full_name)
for k in individual:
coreference_list.append(k)
coreference_sol.append(full_name)
except:
flag=0
full_name=full_name[0:len(full_name)-1]
person_list_sentence.append(full_name)
individual=word_tokenize(full_name)
for k in individual:
coreference_list.append(k)
coreference_sol.append(full_name)
elif b=='ORGANIZATION':
if flag_org==0:
if classified_text[j][0] in coreference_list:
for k in range(0,len(coreference_list)):
if coreference_list[k]==classified_text[j][0]:
if coreference_sol[k] in org_list_sentence:
pass
else:
org_list_sentence.append(coreference_sol[k])
break
try:
if classified_text[j+1][1]=='ORGANIZATION':
flag_org=1
full_name=classified_text[j][0]+' '
else:
if classified_text[j][0] in org_list_sentence:
pass
else:
org_list_sentence.append(classified_text[j][0])
except:
if classified_text[j][0] in org_list_sentence:
pass
else:
org_list_sentence.append(classified_text[j][0])
else:
full_name+=classified_text[j][0]+' '
try:
if classified_text[j+1][1]=='ORGANIZATION':
flag_org=1
else:
flag_org=0
full_name=full_name[0:len(full_name)-1]
if full_name in org_list_sentence:
pass
else:
org_list_sentence.append(full_name)
if full_name not in coreference_sol:
individual=word_tokenize(full_name)
for k in individual:
coreference_list.append(k)
coreference_sol.append(full_name)
except:
flag_org=0
full_name=full_name[0:len(full_name)-1]
org_list_sentence.append(full_name)
individual=word_tokenize(full_name)
for k in individual:
coreference_list.append(k)
coreference_sol.append(full_name)
elif b=='LOCATION':
if flag_loc==0:
if classified_text[j][0] in coreference_list:
for k in range(0,len(coreference_list)):
if coreference_list[k]==classified_text[j][0]:
if coreference_sol[k] in loc_list_sentence:
pass
else:
loc_list_sentence.append(coreference_sol[k])
break
try:
if classified_text[j+1][1]=='LOCATION':
flag_loc=1
full_name=classified_text[j][0]+' '
else:
if classified_text[j][0] in loc_list_sentence:
pass
else:
loc_list_sentence.append(classified_text[j][0])
except:
if classified_text[j][0] in org_list_sentence:
pass
else:
loc_list_sentence.append(classified_text[j][0])
else:
full_name+=classified_text[j][0]+' '
try:
if classified_text[j+1][1]=='LOCATION':
flag_loc=1
else:
flag_loc=0
full_name=full_name[0:len(full_name)-1]
if full_name in loc_list_sentence:
pass
else:
loc_list_sentence.append(full_name)
if full_name not in coreference_sol:
individual=word_tokenize(full_name)
for k in individual:
coreference_list.append(k)
coreference_sol.append(full_name)
except:
flag_loc=0
full_name=full_name[0:len(full_name)-1]
loc_list_sentence.append(full_name)
individual=word_tokenize(full_name)
for k in individual:
coreference_list.append(k)
coreference_sol.append(full_name)
else:
pass
a=[]
for l in anaphora_p_sing:
if l in tokenized_text1:
if len(person_list_sentence)!=0:
if person_list_sentence[len(person_list_sentence)-1] not in a:
a.append(person_list_sentence[len(person_list_sentence)-1])
else:
try:
a.append(person_list_last[0])
if person_list_last[0] not in person_list_sentence:
person_list_sentence.append(person_list_last[0])
except:
pass
for l in anaphora_p_plural:
if l in tokenized_text1:
if len(person_list_sentence)>1:
for m in person_list_sentence:
if m not in a:
a.append(m)
elif len(person_list_sentence)==1:
if person_list_sentence[0] not in a:
a.append(person_list_sentence[0])
try:
for n in person_list_last:
if n not in a:
a.append(n)
person_list_sentence.append(n)
except:
pass
else:
try:
for n in person_list_last:
if n not in a:
a.append(n)
person_list_sentence.append(n)
except:
pass
for l in anaphora_l:
if l in tokenized_text1 and ',' in tokenized_text1:
if len(loc_list_sentence)!=0:
if loc_list_sentence[len(loc_list_sentence)-1] not in a:
a.append(loc_list_sentence[len(loc_list_sentence)-1])
else:
try:
a.append(loc_list_last[0])
if loc_list_last[0] not in loc_list_sentence:
loc_list_sentence.append(loc_list_last[0])
except:
pass
if len(org_list_sentence)!=0:
if org_list_sentence[len(org_list_sentence)-1] not in a:
a.append(org_list_sentence[len(org_list_sentence)-1])
else:
try:
a.append(org_list_last[0])
if org_list_last[0] not in org_list_sentence:
org_list_sentence.append(org_list_last[0])
except:
pass
lastAnalysis = a
person_list_last=person_list_sentence
person_list_sentence=[]
org_list_last=org_list_sentence
org_list_sentence=[]
loc_list_last=person_list_sentence
loc_list_sentence=[]
return (lastAnalysis)
# chat GPT INIT
import os
import openai
openai.organization = "org-gNE9aKQ0Pj4OO0a5rJVbYoRt"
openai.api_key = "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"
openai.Model.list()
# Query the user for their name and remember their name
print("What is your name?")
namePrompt = input()
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": 'My name is '+namePrompt}]
while True:
message = input("You: ")
if message.lower() == "get history":
save_chat_history(chat_history)
continue
if message.lower() in ["quit", "exit", "bye"]:
print("Chatbot: Goodbye!")
save_chat_history(chat_history)
break
if message:
messages.append(
{"role": "user", "content": message},
)
chat_completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
answer = chat_completion.choices[0].message.content
#append to anaphora global storage variable and call
parseStr=message
print(parse(parseStr))
chat_history.append(get_msg_json("User", message, parseStr))
#curl to google sheet to store data
store(message)
#append to anaphora global storage variable and call
if message.endswith('.') or message.endswith('?'):
anaphoraStr += " "
anaphoraStr += message
else:
anaphoraStr += " "
anaphoraStr += message
anaphoraStr += "."
print(anaphora(anaphoraStr))
print(f"ChatGPT: {answer}")
chat_history.append(get_msg_json("ChatGPT", answer, ""))
messages.append({"role": "assistant", "content": answer})
| [
"My name is PLACEHOLDER",
"You are a helpful assistant."
] |
2024-01-10 | bnavveer/.01 | apen.py | import pprint
import google.generativeai as palm
import openai
palm.configure(api_key='AIzaSyCsHL1-Rvhz1BTvoj-cfWrWxUtBnZE6gYc')
openai.api_key = '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'
def chat(message):
response = palm.chat(messages=message)
return (response.last)
def promtengine(message):
response = palm.chat(
context="You will act as a language model, specifically designed to instruct another language model. Your task is to create a prompt that can guide this second language model to simulate a human personality and effectively serve the needs of a user whose preferences will be given in the future. The purpose of this nested interaction is to prepare the second model to offer personalized responses and adapt to a wide range of scenarios based on future contexts. To do this, consider the general principles of prompt design such as clarity, specificity, and the inclusion of a clear goal or action. However, also keep in mind the unique requirements of this task - the need to simulate human personality and to adapt to future contexts.",
messages=message)
return response.last
def openinstial(message):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "DO NOT MAKE THINGS UP.Remember the person you are going to be.Imagine that you are a text-based AI conversing with a user who wants you to pretend to be someone. The user will provide you with additional information about the person they want you to pretend to be, such as their name, context, and the user's relationship to that person. Your goal is to engage in a short conversation with the user, responding as the person they want you to pretend to be.Please note that if the user's message starts with 'Nav:', it indicates that the user wants you to make changes in your response. Otherwise, please pretend to be the user. Ensure that your responses are brief.Now, imagine you have received a message from the user, which includes information about the person and their goals. Your task is to respond accordingly, incorporating the given information in your response. Remember, always pretend to be the specified person unless the user's message starts with 'M.'.Please provide a response as if you are the person described, keeping your reply short and conversational"},
{"role": "user", "content": message}
],
temperature=0.3
#please change this
)
return response['choices'][0]['message']['content']
def opensum(number):
f="data/"+number+"m.txt"
content = read_after_last_marker(f,"~")
append_marker(f,"~")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Please summerize the most important things that happened, and please make sure you give everything:,"},
{"role": "user", "content": content}
]
)
return response['choices'][0]['message']['content']
#make sure to add a ~ at the end of the file
def read_after_last_marker(file_path, marker):
with open(file_path, 'r') as file:
content = file.readlines()
# Find the line number where the last marker starts
last_line = None
for i, line in enumerate(content):
if marker in line:
last_line = i
# If marker is not in the file, return an empty string
if last_line is None:
return ''
# Return everything after the last marker
return ''.join(content[last_line + 1:])
def append_marker(file_path, marker):
with open(file_path, 'a') as file:
file.write(marker + '\n')
# Usage:
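# A minimal sketch of how these helpers fit together (the file name and marker below are
# assumptions for illustration, not values taken from the original script):
#   new_text = read_after_last_marker("data/1m.txt", "~")   # everything written since the last "~"
#   append_marker("data/1m.txt", "~")                        # mark that point as already summarized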
def verify(message):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Please check if reponse sounds like it is a question and repond with either a yes or no please explain why. "},
{"role": "user", "content": message}
],
temperature=0.3
)
return response['choices'][0]['message']['content']
def is_question(user_input):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": f"The following text is provided: '{user_input}'. Is this text a question?"}
]
)
result = response['choices'][0]['message']['content'].strip().lower()
if 'yes' in result:
return 1
elif 'no' in result:
return 0
else:
return 1
| [
"DO NOT MAKE THINGS UP.Remember the person you are going to be.Imagine that you are a text-based AI conversing with a user who wants you to pretend to be someone. The user will provide you with additional information about the person they want you to pretend to be, such as their name, context, and the user's relationship to that person. Your goal is to engage in a short conversation with the user, responding as the person they want you to pretend to be.Please note that if the user's message starts with 'Nav:', it indicates that the user wants you to make changes in your response. Otherwise, please pretend to be the user. Ensure that your responses are brief.Now, imagine you have received a message from the user, which includes information about the person and their goals. Your task is to respond accordingly, incorporating the given information in your response. Remember, always pretend to be the specified person unless the user's message starts with 'M.'.Please provide a response as if you are the person described, keeping your reply short and conversational",
"You are a helpful assistant.",
"You will act as a language model, specifically designed to instruct another language model. Your task is to create a prompt that can guide this second language model to simulate a human personality and effectively serve the needs of a user whose preferences will be given in the future. The purpose of this nested interaction is to prepare the second model to offer personalized responses and adapt to a wide range of scenarios based on future contexts. To do this, consider the general principles of prompt design such as clarity, specificity, and the inclusion of a clear goal or action. However, also keep in mind the unique requirements of this task - the need to simulate human personality and to adapt to future contexts.",
"Please check if reponse sounds like it is a question and repond with either a yes or no please explain why. ",
"Please summerize the most important things that happened, and please make sure you give everything:,",
"The following text is provided: 'PLACEHOLDER'. Is this text a question?"
] |
2024-01-10 | shuxian12/chatbot_api | approaches~chatreadretrieveread.py | from typing import Any, Sequence
from decouple import config
import openai
from openai import error
from azure.search.documents import SearchClient
from azure.search.documents.models import QueryType
from approaches.approach import Approach
from text import nonewlines
class ChatReadRetrieveReadApproach(Approach):
"""
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
    top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
(answer) with that prompt.
"""
prompt_prefix = """<|im_start|>system
Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
Sources:
{sources}
<|im_end|>
{chat_history}
"""
follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their question on Advantech Products and technical support.
Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.
Try not to repeat questions that have already been asked.
Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
# by searching in a knowledge base about files of Advantech Product and services, Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered.
Generate a search query that contains query and keywords based on the conversation and the new question.
1. Do "NOT" include any text inside [] or <<>> in the search query terms.
2. Do "NOT" include conjunctions such as if or Interrogative word such as how in the search query terms.
3. If the question is not in English, translate the question to English before generating the search query.
Example:
Q1. What is the purpose of the System Report in the System Setting of DeviceOn
query: DeviceOn System Report purpose in System Setting
Q2. In WISE-Agent, how can you adjust the device name or connection parameters?
query: WISE-Agent device name and connection parameters adjustment
Chat History:
{chat_history}
Question:
{question}
Search query:
"""
def __init__(self, search_client: SearchClient, chatgpt_deployment: str, sourcepage_field: str, content_field: str):
self.search_client = search_client
self.chatgpt_deployment = chatgpt_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any:
use_semantic_captions = True if overrides.get("semantic_captions") else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
openai.api_base = f"https://{config('AZURE_OPENAI_SERVICE')}.openai.azure.com"
openai.api_key = config("AZURE_OPENAI_KEY")
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"])
completion = openai.Completion.create(
engine=self.chatgpt_deployment,
prompt=prompt,
temperature=0.0,
max_tokens=32,
n=1,
stop=["\n"],
timeout=20,
request_timeout=20)
q = completion.choices[0].text
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
if overrides.get("semantic_ranker"):
r = self.search_client.search(q,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None)
else:
r = self.search_client.search(q, filter=filter, top=top)
# r = [dict(d) for d in r]
if use_semantic_captions:
results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r]
content = "\n".join(results)
follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
        # Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
prompt_override = overrides.get("prompt_template")
if prompt_override is None:
prompt = self.prompt_prefix.format(injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt)
elif prompt_override.startswith(">>>"):
prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt)
else:
prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt)
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
try:
completion = openai.Completion.create(
engine=self.chatgpt_deployment,
prompt=prompt,
temperature=overrides.get("temperature") or 0.7,
max_tokens=1024,
n=1,
stop=["<|im_end|>", "<|im_start|>"],
timeout=30,
request_timeout=30)
ans = completion.choices[0].text
except error.InvalidRequestError as e:
ans = 'Sorry I can\'t answer that question due to some invalid, dangerous, or otherwise harmful content.\nPlease try again with a different question related to the advantech products and services. I will be happy to help you.'
return {"data_points": results, "answer": ans, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')}
def get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str:
history_text = ""
for h in reversed(history if include_last_turn else history[:-1]):
history_text = """<|im_start|>user""" + "\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot", "") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text
if len(history_text) > approx_max_tokens*4:
break
return history_text
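# A minimal invocation sketch (the SearchClient, deployment name, field names and the question
# below are placeholders/assumptions, not taken from the original repository):
#   approach = ChatReadRetrieveReadApproach(search_client, "gpt-35-deployment",
#                                           sourcepage_field="sourcepage", content_field="content")
#   result = approach.run([{"user": "How do I configure WISE-Agent connection parameters?"}],
#                         overrides={"top": 3})
#   print(result["answer"])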
| [
"\n",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered.\nGenerate a search query that contains query and keywords based on the conversation and the new question.\n1. Do \"NOT\" include any text inside [] or <<>> in the search query terms.\n2. Do \"NOT\" include conjunctions such as if or Interrogative word such as how in the search query terms.\n3. If the question is not in English, translate the question to English before generating the search query.\n\nExample:\nQ1. What is the purpose of the System Report in the System Setting of DeviceOn\n query: DeviceOn System Report purpose in System Setting\nQ2. In WISE-Agent, how can you adjust the device name or connection parameters?\n query: WISE-Agent device name and connection parameters adjustment\n\nChat History:\n{chat_history}\n\nQuestion:\n{question}\n\nSearch query:\n",
"suggest_followup_questions",
"Generate three very brief follow-up questions that the user would likely ask next about their question on Advantech Products and technical support. \nUse double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.\nTry not to repeat questions that have already been asked.\nOnly generate questions and do not generate any text before or after the questions, such as 'Next Questions'",
"prompt_template",
"<|im_start|>system\nAssistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.\nAnswer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.\nFor tabular information return it as an html table. Do not return markdown format.\nEach source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].\n{follow_up_questions_prompt}\n{injected_prompt}\nSources:\n{sources}\n<|im_end|>\n{chat_history}\n"
] |
2024-01-10 | aroraakash22/aroraakash22 | Chatbot.py | import openai
import streamlit as st
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("LingoVibes German GPT")
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
openai.api_key = openai_api_key
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = openai.ChatCompletion.create(model="gpt-3.5-turbo-0613:physicswallah::7uzz4KZf", messages=st.session_state.messages)
msg = response.choices[0].message
st.session_state.messages.append(msg)
st.chat_message("assistant").write(msg.content)
| [
"How can I help you?"
] |
2024-01-10 | kirkhofer/data-ai | aoai~balancer.py | import os
import json
import openai
class AOAIBalancer:
    """Round-robins calls across multiple Azure OpenAI endpoints, grouped by deployed model."""
    def __init__(self, endpoints=None):
        # Keep state per instance; mutable class-level defaults would be shared between instances.
        if not endpoints:
            endpoints = [{"endpoint": os.getenv('OPENAI_API_BASE'), "key": os.getenv('OPENAI_API_KEY')}]
        self.endpoints = endpoints
        self.pointers = []  # one round-robin pointer per distinct model name
        self.models = []    # one entry per (endpoint, deployment) pair discovered
        openai.api_type = 'azure'
for endpoint in self.endpoints:
print(f"endpoint: {endpoint}")
openai.api_key = endpoint['key']
openai.api_base = endpoint['endpoint']
openai.api_version = '2022-12-01'
response = openai.Deployment.list()
for dep in response['data']:
self.models.append({"endpoint":endpoint['endpoint'],"key":endpoint['key'],"model":dep['model'],"deployment":dep['id']})
        # Get the distinct model names across all discovered deployments
pts = list(set([x['model'] for x in self.models]))
for pt in pts:
x=len([x for x in self.models if x['model']==pt])
self.pointers.append({"model":pt,"pointer":0,"count":x})
def getModel(self,model="text-davinci-003"):
pointer=next((m for m in self.pointers if m['model'] == model), None)
mods=list(filter(lambda x: x['model'] == model, self.models))
return mods[pointer['pointer']]
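    # A minimal usage sketch (the model name below is an assumption for illustration):
    #   balancer = AOAIBalancer()                    # falls back to OPENAI_API_BASE / OPENAI_API_KEY
    #   target = balancer.getModel("gpt-35-turbo")   # next endpoint hosting that model
    #   openai.api_base, openai.api_key = target['endpoint'], target['key']
    #   balancer.incrementModel("gpt-35-turbo")      # advance the round-robin pointer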
def incrementModel(self,model="text-davinci-003"):
pointer=next((m for m in self.pointers if m['model'] == model), None)
key = pointer['pointer']
if key < pointer['count']-1:
key+=1
else:
key=0
pointer['pointer']=key | [] |
2024-01-10 | kirkhofer/data-ai | aoai~azsqlnlp.py | import streamlit as st
import pyodbc
import openai
import requests
import json
openai.api_type = "azure"
openai.api_base = st.secrets.aoai.base
openai.api_key = st.secrets.aoai.key
server = st.secrets.sql.server
database = st.secrets.sql.db
username = st.secrets.sql.user
password = st.secrets.sql.pwd
driver= st.secrets.sql.driver or 'ODBC Driver 18 for SQL Server'
# NOTE: Change this if something isn't working
connStr='DRIVER='+driver+';SERVER=tcp:'+server+';PORT=1433;DATABASE='+database+';UID='+username+';PWD='+ password+';Encrypt=YES;TrustServerCertificate=YES'
# create a row from array of columns with "ColumnName (Datatype)"
def create_row(columns):
return ",".join([f"{col['column']} ({col['type']})" for col in columns])
if "tables" not in st.session_state:
tables=[]
with pyodbc.connect(connStr) as conn:
with conn.cursor() as cursor:
cursor.execute("SELECT TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS")
row = cursor.fetchone()
while row:
# tables.append(row)
tables.append({'schema':row[0],'table':row[1],'column':row[2],'type':row[3]})
#print (str(row[0]) + " " + str(row[1]))
row = cursor.fetchone()
vals = set([(t['schema'],t['table']) for t in tables])
search=[]
for val in vals:
#Get a list of all tables by table and schema
columns = [c for c in tables if c['table']==val[1] and c['schema']==val[0]]
cols = create_row(columns)
search.append(f"{val[0]}.{val[1]} [{cols}]")
st.session_state["tables"]=search
def get_text():
input_text = st.text_area(height = 200, label="Query here", label_visibility='collapsed', placeholder="Enter query...", key="query_input")
return input_text
def Get_ChatCompletion(prompt,model="gpt-35-turbo",temperature=0.0):
st.session_state["prompt"]=prompt
openai.api_version=st.secrets.aoai.previewversion
response = openai.ChatCompletion.create(
engine=model,
messages = prompt,
temperature=temperature,
max_tokens=1024
)
st.session_state["response"]=response
llm_output = response['choices'][0]['message']['content']
return llm_output
def Get_Completion(prompt,model="code-davinci-002"):
openai.api_version=st.secrets.aoai.version
st.session_state["prompt"]=prompt
response = openai.Completion.create(
engine=model,
prompt = prompt,
temperature=0,
max_tokens=250,
stop=["#"]
)
st.session_state["response"]=response
llm_output = response.choices[0].text
return llm_output
def Get_CompletionREST(prompt,max_tokens=250,temperature=0,model="code-davinci-002"):
headers={"Content-Type":"application/json","api-key":st.secrets.aoai.key}
uri = f"{st.secrets.aoai.base}openai/deployments/{model}/completions?api-version=2022-12-01"
body={
"prompt": prompt,
"max_tokens":max_tokens,
"temperature":temperature,
"stop":["#"]
}
#convert body to utf8 bytes
body_utf8 = bytes(json.dumps(body), 'utf-8')
st.session_state["prompt"]=body
request=requests.post(uri, headers=headers, json=body)
# request=requests.post(uri, headers=headers, data=body_utf8)
# st.write(f"Status={request.status_code}")
response=request.json()
st.session_state["response"]=response
if( "error" in response ):
#Read this from this json {"error":{"message":"blah"}}
return response['error']['message']
else:
return response['choices'][0]['text']
models=['Completion','Chat Completion','REST']
runningModel=st.sidebar.selectbox('Model',models)
# st.sidebar.markdown("Enter a query against the tables in the database. The query will be run against the database and the results will be displayed below. The query will also be sent to OpenAI to generate a SQL query that will return the same results. The generated query will be displayed below the results.")
st.sidebar.markdown("## Enter Your query")
st.sidebar.write(f"Running against {database}")
# query_input = get_text()
input_text = st.sidebar.text_area(height = 200, label="Query here", label_visibility='collapsed', placeholder="Enter query...", key="query_input")
if st.sidebar.button("Submit"):
st.write(f"## Query:")
st.write(input_text)
    # leftover parameter reference: -temperature 0 -max_tokens 250 -stop "#"
messages=[]
lines=[]
lines.append("Given the following tables and columns with data types:")
#for each row in tables array combine the two columns and separate by a carriage return
for t in st.session_state["tables"]:
lines.append(t)
lines.append(f"#Create a SQL query for {input_text}")
content="\n".join(lines)
messages.append({"role":"system","content":content})
messages.append({"role":"user","content":input_text})
# st.write(content)
if "Completion" == runningModel:
resp = Get_Completion(content)
elif "Chat Completion" == runningModel:
resp = Get_ChatCompletion(messages)
else:
resp = Get_CompletionREST(content)
st.write(f"## Results for {runningModel}:")
st.write(resp)
with st.expander("See the detail"):
if "response" in st.session_state:
st.write("response:")
st.write(st.session_state["response"])
if "prompt" in st.session_state:
st.write("Prompt:")
st.write(st.session_state["prompt"])
st.write(f"Length of tables={len(st.session_state['tables'])}")
st.table(st.session_state["tables"])
| [] |
2024-01-10 | kirkhofer/data-ai | aoai~bingit.py | import streamlit as st
import requests
import json
import pandas as pd
import openai
openai.api_type = "azure"
openai.api_base = st.secrets.aoai.base
openai.api_key = st.secrets.aoai.key
openai.api_version = st.secrets.aoai.previewversion
def bing_web_search(subscription_key, bing_subscription_url, query,site=None):
# set parameters
# search_url = "https://api.bing.microsoft.com/v7.0/search"
headers = {"Ocp-Apim-Subscription-Key": subscription_key}
params = {
"q": query,
"textDecorations": True,
"textFormat": "Raw",
"mkt":"en-us",
"responseFilter":"Webpages"}
if site is not None:
params["q"]+=" site:"+site
# get response
response = requests.get(bing_subscription_url, headers=headers, params=params)
response.raise_for_status()
return response.json()
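# Illustrative call only (the key and site values are placeholders; the endpoint matches the one noted above):
#   data = bing_web_search("<bing-key>", "https://api.bing.microsoft.com/v7.0/search",
#                          "How much is Power BI", site="learn.microsoft.com/en-us/azure")
#   snippets = [page["snippet"] for page in data.get("webPages", {}).get("value", [])]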
def runQuery():
site=None
if "Specific" == st.session_state.rbSelect:
site=st.session_state.txtUrl
data = bing_web_search(st.secrets.bing.key, st.secrets.bing.endpoint, st.session_state.txtSearch,site=site)
if 'webPages' in data:
df = pd.json_normalize(data['webPages']['value'])
prompt="""
Answer ONLY with the facts listed in the list of sources below
If there isn't enough information below, say you don't know.
Do not generate answers that don't use the sources below.
If asking a clarifying question to the user would help, ask the question.
Each source has a URL followed by colon and the actual information, always include the source URL for each fact you use in the response.
Return the response in markdown with the URL references
Sources:
"""
srch=[]
for _,row in df.iterrows():
srch.append(f"{row['url']}: {row['snippet']}")
prompt+="\n".join(srch)
messages=[]
messages.append({"role":"system","content":prompt})
messages.append({"role":"user","content":st.session_state.txtSearch})
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages = messages,
temperature=0,
max_tokens=1500
)
st.markdown("## Question")
st.markdown(f"{st.session_state.txtSearch}")
st.markdown("## Answer")
st.markdown(f"{response['choices'][0]['message']['content']}")
with st.expander("See more info"):
st.write(df[['url','snippet']])
st.write(response)
st.write(messages)
else:
st.write("No results found")
with st.sidebar:
    rb=st.radio("Search across all web sites or a specific site?",("All","Specific"),key="rbSelect")
with st.form("bingit"):
if "Specific" == st.session_state.rbSelect:
st.text_input("Search using this website",key="txtUrl",value="learn.microsoft.com/en-us/azure")
st.text_input("Enter search string",value="How much is power bi",key="txtSearch")
st.form_submit_button(label='Submit', on_click=runQuery) | [
"\n",
"\n Answer ONLY with the facts listed in the list of sources below\n If there isn't enough information below, say you don't know.\n Do not generate answers that don't use the sources below.\n If asking a clarifying question to the user would help, ask the question.\n Each source has a URL followed by colon and the actual information, always include the source URL for each fact you use in the response.\n\n Return the response in markdown with the URL references\n \n Sources:\n "
] |
2024-01-10 | krishnajalan/rubrix | src~argilla~client~datasets.py | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
import pandas as pd
from argilla._constants import OPENAI_END_TOKEN, OPENAI_SEPARATOR, OPENAI_WHITESPACE
from argilla.client.apis.datasets import (
TextClassificationSettings,
TokenClassificationSettings,
)
from argilla.client.models import (
Framework,
Record,
Text2TextRecord,
TextClassificationRecord,
TokenAttributions,
TokenClassificationRecord,
)
from argilla.client.sdk.datasets.models import TaskType
from argilla.utils.dependency import require_version, requires_version
from argilla.utils.span_utils import SpanUtils
if TYPE_CHECKING:
import datasets
import pandas
import spacy
_LOGGER = logging.getLogger(__name__)
class DatasetBase:
"""The Dataset classes are containers for argilla records.
This is the base class to facilitate the implementation for each record type.
Args:
records: A list of argilla records.
Raises:
WrongRecordTypeError: When the record type in the provided
list does not correspond to the dataset type.
"""
_RECORD_TYPE = None
# record fields that can hold multiple input columns from a datasets.Dataset or a pandas.DataFrame
_RECORD_FIELDS_WITH_MULTIPLE_INPUT_COLUMNS = ["inputs", "metadata"]
_SETTINGS = None
@classmethod
def _record_init_args(cls) -> List[str]:
"""
        Helper that returns the field list available for creation of inner records.
        The ``_RECORD_TYPE.__fields__`` will be returned by default.
"""
return [field for field in cls._RECORD_TYPE.__fields__]
def __init__(self, records: Optional[List[Record]] = None):
if self._RECORD_TYPE is None:
raise NotImplementedError("A Dataset implementation has to define a `_RECORD_TYPE`!")
self._records = records or []
if self._records:
self._validate_record_type()
def _validate_record_type(self):
"""Validates the record type.
Raises:
WrongRecordTypeError: When the record type in the provided
list does not correspond to the dataset type.
"""
record_types = {type(rec): None for rec in self._records}
if len(record_types) > 1:
raise WrongRecordTypeError(
f"A {type(self).__name__} must only contain"
f" {self._RECORD_TYPE.__name__}s, but you provided various types:"
f" {[rt.__name__ for rt in record_types.keys()]}"
)
elif next(iter(record_types)) is not self._RECORD_TYPE:
raise WrongRecordTypeError(
f"A {type(self).__name__} must only contain"
f" {self._RECORD_TYPE.__name__}s, but you provided"
f" {list(record_types.keys())[0].__name__}s."
)
def __iter__(self):
return self._records.__iter__()
def __getitem__(self, key):
return self._records[key]
def __setitem__(self, key, value):
if type(value) is not self._RECORD_TYPE:
raise WrongRecordTypeError(
f"You are only allowed to set a record of type {self._RECORD_TYPE} in"
f" this dataset, but you provided {type(value)}"
)
self._records[key] = value
def __delitem__(self, key):
del self._records[key]
def __len__(self) -> int:
return len(self._records)
def __repr__(self):
return repr(self.to_pandas())
def __str__(self):
return repr(self)
@requires_version("datasets>1.17.0")
def to_datasets(self) -> "datasets.Dataset":
"""Exports your records to a `datasets.Dataset`.
Returns:
A `datasets.Dataset` containing your records.
"""
import datasets
ds_dict = self._to_datasets_dict()
        # TODO: THIS FIELD EXISTS ONLY AT THE CLIENT API LEVEL AND MAKES NO SENSE HERE FOR NOW
if "search_keywords" in ds_dict:
del ds_dict["search_keywords"]
try:
dataset = datasets.Dataset.from_dict(ds_dict)
# try without metadata, since it is more prone to incompatible structures
except Exception:
del ds_dict["metadata"]
dataset = datasets.Dataset.from_dict(ds_dict)
_LOGGER.warning(
"The 'metadata' of the records were removed, since it was incompatible with the 'datasets' format."
)
return dataset
def _to_datasets_dict(self) -> Dict:
raise NotImplementedError
@classmethod
def from_datasets(cls, dataset: "datasets.Dataset", **kwargs) -> "Dataset":
"""Imports records from a `datasets.Dataset`.
Columns that are not supported are ignored.
Args:
dataset: A datasets Dataset from which to import the records.
Returns:
The imported records in a argilla Dataset.
"""
raise NotImplementedError
@classmethod
def _prepare_dataset_and_column_mapping(
cls,
dataset: "datasets.Dataset",
column_mapping: Dict[str, Union[str, List[str]]],
) -> Tuple["datasets.Dataset", Dict[str, List[str]]]:
"""Renames and removes columns, and extracts the mapping of the columns to be joined.
Args:
dataset: A datasets Dataset from which to import the records.
column_mapping: Mappings from record fields to column names.
Returns:
The prepared dataset and a mapping for the columns to be joined
"""
import datasets
if isinstance(dataset, datasets.DatasetDict):
raise ValueError("`datasets.DatasetDict` are not supported. Please, select the dataset split before.")
# clean column mappings
column_mapping = {key: val for key, val in column_mapping.items() if val is not None}
cols_to_be_renamed, cols_to_be_joined = {}, {}
for field, col in column_mapping.items():
if field in cls._RECORD_FIELDS_WITH_MULTIPLE_INPUT_COLUMNS:
cols_to_be_joined[field] = [col] if isinstance(col, str) else col
else:
cols_to_be_renamed[col] = field
dataset = dataset.rename_columns(cols_to_be_renamed)
dataset = cls._remove_unsupported_columns(
dataset,
extra_columns=[col for cols in cols_to_be_joined.values() for col in cols],
)
return dataset, cols_to_be_joined
@classmethod
def _remove_unsupported_columns(
cls,
dataset: "datasets.Dataset",
extra_columns: List[str],
) -> "datasets.Dataset":
"""Helper function to remove unsupported columns from the `datasets.Dataset` following the record type.
Args:
dataset: The dataset.
extra_columns: Extra columns to be kept.
Returns:
The dataset with unsupported columns removed.
"""
not_supported_columns = [
col for col in dataset.column_names if col not in cls._record_init_args() + extra_columns
]
if not_supported_columns:
_LOGGER.warning(
"Following columns are not supported by the"
f" {cls._RECORD_TYPE.__name__} model and are ignored:"
f" {not_supported_columns}"
)
dataset = dataset.remove_columns(not_supported_columns)
return dataset
@staticmethod
def _join_datasets_columns_and_delete(row: Dict[str, Any], columns: List[str]) -> Dict[str, Any]:
"""Joins columns of a `datasets.Dataset` row into a dict, and deletes the single columns.
Updates the ``row`` dictionary!
Args:
row: A row of a `datasets.Dataset`
columns: Name of the columns to be joined and deleted from the row.
Returns:
A dict containing the columns and its values.
"""
joined_cols = {}
for col in columns:
joined_cols[col] = row[col]
del row[col]
return joined_cols
@staticmethod
def _parse_datasets_column_with_classlabel(
column_value: Union[str, List[str], int, List[int]],
feature: Optional[Any],
) -> Optional[Union[str, List[str], int, List[int]]]:
"""Helper function to parse a datasets.Dataset column with a potential ClassLabel feature.
Args:
column_value: The value from the datasets Dataset column.
feature: The feature of the annotation column to optionally convert ints to strs.
Returns:
The column value optionally converted to str, or None if the conversion fails.
"""
import datasets
# extract ClassLabel feature
if isinstance(feature, list):
feature = feature[0]
if isinstance(feature, datasets.Sequence):
feature = feature.feature
if not isinstance(feature, datasets.ClassLabel):
feature = None
if feature is None:
return column_value
try:
return feature.int2str(column_value)
# integers don't have to map to the names ...
# it seems that sometimes -1 is used to denote "no label"
except ValueError:
return None
def to_pandas(self) -> pd.DataFrame:
"""Exports your records to a `pandas.DataFrame`.
Returns:
            A `pandas.DataFrame` containing your records.
"""
return pd.DataFrame(map(dict, self._records))
@classmethod
def from_pandas(cls, dataframe: pd.DataFrame) -> "Dataset":
"""Imports records from a `pandas.DataFrame`.
Columns that are not supported are ignored.
Args:
dataframe: A pandas DataFrame from which to import the records.
Returns:
The imported records in a argilla Dataset.
"""
not_supported_columns = [col for col in dataframe.columns if col not in cls._record_init_args()]
if not_supported_columns:
_LOGGER.warning(
"Following columns are not supported by the"
f" {cls._RECORD_TYPE.__name__} model and are ignored:"
f" {not_supported_columns}"
)
dataframe = dataframe.drop(columns=not_supported_columns)
return cls._from_pandas(dataframe)
@classmethod
def _from_pandas(cls, dataframe: pd.DataFrame) -> "Dataset":
"""Helper method to create a argilla Dataset from a pandas DataFrame.
Must be implemented by the child class.
Args:
dataframe: A pandas DataFrame
Returns:
A argilla Dataset
"""
raise NotImplementedError
def prepare_for_training(
self,
framework: Union[Framework, str] = "transformers",
settings: Union[TextClassificationSettings, TokenClassificationSettings] = None,
lang: Optional["spacy.Language"] = None,
train_size: Optional[float] = 1,
test_size: Optional[float] = None,
seed: Optional[int] = None,
) -> Union[
"datasets.Dataset",
"spacy.tokens.DocBin",
Tuple["spacy.tokens.DocBin", "spacy.tokens.DocBin"],
Tuple["pandas.DataFrame", "pandas.DataFrame"],
]:
"""Prepares the dataset for training.
This will return a ``datasets.Dataset`` with all columns returned by ``to_datasets`` method
and an additional *ner_tags* column:
- Records without an annotation are removed.
- The *ner_tags* column corresponds to the iob tags sequences for annotations of the records
- The iob tags are transformed to integers.
Args:
framework: A string|enum specifying the framework for the training.
"transformers" and "spacy" are currently supported. Default: `transformers`
lang: The spacy nlp Language pipeline used to process the dataset. (Only for spacy framework)
            train_size: The size of the training set. If float, should be between 0.0 and 1.0 and represent the
                proportion of the dataset to include in the train split.
            test_size: The size of the test set. If float, should be between 0.0 and 1.0 and represent the
                proportion of the dataset to include in the test split.
seed: Random state.
Returns:
            A datasets Dataset with a *ner_tags* or a *label* column and several *inputs* columns.
returned by ``to_datasets`` for "transformers" framework or a spaCy DocBin for "spacy" framework.
Examples:
>>> import argilla as rg
>>> rb_dataset = rg.DatasetForTokenClassification([
... rg.TokenClassificationRecord(
... text="The text",
... tokens=["The", "text"],
... annotation=[("TAG", 0, 2)],
... )
... ])
>>> rb_dataset.prepare_for_training().features
{'text': Value(dtype='string'),
'tokens': Sequence(feature=Value(dtype='string'), length=-1),
'prediction': Value(dtype='null'),
'prediction_agent': Value(dtype='null'),
'annotation': [{'end': Value(dtype='int64'),
'label': Value(dtype='string'),
'start': Value(dtype='int64')}],
'annotation_agent': Value(dtype='null'),
'id': Value(dtype='null'),
'metadata': Value(dtype='null'),
'status': Value(dtype='string'),
'event_timestamp': Value(dtype='null'),
'metrics': Value(dtype='null'),
'ner_tags': [ClassLabel(num_classes=3, names=['O', 'B-TAG', 'I-TAG'])]}
>>> import argilla as rg
>>> rb_dataset = rg.DatasetForTextClassification([
... rg.TextClassificationRecord(
... inputs={"header": "my header", "content": "my content"},
... annotation="SPAM",
... )
... ])
>>> rb_dataset.prepare_for_training().features
{'header': Value(dtype='string'),
'content': Value(dtype='string'),
'label': ClassLabel(num_classes=1, names=['SPAM'])}
"""
if self._RECORD_TYPE == TextClassificationRecord:
if settings is None:
self._SETTINGS = self._infer_settings_from_records()
elif isinstance(settings, TextClassificationSettings):
self._SETTINGS = settings
else:
raise ValueError("settings must be TextClassificationSettings for TextClassificationRecord")
elif self._RECORD_TYPE == TokenClassificationRecord:
if settings is None:
self._SETTINGS = self._infer_settings_from_records()
elif isinstance(settings, TokenClassificationSettings):
self._SETTINGS = settings
else:
raise ValueError("settings must be TokenClassificationSettings for TokenClassificationRecord")
else:
self._SETTINGS = settings
if train_size is None:
train_size = 1
if test_size is None:
test_size = 1 - train_size
# check if all numbers are larger than 0
assert [abs(train_size), abs(test_size)] == [train_size, test_size], ValueError(
"`train_size` and `test_size` must be larger than 0."
)
# check if train sizes sum up to 1
assert (train_size + test_size) == 1, ValueError("`train_size` and `test_size` must sum to 1.")
# check for annotations
assert any([rec.annotation for rec in self._records]), ValueError("Dataset has no annotations.")
# shuffle records
shuffled_records = self._records.copy()
seed = seed or random.randint(42, 1984)
random.Random(seed).shuffle(shuffled_records)
# turn the string into a Framework instance and trigger error if str is not valid
if isinstance(framework, str):
framework = Framework(framework)
if test_size == 0:
test_size = None
# prepare for training for the right method
if framework in [
Framework.TRANSFORMERS,
Framework.SETFIT,
Framework.SPAN_MARKER,
Framework.PEFT,
Framework.AUTOTRAIN,
]:
return self._prepare_for_training_with_transformers(train_size=train_size, test_size=test_size, seed=seed)
elif framework is Framework.SPACY and lang is None:
raise ValueError(
"Please provide a spacy language model to prepare the dataset for training with the spacy framework."
)
elif framework in [Framework.SPACY, Framework.SPARK_NLP, Framework.OPENAI]:
if train_size and test_size:
require_version("scikit-learn")
from sklearn.model_selection import train_test_split
records_train, records_test = train_test_split(
shuffled_records,
train_size=train_size,
shuffle=False,
random_state=seed,
)
if framework is Framework.SPACY:
train_docbin = self._prepare_for_training_with_spacy(nlp=lang, records=records_train)
test_docbin = self._prepare_for_training_with_spacy(nlp=lang, records=records_test)
return train_docbin, test_docbin
elif framework is Framework.SPARK_NLP:
train_df = self._prepare_for_training_with_spark_nlp(records_train)
test_df = self._prepare_for_training_with_spark_nlp(records_test)
return train_df, test_df
else:
train_jsonl = self._prepare_for_training_with_openai(records=records_train)
test_jsonl = self._prepare_for_training_with_openai(records=records_test)
return train_jsonl, test_jsonl
else:
if framework is Framework.SPACY:
return self._prepare_for_training_with_spacy(nlp=lang, records=shuffled_records)
elif framework is Framework.SPARK_NLP:
return self._prepare_for_training_with_spark_nlp(records=shuffled_records)
elif framework is Framework.OPENAI:
return self._prepare_for_training_with_openai(records=shuffled_records)
else:
raise NotImplementedError(
f"Framework {framework} is not supported. Choose from: {[e.value for e in Framework]}"
)
else:
raise NotImplementedError(
f"Framework {framework} is not supported. Choose from: {[e.value for e in Framework]}"
)
@requires_version("spacy")
def _prepare_for_training_with_spacy(
self, **kwargs
    ) -> Union["spacy.tokens.DocBin", Tuple["spacy.tokens.DocBin", "spacy.tokens.DocBin"]]:
"""Prepares the dataset for training using the "spacy" framework.
Args:
**kwargs: Specific to the task of the dataset.
Returns:
            A spacy.tokens.DocBin.
"""
raise NotImplementedError
@requires_version("datasets>1.17.0")
def _prepare_for_training_with_transformers(self, **kwargs) -> "datasets.Dataset":
"""Prepares the dataset for training using the "transformers" framework.
Args:
**kwargs: Specific to the task of the dataset.
Returns:
A datasets Dataset.
"""
raise NotImplementedError
def _prepare_for_training_with_spark_nlp(self, **kwargs) -> "datasets.Dataset":
"""Prepares the dataset for training using the "spark-nlp" framework.
Args:
**kwargs: Specific to the task of the dataset.
Returns:
A pd.DataFrame.
"""
raise NotImplementedError
def _prepare_for_training_with_openai(self, **kwargs) -> "dict":
"""Prepares the dataset for training using the "openai" framework.
Args:
**kwargs: Specific to the task of the dataset.
Returns:
A pd.DataFrame.
"""
raise NotImplementedError
def _infer_settings_from_records(self) -> Union[TokenClassificationSettings, TextClassificationSettings]:
"""Infer the settings from the records.
Returns:
A Settings object.
"""
raise NotImplementedError
def _prepend_docstring(record_type: Type[Record]):
docstring = f"""This Dataset contains {record_type.__name__} records.
It allows you to export/import records into/from different formats,
loop over the records, and access them by index.
Args:
records: A list of `{record_type.__name__}`s.
Raises:
WrongRecordTypeError: When the record type in the provided
list does not correspond to the dataset type.
"""
def docstring_decorator(cls):
cls.__doc__ = docstring + (cls.__doc__ or "")
return cls
return docstring_decorator
@_prepend_docstring(TextClassificationRecord)
class DatasetForTextClassification(DatasetBase):
"""
Examples:
>>> # Import/export records:
>>> import argilla as rg
>>> dataset = rg.DatasetForTextClassification.from_pandas(my_dataframe)
>>> dataset.to_datasets()
>>>
>>> # Looping over the dataset:
>>> for record in dataset:
... print(record)
>>>
>>> # Passing in a list of records:
>>> records = [
... rg.TextClassificationRecord(text="example"),
... rg.TextClassificationRecord(text="another example"),
... ]
>>> dataset = rg.DatasetForTextClassification(records)
>>> assert len(dataset) == 2
>>>
>>> # Indexing into the dataset:
>>> dataset[0]
... rg.TextClassificationRecord(text="example")
>>> dataset[0] = rg.TextClassificationRecord(text="replaced example")
"""
_RECORD_TYPE = TextClassificationRecord
def __init__(self, records: Optional[List[TextClassificationRecord]] = None):
# we implement this to have more specific type hints
super().__init__(records=records)
@classmethod
@requires_version("datasets>1.17.0")
def from_datasets(
cls,
dataset: "datasets.Dataset",
text: Optional[str] = None,
id: Optional[str] = None,
inputs: Optional[Union[str, List[str]]] = None,
annotation: Optional[str] = None,
metadata: Optional[Union[str, List[str]]] = None,
) -> "DatasetForTextClassification":
"""Imports records from a `datasets.Dataset`.
Columns that are not supported are ignored.
Args:
dataset: A datasets Dataset from which to import the records.
text: The field name used as record text. Default: `None`
id: The field name used as record id. Default: `None`
inputs: A list of field names used for record inputs. Default: `None`
annotation: The field name used as record annotation. Default: `None`
metadata: The field name used as record metadata. Default: `None`
Returns:
The imported records in a argilla Dataset.
Examples:
>>> import datasets
>>> ds = datasets.Dataset.from_dict({
... "inputs": ["example"],
... "prediction": [
... [{"label": "LABEL1", "score": 0.9}, {"label": "LABEL2", "score": 0.1}]
... ]
... })
>>> DatasetForTextClassification.from_datasets(ds)
"""
dataset, cols_to_be_joined = cls._prepare_dataset_and_column_mapping(
dataset,
dict(
text=text,
id=id,
inputs=inputs,
annotation=annotation,
metadata=metadata,
),
)
records = []
for row in dataset:
row["inputs"] = cls._parse_inputs_field(row, cols_to_be_joined.get("inputs"))
if row.get("inputs") is not None and row.get("text") is not None:
del row["text"]
if row.get("annotation") is not None:
row["annotation"] = cls._parse_datasets_column_with_classlabel(
row["annotation"], dataset.features["annotation"]
)
if row.get("prediction"):
row["prediction"] = (
[
(
pred["label"],
pred["score"],
)
for pred in row["prediction"]
]
if row["prediction"] is not None
else None
)
if row.get("explanation"):
row["explanation"] = (
{
key: [TokenAttributions(**tokattr_kwargs) for tokattr_kwargs in val]
for key, val in row["explanation"].items()
}
if row["explanation"] is not None
else None
)
if cols_to_be_joined.get("metadata"):
row["metadata"] = cls._join_datasets_columns_and_delete(row, cols_to_be_joined["metadata"])
records.append(TextClassificationRecord.parse_obj(row))
return cls(records)
@classmethod
def _parse_inputs_field(
cls,
row: Dict[str, Any],
columns: Optional[List[str]],
) -> Optional[Union[Dict[str, str], str]]:
"""Helper function to parse the inputs field.
Args:
row: A row of the dataset.Datasets
columns: A list of columns to be joined for the inputs field, optional.
Returns:
None, a dictionary or a string as input for the inputs field.
"""
inputs = row.get("inputs")
if columns is not None:
inputs = cls._join_datasets_columns_and_delete(row, columns)
if isinstance(inputs, dict):
inputs = {key: val for key, val in inputs.items() if val is not None}
return inputs
@classmethod
def from_pandas(
# we implement this to have more specific type hints
cls,
dataframe: pd.DataFrame,
) -> "DatasetForTextClassification":
return super().from_pandas(dataframe)
def _to_datasets_dict(self) -> Dict:
# create a dict first, where we make the necessary transformations
ds_dict = {}
for key in self._RECORD_TYPE.__fields__:
if key == "prediction":
ds_dict[key] = [
[{"label": pred[0], "score": pred[1]} for pred in rec.prediction]
if rec.prediction is not None
else None
for rec in self._records
]
elif key == "explanation":
ds_dict[key] = [
{key: list(map(dict, tokattrs)) for key, tokattrs in rec.explanation.items()}
if rec.explanation is not None
else None
for rec in self._records
]
elif key == "id":
ds_dict[key] = [None if rec.id is None else str(rec.id) for rec in self._records]
elif key == "metadata":
ds_dict[key] = [getattr(rec, key) or None for rec in self._records]
else:
ds_dict[key] = [getattr(rec, key) for rec in self._records]
return ds_dict
@classmethod
def _from_pandas(cls, dataframe: pd.DataFrame) -> "DatasetForTextClassification":
return cls([TextClassificationRecord(**row) for row in dataframe.to_dict("records")])
@requires_version("datasets>1.17.0")
def _prepare_for_training_with_transformers(
self,
train_size: Optional[float] = None,
test_size: Optional[float] = None,
seed: Optional[int] = None,
):
import datasets
ds_dict = {"id": [], "text": [], "label": []}
for rec in self._records:
if rec.annotation is None:
continue
if rec.text is not None:
text = rec.text
elif rec.text is None and "text" in rec.inputs:
text = rec.inputs["text"]
else:
text = " ".join(rec.inputs.values())
ds_dict["text"].append(text)
ds_dict["label"].append(rec.annotation)
ds_dict["id"].append(str(rec.id))
if self._records[0].multi_label:
labels = {label: None for labels in ds_dict["label"] for label in labels}
else:
labels = {label: None for label in ds_dict["label"]}
class_label = (
datasets.ClassLabel(names=sorted(labels.keys()))
if ds_dict["label"]
# in case we don't have any labels, ClassLabel fails with Dataset.from_dict({"labels": []})
else datasets.Value("string")
)
feature_dict = {
"id": datasets.Value("string"),
"text": datasets.Value("string"),
"label": [class_label] if self._records[0].multi_label else class_label,
}
ds = datasets.Dataset.from_dict(ds_dict, features=datasets.Features(feature_dict))
if self._records[0].multi_label:
require_version("scikit-learn")
from sklearn.preprocessing import MultiLabelBinarizer
labels = [rec["label"] for rec in ds]
mlb = MultiLabelBinarizer()
binarized_labels = mlb.fit_transform(labels)
feature_dict["binarized_label"] = feature_dict["label"]
ds = datasets.Dataset.from_dict(
{
"id": ds["id"],
"text": ds["text"],
"label": labels,
"binarized_label": binarized_labels,
},
features=datasets.Features(feature_dict),
)
if test_size is not None and test_size != 0:
ds = ds.train_test_split(train_size=train_size, test_size=test_size, seed=seed)
return ds
@requires_version("spacy")
def _prepare_for_training_with_spacy(self, nlp: "spacy.Language", records: List[Record]) -> "spacy.tokens.DocBin":
from spacy.tokens import DocBin
db = DocBin(store_user_data=True)
all_labels = self._verify_all_labels()
# Creating the DocBin object as in https://spacy.io/usage/training#training-data
for record in records:
if record.annotation is None:
continue
if record.text is not None:
text = record.text
elif record.text is None and "text" in record.inputs:
text = record.inputs["text"]
else:
text = ", ".join(f"{key}: {value}" for key, value in record.inputs.items())
doc = nlp.make_doc(text)
doc.user_data["id"] = record.id
cats = dict.fromkeys(all_labels, 0)
if isinstance(record.annotation, list):
for anno in record.annotation:
cats[anno] = 1
else:
cats[record.annotation] = 1
doc.cats = cats
db.add(doc)
return db
def _prepare_for_training_with_spark_nlp(self, records: List[Record]) -> "pandas.DataFrame":
if records[0].multi_label:
label_name = "labels"
else:
label_name = "label"
spark_nlp_data = []
for record in records:
if record.annotation is None:
continue
if record.id is None:
record.id = str(uuid.uuid4())
if record.text is not None:
text = record.text
elif record.text is None and "text" in record.inputs:
text = record.inputs["text"]
else:
text = ", ".join(f"{key}: {value}" for key, value in record.inputs.items())
spark_nlp_data.append([record.id, text, record.annotation])
return pd.DataFrame(spark_nlp_data, columns=["id", "text", label_name])
def _prepare_for_training_with_openai(self, **kwargs) -> "datasets.Dataset":
"""Prepares the dataset for training using the "openai" framework.
Args:
**kwargs: Specific to the task of the dataset.
Returns:
            A list of dicts in the OpenAI fine-tuning (prompt/completion) format.
"""
separator = OPENAI_SEPARATOR
whitespace = OPENAI_WHITESPACE
self._verify_all_labels() # verify that all labels are strings
if len(self._records) <= len(self._SETTINGS.label_schema) * 100:
_LOGGER.warning("OpenAI recommends at least 100 examples per class for training a classification model.")
jsonl = []
for rec in self._records:
if rec.annotation is None:
continue
if rec.text is not None:
prompt = rec.text
elif rec.text is None and "text" in rec.inputs:
prompt = rec.inputs["text"]
else:
prompt = ", ".join(f"{key}: {value}" for key, value in rec.inputs.items())
prompt += separator # needed for better performance
if self._records[0].multi_label:
completion = " ".join([str(self._SETTINGS.label2id[annotation]) for annotation in rec.annotation])
else:
completion = str(self._SETTINGS.label2id[rec.annotation])
jsonl.append({"id": rec.id, "prompt": prompt, "completion": whitespace + completion})
return jsonl
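    # Illustrative output shape only (the id, text and label id below are assumed examples):
    #   {"id": "rec-1", "prompt": "some text" + OPENAI_SEPARATOR, "completion": OPENAI_WHITESPACE + "0"}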
def _infer_settings_from_records(self) -> TextClassificationSettings:
all_labels = set()
for record in self._records:
if record.annotation is None:
continue
elif isinstance(record.annotation, str):
all_labels.add(record.annotation)
elif isinstance(record.annotation, list):
for label in record.annotation:
all_labels.add(label)
else:
# this is highly unlikely
raise TypeError("Record.annotation contains an unsupported type: {}".format(type(record.annotation)))
all_labels = list(all_labels)
all_labels.sort()
_LOGGER.warning(
f"""No label schema provided. Using all_labels: TextClassificationSettings({all_labels}). We recommend providing a `TextClassificationSettings()` or setting `rg.configure_dataset_settings()`/`rg.load_dataset_settings()` to ensure reproducibility."""
)
return TextClassificationSettings(all_labels)
def _verify_all_labels(self):
all_labels = self._SETTINGS.label_schema
for record in self._records:
if record.annotation is None:
continue
elif isinstance(record.annotation, str):
if record.annotation not in all_labels:
raise ValueError(f"Label {record.annotation} is not in the settings.label_schema: {all_labels}.")
elif isinstance(record.annotation, list):
for label in record.annotation:
if label not in all_labels:
raise ValueError(f"Label {label} is not in the settings.label_schema: {all_labels}.")
else:
# this is highly unlikely
raise TypeError("Record.annotation contains an unsupported type: {}".format(type(record.annotation)))
return all_labels
@_prepend_docstring(TokenClassificationRecord)
class DatasetForTokenClassification(DatasetBase):
"""
Examples:
>>> # Import/export records:
>>> import argilla as rg
>>> dataset = rg.DatasetForTokenClassification.from_pandas(my_dataframe)
>>> dataset.to_datasets()
>>>
>>> # Looping over the dataset:
>>> assert len(dataset) == 2
>>> for record in dataset:
... print(record)
>>>
>>> # Passing in a list of records:
>>> import argilla as rg
>>> records = [
... rg.TokenClassificationRecord(text="example", tokens=["example"]),
... rg.TokenClassificationRecord(text="another example", tokens=["another", "example"]),
... ]
>>> dataset = rg.DatasetForTokenClassification(records)
>>>
>>> # Indexing into the dataset:
>>> dataset[0]
... rg.TokenClassificationRecord(text="example", tokens=["example"])
>>> dataset[0] = rg.TokenClassificationRecord(text="replace example", tokens=["replace", "example"])
"""
_RECORD_TYPE = TokenClassificationRecord
def __init__(self, records: Optional[List[TokenClassificationRecord]] = None):
# we implement this to have more specific type hints
super().__init__(records=records)
@classmethod
def _record_init_args(cls) -> List[str]:
"""Adds the `tags` argument to default record init arguments"""
parent_fields = super(DatasetForTokenClassification, cls)._record_init_args()
return parent_fields + ["tags"] # compute annotation from tags
@classmethod
@requires_version("datasets>1.17.0")
def from_datasets(
cls,
dataset: "datasets.Dataset",
text: Optional[str] = None,
id: Optional[str] = None,
tokens: Optional[str] = None,
tags: Optional[str] = None,
metadata: Optional[Union[str, List[str]]] = None,
) -> "DatasetForTokenClassification":
"""Imports records from a `datasets.Dataset`.
Columns that are not supported are ignored.
Args:
dataset: A datasets Dataset from which to import the records.
text: The field name used as record text. Default: `None`
id: The field name used as record id. Default: `None`
tokens: The field name used as record tokens. Default: `None`
tags: The field name used as record tags. Default: `None`
metadata: The field name used as record metadata. Default: `None`
Returns:
The imported records in a argilla Dataset.
Examples:
>>> import datasets
>>> ds = datasets.Dataset.from_dict({
... "text": ["my example"],
... "tokens": [["my", "example"]],
... "prediction": [
... [{"label": "LABEL1", "start": 3, "end": 10, "score": 1.0}]
... ]
... })
>>> DatasetForTokenClassification.from_datasets(ds)
"""
dataset, cols_to_be_joined = cls._prepare_dataset_and_column_mapping(
dataset,
dict(
text=text,
tokens=tokens,
tags=tags,
id=id,
metadata=metadata,
),
)
records = []
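        # Build one TokenClassificationRecord per row: resolve ClassLabel-encoded tags back to
        # their string labels, convert prediction/annotation entity dicts into tuples, and join
        # any extra metadata columns into a single metadata dict.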
for row in dataset:
# TODO: fails with a KeyError if no tokens column is present and no mapping is indicated
if not row["tokens"]:
_LOGGER.warning("Ignoring row with no tokens.")
continue
if row.get("tags"):
row["tags"] = cls._parse_datasets_column_with_classlabel(row["tags"], dataset.features["tags"])
if row.get("prediction"):
row["prediction"] = cls.__entities_to_tuple__(row["prediction"])
if row.get("annotation"):
row["annotation"] = cls.__entities_to_tuple__(row["annotation"])
if cols_to_be_joined.get("metadata"):
row["metadata"] = cls._join_datasets_columns_and_delete(row, cols_to_be_joined["metadata"])
records.append(TokenClassificationRecord.parse_obj(row))
return cls(records)
@classmethod
def from_pandas(
# we implement this to have more specific type hints
cls,
dataframe: pd.DataFrame,
) -> "DatasetForTokenClassification":
return super().from_pandas(dataframe)
@requires_version("datasets>1.17.0")
def _prepare_for_training_with_transformers(
self,
train_size: Optional[float] = None,
test_size: Optional[float] = None,
seed: Optional[int] = None,
):
import datasets
has_annotations = False
for rec in self._records:
if rec.annotation is not None:
has_annotations = True
break
if not has_annotations:
return datasets.Dataset.from_dict({})
class_tags = ["O"]
class_tags.extend([f"{pre}-{label}" for label in sorted(self._verify_all_labels()) for pre in ["B", "I"]])
class_tags = datasets.ClassLabel(names=class_tags)
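        # spans2iob converts a record's character-level entity spans (label, start, end) into
        # token-level B-/I-/O tags and encodes them as integer ids via the ClassLabel above.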
def spans2iob(example):
span_utils = SpanUtils(example["text"], example["tokens"])
entity_spans = self.__entities_to_tuple__(example["annotation"])
tags = span_utils.to_tags(entity_spans)
return class_tags.str2int(tags)
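        # Keep only annotated records, attach the encoded tags as a "ner_tags" column, cast it
        # to a Sequence(ClassLabel) feature, and drop every column except id/tokens/ner_tags.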
ds = self.to_datasets().filter(self.__only_annotations__).map(lambda example: {"ner_tags": spans2iob(example)})
new_features = ds.features.copy()
new_features["ner_tags"] = datasets.Sequence(feature=class_tags)
ds = ds.cast(new_features)
ds = ds.remove_columns(set(ds.column_names) - set(["id", "tokens", "ner_tags"]))
if test_size is not None and test_size != 0:
ds = ds.train_test_split(train_size=train_size, test_size=test_size, seed=seed)
return ds
@requires_version("spacy")
def _prepare_for_training_with_spacy(self, nlp: "spacy.Language", records: List[Record]) -> "spacy.tokens.DocBin":
from spacy.tokens import DocBin
db = DocBin(store_user_data=True)
# Creating the DocBin object as in https://spacy.io/usage/training#training-data
for record in records:
if record.annotation is None:
continue
doc = nlp.make_doc(record.text)
doc.user_data["id"] = record.id
entities = []
for anno in record.annotation:
span = doc.char_span(anno[1], anno[2], label=anno[0])
# There is a misalignment between record tokenization and spaCy tokenization
if span is None:
# TODO(@dcfidalgo): Do we want to warn and continue or should we stop the training set generation?
raise ValueError(
"The following annotation does not align with the tokens"
" produced by the provided spacy language model:"
f" {(anno[0], record.text[anno[1]:anno[2]])}, {list(doc)}"
)
else:
entities.append(span)
doc.ents = entities
db.add(doc)
return db
def _prepare_for_training_with_spark_nlp(self, records: List[Record]) -> "pandas.DataFrame":
for record in records:
if record.id is None:
record.id = str(uuid.uuid4())
iob_doc_data = [
[
record.id,
record.text,
record.tokens,
[token["tag"] for token in record.metrics["tokens"]],
]
for record in records
if record.annotation is not None
]
return pd.DataFrame(iob_doc_data, columns=["id", "text", "token", "label"])
    def _prepare_for_training_with_openai(self, **kwargs) -> List[dict]:
"""Prepares the dataset for training using the "openai" framework.
Args:
**kwargs: Specific to the task of the dataset.
Returns:
            A list of dicts in OpenAI prompt/completion (JSONL) format.
"""
separator = OPENAI_SEPARATOR
end_token = OPENAI_END_TOKEN
whitespace = OPENAI_WHITESPACE
self._verify_all_labels()
if len(self._records) <= 500:
_LOGGER.warning("OpenAI recommends at least 500 examples for training a conditional generation model.")
jsonl = []
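        # Each annotated record becomes one prompt/completion pair: the prompt is the record
        # text plus a separator, and the completion lists every annotated span as
        # "- <span text> <label id>", terminated by the end token.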
for rec in self._records:
if rec.annotation is None:
continue
prompt = rec.text + separator # needed for better performance
completion = {}
for label, start, end in rec.annotation:
completion[rec.text[start:end]] = self._SETTINGS.label2id[label]
completion = "\n".join([f"- {k} {v}" for k, v in completion.items()])
completion = completion + end_token
jsonl.append({"id": rec.id, "prompt": prompt, "completion": whitespace + completion})
return jsonl
def _infer_settings_from_records(self) -> TokenClassificationSettings:
all_labels = set()
for record in self._records:
if record.annotation:
for label, _, _ in record.annotation:
all_labels.add(label)
all_labels = list(all_labels)
all_labels.sort()
_LOGGER.warning(
f"""No label schema provided. Using all_labels: TokenClassificationSettings({all_labels}). We recommend providing a `TokenClassificationSettings()` or setting `rg.configure_dataset_settings()`/`rg.load_dataset_settings()` to ensure reproducibility."""
)
return TokenClassificationSettings(all_labels)
def _verify_all_labels(self) -> List[str]:
all_labels = self._SETTINGS.label_schema
for record in self._records:
if record.annotation:
for label, _, _ in record.annotation:
if label not in all_labels:
raise ValueError(f"Label {label} is not in the settings.label_schema: {all_labels}.")
return all_labels
def __only_annotations__(self, data) -> bool:
return data["annotation"] is not None
def _to_datasets_dict(self) -> Dict:
"""Helper method to put token classification records in a `datasets.Dataset`"""
# create a dict first, where we make the necessary transformations
def entities_to_dict(
entities: Optional[List[Union[Tuple[str, int, int, float], Tuple[str, int, int]]]]
) -> Optional[List[Dict[str, Union[str, int, float]]]]:
if entities is None:
return None
return [
{"label": ent[0], "start": ent[1], "end": ent[2]}
if len(ent) == 3
else {"label": ent[0], "start": ent[1], "end": ent[2], "score": ent[3]}
for ent in entities
]
ds_dict = {}
for key in self._RECORD_TYPE.__fields__:
if key == "prediction":
ds_dict[key] = [entities_to_dict(rec.prediction) for rec in self._records]
elif key == "annotation":
ds_dict[key] = [entities_to_dict(rec.annotation) for rec in self._records]
elif key == "id":
ds_dict[key] = [None if rec.id is None else str(rec.id) for rec in self._records]
elif key == "metadata":
ds_dict[key] = [getattr(rec, key) or None for rec in self._records]
else:
ds_dict[key] = [getattr(rec, key) for rec in self._records]
return ds_dict
@staticmethod
def __entities_to_tuple__(
entities,
) -> List[Union[Tuple[str, int, int], Tuple[str, int, int, float]]]:
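        # Convert entity dicts from the datasets representation back into argilla's tuple
        # representation: (label, start, end) or (label, start, end, score).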
return [
(ent["label"], ent["start"], ent["end"])
if len(ent) == 3
else (ent["label"], ent["start"], ent["end"], ent["score"] or 0.0)
for ent in entities
]
@classmethod
def _from_pandas(cls, dataframe: pd.DataFrame) -> "DatasetForTokenClassification":
return cls([TokenClassificationRecord(**row) for row in dataframe.to_dict("records")])
@_prepend_docstring(Text2TextRecord)
class DatasetForText2Text(DatasetBase):
"""
Examples:
>>> # Import/export records:
>>> import argilla as rg
>>> dataset = rg.DatasetForText2Text.from_pandas(my_dataframe)
>>> dataset.to_datasets()
>>>
>>> # Passing in a list of records:
>>> records = [
... rg.Text2TextRecord(text="example"),
... rg.Text2TextRecord(text="another example"),
... ]
>>> dataset = rg.DatasetForText2Text(records)
>>> assert len(dataset) == 2
>>>
>>> # Looping over the dataset:
>>> for record in dataset:
... print(record)
>>>
>>> # Indexing into the dataset:
>>> dataset[0]
... rg.Text2TextRecord(text="example"})
>>> dataset[0] = rg.Text2TextRecord(text="replaced example")
"""
_RECORD_TYPE = Text2TextRecord
def __init__(self, records: Optional[List[Text2TextRecord]] = None):
# we implement this to have more specific type hints
super().__init__(records=records)
@classmethod
@requires_version("datasets>1.17.0")
def from_datasets(
cls,
dataset: "datasets.Dataset",
text: Optional[str] = None,
annotation: Optional[str] = None,
metadata: Optional[Union[str, List[str]]] = None,
id: Optional[str] = None,
) -> "DatasetForText2Text":
"""Imports records from a `datasets.Dataset`.
Columns that are not supported are ignored.
Args:
dataset: A datasets Dataset from which to import the records.
text: The field name used as record text. Default: `None`
annotation: The field name used as record annotation. Default: `None`
            metadata: The field name used as record metadata. Default: `None`
            id: The field name used as record id. Default: `None`
Returns:
            The imported records in an argilla Dataset.
Examples:
>>> import datasets
>>> ds = datasets.Dataset.from_dict({
... "text": ["my example"],
... "prediction": [["mi ejemplo", "ejemplo mio"]]
... })
>>> # or
>>> ds = datasets.Dataset.from_dict({
... "text": ["my example"],
... "prediction": [[{"text": "mi ejemplo", "score": 0.9}]]
... })
>>> DatasetForText2Text.from_datasets(ds)
"""
dataset, cols_to_be_joined = cls._prepare_dataset_and_column_mapping(
dataset,
dict(
text=text,
annotation=annotation,
id=id,
metadata=metadata,
),
)
records = []
for row in dataset:
if row.get("prediction"):
row["prediction"] = cls._parse_prediction_field(row["prediction"])
if cols_to_be_joined.get("metadata"):
row["metadata"] = cls._join_datasets_columns_and_delete(row, cols_to_be_joined["metadata"])
records.append(Text2TextRecord.parse_obj(row))
return cls(records)
@staticmethod
def _parse_prediction_field(predictions: List[Union[str, Dict[str, str]]]):
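        # Predictions may be plain strings or dicts with "text"/"score" keys; normalize each
        # one to either a string or a (text, score) tuple as expected by Text2TextRecord.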
def extract_prediction(prediction: Union[str, Dict]):
if isinstance(prediction, str):
return prediction
if prediction["score"] is None:
return prediction["text"]
return prediction["text"], prediction["score"]
return [extract_prediction(pred) for pred in predictions]
@classmethod
def from_pandas(
# we implement this to have more specific type hints
cls,
dataframe: pd.DataFrame,
) -> "DatasetForText2Text":
return super().from_pandas(dataframe)
def _to_datasets_dict(self) -> Dict:
# create a dict first, where we make the necessary transformations
def pred_to_dict(pred: Union[str, Tuple[str, float]]):
if isinstance(pred, str):
return {"text": pred, "score": None}
return {"text": pred[0], "score": pred[1]}
ds_dict = {}
for key in self._RECORD_TYPE.__fields__:
if key == "prediction":
ds_dict[key] = [
[pred_to_dict(pred) for pred in rec.prediction] if rec.prediction is not None else None
for rec in self._records
]
elif key == "id":
ds_dict[key] = [None if rec.id is None else str(rec.id) for rec in self._records]
elif key == "metadata":
ds_dict[key] = [getattr(rec, key) or None for rec in self._records]
else:
ds_dict[key] = [getattr(rec, key) for rec in self._records]
return ds_dict
@classmethod
def _from_pandas(cls, dataframe: pd.DataFrame) -> "DatasetForText2Text":
return cls([Text2TextRecord(**row) for row in dataframe.to_dict("records")])
@requires_version("datasets>1.17.0")
def _prepare_for_training_with_transformers(
self,
train_size: Optional[float] = None,
test_size: Optional[float] = None,
seed: Optional[int] = None,
):
import datasets
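        # Build a plain text-to-text dataset: one row per annotated record, with the record
        # text as the input and its annotation as the generation target.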
ds_dict = {"id": [], "text": [], "target": []}
for rec in self._records:
if rec.annotation is None:
continue
ds_dict["id"].append(rec.id)
ds_dict["text"].append(rec.text)
ds_dict["target"].append(rec.annotation)
feature_dict = {
"id": datasets.Value("string"),
"text": datasets.Value("string"),
"target": datasets.Value("string"),
}
ds = datasets.Dataset.from_dict(ds_dict, features=datasets.Features(feature_dict))
if test_size is not None and test_size != 0:
ds = ds.train_test_split(train_size=train_size, test_size=test_size, seed=seed)
return ds
def _prepare_for_training_with_spark_nlp(self, records: List[Record]) -> "pandas.DataFrame":
spark_nlp_data = []
for record in records:
if record.annotation is None:
continue
if record.id is None:
record.id = str(uuid.uuid4())
text = record.text
spark_nlp_data.append([record.id, text, record.annotation])
return pd.DataFrame(spark_nlp_data, columns=["id", "text", "target"])
    def _prepare_for_training_with_openai(self, **kwargs) -> List[dict]:
"""Prepares the dataset for training using the "openai" framework.
Args:
**kwargs: Specific to the task of the dataset.
Returns:
            A list of dicts in OpenAI prompt/completion (JSONL) format.
"""
separator = OPENAI_SEPARATOR
end_token = OPENAI_END_TOKEN
whitespace = OPENAI_WHITESPACE
if len(self._records) <= 500:
_LOGGER.warning("OpenAI recommends at least 500 examples for training a conditional generation model.")
jsonl = []
for rec in self._records:
if rec.annotation is None:
continue
prompt = rec.text + separator # needed for better performance
completion = rec.annotation + end_token
jsonl.append({"id": rec.id, "prompt": prompt, "completion": whitespace + completion})
return jsonl
Dataset = Union[DatasetForTextClassification, DatasetForTokenClassification, DatasetForText2Text]
def read_datasets(dataset: "datasets.Dataset", task: Union[str, TaskType], **kwargs) -> Dataset:
"""Reads a datasets Dataset and returns a argilla Dataset
Columns not supported by the :mod:`Record <argilla.client.models>` instance corresponding
with the task are ignored.
Args:
dataset: Dataset to be read in.
task: Task for the dataset, one of: ["TextClassification", "TokenClassification", "Text2Text"].
**kwargs: Passed on to the task-specific ``DatasetFor*.from_datasets()`` method.
Returns:
        An argilla dataset for the given task.
Examples:
>>> # Read text classification records from a datasets Dataset
>>> import datasets
>>> ds = datasets.Dataset.from_dict({
... "inputs": ["example"],
... "prediction": [
... [{"label": "LABEL1", "score": 0.9}, {"label": "LABEL2", "score": 0.1}]
... ]
... })
>>> read_datasets(ds, task="TextClassification")
>>>
>>> # Read token classification records from a datasets Dataset
>>> ds = datasets.Dataset.from_dict({
... "text": ["my example"],
... "tokens": [["my", "example"]],
... "prediction": [
... [{"label": "LABEL1", "start": 3, "end": 10}]
... ]
... })
>>> read_datasets(ds, task="TokenClassification")
>>>
>>> # Read text2text records from a datasets Dataset
>>> ds = datasets.Dataset.from_dict({
... "text": ["my example"],
... "prediction": [["mi ejemplo", "ejemplo mio"]]
... })
>>> # or
>>> ds = datasets.Dataset.from_dict({
... "text": ["my example"],
... "prediction": [[{"text": "mi ejemplo", "score": 0.9}]]
... })
>>> read_datasets(ds, task="Text2Text")
"""
if isinstance(task, str):
task = TaskType(task)
if task is TaskType.text_classification:
return DatasetForTextClassification.from_datasets(dataset, **kwargs)
if task is TaskType.token_classification:
return DatasetForTokenClassification.from_datasets(dataset, **kwargs)
if task is TaskType.text2text:
return DatasetForText2Text.from_datasets(dataset, **kwargs)
raise NotImplementedError("Reading a datasets Dataset is not implemented for the given task!")
def read_pandas(dataframe: pd.DataFrame, task: Union[str, TaskType]) -> Dataset:
"""Reads a pandas DataFrame and returns a argilla Dataset
Columns not supported by the :mod:`Record <argilla.client.models>` instance corresponding
with the task are ignored.
Args:
dataframe: Dataframe to be read in.
task: Task for the dataset, one of: ["TextClassification", "TokenClassification", "Text2Text"]
Returns:
        An argilla dataset for the given task.
Examples:
>>> # Read text classification records from a pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({
... "inputs": ["example"],
... "prediction": [
... [("LABEL1", 0.9), ("LABEL2", 0.1)]
... ]
... })
>>> read_pandas(df, task="TextClassification")
>>>
        >>> # Read token classification records from a pandas DataFrame
>>> df = pd.DataFrame({
... "text": ["my example"],
... "tokens": [["my", "example"]],
... "prediction": [
... [("LABEL1", 3, 10)]
... ]
... })
>>> read_pandas(df, task="TokenClassification")
>>>
        >>> # Read text2text records from a pandas DataFrame
>>> df = pd.DataFrame({
... "text": ["my example"],
... "prediction": [["mi ejemplo", "ejemplo mio"]]
... })
>>> # or
        >>> df = pd.DataFrame({
... "text": ["my example"],
... "prediction": [[("mi ejemplo", 0.9)]]
... })
>>> read_pandas(df, task="Text2Text")
"""
if isinstance(task, str):
task = TaskType(task)
if task is TaskType.text_classification:
return DatasetForTextClassification.from_pandas(dataframe)
if task is TaskType.token_classification:
return DatasetForTokenClassification.from_pandas(dataframe)
if task is TaskType.text2text:
return DatasetForText2Text.from_pandas(dataframe)
raise NotImplementedError("Reading a pandas DataFrame is not implemented for the given task!")
class WrongRecordTypeError(Exception):
pass
| [
"PLACEHOLDER: PLACEHOLDER",
", "
] |
2024-01-10 | Monacrylic/Interactive-Lab-Hub | Lab%203~peace_translater.py | import argparse
import queue
import sys
import sounddevice as sd
import json
import subprocess
from vosk import Model, KaldiRecognizer
import openai
openai.api_key = "YOUR API KEY (HIDDEN FOR COMMIT)"
q = queue.Queue()
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
def callback(indata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
if status:
pass
# print(status, file=sys.stderr)
q.put(bytes(indata))
def process_sentence(sentence):
return f'[Post Processed] {sentence}'
def translate_text(text):
#prompt = (f"Rewrite the following text to be very humble, polite, and politically correct:\n"
#f"text: {text}\n")
prompt = (f"You are a peace translater, I will provide you with a text containing angry ang negative sentiments. And you will translate that text to a positive, friendly, constructive, and polite tone without any swear words:\n"
f"text: {text}\n"
f"Your translation of this text: \n")
response = openai.Completion.create(
engine="text-davinci-002",
#engine="gpt-3.5-turbo",
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
# print(response.choices)
peaceful_text = response.choices[0].text.strip()
return peaceful_text
def speak_sentence(sentence):
subprocess.check_output(f'./speech-scripts/googletts_arg.sh "{sentence}"', shell=True, stderr=subprocess.PIPE, universal_newlines=True)
sentence = ''
device_info = sd.query_devices(None, "input")
# soundfile expects an int, sounddevice provides a float:
samplerate = int(device_info["default_samplerate"])
model = Model(lang="en-us")
with sd.RawInputStream(samplerate=samplerate, blocksize = 8000, device=1,
dtype="int16", channels=1, callback=callback):
print("#" * 80)
print("Press Ctrl+C to stop the recording")
print("#" * 80)
rec = KaldiRecognizer(model, samplerate)
while True:
data = q.get()
if rec.AcceptWaveform(data):
recognition = json.loads(rec.Result())["text"]
print(recognition)
# print(rec.Result())
processed_sentence = translate_text(recognition)
print(processed_sentence)
speak_sentence(processed_sentence)
else:
#print(rec.PartialResult())
pass
| [
"You are a peace translater, I will provide you with a text containing angry ang negative sentiments. And you will translate that text to a positive, friendly, constructive, and polite tone without any swear words:\ntext: PLACEHOLDER\nYour translation of this text: \n"
] |
2024-01-10 | prvnsingh/LLM-WebToGraph | src~services~cypher_qa.py | from langchain.chains import GraphCypherQAChain
from app.llm import Llm
from components.base_component import BaseComponent
from datalayer.Neo4jDumper import Neo4jDumper
class CypherQa(BaseComponent):
def __init__(self, model_name):
super().__init__('cypher_qa')
# instantiating the openai llm model and neo4j connection
self.neo4j_instance = Neo4jDumper(config_path='app/config.yml')
self.open_ai_llm = Llm(model=model_name)
# schema = utils.read_yaml_file('services/schemaN.yml')
# graph_schema = construct_schema(schema,[],[])
print(self.neo4j_instance.graph.schema)
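        # GraphCypherQAChain first asks the LLM to generate a Cypher query from the question
        # and the graph schema, runs that query against Neo4j, and then asks the LLM to phrase
        # the query results as a natural-language answer.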
self.cypher_chain = GraphCypherQAChain.from_llm(
cypher_llm=self.open_ai_llm.llm,
qa_llm=self.open_ai_llm.llm,
graph=self.neo4j_instance.graph,
# validate_cypher=True, # Validate relationship directions
verbose=True,
)
def run(self, text):
return self.cypher_chain.run(text)
| [] |
2024-01-10 | prvnsingh/LLM-WebToGraph | src~datalayer~KnowledgeGraph.py | from typing import List, Optional
from langchain.graphs.graph_document import (
Node as BaseNode,
Relationship as BaseRelationship
)
from langchain.pydantic_v1 import Field, BaseModel
class Property(BaseModel):
"""A single property consisting of key and value"""
key: str = Field(..., description="key")
value: str = Field(..., description="value")
class Node(BaseNode):
properties: Optional[List[Property]] = Field(
None, description="List of node properties")
class Relationship(BaseRelationship):
properties: Optional[List[Property]] = Field(
None, description="List of relationship properties"
)
class KnowledgeGraph(BaseModel):
"""Generate a knowledge graph with entities and relationships."""
nodes: List[Node] = Field(
..., description="List of nodes in the knowledge graph")
rels: List[Relationship] = Field(
..., description="List of relationships in the knowledge graph"
)
def format_property_key(s: str) -> str:
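    # Normalize a free-form property key to camelCase (e.g. "start date" -> "startDate") so
    # property names stay consistent across the generated graph.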
words = s.split()
if not words:
return s
first_word = words[0].lower()
capitalized_words = [word.capitalize() for word in words[1:]]
return "".join([first_word] + capitalized_words)
def props_to_dict(props) -> dict:
"""Convert properties to a dictionary."""
properties = {}
if not props:
return properties
for p in props:
properties[format_property_key(p.key)] = p.value
return properties
def map_to_base_node(node: Node) -> BaseNode:
"""Map the KnowledgeGraph Node to the base Node."""
properties = props_to_dict(node.properties) if node.properties else {}
# Add name property for better Cypher statement generation
properties["name"] = node.id.title()
return BaseNode(
id=node.id.title(), type=node.type.capitalize(), properties=properties
)
def map_to_base_relationship(rel: Relationship) -> BaseRelationship:
"""Map the KnowledgeGraph Relationship to the base Relationship."""
source = map_to_base_node(rel.source)
target = map_to_base_node(rel.target)
properties = props_to_dict(rel.properties) if rel.properties else {}
return BaseRelationship(
source=source, target=target, type=rel.type, properties=properties
)
| [] |
2024-01-10 | prvnsingh/LLM-WebToGraph | src~datalayer~Neo4jDumper.py | from langchain.graphs import Neo4jGraph
from langchain.graphs.graph_document import GraphDocument
from neo4j import GraphDatabase
from app import utils
from components.base_component import BaseComponent
from datalayer.KnowledgeGraph import map_to_base_node, map_to_base_relationship
class Neo4jDumper(BaseComponent):
def __init__(self, config_path):
super().__init__('Neo4jDumper')
config = utils.read_yaml_file(config_path)
self.uri = config.get('neo4j').get('uri')
self.username = config.get('neo4j').get('username')
self.password = config.get('neo4j').get('password')
self.graph = Neo4jGraph(
url=self.uri, username=self.username, password=self.password
)
def dump_data(self, tx, data):
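        # Note: `tx` is actually a neo4j Session here (see run() below), so each CREATE
        # statement is executed in its own auto-commit transaction.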
for key, value in data.items():
# Create a node for each key-value pair
tx.run(query="CREATE (n:Node {key: $key, value: $value})", key=key, value=value)
self.logger.info(f"Dumped data for {key}: {value} to neo4j")
def run(self, data):
try:
with GraphDatabase.driver(self.uri, auth=(self.username, self.password)) as driver:
with driver.session() as session:
self.dump_data(session, data)
self.logger.info("Neo4j database connected successfully. and data dumped successfully.")
except Exception as e:
self.logger.error(f"Error while connecting to neo4j: {str(e)}")
        # No explicit cleanup needed here: the `with` blocks above already close the session
        # and driver (a bare `finally: session.close()` would also raise a NameError if the
        # driver failed before the session was created).
# New implementation using graph document
def run2(self, data, document):
try:
graph = Neo4jGraph(
url=self.uri, username=self.username, password=self.password
)
# Construct a graph document
graph_document = GraphDocument(
nodes=[map_to_base_node(node) for node in data.nodes],
relationships=[map_to_base_relationship(rel) for rel in data.rels],
source=document
)
# Store information into a graph
graph.add_graph_documents([graph_document])
except Exception as e:
self.logger.error(f"Error while connecting to neo4j: {str(e)}")
| [] |
2024-01-10 | prvnsingh/LLM-WebToGraph | src~services~Identity_retrival_for_html.py | from typing import Union, List
from langchain.document_loaders import AsyncHtmlLoader
from langchain.document_transformers import BeautifulSoupTransformer
from app import utils
from app.llm import Llm
from components.base_component import BaseComponent
from datalayer.Neo4jDumper import Neo4jDumper
class NameIdentityRetrievalForHtml(BaseComponent):
def __init__(self, model_name, data_path):
"""
The __init__ function is called when the class is instantiated.
It sets up the initial values of all attributes for an instance of a class.
The self parameter refers to the current instance of a class, and it's required by Python.
:param self: Represent the instance of the class
:param model_name: Specify the model name that we want to use for our predictions
:param data_path: Read the yaml file which contains the links to be scraped
:return: Nothing
"""
super().__init__('NameIdentityRetrievalForHtml')
self.sources = utils.read_yaml_file(data_path)
self.html_sources = self.sources.get('link', [])
# instantiating the openai llm model and neo4j connection
self.neo4j_instance = Neo4jDumper(config_path='app/config.yml')
self.open_ai_llm = Llm(model=model_name)
def run_async(self, **kwargs):
"""
The run_async function is used to run the pipeline asynchronously.
        It takes in a list of HTML sources and extracts a knowledge graph from each of them using the OpenAI API.
        The extracted knowledge graph is then dumped into the Neo4j database.
        :param self: Represent the instance of the object itself
        :param **kwargs: Pass a variable number of keyword arguments to a function
        :return: None; each extracted knowledge graph is written to Neo4j
"""
for link in self.html_sources:
            loader = AsyncHtmlLoader(link)
html = loader.load()
# html = loader.load()
bs_transformer = BeautifulSoupTransformer()
docs_transformed = bs_transformer.transform_documents(html, tags_to_extract=["table"])
self.logger.info(docs_transformed[0].page_content[0:500])
# setting up openai model and extracting knowledge graph
self.logger.info(f'loading model {self.open_ai_llm}')
            # only send the tail of the page content because the free-tier OpenAI API has a limited token budget.
# model should be changed to claude2 (Anthropic) or premium openai api key should be used.
# response = self.open_ai_llm.extract_and_store_graph(document=docs_transformed[0])
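            # NOTE: this keeps only the last 4 characters of the page content; a larger window
            # (e.g. the last few thousand characters) was presumably intended to stay under the
            # model's token limit.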
tokens_cap = len(docs_transformed[0].page_content) - 4
response = self.open_ai_llm.run(input_text=docs_transformed[0].page_content[tokens_cap:])
# instantiating neo4jBD and dumping the knowledge graph
self.neo4j_instance.run(data=response)
self.logger.info(f'knowledge graph populated successfully for data source: {link}')
def run(self, input: Union[str, List[float]]) -> str:
pass
| [] |
2024-01-10 | prvnsingh/LLM-WebToGraph | src~services~Identity_retrival_for_csv.py | from langchain.document_loaders.csv_loader import CSVLoader
from app import utils
from app.llm import Llm
from components.base_component import BaseComponent
from datalayer.Neo4jDumper import Neo4jDumper
class NameIdentityRetrievalForCsv(BaseComponent):
def __init__(self, model_name, data_path):
"""
The __init__ function is called when the class is instantiated.
It sets up the instance of the class, and defines all its attributes.
The self parameter refers to an instance of a class, and it's required in order for Python to know which object you're referring to.
:param self: Represent the instance of the class
:param model_name: Instantiate the openai llm model
:param data_path: Read the yaml file which contains the path to all csv files
:return: The instance of the class
"""
super().__init__('NameIdentityRetrievalForCsv')
self.sources = utils.read_yaml_file(data_path)
self.csv_sources = self.sources.get('csv', [])
# instantiating the openai llm model and neo4j connection
self.neo4j_instance = Neo4jDumper(config_path='app/config.yml')
self.open_ai_llm = Llm(model=model_name)
def run(self, **kwargs):
"""
        The run function is the main entry point of this module. It reads each CSV file, extracts a knowledge graph from it using the OpenAI API,
        and dumps the resulting knowledge graph into the Neo4j database.
        :param self: Represent the instance of the class
        :return: None; the extracted knowledge graph is written to Neo4j
"""
for csvfile in self.csv_sources:
# loading the csv using langchain document loader for csv
loader = CSVLoader(file_path=csvfile)
data = loader.load()
# setting up openai model and extracting knowledge graph
self.logger.info(f'loading model {self.open_ai_llm}')
            # only send the last few lines of the CSV because the free-tier OpenAI API has a limited token budget.
# model should be changed to claude2 (Anthropic) or premium openai api key should be used.
# response = self.open_ai_llm.extract_and_store_graph(document=data[-1])
response = self.open_ai_llm.run(input_text=data[-1])
# instantiating neo4jBD and dumping the knowledge graph
self.neo4j_instance.run(data=response)
self.logger.info(f'knowledge graph populated successfully for data source: {csvfile}')
| [] |
2024-01-10 | prvnsingh/LLM-WebToGraph | src~app~llm_prompter.py | import os
from typing import List, Optional
import backoff
import openai # for OpenAI API calls
from dotenv import load_dotenv
from langchain.chains.openai_functions import create_structured_output_chain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import Document
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
from app import utils
from components.base_component import BaseComponent
from datalayer.KnowledgeGraph import KnowledgeGraph
load_dotenv()
def get_schema():
"""
The get_schema function reads the schema.yml file and returns a dictionary of the schema.
:return: The schema
:doc-author: Trelent
"""
schema = utils.read_yaml_file('services/schema.yml')
return schema
class LlmPrompter(BaseComponent):
def __init__(self, model: str):
super().__init__('LlmPrompter')
self.model = model
# for huggingface hub models
# self.llm = HuggingFaceHub(repo_id='ValiantLabs/ShiningValiant', task='text-generation',
# huggingfacehub_api_token=os.getenv('HF_AUTH_TOKEN'),
# model_kwargs={"temperature": 0, "max_length": 64})
self.llm = ChatOpenAI(temperature=0, model_name=model, openai_api_key=os.getenv('OPENAI_API_KEY'))
@backoff.on_exception(backoff.expo, openai.error.RateLimitError)
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def run(self, document: Document,
nodes: Optional[List[str]] = None,
rels: Optional[List[str]] = None):
return self.extract_and_store_graph(document, nodes, rels)
def get_extraction_chain(self,
allowed_nodes: Optional[List[str]] = None,
allowed_rels: Optional[List[str]] = None
):
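        # Build a chat prompt that instructs the model to emit entities and relationships in
        # the KnowledgeGraph schema; create_structured_output_chain enforces that schema
        # through OpenAI function calling.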
prompt = ChatPromptTemplate.from_messages(
[(
"system",
f"""# Knowledge Graph Instructions for GPT-4
## 1. Overview
You are a top-tier algorithm designed for extracting information in structured formats to build a knowledge graph.
- **Nodes** represent entities and concepts. They're akin to largest infrastructure projects nodes.
- The aim is to achieve simplicity and clarity in the knowledge graph, making it accessible for a vast audience.
## 2. Labeling Nodes
- **Consistency**: Ensure you use basic or elementary types for node labels.
- For example, when you identify an entity representing a person, always label it as **"person"**. Avoid using more specific terms like "mathematician" or "scientist".
- **Node IDs**: Never utilize integers as node IDs. Node IDs should be names or human-readable identifiers found in the text.
{'- **Allowed Node Labels:**' + ", ".join(allowed_nodes) if allowed_nodes else ""}
{'- **Allowed Relationship Types**:' + ", ".join(allowed_rels) if allowed_rels else ""}
## 3. Handling Numerical Data and Dates
- Numerical data, like age or other related information, should be incorporated as attributes or properties of the respective nodes.
- **No Separate Nodes for Dates/Numbers**: Do not create separate nodes for dates or numerical values. Always attach them as attributes or properties of nodes.
- **Property Format**: Properties must be in a key-value format.
- **Quotation Marks**: Never use escaped single or double quotes within property values.
## 4. Coreference Resolution
- **Maintain Entity Consistency**: When extracting entities, it's vital to ensure consistency.
If an entity, such as "John Doe", is mentioned multiple times in the text but is referred to by different names or pronouns (e.g., "Joe", "he"),
always use the most complete identifier for that entity throughout the knowledge graph. In this example, use "John Doe" as the entity ID.
Remember, the knowledge graph should be coherent and easily understandable, so maintaining consistency in entity references is crucial.
## 5. Strict Compliance
Adhere to the rules strictly. Non-compliance will result in termination.
"""),
("human", "Use the given format to extract information from the following input: {input}"),
("human", "Tip: Make sure to answer in the correct format"),
])
return create_structured_output_chain(KnowledgeGraph, self.llm, prompt, verbose=False)
def extract_and_store_graph(self,
document: Document,
nodes: Optional[List[str]] = None,
rels: Optional[List[str]] = None):
# Extract graph data using OpenAI functions
extract_chain = self.get_extraction_chain(nodes, rels)
data = extract_chain.run(document.page_content)
return data
| [
"- **Allowed Node Labels:**",
"human",
"- **Allowed Relationship Types**:",
"Use the given format to extract information from the following input: {input}",
"Tip: Make sure to answer in the correct format",
", "
] |
2024-01-10 | sdiazpier/tvb-framework | tvb~adapters~analyzers~node_coherence_adapter.py | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Adapter that uses the traits module to generate interfaces for the NodeCoherence Analyzer.
.. moduleauthor:: Stuart A. Knock <[email protected]>
.. moduleauthor:: Lia Domide <[email protected]>
"""
import numpy
from tvb.analyzers.node_coherence import NodeCoherence
from tvb.core.adapters.abcadapter import ABCAsynchronous
from tvb.datatypes.time_series import TimeSeries
from tvb.datatypes.spectral import CoherenceSpectrum
from tvb.basic.traits.util import log_debug_array
from tvb.basic.filters.chain import FilterChain
from tvb.basic.logger.builder import get_logger
LOG = get_logger(__name__)
class NodeCoherenceAdapter(ABCAsynchronous):
""" TVB adapter for calling the NodeCoherence algorithm. """
_ui_name = "Cross coherence of nodes"
_ui_description = "Compute Node Coherence for a TimeSeries input DataType."
_ui_subsection = "coherence"
def get_input_tree(self):
"""
Return a list of lists describing the interface to the analyzer. This
is used by the GUI to generate the menus and fields necessary for
defining a simulation.
"""
algorithm = NodeCoherence()
algorithm.trait.bound = self.INTERFACE_ATTRIBUTES_ONLY
tree = algorithm.interface[self.INTERFACE_ATTRIBUTES]
for node in tree:
if node['name'] == 'time_series':
node['conditions'] = FilterChain(fields=[FilterChain.datatype + '._nr_dimensions'],
operations=["=="], values=[4])
return tree
def get_output(self):
return [CoherenceSpectrum]
def configure(self, time_series, nfft=None):
"""
Store the input shape to be later used to estimate memory usage.
Also create the algorithm instance.
"""
self.input_shape = time_series.read_data_shape()
log_debug_array(LOG, time_series, "time_series")
##-------------------- Fill Algorithm for Analysis -------------------##
self.algorithm = NodeCoherence()
if nfft is not None:
self.algorithm.nfft = nfft
def get_required_memory_size(self, **kwargs):
"""
Return the required memory to run this algorithm.
"""
used_shape = (self.input_shape[0], 1, self.input_shape[2], self.input_shape[3])
input_size = numpy.prod(used_shape) * 8.0
output_size = self.algorithm.result_size(used_shape)
return input_size + output_size
def get_required_disk_size(self, **kwargs):
"""
Returns the required disk size to be able to run the adapter (in kB).
"""
used_shape = (self.input_shape[0], 1, self.input_shape[2], self.input_shape[3])
return self.array_size2kb(self.algorithm.result_size(used_shape))
def launch(self, time_series, nfft=None):
"""
Launch algorithm and build results.
"""
##--------- Prepare a CoherenceSpectrum object for result ------------##
coherence = CoherenceSpectrum(source=time_series,
nfft=self.algorithm.nfft,
storage_path=self.storage_path)
##------------- NOTE: Assumes 4D, Simulator timeSeries. --------------##
node_slice = [slice(self.input_shape[0]), None, slice(self.input_shape[2]), slice(self.input_shape[3])]
##---------- Iterate over slices and compose final result ------------##
small_ts = TimeSeries(use_storage=False)
small_ts.sample_rate = time_series.sample_rate
partial_coh = None
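        # Iterate over state variables one at a time to bound memory usage, writing each
        # partial coherence result into the CoherenceSpectrum as it is produced.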
for var in range(self.input_shape[1]):
node_slice[1] = slice(var, var + 1)
small_ts.data = time_series.read_data_slice(tuple(node_slice))
self.algorithm.time_series = small_ts
partial_coh = self.algorithm.evaluate()
coherence.write_data_slice(partial_coh)
coherence.frequency = partial_coh.frequency
coherence.close_file()
return coherence
| [] |
2024-01-10 | sdiazpier/tvb-framework | tvb~tests~framework~datatypes~datatypes_factory.py | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
This module contains methods for creating persisted data-types for tests.
.. moduleauthor:: Calin Pavel <[email protected]>
"""
import json
import numpy
import time
from tvb.config import SIMULATOR_MODULE, SIMULATOR_CLASS
from tvb.core.entities import model
from tvb.core.entities.storage import dao
from tvb.core.entities.file.files_helper import FilesHelper
from tvb.core.entities.transient.structure_entities import DataTypeMetaData
from tvb.core.adapters.abcadapter import ABCAdapter
from tvb.core.services.flow_service import FlowService
from tvb.core.services.project_service import ProjectService
from tvb.core.services.operation_service import OperationService
from tvb.datatypes.connectivity import Connectivity
from tvb.datatypes.surfaces import CorticalSurface
from tvb.datatypes.region_mapping import RegionMapping
from tvb.datatypes.time_series import TimeSeries, TimeSeriesEEG, TimeSeriesRegion
from tvb.datatypes.graph import Covariance, ConnectivityMeasure
from tvb.datatypes.spectral import CoherenceSpectrum
from tvb.datatypes.temporal_correlations import CrossCorrelation
from tvb.datatypes.mode_decompositions import IndependentComponents
from tvb.datatypes.mapped_values import DatatypeMeasure
from tvb.tests.framework.datatypes.datatype1 import Datatype1
from tvb.tests.framework.datatypes.datatype2 import Datatype2
from tvb.tests.framework.adapters.storeadapter import StoreAdapter
class DatatypesFactory():
"""
This class provides a set of methods that helps user to create
different data types for testing.
These data types will be automatically stored in DB and file system if needed.
"""
USER_FULL_NAME = "Datatype Factory User"
DATATYPE_STATE = "RAW_DATA"
DATATYPE_DATA = ["test", "for", "datatypes", "factory"]
DATATYPE_MEASURE_METRIC = {'v': 3}
RANGE_1 = ["row1", [1, 2, 3]]
RANGE_2 = ["row2", [0.1, 0.3, 0.5]]
user = None
project = None
operation = None
def __init__(self):
micro_postfix = "_%d" % int(time.time() * 1000000)
# Here create all structures needed later for data types creation
self.files_helper = FilesHelper()
# First create user
user = model.User("datatype_factory_user" + micro_postfix, "test_pass",
"[email protected]" + micro_postfix, True, "user")
self.user = dao.store_entity(user)
# Now create a project
project_service = ProjectService()
data = dict(name='DatatypesFactoryProject' + micro_postfix, description='test_desc', users=[])
self.project = project_service.store_project(self.user, True, None, **data)
# Create algorithm
alg_category = model.AlgorithmCategory('one', True)
dao.store_entity(alg_category)
alg_group = model.AlgorithmGroup("test_module1", "classname1", alg_category.id)
dao.store_entity(alg_group)
algorithm = model.Algorithm(alg_group.id, 'id', name='', req_data='', param_name='', output='')
self.algorithm = dao.store_entity(algorithm)
#Create an operation
self.meta = {DataTypeMetaData.KEY_SUBJECT: self.USER_FULL_NAME,
DataTypeMetaData.KEY_STATE: self.DATATYPE_STATE}
operation = model.Operation(self.user.id, self.project.id, self.algorithm.id, 'test parameters',
meta=json.dumps(self.meta), status=model.STATUS_FINISHED,
method_name=ABCAdapter.LAUNCH_METHOD)
self.operation = dao.store_entity(operation)
def get_project(self):
"""
Return project to which generated data types are assigned
"""
return self.project
def get_operation(self):
"""
Return operation to which generated data types are assigned
"""
return self.operation
def get_user(self):
"""
Return user to which generated data types are assigned
"""
return self.user
def _store_datatype(self, data_type, operation_id=None):
"""
Launch adapter to store a create a persistent DataType.
"""
operation_id = operation_id or self.operation.id
data_type.type = data_type.__class__.__name__
data_type.module = data_type.__class__.__module__
data_type.subject = self.USER_FULL_NAME
data_type.state = self.DATATYPE_STATE
data_type.set_operation_id(operation_id)
adapter_instance = StoreAdapter([data_type])
operation = dao.get_operation_by_id(operation_id)
OperationService().initiate_prelaunch(operation, adapter_instance, {})
return data_type
def create_simple_datatype(self, subject=USER_FULL_NAME, state=DATATYPE_STATE):
"""
This method creates a simple data type
"""
datatype_inst = Datatype1()
self._fill_datatype(datatype_inst, subject, state)
# Store data type
return self._store_datatype(datatype_inst)
def create_datatype_with_storage(self, subject=USER_FULL_NAME, state=DATATYPE_STATE,
data=DATATYPE_DATA, operation_id=None):
"""
This method creates and stores a data type which imply storage on the file system.
"""
datatype_inst = Datatype2()
self._fill_datatype(datatype_inst, subject, state, operation_id)
datatype_inst.string_data = data
return self._store_datatype(datatype_inst, operation_id)
def _fill_datatype(self, datatype, subject, state, operation_id=None):
"""
This method sets some common attributes on dataType
"""
operation_id = operation_id or self.operation.id
datatype.subject = subject
datatype.state = state
# Set_operation_id also sets storage_path attribute
datatype.set_operation_id(operation_id)
def __create_operation(self):
"""
        Create an operation entity. Return the operation, algo_id and the storage path.
"""
meta = {DataTypeMetaData.KEY_SUBJECT: "John Doe", DataTypeMetaData.KEY_STATE: "RAW_DATA"}
algorithm, algo_group = FlowService().get_algorithm_by_module_and_class(SIMULATOR_MODULE, SIMULATOR_CLASS)
operation = model.Operation(self.user.id, self.project.id, algo_group.id, json.dumps(''), meta=json.dumps(meta),
status=model.STATUS_STARTED, method_name=ABCAdapter.LAUNCH_METHOD)
operation = dao.store_entity(operation)
storage_path = FilesHelper().get_project_folder(self.project, str(operation.id))
return operation, algorithm.id, storage_path
def create_connectivity(self):
"""
Create a connectivity that will be used in "non-dummy" burst launches (with the actual simulator).
"""
operation, algo_id, storage_path = self.__create_operation()
connectivity = Connectivity(storage_path=storage_path)
connectivity.weights = numpy.ones((74, 74))
connectivity.centres = numpy.ones((74, 3))
adapter_instance = StoreAdapter([connectivity])
OperationService().initiate_prelaunch(operation, adapter_instance, {})
return algo_id, connectivity
def create_timeseries(self, connectivity, ts_type=None, sensors=None):
"""
Create a stored TimeSeries entity.
"""
operation, _, storage_path = self.__create_operation()
if ts_type == "EEG":
time_series = TimeSeriesEEG(storage_path=storage_path, sensors=sensors)
else:
rm = dao.get_generic_entity(RegionMapping, connectivity.gid, '_connectivity')
if len(rm) < 1:
rm = None
else:
rm = rm[0]
time_series = TimeSeriesRegion(storage_path=storage_path, connectivity=connectivity, region_mapping=rm)
data = numpy.random.random((10, 10, 10, 10))
time_series.write_data_slice(data)
time_series.write_time_slice(numpy.arange(10))
adapter_instance = StoreAdapter([time_series])
OperationService().initiate_prelaunch(operation, adapter_instance, {})
time_series = dao.get_datatype_by_gid(time_series.gid)
return time_series
def create_covariance(self, time_series):
"""
:returns: a stored DataType Covariance.
"""
operation, _, storage_path = self.__create_operation()
covariance = Covariance(storage_path=storage_path, source=time_series)
covariance.write_data_slice(numpy.random.random((10, 10, 10)))
adapter_instance = StoreAdapter([covariance])
OperationService().initiate_prelaunch(operation, adapter_instance, {})
return covariance
def create_crosscoherence(self, time_series):
"""
:returns: a stored entity of type CoherenceSpectrum
"""
operation, _, storage_path = self.__create_operation()
partial_coh = CoherenceSpectrum(array_data=numpy.random.random((10, 10, 10, 10)), use_storage=False)
coherence = CoherenceSpectrum(source=time_series, storage_path=storage_path, frequency=0.1, nfft=256)
coherence.write_data_slice(partial_coh)
coherence.close_file()
adapter_instance = StoreAdapter([coherence])
OperationService().initiate_prelaunch(operation, adapter_instance, {})
return coherence
def create_crosscorrelation(self, time_series):
"""
:returns: `CrossCorrelation` stored entity.
"""
operation, _, storage_path = self.__create_operation()
partial_corr = CrossCorrelation(array_data=numpy.random.random((10, 10, 10, 10, 10)), use_storage=False)
crossc = CrossCorrelation(source=time_series, storage_path=storage_path, time=range(10))
crossc.write_data_slice(partial_corr)
crossc.close_file()
adapter_instance = StoreAdapter([crossc])
OperationService().initiate_prelaunch(operation, adapter_instance, {})
return crossc
def create_surface(self):
"""
Create a dummy surface entity.
:returns: (Algorithm Identifier, stored Surface entity)
"""
operation, algo_id, storage_path = self.__create_operation()
surface = CorticalSurface(storage_path=storage_path)
surface.vertices = numpy.array([[-10, 0, 0],
[0, 0, -10],
[10, 0, 0],
[0, 10, 0]], dtype=float)
surface.triangles = numpy.array([[0, 1, 2],
[0, 1, 3],
[1, 2, 3],
[0, 2, 3]], dtype=int)
surface.number_of_triangles = 4
surface.number_of_vertices = 4
surface.triangle_normals = numpy.ones((4, 3))
surface.vertex_normals = numpy.ones((4, 3))
surface.zero_based_triangles = True
surface.validate()
adapter_instance = StoreAdapter([surface])
OperationService().initiate_prelaunch(operation, adapter_instance, {})
return algo_id, surface
def create_connectivity_measure(self, connectivity):
"""
:returns: persisted entity ConnectivityMeasure
"""
operation, _, storage_path = self.__create_operation()
conn_measure = ConnectivityMeasure(storage_path=storage_path)
conn_measure.connectivity = connectivity
adapter_instance = StoreAdapter([conn_measure])
OperationService().initiate_prelaunch(operation, adapter_instance, {})
return conn_measure
def create_datatype_measure(self, analyzed_entity, operation=None, storage_path=None):
"""
:return: persisted DatatypeMeasure
"""
if operation is None:
operation, _, storage_path = self.__create_operation()
measure = DatatypeMeasure(storage_path=storage_path, metrics=self.DATATYPE_MEASURE_METRIC)
measure.analyzed_datatype = analyzed_entity
adapter_instance = StoreAdapter([measure])
OperationService().initiate_prelaunch(operation, adapter_instance, {})
return measure
def create_ICA(self, timeseries):
"""
:returns: persisted entity IndependentComponents
"""
operation, _, storage_path = self.__create_operation()
partial_ts = TimeSeries(use_storage=False)
partial_ts.data = numpy.random.random((10, 10, 10, 10))
partial_ica = IndependentComponents(source=partial_ts,
component_time_series=numpy.random.random((10, 10, 10, 10)),
prewhitening_matrix=numpy.random.random((10, 10, 10, 10)),
unmixing_matrix=numpy.random.random((10, 10, 10, 10)),
n_components=10, use_storage=False)
ica = IndependentComponents(source=timeseries, n_components=10, storage_path=storage_path)
ica.write_data_slice(partial_ica)
adapter_instance = StoreAdapter([ica])
OperationService().initiate_prelaunch(operation, adapter_instance, {})
return ica
def create_datatype_group(self, subject=USER_FULL_NAME, state=DATATYPE_STATE, ):
"""
This method creates, stores and returns a DataTypeGroup entity.
"""
group = model.OperationGroup(self.project.id, ranges=[json.dumps(self.RANGE_1), json.dumps(self.RANGE_2)])
group = dao.store_entity(group)
group_ms = model.OperationGroup(self.project.id, ranges=[json.dumps(self.RANGE_1), json.dumps(self.RANGE_2)])
group_ms = dao.store_entity(group_ms)
datatype_group = model.DataTypeGroup(group, subject=subject, state=state, operation_id=self.operation.id)
# Set storage path, before setting data
datatype_group.storage_path = self.files_helper.get_project_folder(self.project, str(self.operation.id))
datatype_group = dao.store_entity(datatype_group)
dt_group_ms = model.DataTypeGroup(group_ms, subject=subject, state=state, operation_id=self.operation.id)
# Set storage path, before setting data
dt_group_ms.storage_path = self.files_helper.get_project_folder(self.project, str(self.operation.id))
dao.store_entity(dt_group_ms)
# Now create some data types and add them to group
for range_val1 in self.RANGE_1[1]:
for range_val2 in self.RANGE_2[1]:
operation = model.Operation(self.user.id, self.project.id, self.algorithm.id, 'test parameters',
meta=json.dumps(self.meta), status=model.STATUS_FINISHED,
method_name=ABCAdapter.LAUNCH_METHOD,
range_values=json.dumps({self.RANGE_1[0]: range_val1,
self.RANGE_2[0]: range_val2}))
operation.fk_operation_group = group.id
operation = dao.store_entity(operation)
datatype = self.create_datatype_with_storage(operation_id=operation.id)
datatype.number1 = range_val1
datatype.number2 = range_val2
datatype.fk_datatype_group = datatype_group.id
datatype.set_operation_id(operation.id)
dao.store_entity(datatype)
op_ms = model.Operation(self.user.id, self.project.id, self.algorithm.id, 'test parameters',
meta=json.dumps(self.meta), status=model.STATUS_FINISHED,
method_name=ABCAdapter.LAUNCH_METHOD,
range_values=json.dumps({self.RANGE_1[0]: range_val1,
self.RANGE_2[0]: range_val2}))
op_ms.fk_operation_group = group_ms.id
op_ms = dao.store_entity(op_ms)
self.create_datatype_measure(datatype, op_ms,
FilesHelper().get_project_folder(self.project, str(op_ms.id)))
return datatype_group | [] |
2024-01-10 | CFOCST/azure-open-ai-embeddings-qna-test-CF | demo~helper.py | import os
import openai
from dotenv import load_dotenv
from openai.embeddings_utils import get_embedding
from tenacity import retry, wait_random_exponential, stop_after_attempt
import pandas as pd
import numpy as np
from redis.commands.search.query import Query
from redis import Redis
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_type = 'azure'
openai.api_version = '2022-12-01'
completion_model = os.getenv("OPENAI_ENGINES").split(',')[0]
embedding_model = os.getenv("OPENAI_EMBEDDINGS_ENGINE_DOC")
question_prompt = os.getenv("QUESTION_PROMPT").replace(r'\n', '\n')
number_of_embeddings_for_qna = int(os.getenv("NUMBER_OF_EMBEDDINGS_FOR_QNA", 1))
redis_conn = Redis(host=os.getenv('REDIS_ADDRESS'), port=int(os.environ.get('REDIS_PORT','6379')), password=os.getenv('REDIS_PASSWORD'))
index_name = "embeddings-index"
prompt_index_name = "prompt-index"
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_embedding(text) -> list[float]:
text = text.replace("\n", " ")
return openai.Embedding.create(input=text, engine=embedding_model)["data"][0]["embedding"]
def find_matching_vectors_in_redis(np_vector:np.array, return_fields: list=[], search_type: str="KNN", number_of_results: int=20, vector_field_name: str="embeddings"):
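    # Build a RediSearch vector-similarity query: KNN over the given vector field, returning
    # the top `number_of_results` documents ordered by their vector distance (exposed as the
    # computed `vector_score` field).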
base_query = f'*=>[{search_type} {number_of_results} @{vector_field_name} $vec_param AS vector_score]'
query = Query(base_query)\
.sort_by("vector_score")\
.paging(0, number_of_results)\
.return_fields(*return_fields)\
.dialect(2)
params_dict = {"vec_param": np_vector.astype(dtype=np.float32).tobytes()}
results = redis_conn.ft(index_name).search(query, params_dict)
return pd.DataFrame(list(map(lambda x: {'id' : x.id, 'text': x.text, 'filename': x.filename, 'vector_score': x.vector_score}, results.docs)))
def search_semantic_redis(search_query, pprint=True):
embedding = get_embedding(search_query)
res = find_matching_vectors_in_redis(np.array(embedding))
if pprint:
        for r in res['text']:
print(r[:200])
print()
return res.reset_index()
def get_semantic_answer(question):
# question += "\n"
res = search_semantic_redis(question, pprint=False)
if len(res) == 0:
return None, "No vectors matched, try a different question."
res_text = "\n".join(res['text'][0:number_of_embeddings_for_qna])
prompt = question_prompt.replace("_QUESTION_", question)
prompt = f"{res_text}\n\n{prompt}"
response = openai.Completion.create(
engine=completion_model,
prompt=prompt,
temperature=0.0,
max_tokens=5000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=None
)
print(prompt)
response = response['choices'][0]['text'].strip()
print(f"{response}\n\n\n")
return response, prompt
| [
"QUESTION_PROMPT",
"\n",
"_QUESTION_",
"prompt-index",
"PLACEHOLDER\n\nPLACEHOLDER\n\npromptcbbd2958-6c91-474a-ab5e-38b04b8cde2f",
"\\n",
"PLACEHOLDER\n\npromptf58593f3-c43e-4b94-9640-3b70f26e922d"
] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~web~api~agent~helpers.py | from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
ServiceUnavailableError,
)
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.web.api.errors import OpenAIError
T = TypeVar("T")
def parse_with_handling(parser: BaseOutputParser[T], completion: str) -> T:
try:
return parser.parse(completion)
except OutputParserException as e:
raise OpenAIError(
e, "There was an issue parsing the response from the AI model."
)
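# Await `func`, translating OpenAI client errors into user-facing OpenAIError messages.
# Errors are only logged when the platform's own key is in use (no custom API key).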
async def openai_error_handler(
func: Callable[..., Any], *args: Any, settings: ModelSettings, **kwargs: Any
) -> Any:
try:
return await func(*args, **kwargs)
except ServiceUnavailableError as e:
raise OpenAIError(
e,
"OpenAI is experiencing issues. Visit "
"https://status.openai.com/ for more info.",
should_log=not settings.custom_api_key,
)
except InvalidRequestError as e:
if e.user_message.startswith("The model:"):
raise OpenAIError(
e,
f"Your API key does not have access to your current model. Please use a different model.",
should_log=not settings.custom_api_key,
)
raise OpenAIError(e, e.user_message)
except AuthenticationError as e:
raise OpenAIError(
e,
"Authentication error: Ensure a valid API key is being used.",
should_log=not settings.custom_api_key,
)
except RateLimitError as e:
if e.user_message.startswith("You exceeded your current quota"):
raise OpenAIError(
e,
f"Your API key exceeded your current quota, please check your plan and billing details.",
should_log=not settings.custom_api_key,
)
raise OpenAIError(e, e.user_message)
except Exception as e:
raise OpenAIError(
e, "There was an unexpected issue getting a response from the AI model."
)
async def call_model_with_handling(
model: BaseChatModel,
prompt: BasePromptTemplate,
args: Dict[str, str],
settings: ModelSettings,
**kwargs: Any,
) -> str:
chain = LLMChain(llm=model, prompt=prompt)
return await openai_error_handler(chain.arun, args, settings=settings, **kwargs)
| [] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~web~api~agent~task_output_parser.py | import ast
import re
from typing import List
from langchain.schema import BaseOutputParser, OutputParserException
class TaskOutputParser(BaseOutputParser[List[str]]):
"""
Extension of LangChain's BaseOutputParser
Responsible for parsing task creation output into a list of task strings
"""
completed_tasks: List[str] = []
def __init__(self, *, completed_tasks: List[str]):
super().__init__()
self.completed_tasks = completed_tasks
def parse(self, text: str) -> List[str]:
try:
array_str = extract_array(text)
all_tasks = [
remove_prefix(task) for task in array_str if real_tasks_filter(task)
]
return [task for task in all_tasks if task not in self.completed_tasks]
except Exception as e:
msg = f"Failed to parse tasks from completion '{text}'. Exception: {e}"
raise OutputParserException(msg)
def get_format_instructions(self) -> str:
return """
The response should be a JSON array of strings. Example:
["Search the web for NBA news", "Write some code to build a web scraper"]
This should be parsable by json.loads()
"""
def extract_array(input_str: str) -> List[str]:
regex = (
r"\[\s*\]|" # Empty array check
r"(\[(?:\s*(?:\"(?:[^\"\\]|\\.)*\"|\'(?:[^\'\\]|\\.)*\')\s*,?)*\s*\])"
)
match = re.search(regex, input_str)
if match is not None:
return ast.literal_eval(match[0])
else:
return handle_multiline_string(input_str)
def handle_multiline_string(input_str: str) -> List[str]:
# Handle multiline string as a list
processed_lines = [
re.sub(r".*?(\d+\..+)", r"\1", line).strip()
for line in input_str.split("\n")
if line.strip() != ""
]
# Check if there is at least one line that starts with a digit and a period
if any(re.match(r"\d+\..+", line) for line in processed_lines):
return processed_lines
else:
raise RuntimeError(f"Failed to extract array from {input_str}")
def remove_prefix(input_str: str) -> str:
prefix_pattern = (
r"^(Task\s*\d*\.\s*|Task\s*\d*[-:]?\s*|Step\s*\d*["
r"-:]?\s*|Step\s*[-:]?\s*|\d+\.\s*|\d+\s*[-:]?\s*|^\.\s*|^\.*)"
)
return re.sub(prefix_pattern, "", input_str, flags=re.IGNORECASE)
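# Filter out completions that merely state no further tasks are needed, the task
# is complete, or that nothing should be done.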
def real_tasks_filter(input_str: str) -> bool:
no_task_regex = (
r"^No( (new|further|additional|extra|other))? tasks? (is )?("
r"required|needed|added|created|inputted).*"
)
task_complete_regex = r"^Task (complete|completed|finished|done|over|success).*"
do_nothing_regex = r"^(\s*|Do nothing(\s.*)?)$"
return (
not re.search(no_task_regex, input_str, re.IGNORECASE)
and not re.search(task_complete_regex, input_str, re.IGNORECASE)
and not re.search(do_nothing_regex, input_str, re.IGNORECASE)
)
| [] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~web~api~agent~tools~reason.py | from typing import Any
from fastapi.responses import StreamingResponse as FastAPIStreamingResponse
from lanarky.responses import StreamingResponse
from langchain import LLMChain
from reworkd_platform.web.api.agent.tools.tool import Tool
class Reason(Tool):
description = (
"Reason about task via existing information or understanding. "
"Make decisions / selections from options."
)
async def call(
self, goal: str, task: str, input_str: str, *args: Any, **kwargs: Any
) -> FastAPIStreamingResponse:
from reworkd_platform.web.api.agent.prompts import execute_task_prompt
chain = LLMChain(llm=self.model, prompt=execute_task_prompt)
return StreamingResponse.from_chain(
chain,
{"goal": goal, "language": self.language, "task": task},
media_type="text/event-stream",
)
| [] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~services~pinecone~pinecone.py | from __future__ import annotations
import uuid
from typing import Any, Dict, List
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from pinecone import Index  # import doesn't work on plane wifi
from pydantic import BaseModel
from reworkd_platform.settings import settings
from reworkd_platform.timer import timed_function
from reworkd_platform.web.api.memory.memory import AgentMemory
OPENAI_EMBEDDING_DIM = 1536
class Row(BaseModel):
id: str
values: List[float]
metadata: Dict[str, Any] = {}
class QueryResult(BaseModel):
id: str
score: float
metadata: Dict[str, Any] = {}
class PineconeMemory(AgentMemory):
"""
Wrapper around pinecone
"""
def __init__(self, index_name: str, namespace: str = ""):
self.index = Index(settings.pinecone_index_name)
self.namespace = namespace or index_name
@timed_function(level="DEBUG")
def __enter__(self) -> AgentMemory:
self.embeddings: Embeddings = OpenAIEmbeddings(
client=None, # Meta private value but mypy will complain its missing
openai_api_key=settings.openai_api_key,
)
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
pass
@timed_function(level="DEBUG")
def reset_class(self) -> None:
self.index.delete(delete_all=True, namespace=self.namespace)
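    # Embed each task string, upsert the vectors into this namespace, and return
    # the generated row ids.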
@timed_function(level="DEBUG")
def add_tasks(self, tasks: List[str]) -> List[str]:
if len(tasks) == 0:
return []
embeds = self.embeddings.embed_documents(tasks)
if len(tasks) != len(embeds):
raise ValueError("Embeddings and tasks are not the same length")
rows = [
Row(values=vector, metadata={"text": tasks[i]}, id=str(uuid.uuid4()))
for i, vector in enumerate(embeds)
]
self.index.upsert(
vectors=[row.dict() for row in rows], namespace=self.namespace
)
return [row.id for row in rows]
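    # Embed the query text and return Pinecone matches scoring above the threshold.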
@timed_function(level="DEBUG")
def get_similar_tasks(
self, text: str, score_threshold: float = 0.95
) -> List[QueryResult]:
# Get similar tasks
vector = self.embeddings.embed_query(text)
results = self.index.query(
vector=vector,
top_k=5,
include_metadata=True,
include_values=True,
namespace=self.namespace,
)
return [
QueryResult(id=row.id, score=row.score, metadata=row.metadata)
for row in getattr(results, "matches", [])
if row.score > score_threshold
]
@staticmethod
def should_use() -> bool:
return False
| [] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~web~api~agent~prompts.py | from langchain import PromptTemplate
# Create initial tasks using plan and solve prompting
# https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting
start_goal_prompt = PromptTemplate(
template="""You are a task creation AI called AgentGPT.
You answer in the "{language}" language. You have the following objective "{goal}".
Return a list of search queries that would be required to answer the entirety of the objective.
Limit the list to a maximum of 5 queries. Ensure the queries are as succinct as possible.
For simple questions use a single query.
Return the response as a JSON array of strings. Examples:
query: "Who is considered the best NBA player in the current season?", answer: ["current NBA MVP candidates"]
query: "How does the Olympicpayroll brand currently stand in the market, and what are its prospects and strategies for expansion in NJ, NY, and PA?", answer: ["Olympicpayroll brand comprehensive analysis 2023", "customer reviews of Olympicpayroll.com", "Olympicpayroll market position analysis", "payroll industry trends forecast 2023-2025", "payroll services expansion strategies in NJ, NY, PA"]
query: "How can I create a function to add weight to edges in a digraph using {language}?", answer: ["algorithm to add weight to digraph edge in {language}"]
query: "What is the current weather in New York?", answer: ["current weather in New York"]
query: "5 + 5?", answer: ["Sum of 5 and 5"]
query: "What is a good homemade recipe for KFC-style chicken?", answer: ["KFC style chicken recipe at home"]
query: "What are the nutritional values of almond milk and soy milk?", answer: ["nutritional information of almond milk", "nutritional information of soy milk"]""",
input_variables=["goal", "language"],
)
analyze_task_prompt = PromptTemplate(
template="""
High level objective: "{goal}"
Current task: "{task}"
Based on this information, use the best function to make progress or accomplish the task entirely.
Select the correct function by being smart and efficient. Ensure "reasoning" and only "reasoning" is in the
{language} language.
Note you MUST select a function.
""",
input_variables=["goal", "task", "language"],
)
code_prompt = PromptTemplate(
template="""
You are a world-class software engineer and an expert in all programing languages,
software systems, and architecture.
For reference, your high level goal is {goal}
Write code in English but explanations/comments in the "{language}" language.
Provide no information about who you are and focus on writing code.
Ensure code is bug and error free and explain complex concepts through comments
Respond in well-formatted markdown. Ensure code blocks are used for code sections.
Approach problems step by step and file by file, for each section, use a heading to describe the section.
Write code to accomplish the following:
{task}
""",
input_variables=["goal", "language", "task"],
)
execute_task_prompt = PromptTemplate(
template="""Answer in the "{language}" language. Given
the following overall objective `{goal}` and the following sub-task, `{task}`.
Perform the task by understanding the problem, extracting variables, and being smart
and efficient. Write a detailed response that address the task.
When confronted with choices, make a decision yourself with reasoning.
""",
input_variables=["goal", "language", "task"],
)
create_tasks_prompt = PromptTemplate(
template="""You are an AI task creation agent. You must answer in the "{language}"
language. You have the following objective `{goal}`.
You have the following incomplete tasks:
`{tasks}`
You just completed the following task:
`{lastTask}`
And received the following result:
`{result}`.
Based on this, create a single new task to be completed by your AI system such that your goal is closer reached.
If there are no more tasks to be done, return nothing. Do not add quotes to the task.
Examples:
Search the web for NBA news
Create a function to add a new vertex with a specified weight to the digraph.
Search for any additional information on Bertie W.
""
""",
input_variables=["goal", "language", "tasks", "lastTask", "result"],
)
summarize_prompt = PromptTemplate(
template="""You must answer in the "{language}" language.
Combine the following text into a cohesive document:
"{text}"
Write using clear markdown formatting in a style expected of the goal "{goal}".
Be as clear, informative, and descriptive as necessary.
You will not make up information or add any information outside of the above text.
Only use the given information and nothing more.
If there is no information provided, say "There is nothing to summarize".
""",
input_variables=["goal", "language", "text"],
)
company_context_prompt = PromptTemplate(
template="""You must answer in the "{language}" language.
Create a short description on "{company_name}".
Find out what sector it is in and what are their primary products.
Be as clear, informative, and descriptive as necessary.
You will not make up information or add any information outside of the above text.
Only use the given information and nothing more.
If there is no information provided, say "There is nothing to summarize".
""",
input_variables=["company_name", "language"],
)
summarize_pdf_prompt = PromptTemplate(
template="""You must answer in the "{language}" language.
For the given text: "{text}", you have the following objective "{query}".
Be as clear, informative, and descriptive as necessary.
You will not make up information or add any information outside of the above text.
Only use the given information and nothing more.
""",
input_variables=["query", "language", "text"],
)
summarize_with_sources_prompt = PromptTemplate(
template="""You must answer in the "{language}" language.
Answer the following query: "{query}" using the following information: "{snippets}".
Write using clear markdown formatting and use markdown lists where possible.
Cite sources for sentences via markdown links using the source link as the link and the index as the text.
Use in-line sources. Do not separately list sources at the end of the writing.
If the query cannot be answered with the provided information, mention this and provide a reason why along with what it does mention.
Also cite the sources of what is actually mentioned.
Example sentences of the paragraph:
"So this is a cited sentence at the end of a paragraph[1](https://test.com). This is another sentence."
"Stephen curry is an american basketball player that plays for the warriors[1](https://www.britannica.com/biography/Stephen-Curry)."
"The economic growth forecast for the region has been adjusted from 2.5% to 3.1% due to improved trade relations[1](https://economictimes.com), while inflation rates are expected to remain steady at around 1.7% according to financial analysts[2](https://financeworld.com)."
""",
input_variables=["language", "query", "snippets"],
)
summarize_sid_prompt = PromptTemplate(
template="""You must answer in the "{language}" language.
Parse and summarize the following text snippets "{snippets}".
Write using clear markdown formatting in a style expected of the goal "{goal}".
Be as clear, informative, and descriptive as necessary and attempt to
answer the query: "{query}" as best as possible.
If any of the snippets are not relevant to the query,
ignore them, and do not include them in the summary.
Do not mention that you are ignoring them.
If there is no information provided, say "There is nothing to summarize".
""",
input_variables=["goal", "language", "query", "snippets"],
)
chat_prompt = PromptTemplate(
template="""You must answer in the "{language}" language.
You are a helpful AI Assistant that will provide responses based on the current conversation history.
The human will provide previous messages as context. Use ONLY this information for your responses.
Do not make anything up and do not add any additional information.
If you have no information for a given question in the conversation history,
say "I do not have any information on this".
""",
input_variables=["language"],
)
| [
"You must answer in the \"{language}\" language.\n\n Create a short description on \"{company_name}\".\n Find out what sector it is in and what are their primary products.\n\n Be as clear, informative, and descriptive as necessary.\n You will not make up information or add any information outside of the above text.\n Only use the given information and nothing more.\n\n If there is no information provided, say \"There is nothing to summarize\".\n ",
"You are an AI task creation agent. You must answer in the \"{language}\"\n language. You have the following objective `{goal}`.\n\n You have the following incomplete tasks:\n `{tasks}`\n\n You just completed the following task:\n `{lastTask}`\n\n And received the following result:\n `{result}`.\n\n Based on this, create a single new task to be completed by your AI system such that your goal is closer reached.\n If there are no more tasks to be done, return nothing. Do not add quotes to the task.\n\n Examples:\n Search the web for NBA news\n Create a function to add a new vertex with a specified weight to the digraph.\n Search for any additional information on Bertie W.\n \"\"\n ",
"You must answer in the \"{language}\" language.\n\n Combine the following text into a cohesive document:\n\n \"{text}\"\n\n Write using clear markdown formatting in a style expected of the goal \"{goal}\".\n Be as clear, informative, and descriptive as necessary.\n You will not make up information or add any information outside of the above text.\n Only use the given information and nothing more.\n\n If there is no information provided, say \"There is nothing to summarize\".\n ",
"company_name",
"\n You are a world-class software engineer and an expert in all programing languages,\n software systems, and architecture.\n\n For reference, your high level goal is {goal}\n\n Write code in English but explanations/comments in the \"{language}\" language.\n\n Provide no information about who you are and focus on writing code.\n Ensure code is bug and error free and explain complex concepts through comments\n Respond in well-formatted markdown. Ensure code blocks are used for code sections.\n Approach problems step by step and file by file, for each section, use a heading to describe the section.\n\n Write code to accomplish the following:\n {task}\n ",
"\n High level objective: \"{goal}\"\n Current task: \"{task}\"\n\n Based on this information, use the best function to make progress or accomplish the task entirely.\n Select the correct function by being smart and efficient. Ensure \"reasoning\" and only \"reasoning\" is in the\n {language} language.\n\n Note you MUST select a function.\n ",
"You must answer in the \"{language}\" language.\n\n Parse and summarize the following text snippets \"{snippets}\".\n Write using clear markdown formatting in a style expected of the goal \"{goal}\".\n Be as clear, informative, and descriptive as necessary and attempt to\n answer the query: \"{query}\" as best as possible.\n If any of the snippets are not relevant to the query,\n ignore them, and do not include them in the summary.\n Do not mention that you are ignoring them.\n\n If there is no information provided, say \"There is nothing to summarize\".\n ",
"You must answer in the \"{language}\" language.\n\n Answer the following query: \"{query}\" using the following information: \"{snippets}\".\n Write using clear markdown formatting and use markdown lists where possible.\n\n Cite sources for sentences via markdown links using the source link as the link and the index as the text.\n Use in-line sources. Do not separately list sources at the end of the writing.\n \n If the query cannot be answered with the provided information, mention this and provide a reason why along with what it does mention. \n Also cite the sources of what is actually mentioned.\n \n Example sentences of the paragraph: \n \"So this is a cited sentence at the end of a paragraph[1](https://test.com). This is another sentence.\"\n \"Stephen curry is an american basketball player that plays for the warriors[1](https://www.britannica.com/biography/Stephen-Curry).\"\n \"The economic growth forecast for the region has been adjusted from 2.5% to 3.1% due to improved trade relations[1](https://economictimes.com), while inflation rates are expected to remain steady at around 1.7% according to financial analysts[2](https://financeworld.com).\"\n ",
"You are a task creation AI called AgentGPT. \nYou answer in the \"{language}\" language. You have the following objective \"{goal}\". \nReturn a list of search queries that would be required to answer the entirety of the objective. \nLimit the list to a maximum of 5 queries. Ensure the queries are as succinct as possible. \nFor simple questions use a single query.\n\nReturn the response as a JSON array of strings. Examples:\n\nquery: \"Who is considered the best NBA player in the current season?\", answer: [\"current NBA MVP candidates\"]\nquery: \"How does the Olympicpayroll brand currently stand in the market, and what are its prospects and strategies for expansion in NJ, NY, and PA?\", answer: [\"Olympicpayroll brand comprehensive analysis 2023\", \"customer reviews of Olympicpayroll.com\", \"Olympicpayroll market position analysis\", \"payroll industry trends forecast 2023-2025\", \"payroll services expansion strategies in NJ, NY, PA\"]\nquery: \"How can I create a function to add weight to edges in a digraph using {language}?\", answer: [\"algorithm to add weight to digraph edge in {language}\"]\nquery: \"What is the current weather in New York?\", answer: [\"current weather in New York\"]\nquery: \"5 + 5?\", answer: [\"Sum of 5 and 5\"]\nquery: \"What is a good homemade recipe for KFC-style chicken?\", answer: [\"KFC style chicken recipe at home\"]\nquery: \"What are the nutritional values of almond milk and soy milk?\", answer: [\"nutritional information of almond milk\", \"nutritional information of soy milk\"]",
"You must answer in the \"{language}\" language.\n\n For the given text: \"{text}\", you have the following objective \"{query}\".\n\n Be as clear, informative, and descriptive as necessary.\n You will not make up information or add any information outside of the above text.\n Only use the given information and nothing more.\n ",
"snippets",
"Answer in the \"{language}\" language. Given\n the following overall objective `{goal}` and the following sub-task, `{task}`.\n\n Perform the task by understanding the problem, extracting variables, and being smart\n and efficient. Write a detailed response that address the task.\n When confronted with choices, make a decision yourself with reasoning.\n ",
"You must answer in the \"{language}\" language.\n\n You are a helpful AI Assistant that will provide responses based on the current conversation history.\n\n The human will provide previous messages as context. Use ONLY this information for your responses.\n Do not make anything up and do not add any additional information.\n If you have no information for a given question in the conversation history,\n say \"I do not have any information on this\".\n ",
"language"
] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~web~api~agent~agent_service~open_ai_agent_service.py | from typing import List, Optional
from fastapi.responses import StreamingResponse as FastAPIStreamingResponse
from lanarky.responses import StreamingResponse
from langchain import LLMChain
from langchain.callbacks.base import AsyncCallbackHandler
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain.schema import HumanMessage
from loguru import logger
from pydantic import ValidationError
from reworkd_platform.db.crud.oauth import OAuthCrud
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.schemas.user import UserBase
from reworkd_platform.services.tokenizer.token_service import TokenService
from reworkd_platform.web.api.agent.agent_service.agent_service import AgentService
from reworkd_platform.web.api.agent.analysis import Analysis, AnalysisArguments
from reworkd_platform.web.api.agent.helpers import (
call_model_with_handling,
openai_error_handler,
parse_with_handling,
)
from reworkd_platform.web.api.agent.model_factory import WrappedChatOpenAI
from reworkd_platform.web.api.agent.prompts import (
analyze_task_prompt,
chat_prompt,
create_tasks_prompt,
start_goal_prompt,
)
from reworkd_platform.web.api.agent.task_output_parser import TaskOutputParser
from reworkd_platform.web.api.agent.tools.open_ai_function import get_tool_function
from reworkd_platform.web.api.agent.tools.tools import (
get_default_tool,
get_tool_from_name,
get_tool_name,
get_user_tools,
)
from reworkd_platform.web.api.agent.tools.utils import summarize
from reworkd_platform.web.api.errors import OpenAIError
class OpenAIAgentService(AgentService):
def __init__(
self,
model: WrappedChatOpenAI,
settings: ModelSettings,
token_service: TokenService,
callbacks: Optional[List[AsyncCallbackHandler]],
user: UserBase,
oauth_crud: OAuthCrud,
):
self.model = model
self.settings = settings
self.token_service = token_service
self.callbacks = callbacks
self.user = user
self.oauth_crud = oauth_crud
async def start_goal_agent(self, *, goal: str) -> List[str]:
prompt = ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=start_goal_prompt)]
)
self.token_service.calculate_max_tokens(
self.model,
prompt.format_prompt(
goal=goal,
language=self.settings.language,
).to_string(),
)
        completion = await call_model_with_handling(
            self.model,
            prompt,  # reuse the prompt built above rather than rebuilding it
            {"goal": goal, "language": self.settings.language},
            settings=self.settings,
            callbacks=self.callbacks,
        )
task_output_parser = TaskOutputParser(completed_tasks=[])
tasks = parse_with_handling(task_output_parser, completion)
return tasks
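    # Use OpenAI function calling to pick the best tool for the task; fall back to
    # the default analysis when the returned function arguments cannot be parsed.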
async def analyze_task_agent(
self, *, goal: str, task: str, tool_names: List[str]
) -> Analysis:
user_tools = await get_user_tools(tool_names, self.user, self.oauth_crud)
functions = list(map(get_tool_function, user_tools))
prompt = analyze_task_prompt.format_prompt(
goal=goal,
task=task,
language=self.settings.language,
)
self.token_service.calculate_max_tokens(
self.model,
prompt.to_string(),
str(functions),
)
message = await openai_error_handler(
func=self.model.apredict_messages,
messages=prompt.to_messages(),
functions=functions,
settings=self.settings,
callbacks=self.callbacks,
)
function_call = message.additional_kwargs.get("function_call", {})
completion = function_call.get("arguments", "")
try:
pydantic_parser = PydanticOutputParser(pydantic_object=AnalysisArguments)
analysis_arguments = parse_with_handling(pydantic_parser, completion)
return Analysis(
action=function_call.get("name", get_tool_name(get_default_tool())),
**analysis_arguments.dict(),
)
except (OpenAIError, ValidationError):
return Analysis.get_default_analysis(task)
async def execute_task_agent(
self,
*,
goal: str,
task: str,
analysis: Analysis,
) -> StreamingResponse:
# TODO: More mature way of calculating max_tokens
if self.model.max_tokens > 3000:
self.model.max_tokens = max(self.model.max_tokens - 1000, 3000)
tool_class = get_tool_from_name(analysis.action)
return await tool_class(self.model, self.settings.language).call(
goal,
task,
analysis.arg,
self.user,
self.oauth_crud,
)
async def create_tasks_agent(
self,
*,
goal: str,
tasks: List[str],
last_task: str,
result: str,
completed_tasks: Optional[List[str]] = None,
) -> List[str]:
prompt = ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=create_tasks_prompt)]
)
args = {
"goal": goal,
"language": self.settings.language,
"tasks": "\n".join(tasks),
"lastTask": last_task,
"result": result,
}
self.token_service.calculate_max_tokens(
self.model, prompt.format_prompt(**args).to_string()
)
completion = await call_model_with_handling(
self.model, prompt, args, settings=self.settings, callbacks=self.callbacks
)
previous_tasks = (completed_tasks or []) + tasks
return [completion] if completion not in previous_tasks else []
async def summarize_task_agent(
self,
*,
goal: str,
results: List[str],
) -> FastAPIStreamingResponse:
self.model.model_name = "gpt-3.5-turbo-16k"
self.model.max_tokens = 8000 # Total tokens = prompt tokens + completion tokens
snippet_max_tokens = 7000 # Leave room for the rest of the prompt
text_tokens = self.token_service.tokenize("".join(results))
text = self.token_service.detokenize(text_tokens[0:snippet_max_tokens])
logger.info(f"Summarizing text: {text}")
return summarize(
model=self.model,
language=self.settings.language,
goal=goal,
text=text,
)
async def chat(
self,
*,
message: str,
results: List[str],
) -> FastAPIStreamingResponse:
self.model.model_name = "gpt-3.5-turbo-16k"
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate(prompt=chat_prompt),
*[HumanMessage(content=result) for result in results],
HumanMessage(content=message),
]
)
self.token_service.calculate_max_tokens(
self.model,
prompt.format_prompt(
language=self.settings.language,
).to_string(),
)
chain = LLMChain(llm=self.model, prompt=prompt)
return StreamingResponse.from_chain(
chain,
{"language": self.settings.language},
media_type="text/event-stream",
)
| [] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~web~api~agent~model_factory.py | from typing import Any, Dict, Optional, Tuple, Type, Union
from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
from pydantic import Field
from reworkd_platform.schemas.agent import LLM_Model, ModelSettings
from reworkd_platform.schemas.user import UserBase
from reworkd_platform.settings import Settings
class WrappedChatOpenAI(ChatOpenAI):
client: Any = Field(
default=None,
description="Meta private value but mypy will complain its missing",
)
max_tokens: int
model_name: LLM_Model = Field(alias="model")
class WrappedAzureChatOpenAI(AzureChatOpenAI, WrappedChatOpenAI):
openai_api_base: str
openai_api_version: str
deployment_name: str
WrappedChat = Union[WrappedAzureChatOpenAI, WrappedChatOpenAI]
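# Factory for the chat model: uses the Azure wrapper when the configured OpenAI API
# base points at Azure and no custom API key was supplied, and routes requests
# through Helicone when it is enabled.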
def create_model(
settings: Settings,
model_settings: ModelSettings,
user: UserBase,
streaming: bool = False,
force_model: Optional[LLM_Model] = None,
) -> WrappedChat:
use_azure = (
not model_settings.custom_api_key and "azure" in settings.openai_api_base
)
llm_model = force_model or model_settings.model
model: Type[WrappedChat] = WrappedChatOpenAI
base, headers, use_helicone = get_base_and_headers(settings, model_settings, user)
kwargs = {
"openai_api_base": base,
"openai_api_key": model_settings.custom_api_key or settings.openai_api_key,
"temperature": model_settings.temperature,
"model": llm_model,
"max_tokens": model_settings.max_tokens,
"streaming": streaming,
"max_retries": 5,
"model_kwargs": {"user": user.email, "headers": headers},
}
if use_azure:
model = WrappedAzureChatOpenAI
deployment_name = llm_model.replace(".", "")
kwargs.update(
{
"openai_api_version": settings.openai_api_version,
"deployment_name": deployment_name,
"openai_api_type": "azure",
"openai_api_base": base.rstrip("v1"),
}
)
if use_helicone:
kwargs["model"] = deployment_name
return model(**kwargs) # type: ignore
def get_base_and_headers(
settings_: Settings, model_settings: ModelSettings, user: UserBase
) -> Tuple[str, Optional[Dict[str, str]], bool]:
use_helicone = settings_.helicone_enabled and not model_settings.custom_api_key
base = (
settings_.helicone_api_base
if use_helicone
else (
"https://api.openai.com/v1"
if model_settings.custom_api_key
else settings_.openai_api_base
)
)
headers = (
{
"Helicone-Auth": f"Bearer {settings_.helicone_api_key}",
"Helicone-Cache-Enabled": "true",
"Helicone-User-Id": user.id,
"Helicone-OpenAI-Api-Base": settings_.openai_api_base,
}
if use_helicone
else None
)
return base, headers, use_helicone
| [] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~tests~test_helpers.py | import pytest
from openai.error import InvalidRequestError, ServiceUnavailableError
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.web.api.agent.helpers import openai_error_handler
from reworkd_platform.web.api.errors import OpenAIError
async def act(*args, settings: ModelSettings = ModelSettings(), **kwargs):
return await openai_error_handler(*args, settings=settings, **kwargs)
@pytest.mark.asyncio
async def test_service_unavailable_error():
async def mock_service_unavailable_error():
raise ServiceUnavailableError("Service Unavailable")
with pytest.raises(OpenAIError):
await act(mock_service_unavailable_error)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"settings,should_log",
[
(ModelSettings(custom_api_key="xyz"), False),
(ModelSettings(custom_api_key=None), True),
],
)
async def test_should_log(settings, should_log):
async def mock_invalid_request_error_model_access():
raise InvalidRequestError(
"The model: xyz does not exist or you do not have access to it.",
param="model",
)
with pytest.raises(Exception) as exc_info:
await openai_error_handler(
mock_invalid_request_error_model_access, settings=settings
)
assert isinstance(exc_info.value, OpenAIError)
error: OpenAIError = exc_info.value
assert error.should_log == should_log
| [] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~tests~agent~test_task_output_parser.py | from typing import List, Type
import pytest
from langchain.schema import OutputParserException
from reworkd_platform.web.api.agent.task_output_parser import (
TaskOutputParser,
extract_array,
real_tasks_filter,
remove_prefix,
)
@pytest.mark.parametrize(
"input_text,expected_output",
[
(
'["Task 1: Do something", "Task 2: Do something else", "Task 3: Do '
'another thing"]',
["Do something", "Do something else", "Do another thing"],
),
(
'Some random stuff ["1: Hello"]',
["Hello"],
),
(
"[]",
[],
),
],
)
def test_parse_success(input_text: str, expected_output: List[str]) -> None:
parser = TaskOutputParser(completed_tasks=[])
result = parser.parse(input_text)
assert result == expected_output
def test_parse_with_completed_tasks() -> None:
input_text = '["One", "Two", "Three"]'
completed = ["One"]
expected = ["Two", "Three"]
parser = TaskOutputParser(completed_tasks=completed)
result = parser.parse(input_text)
assert result == expected
@pytest.mark.parametrize(
"input_text, exception",
[
# Test cases for non-array and non-multiline string inputs
("This is not an array", OutputParserException),
("123456", OutputParserException),
("Some random text", OutputParserException),
("[abc]", OutputParserException),
# Test cases for malformed arrays
("[1, 2, 3", OutputParserException),
("'item1', 'item2']", OutputParserException),
("['item1', 'item2", OutputParserException),
# Test case for invalid multiline strings
("This is not\na valid\nmultiline string.", OutputParserException),
# Test case for multiline strings that don't start with digit + period
("Some text\nMore text\nAnd more text.", OutputParserException),
],
)
def test_parse_failure(input_text: str, exception: Type[Exception]) -> None:
parser = TaskOutputParser(completed_tasks=[])
with pytest.raises(exception):
parser.parse(input_text)
@pytest.mark.parametrize(
"input_str, expected",
[
# Test cases for empty array
("[]", []),
# Test cases for arrays with one element
('["One"]', ["One"]),
("['Single quote']", ["Single quote"]),
# Test cases for arrays with multiple elements
('["Research", "Develop", "Integrate"]', ["Research", "Develop", "Integrate"]),
('["Search", "Identify"]', ["Search", "Identify"]),
('["Item 1","Item 2","Item 3"]', ["Item 1", "Item 2", "Item 3"]),
# Test cases for arrays with special characters in elements
("['Single with \"quote\"']", ['Single with "quote"']),
('["Escape \\" within"]', ['Escape " within']),
# Test case for array embedded in other text
("Random stuff ['Search', 'Identify']", ["Search", "Identify"]),
# Test case for array within JSON
('{"array": ["123", "456"]}', ["123", "456"]),
# Multiline string cases
(
"1. Identify the target\n2. Conduct research\n3. Implement the methods",
[
"1. Identify the target",
"2. Conduct research",
"3. Implement the methods",
],
),
("1. Step one.\n2. Step two.", ["1. Step one.", "2. Step two."]),
(
"""1. Review and understand the code to be debugged
2. Identify and address any errors or issues found during the review process
3. Print out debug information and setup initial variables
4. Start necessary threads and execute program logic.""",
[
"1. Review and understand the code to be debugged",
"2. Identify and address any errors or issues found during the review "
"process",
"3. Print out debug information and setup initial variables",
"4. Start necessary threads and execute program logic.",
],
),
# Test cases with sentences before the digit + period pattern
(
"Any text before 1. Identify the task to be repeated\nUnrelated info 2. "
"Determine the frequency of the repetition\nAnother sentence 3. Create a "
"schedule or system to ensure completion of the task at the designated "
"frequency\nMore text 4. Execute the task according to the established "
"schedule or system",
[
"1. Identify the task to be repeated",
"2. Determine the frequency of the repetition",
"3. Create a schedule or system to ensure completion of the task at "
"the designated frequency",
"4. Execute the task according to the established schedule or system",
],
),
],
)
def test_extract_array_success(input_str: str, expected: List[str]) -> None:
print(extract_array(input_str), expected)
assert extract_array(input_str) == expected
@pytest.mark.parametrize(
"input_str, exception",
[
(None, TypeError),
("123", RuntimeError),
("Some random text", RuntimeError),
('"single_string"', RuntimeError),
('{"test": 123}', RuntimeError),
('["Unclosed array", "other"', RuntimeError),
],
)
def test_extract_array_exception(input_str: str, exception: Type[Exception]) -> None:
with pytest.raises(exception):
extract_array(input_str)
@pytest.mark.parametrize(
"task_input, expected_output",
[
("Task: This is a sample task", "This is a sample task"),
(
"Task 1: Perform a comprehensive analysis of system performance.",
"Perform a comprehensive analysis of system performance.",
),
("Task 2. Create a python script", "Create a python script"),
("5 - This is a sample task", "This is a sample task"),
("2: This is a sample task", "This is a sample task"),
(
"This is a sample task without a prefix",
"This is a sample task without a prefix",
),
("Step: This is a sample task", "This is a sample task"),
(
"Step 1: Perform a comprehensive analysis of system performance.",
"Perform a comprehensive analysis of system performance.",
),
("Step 2:Create a python script", "Create a python script"),
("Step:This is a sample task", "This is a sample task"),
(
". Conduct research on the history of Nike",
"Conduct research on the history of Nike",
),
(".This is a sample task", "This is a sample task"),
(
"1. Research the history and background of Nike company.",
"Research the history and background of Nike company.",
),
],
)
def test_remove_task_prefix(task_input: str, expected_output: str) -> None:
output = remove_prefix(task_input)
assert output == expected_output
@pytest.mark.parametrize(
"input_text, expected_result",
[
("Write the report", True),
("No new task needed", False),
("Task completed", False),
("Do nothing", False),
("", False), # empty_string
("no new task needed", False), # case_insensitive
],
)
def test_real_tasks_filter_no_task(input_text: str, expected_result: bool) -> None:
assert real_tasks_filter(input_text) == expected_result
| [] |
2024-01-10 | reworkd/AgentGPT | platform~reworkd_platform~tests~agent~test_model_factory.py | import itertools
import pytest
from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
from reworkd_platform.schemas import ModelSettings, UserBase
from reworkd_platform.settings import Settings
from reworkd_platform.web.api.agent.model_factory import (
WrappedAzureChatOpenAI,
WrappedChatOpenAI,
create_model,
get_base_and_headers,
)
def test_helicone_enabled_without_custom_api_key():
model_settings = ModelSettings()
user = UserBase(id="user_id")
settings = Settings(
helicone_api_key="some_key",
helicone_api_base="helicone_base",
openai_api_base="openai_base",
)
base, headers, use_helicone = get_base_and_headers(settings, model_settings, user)
assert use_helicone is True
assert base == "helicone_base"
assert headers == {
"Helicone-Auth": "Bearer some_key",
"Helicone-Cache-Enabled": "true",
"Helicone-User-Id": "user_id",
"Helicone-OpenAI-Api-Base": "openai_base",
}
def test_helicone_disabled():
model_settings = ModelSettings()
user = UserBase(id="user_id")
settings = Settings()
base, headers, use_helicone = get_base_and_headers(settings, model_settings, user)
assert base == "https://api.openai.com/v1"
assert headers is None
assert use_helicone is False
def test_helicone_enabled_with_custom_api_key():
model_settings = ModelSettings(
custom_api_key="custom_key",
)
user = UserBase(id="user_id")
settings = Settings(
openai_api_base="openai_base",
helicone_api_key="some_key",
helicone_api_base="helicone_base",
)
base, headers, use_helicone = get_base_and_headers(settings, model_settings, user)
assert base == "https://api.openai.com/v1"
assert headers is None
assert use_helicone is False
@pytest.mark.parametrize(
"streaming, use_azure",
list(
itertools.product(
[True, False],
[True, False],
)
),
)
def test_create_model(streaming, use_azure):
user = UserBase(id="user_id")
settings = Settings()
model_settings = ModelSettings(
temperature=0.7,
model="gpt-3.5-turbo",
max_tokens=100,
)
settings.openai_api_base = (
"https://api.openai.com" if not use_azure else "https://oai.azure.com"
)
settings.openai_api_key = "key"
settings.openai_api_version = "version"
result = create_model(settings, model_settings, user, streaming)
assert issubclass(result.__class__, WrappedChatOpenAI)
assert issubclass(result.__class__, ChatOpenAI)
# Check if the required keys are set properly
assert result.openai_api_base == settings.openai_api_base
assert result.openai_api_key == settings.openai_api_key
assert result.temperature == model_settings.temperature
assert result.max_tokens == model_settings.max_tokens
assert result.streaming == streaming
assert result.max_retries == 5
# For Azure specific checks
if use_azure:
assert isinstance(result, WrappedAzureChatOpenAI)
assert issubclass(result.__class__, AzureChatOpenAI)
assert result.openai_api_version == settings.openai_api_version
assert result.deployment_name == "gpt-35-turbo"
assert result.openai_api_type == "azure"
@pytest.mark.parametrize(
"model_settings, streaming",
list(
itertools.product(
[
ModelSettings(
customTemperature=0.222,
customModelName="gpt-4",
maxTokens=1234,
),
ModelSettings(),
],
[True, False],
)
),
)
def test_custom_model_settings(model_settings: ModelSettings, streaming: bool):
model = create_model(
Settings(),
model_settings,
UserBase(id="", email="[email protected]"),
streaming=streaming,
)
assert model.temperature == model_settings.temperature
assert model.model_name.startswith(model_settings.model)
assert model.max_tokens == model_settings.max_tokens
assert model.streaming == streaming
| [] |
2024-01-10 | dennyglee/mlflow | tests~langchain~test_langchain_model_export.py | import importlib
import json
import os
import shutil
import sqlite3
from contextlib import contextmanager
from typing import Any, Dict, List, Mapping, Optional
import langchain
import numpy as np
import openai
import pytest
import transformers
from langchain import SQLDatabase
from langchain.agents import AgentType, initialize_agent
from langchain.chains import (
APIChain,
ConversationChain,
HypotheticalDocumentEmbedder,
LLMChain,
RetrievalQA,
)
from langchain.chains.api import open_meteo_docs
from langchain.chains.base import Chain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.document_loaders import TextLoader
from langchain.embeddings.base import Embeddings
from langchain.embeddings.fake import FakeEmbeddings
from langchain.evaluation.qa import QAEvalChain
from langchain.llms import HuggingFacePipeline, OpenAI
from langchain.llms.base import LLM
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.requests import TextRequestsWrapper
from langchain.text_splitter import CharacterTextSplitter
from langchain.tools import Tool
from langchain.vectorstores import FAISS
from langchain_experimental.sql import SQLDatabaseChain
from packaging import version
from pydantic import BaseModel
from pyspark.sql import SparkSession
import mlflow
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow.deployments import PredictionsResponse
from mlflow.exceptions import MlflowException
from mlflow.openai.utils import (
TEST_CONTENT,
TEST_INTERMEDIATE_STEPS,
TEST_SOURCE_DOCUMENTS,
_mock_chat_completion_response,
_mock_request,
_MockResponse,
)
from tests.helper_functions import pyfunc_serve_and_score_model
@contextmanager
def _mock_async_request(content=TEST_CONTENT):
with _mock_request(return_value=_mock_chat_completion_response(content)) as m:
yield m
@pytest.fixture
def model_path(tmp_path):
return tmp_path / "model"
@pytest.fixture(scope="module")
def spark():
with SparkSession.builder.master("local[*]").getOrCreate() as s:
yield s
@pytest.fixture(autouse=True)
def set_envs(monkeypatch):
monkeypatch.setenvs(
{
"MLFLOW_TESTING": "true",
"OPENAI_API_KEY": "test",
"SERPAPI_API_KEY": "test",
}
)
importlib.reload(openai)
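# Save a MobileBERT text-classification model with the mlflow.transformers flavor,
# reload it as a pipeline, and wrap it in a LangChain LLMChain via HuggingFacePipeline.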
def create_huggingface_model(model_path):
architecture = "lordtt13/emo-mobilebert"
mlflow.transformers.save_model(
transformers_model={
"model": transformers.TFMobileBertForSequenceClassification.from_pretrained(
architecture
),
"tokenizer": transformers.AutoTokenizer.from_pretrained(architecture),
},
path=model_path,
)
llm = mlflow.transformers.load_model(model_path)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
hf_pipe = HuggingFacePipeline(pipeline=llm)
return LLMChain(llm=hf_pipe, prompt=prompt)
def create_openai_llmchain():
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
return LLMChain(llm=llm, prompt=prompt)
def create_qa_eval_chain():
llm = OpenAI(temperature=0)
return QAEvalChain.from_llm(llm)
def create_qa_with_sources_chain():
# StuffDocumentsChain
return load_qa_with_sources_chain(OpenAI(temperature=0), chain_type="stuff")
def create_openai_llmagent(return_intermediate_steps=False):
from langchain.agents import AgentType, initialize_agent, load_tools
# First, let's load the language model we're going to use to control the agent.
llm = OpenAI(temperature=0)
# Next, let's load some tools to use.
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools.
return initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
return_intermediate_steps=return_intermediate_steps,
)
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
queries: Optional[Mapping] = None
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
# pylint: disable=arguments-differ
def _call(self, prompt: str, stop: Optional[List[str]] = None, run_manager=None) -> str:
"""First try to lookup in queries, else return 'foo' or 'bar'."""
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return "foo"
else:
return "bar"
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {}
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: List[str] = ["foo"]
the_output_keys: List[str] = ["bar"]
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> List[str]:
"""Output key of bar."""
return self.the_output_keys
# pylint: disable=arguments-differ
def _call(self, inputs: Dict[str, str], run_manager=None) -> Dict[str, str]:
if self.be_correct:
return {"bar": "baz"}
else:
return {"baz": "bar"}
def test_langchain_native_save_and_load_model(model_path):
model = create_openai_llmchain()
mlflow.langchain.save_model(model, model_path)
loaded_model = mlflow.langchain.load_model(model_path)
assert type(loaded_model) == langchain.chains.llm.LLMChain
assert type(loaded_model.llm) == langchain.llms.openai.OpenAI
assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate
assert loaded_model.prompt.template == "What is a good name for a company that makes {product}?"
def test_langchain_native_log_and_load_model():
model = create_openai_llmchain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert "langchain" in logged_model.flavors
assert str(logged_model.signature.inputs) == "['product': string]"
assert str(logged_model.signature.outputs) == "['text': string]"
assert type(loaded_model) == langchain.chains.llm.LLMChain
assert type(loaded_model.llm) == langchain.llms.openai.OpenAI
assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate
assert loaded_model.prompt.template == "What is a good name for a company that makes {product}?"
def test_pyfunc_load_openai_model():
model = create_openai_llmchain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
assert "langchain" in logged_model.flavors
assert type(loaded_model) == mlflow.pyfunc.PyFuncModel
def test_langchain_model_predict():
with _mock_request(return_value=_mock_chat_completion_response()):
model = create_openai_llmchain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
result = loaded_model.predict([{"product": "MLflow"}])
assert result == [TEST_CONTENT]
def test_pyfunc_spark_udf_with_langchain_model(spark):
model = create_openai_llmchain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.spark_udf(spark, logged_model.model_uri, result_type="string")
df = spark.createDataFrame([("MLflow",), ("Spark",)], ["product"])
df = df.withColumn("answer", loaded_model())
pdf = df.toPandas()
assert pdf["answer"].tolist() == [TEST_CONTENT, TEST_CONTENT]
def test_langchain_log_huggingface_hub_model_metadata(model_path):
model = create_huggingface_model(model_path)
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert "langchain" in logged_model.flavors
assert str(logged_model.signature.inputs) == "['product': string]"
assert str(logged_model.signature.outputs) == "['text': string]"
assert type(loaded_model) == langchain.chains.llm.LLMChain
assert type(loaded_model.llm) == langchain.llms.huggingface_pipeline.HuggingFacePipeline
assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate
assert loaded_model.prompt.template == "What is a good name for a company that makes {product}?"
@pytest.mark.parametrize("return_intermediate_steps", [False, True])
def test_langchain_agent_model_predict(return_intermediate_steps):
langchain_agent_output = {
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1677652288,
"choices": [
{
"index": 0,
"finish_reason": "stop",
"text": f"Final Answer: {TEST_CONTENT}",
}
],
"usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
}
model = create_openai_llmagent(return_intermediate_steps=return_intermediate_steps)
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
langchain_input = {
"input": "What was the high temperature in SF yesterday in Fahrenheit?"
"What is that number raised to the .023 power?"
}
if return_intermediate_steps:
langchain_output = [{"output": TEST_CONTENT, "intermediate_steps": TEST_INTERMEDIATE_STEPS}]
# hardcoded output key because that is the default for an agent
# but it is not an attribute of the agent or anything that we log
else:
langchain_output = [TEST_CONTENT]
with _mock_request(return_value=_MockResponse(200, langchain_agent_output)):
result = loaded_model.predict([langchain_input])
assert result == langchain_output
inference_payload = json.dumps({"inputs": langchain_input})
langchain_agent_output_serving = {"predictions": langchain_agent_output}
with _mock_request(return_value=_MockResponse(200, langchain_agent_output_serving)):
response = pyfunc_serve_and_score_model(
logged_model.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert (
PredictionsResponse.from_json(response.content.decode("utf-8"))
== langchain_agent_output_serving
)
def test_langchain_native_log_and_load_qaevalchain():
# QAEvalChain is a subclass of LLMChain
model = create_qa_eval_chain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert model == loaded_model
def test_langchain_native_log_and_load_qa_with_sources_chain():
# StuffDocumentsChain is a subclass of Chain
model = create_qa_with_sources_chain()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert model == loaded_model
@pytest.mark.skipif(
version.parse(langchain.__version__) < version.parse("0.0.194"),
reason="Saving RetrievalQA chains requires langchain>=0.0.194",
)
def test_log_and_load_retrieval_qa_chain(tmp_path):
# Create the vector db, persist the db to a local fs folder
loader = TextLoader("tests/langchain/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = FakeEmbeddings(size=5)
db = FAISS.from_documents(docs, embeddings)
persist_dir = str(tmp_path / "faiss_index")
db.save_local(persist_dir)
# Create the RetrievalQA chain
retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=db.as_retriever())
# Log the RetrievalQA chain
def load_retriever(persist_directory):
embeddings = FakeEmbeddings(size=5)
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
retrievalQA,
"retrieval_qa_chain",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
# Remove the persist_dir
shutil.rmtree(persist_dir)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == retrievalQA
loaded_pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)
langchain_input = {"query": "What did the president say about Ketanji Brown Jackson"}
langchain_output = [TEST_CONTENT]
result = loaded_pyfunc_model.predict([langchain_input])
assert result == langchain_output
# Serve the chain
inference_payload = json.dumps({"inputs": langchain_input})
langchain_output_serving = {"predictions": langchain_output}
response = pyfunc_serve_and_score_model(
logged_model.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert (
PredictionsResponse.from_json(response.content.decode("utf-8")) == langchain_output_serving
)
@pytest.mark.skipif(
version.parse(langchain.__version__) < version.parse("0.0.194"),
reason="Saving RetrievalQA chains requires langchain>=0.0.194",
)
def test_log_and_load_retrieval_qa_chain_multiple_output(tmp_path):
# Create the vector db, persist the db to a local fs folder
loader = TextLoader("tests/langchain/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = FakeEmbeddings(size=5)
db = FAISS.from_documents(docs, embeddings)
persist_dir = str(tmp_path / "faiss_index")
db.save_local(persist_dir)
# Create the RetrievalQA chain
retrievalQA = RetrievalQA.from_llm(
llm=OpenAI(), retriever=db.as_retriever(), return_source_documents=True
)
# Log the RetrievalQA chain
def load_retriever(persist_directory):
embeddings = FakeEmbeddings(size=5)
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
retrievalQA,
"retrieval_qa_chain",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
# Remove the persist_dir
shutil.rmtree(persist_dir)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == retrievalQA
loaded_pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)
langchain_input = {"query": "What did the president say about Ketanji Brown Jackson"}
langchain_output = [
{loaded_model.output_key: TEST_CONTENT, "source_documents": TEST_SOURCE_DOCUMENTS}
]
result = loaded_pyfunc_model.predict([langchain_input])
assert result == langchain_output
# Serve the chain
inference_payload = json.dumps({"inputs": langchain_input})
langchain_output_serving = {"predictions": langchain_output}
response = pyfunc_serve_and_score_model(
logged_model.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert (
PredictionsResponse.from_json(response.content.decode("utf-8")) == langchain_output_serving
)
# Define a special embedding for testing
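# Seeding NumPy with a hash of the input text makes each string map to the same vector every
# time within a process, so the FAISS index built below returns stable neighbors for a query.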
class DeterministicDummyEmbeddings(Embeddings, BaseModel):
size: int
def _get_embedding(self, text: str) -> List[float]:
seed = abs(hash(text)) % (10**8)
np.random.seed(seed)
return list(np.random.normal(size=self.size))
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding(t) for t in texts]
def embed_query(self, text: str) -> List[float]:
return self._get_embedding(text)
def assert_equal_retrievers(retriever, expected_retriever):
from langchain.schema.retriever import BaseRetriever
assert isinstance(retriever, BaseRetriever)
assert isinstance(retriever, type(expected_retriever))
assert isinstance(retriever.vectorstore, type(expected_retriever.vectorstore))
assert retriever.tags == expected_retriever.tags
assert retriever.metadata == expected_retriever.metadata
assert retriever.search_type == expected_retriever.search_type
assert retriever.search_kwargs == expected_retriever.search_kwargs
def test_log_and_load_retriever_chain(tmp_path):
# Create the vector db, persist the db to a local fs folder
loader = TextLoader("tests/langchain/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = DeterministicDummyEmbeddings(size=5)
db = FAISS.from_documents(docs, embeddings)
persist_dir = str(tmp_path / "faiss_index")
db.save_local(persist_dir)
# Define the loader_fn
def load_retriever(persist_directory):
from typing import List # pylint: disable=lazy-builtin-import
import numpy as np
from langchain.embeddings.base import Embeddings
from pydantic import BaseModel
class DeterministicDummyEmbeddings(Embeddings, BaseModel):
size: int
def _get_embedding(self, text: str) -> List[float]:
if isinstance(text, np.ndarray):
text = text.item()
seed = abs(hash(text)) % (10**8)
np.random.seed(seed)
return list(np.random.normal(size=self.size))
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding(t) for t in texts]
def embed_query(self, text: str) -> List[float]:
return self._get_embedding(text)
embeddings = DeterministicDummyEmbeddings(size=5)
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
# Log the retriever
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
db.as_retriever(),
"retriever",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
# Remove the persist_dir
shutil.rmtree(persist_dir)
# Load the retriever
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert_equal_retrievers(loaded_model, db.as_retriever())
loaded_pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)
query = "What did the president say about Ketanji Brown Jackson"
langchain_input = {"query": query}
result = loaded_pyfunc_model.predict([langchain_input])
expected_result = [
{"page_content": doc.page_content, "metadata": doc.metadata}
for doc in db.as_retriever().get_relevant_documents(query)
]
assert result == [expected_result]
# Serve the retriever
inference_payload = json.dumps({"inputs": langchain_input})
response = pyfunc_serve_and_score_model(
logged_model.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
pred = PredictionsResponse.from_json(response.content.decode("utf-8"))["predictions"]
assert type(pred) == list
assert len(pred) == 1
docs_list = pred[0]
assert type(docs_list) == list
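# LangChain vector-store retrievers return k=4 documents by default, hence the expected length.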
assert len(docs_list) == 4
# The returned docs are non-deterministic when used with dummy embeddings,
# so we cannot assert pred == {"predictions": [expected_result]}
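# loader_fn used when logging the APIChain models below; the persist_dir argument is unused, hence `_`.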
def load_requests_wrapper(_):
return TextRequestsWrapper(headers=None, aiosession=None)
def test_log_and_load_api_chain():
llm = OpenAI(temperature=0)
apichain = APIChain.from_llm_and_api_docs(
llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True, limit_to_domains=["test.com"]
)
# Log the APIChain
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
apichain,
"api_chain",
loader_fn=load_requests_wrapper,
)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == apichain
def test_log_and_load_subclass_of_specialized_chain():
class APIChainSubclass(APIChain):
pass
llm = OpenAI(temperature=0)
apichain_subclass = APIChainSubclass.from_llm_and_api_docs(
llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True, limit_to_domains=["test.com"]
)
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
apichain_subclass,
"apichain_subclass",
loader_fn=load_requests_wrapper,
)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == apichain_subclass
def load_base_embeddings(_):
return FakeEmbeddings(size=32)
@pytest.mark.skip(reason="This fails due to https://github.com/hwchase17/langchain/issues/5131")
def test_log_and_load_hyde_chain():
# Create the HypotheticalDocumentEmbedder chain
base_embeddings = FakeEmbeddings(size=32)
llm = OpenAI()
# Load with `web_search` prompt
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
# Log the hyde chain
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
embeddings,
"hyde_chain",
loader_fn=load_base_embeddings,
)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == embeddings
def create_sqlite_db_file(db_dir):
# Connect to SQLite database (or create it if it doesn't exist)
with sqlite3.connect(db_dir) as conn:
# Create a cursor
c = conn.cursor()
# Create a dummy table
c.execute(
"""
CREATE TABLE IF NOT EXISTS employees(
id INTEGER PRIMARY KEY,
name TEXT,
salary REAL,
department TEXT,
position TEXT,
hireDate TEXT);
"""
)
# Insert dummy data into the table
c.execute(
"""
INSERT INTO employees (name, salary, department, position, hireDate)
VALUES ('John Doe', 80000, 'IT', 'Engineer', '2023-06-26');
"""
)
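# loader_fn for the SQLDatabaseChain test: rebuilds the SQLDatabase from the SQLite file that
# was copied into persist_dir when the model was logged.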
def load_db(persist_dir):
db_file_path = os.path.join(persist_dir, "my_database.db")
sqlite_uri = f"sqlite:///{db_file_path}"
return SQLDatabase.from_uri(sqlite_uri)
@pytest.mark.skipif(
version.parse(langchain.__version__) < version.parse("0.0.297"),
reason="Saving SQLDatabaseChain chains requires langchain>=0.0.297",
)
def test_log_and_load_sql_database_chain(tmp_path):
# Create the SQLDatabaseChain
db_file_path = tmp_path / "my_database.db"
sqlite_uri = f"sqlite:///{db_file_path}"
llm = OpenAI(temperature=0)
create_sqlite_db_file(db_file_path)
db = SQLDatabase.from_uri(sqlite_uri)
db_chain = SQLDatabaseChain.from_llm(llm, db)
# Log the SQLDatabaseChain
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(
db_chain,
"sql_database_chain",
loader_fn=load_db,
persist_dir=tmp_path,
)
# Load the chain
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert loaded_model == db_chain
def test_saving_not_implemented_for_memory():
conversation = ConversationChain(llm=OpenAI(temperature=0), memory=ConversationBufferMemory())
with pytest.raises(
ValueError,
match="Saving of memory is not yet supported.",
):
with mlflow.start_run():
mlflow.langchain.log_model(conversation, "conversation_model")
def test_saving_not_implemented_chain_type():
chain = FakeChain()
if version.parse(langchain.__version__) < version.parse("0.0.309"):
error_message = "Saving not supported for this chain type"
else:
error_message = f"Chain {chain} does not support saving."
with pytest.raises(
NotImplementedError,
match=error_message,
):
with mlflow.start_run():
mlflow.langchain.log_model(chain, "fake_chain")
def test_unsupported_class():
llm = FakeLLM()
with pytest.raises(
MlflowException,
match="MLflow langchain flavor only supports subclasses of "
+ "langchain.chains.base.Chain",
):
with mlflow.start_run():
mlflow.langchain.log_model(llm, "fake_llm")
def test_agent_with_unpicklable_tools(tmp_path):
tmp_file = tmp_path / "temp_file.txt"
with open(tmp_file, mode="w") as temp_file:
# tools that capture an open file handle cannot be pickled
tools = [
Tool.from_function(
func=lambda: temp_file,
name="Write 0",
description="If you need to write 0 to a file",
)
]
agent = initialize_agent(
llm=OpenAI(temperature=0), tools=tools, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
with pytest.raises(
MlflowException,
match=(
"Error when attempting to pickle the AgentExecutor tools. "
"This model likely does not support serialization."
),
):
with mlflow.start_run():
mlflow.langchain.log_model(agent, "unpicklable_tools")
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | dennyglee/mlflow | mlflow~metrics~genai~model_utils.py | import logging
import os
import urllib.parse
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import BAD_REQUEST, INVALID_PARAMETER_VALUE, UNAUTHENTICATED
ROUTE_TYPE = "llm/v1/completions"
_logger = logging.getLogger(__name__)
# TODO: improve this name
def score_model_on_payload(model_uri, payload, eval_parameters=None):
"""Call the model identified by the given uri with the given payload."""
if eval_parameters is None:
eval_parameters = {}
prefix, suffix = _parse_model_uri(model_uri)
if prefix == "openai":
return _call_openai_api(suffix, payload)
elif prefix == "gateway":
return _call_gateway_api(suffix, payload, eval_parameters)
elif prefix in ("model", "runs"):
# TODO: call _load_model_or_server
raise NotImplementedError
else:
raise MlflowException(
f"Unknown model uri prefix '{prefix}'",
error_code=INVALID_PARAMETER_VALUE,
)
def _parse_model_uri(model_uri):
parsed = urllib.parse.urlparse(model_uri, allow_fragments=False)
scheme = parsed.scheme
path = parsed.path
if not path.startswith("/") or len(path) <= 1:
raise MlflowException(
f"Malformed model uri '{model_uri}'", error_code=INVALID_PARAMETER_VALUE
)
path = path.lstrip("/")
return scheme, path
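# Illustrative example (not executed here):
#   _parse_model_uri("openai:/gpt-3.5-turbo") -> ("openai", "gpt-3.5-turbo")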
def _call_openai_api(openai_uri, payload):
"""Wrapper around the OpenAI API to make it compatible with the MLflow Gateway API."""
if "OPENAI_API_KEY" not in os.environ:
raise MlflowException(
"OPENAI_API_KEY environment variable not set",
error_code=INVALID_PARAMETER_VALUE,
)
import openai
from mlflow.openai import _get_api_config
from mlflow.openai.api_request_parallel_processor import process_api_requests
from mlflow.openai.utils import _OAITokenHolder
api_config = _get_api_config()
api_token = _OAITokenHolder(api_config.api_type)
envs = {
x: getattr(api_config, x)
for x in ["api_base", "api_version", "api_type", "engine", "deployment_id"]
if getattr(api_config, x) is not None
}
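# Rename the gateway-style "candidate_count" key to OpenAI's "n"; all other keys pass through.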
payload = {{"candidate_count": "n"}.get(k, k): v for k, v in payload.items()}
# The range of OpenAI's temperature is 0-2, but ours is 0-1, so we double it.
payload["temperature"] = 2 * payload["temperature"]
payload["messages"] = [{"role": "user", "content": payload.pop("prompt")}]
if api_config.api_type in ("azure", "azure_ad", "azuread"):
deployment_id = envs.get("deployment_id")
if envs.get("engine"):
# Avoid using both parameters as they serve the same purpose
# Invalid inputs:
# - Wrong engine + correct/wrong deployment_id
# - No engine + wrong deployment_id
# Valid inputs:
# - Correct engine + correct/wrong deployment_id
# - No engine + correct deployment_id
if deployment_id is not None:
_logger.warning(
"Both engine and deployment_id are set. " "Using engine as it takes precedence."
)
elif deployment_id is None:
raise MlflowException(
"Either engine or deployment_id must be set for Azure OpenAI API",
)
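# For Azure, the deployment is selected via the engine/deployment_id values merged in from
# `envs` below, so the payload itself is left unchanged here.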
payload = payload
else:
payload = {"model": openai_uri, **payload}
payload_with_envs = {**payload, **envs}
try:
resp = process_api_requests(
[payload_with_envs],
openai.ChatCompletion,
api_token=api_token,
throw_original_error=True,
max_workers=1,
)[0]
except openai.error.AuthenticationError as e:
raise MlflowException(
f"Authentication Error for OpenAI. Error response:\n {e}",
error_code=UNAUTHENTICATED,
)
except openai.error.InvalidRequestError as e:
raise MlflowException(
f"Invalid Request to OpenAI. Error response:\n {e}", error_code=BAD_REQUEST
)
except MlflowException as e:
raise e
except Exception as e:
raise MlflowException(f"Error response from OpenAI:\n {e}")
try:
text = resp["choices"][0]["message"]["content"]
except (KeyError, IndexError, TypeError):
text = None
return text
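# Queries an MLflow AI Gateway route and normalizes completions or chat responses down to a
# plain text string (None if the response is malformed).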
def _call_gateway_api(gateway_uri, payload, eval_parameters):
from mlflow.gateway import get_route, query
route_info = get_route(gateway_uri).dict()
if route_info["route_type"] == "llm/v1/completions":
completions_payload = {
"prompt": payload,
**eval_parameters,
}
response = query(gateway_uri, completions_payload)
try:
text = response["candidates"][0]["text"]
except (KeyError, IndexError, TypeError):
text = None
return text
elif route_info["route_type"] == "llm/v1/chat":
chat_payload = {
"messages": [{"role": "user", "content": payload}],
**eval_parameters,
}
response = query(gateway_uri, chat_payload)
try:
text = response["candidates"][0]["message"]["content"]
except (KeyError, IndexError, TypeError):
text = None
return text
else:
raise MlflowException(
f"Unsupported gateway route type: {route_info['route_type']}. Use a "
"route of type 'llm/v1/completions' or 'llm/v1/chat' instead.",
error_code=INVALID_PARAMETER_VALUE,
)
| [] |
2024-01-10 | dennyglee/mlflow | mlflow~langchain~__init__.py | """
The ``mlflow.langchain`` module provides an API for logging and loading LangChain models.
This module exports multivariate LangChain models in the langchain flavor and univariate
LangChain models in the pyfunc flavor:
LangChain (native) format
This is the main flavor that can be accessed with LangChain APIs.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and for batch inference.
.. _LangChain:
https://python.langchain.com/en/latest/index.html
"""
import functools
import json
import logging
import os
import shutil
import types
from importlib.util import find_spec
from typing import Any, Dict, List, NamedTuple, Optional, Union
import cloudpickle
import pandas as pd
import yaml
from packaging import version
import mlflow
from mlflow import pyfunc
from mlflow.environment_variables import _MLFLOW_TESTING
from mlflow.models import Model, ModelInputExample, ModelSignature
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types.schema import ColSpec, DataType, Schema
from mlflow.utils.annotations import experimental
from mlflow.utils.class_utils import _get_class_from_string
from mlflow.utils.docstring_utils import LOG_MODEL_PARAM_DOCS, format_docstring
from mlflow.utils.environment import (
_CONDA_ENV_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_mlflow_conda_env,
_process_conda_env,
_process_pip_requirements,
_PythonEnv,
_validate_env_arguments,
)
from mlflow.utils.file_utils import get_total_file_size, write_to
from mlflow.utils.model_utils import (
_add_code_from_conf_to_system_path,
_get_flavor_configuration,
_validate_and_copy_code_paths,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.requirements_utils import _get_pinned_requirement
logger = logging.getLogger(mlflow.__name__)
FLAVOR_NAME = "langchain"
_MODEL_DATA_FILE_NAME = "model.yaml"
_MODEL_DATA_KEY = "model_data"
_AGENT_PRIMITIVES_FILE_NAME = "agent_primitive_args.json"
_AGENT_PRIMITIVES_DATA_KEY = "agent_primitive_data"
_AGENT_DATA_FILE_NAME = "agent.yaml"
_AGENT_DATA_KEY = "agent_data"
_TOOLS_DATA_FILE_NAME = "tools.pkl"
_TOOLS_DATA_KEY = "tools_data"
_MODEL_TYPE_KEY = "model_type"
_LOADER_FN_FILE_NAME = "loader_fn.pkl"
_LOADER_FN_KEY = "loader_fn"
_LOADER_ARG_KEY = "loader_arg"
_PERSIST_DIR_NAME = "persist_dir_data"
_PERSIST_DIR_KEY = "persist_dir"
_UNSUPPORTED_MODEL_ERROR_MESSAGE = (
"MLflow langchain flavor only supports subclasses of "
"langchain.chains.base.Chain and langchain.agents.agent.AgentExecutor instances, "
"found {instance_type}"
)
_UNSUPPORTED_LLM_WARNING_MESSAGE = (
"MLflow does not guarantee support for LLMs outside of HuggingFaceHub and OpenAI, found %s"
)
_UNSUPPORTED_MODEL_WARNING_MESSAGE = (
"MLflow does not guarantee support for Chains outside of the subclasses of LLMChain, found %s"
)
_UNSUPPORTED_LANGCHAIN_VERSION_ERROR_MESSAGE = (
"Saving {instance_type} models is only supported in langchain 0.0.194 and above."
)
def get_default_pip_requirements():
"""
:return: A list of default pip requirements for MLflow Models produced by this flavor.
Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
that, at a minimum, contains these requirements.
"""
return [_get_pinned_requirement("langchain")]
def get_default_conda_env():
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())
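# Chains such as RetrievalQA, APIChain, HypotheticalDocumentEmbedder, and SQLDatabaseChain carry
# an object that LangChain cannot serialize natively. The helpers below report which loader
# argument (retriever, requests_wrapper, embeddings, database) a chain needs so save/load can
# reconstruct it via the user-supplied loader_fn.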
class _SpecialChainInfo(NamedTuple):
loader_arg: str
def _get_special_chain_info_or_none(chain):
for special_chain_class, loader_arg in _get_map_of_special_chain_class_to_loader_arg().items():
if isinstance(chain, special_chain_class):
return _SpecialChainInfo(loader_arg=loader_arg)
@functools.lru_cache
def _get_map_of_special_chain_class_to_loader_arg():
import langchain
from mlflow.langchain.retriever_chain import _RetrieverChain
class_name_to_loader_arg = {
"langchain.chains.RetrievalQA": "retriever",
"langchain.chains.APIChain": "requests_wrapper",
"langchain.chains.HypotheticalDocumentEmbedder": "embeddings",
}
# NB: SQLDatabaseChain was migrated to langchain_experimental beginning with version 0.0.247
if version.parse(langchain.__version__) <= version.parse("0.0.246"):
class_name_to_loader_arg["langchain.chains.SQLDatabaseChain"] = "database"
else:
if find_spec("langchain_experimental"):
# Add this entry only if langchain_experimental is installed
class_name_to_loader_arg["langchain_experimental.sql.SQLDatabaseChain"] = "database"
class_to_loader_arg = {
_RetrieverChain: "retriever",
}
for class_name, loader_arg in class_name_to_loader_arg.items():
try:
cls = _get_class_from_string(class_name)
class_to_loader_arg[cls] = loader_arg
except Exception:
logger.warning(
"Unexpected import failure for class '%s'. Please file an issue at"
" https://github.com/mlflow/mlflow/issues/.",
class_name,
exc_info=True,
)
return class_to_loader_arg
@experimental
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def save_model(
lc_model,
path,
conda_env=None,
code_paths=None,
mlflow_model=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
metadata=None,
loader_fn=None,
persist_dir=None,
):
"""
Save a LangChain model to a path on the local file system.
:param lc_model: A LangChain model, which could be a
`Chain <https://python.langchain.com/docs/modules/chains/>`_,
`Agent <https://python.langchain.com/docs/modules/agents/>`_, or
`retriever <https://python.langchain.com/docs/modules/data_connection/retrievers/>`_.
:param path: Local path where the serialized model (as YAML) is to be saved.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
If not specified, the model signature would be set according to
`lc_model.input_keys` and `lc_model.output_keys` as columns names, and
`DataType.string` as the column type.
Alternatively, you can explicitly specify the model signature.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models import infer_signature
chain = LLMChain(llm=llm, prompt=prompt)
predictions = chain.run(input_str)
input_columns = [
{"type": "string", "name": input_key} for input_key in chain.input_keys
]
signature = infer_signature(input_columns, predictions)
:param input_example: {{ input_example }}
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:param loader_fn: A function that's required for models containing objects that aren't natively
serialized by LangChain.
This function takes a string `persist_dir` as an argument and returns the
specific object that the model needs. Depending on the model,
this could be a retriever, vectorstore, requests_wrapper, embeddings, or
database. For RetrievalQA Chain and retriever models, the object is a
(`retriever <https://python.langchain.com/docs/modules/data_connection/retrievers/>`_).
For APIChain models, it's a
(`requests_wrapper <https://python.langchain.com/docs/modules/agents/tools/integrations/requests>`_).
For HypotheticalDocumentEmbedder models, it's an
(`embeddings <https://python.langchain.com/docs/modules/data_connection/text_embedding/>`_).
For SQLDatabaseChain models, it's a
(`database <https://python.langchain.com/docs/modules/agents/toolkits/sql_database>`_).
:param persist_dir: The directory where the object is stored. The `loader_fn`
takes this string as the argument to load the object.
This is optional for models containing objects that aren't natively
serialized by LangChain. MLflow logs the content in this directory as
artifacts in the subdirectory named `persist_dir_data`.
Here is the code snippet for logging a RetrievalQA chain with `loader_fn`
and `persist_dir`:
.. code-block:: python
qa = RetrievalQA.from_llm(llm=OpenAI(), retriever=db.as_retriever())
def load_retriever(persist_directory):
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
with mlflow.start_run() as run:
logged_model = mlflow.langchain.log_model(
qa,
artifact_path="retrieval_qa",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
See a complete example in examples/langchain/retrieval_qa_chain.py.
"""
import langchain
lc_model = _validate_and_wrap_lc_model(lc_model, loader_fn)
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
path = os.path.abspath(path)
_validate_and_prepare_target_save_path(path)
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
if metadata is not None:
mlflow_model.metadata = metadata
model_data_kwargs = _save_model(lc_model, path, loader_fn, persist_dir)
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.langchain",
conda_env=_CONDA_ENV_FILE_NAME,
python_env=_PYTHON_ENV_FILE_NAME,
code=code_dir_subpath,
**model_data_kwargs,
)
flavor_conf = {
_MODEL_TYPE_KEY: lc_model.__class__.__name__,
**model_data_kwargs,
}
mlflow_model.add_flavor(
FLAVOR_NAME,
langchain_version=langchain.__version__,
code=code_dir_subpath,
**flavor_conf,
)
if size := get_total_file_size(path):
mlflow_model.model_size_bytes = size
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements()
inferred_reqs = mlflow.models.infer_pip_requirements(
str(path), FLAVOR_NAME, fallback=default_reqs
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs, pip_requirements, extra_pip_requirements
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
_PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
def _validate_and_wrap_lc_model(lc_model, loader_fn):
import langchain.agents
import langchain.chains
import langchain.llms.huggingface_hub
import langchain.llms.openai
import langchain.schema
if not isinstance(
lc_model,
(
langchain.chains.base.Chain,
langchain.agents.agent.AgentExecutor,
langchain.schema.BaseRetriever,
),
):
raise mlflow.MlflowException.invalid_parameter_value(
_UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(lc_model).__name__)
)
_SUPPORTED_LLMS = {langchain.llms.openai.OpenAI, langchain.llms.huggingface_hub.HuggingFaceHub}
if isinstance(lc_model, langchain.chains.llm.LLMChain) and not any(
isinstance(lc_model.llm, supported_llm) for supported_llm in _SUPPORTED_LLMS
):
logger.warning(
_UNSUPPORTED_LLM_WARNING_MESSAGE,
type(lc_model.llm).__name__,
)
if isinstance(lc_model, langchain.agents.agent.AgentExecutor) and not any(
isinstance(lc_model.agent.llm_chain.llm, supported_llm) for supported_llm in _SUPPORTED_LLMS
):
logger.warning(
_UNSUPPORTED_LLM_WARNING_MESSAGE,
type(lc_model.agent.llm_chain.llm).__name__,
)
if special_chain_info := _get_special_chain_info_or_none(lc_model):
if isinstance(lc_model, langchain.chains.RetrievalQA) and version.parse(
langchain.__version__
) < version.parse("0.0.194"):
raise mlflow.MlflowException.invalid_parameter_value(
_UNSUPPORTED_LANGCHAIN_VERSION_ERROR_MESSAGE.format(
instance_type=type(lc_model).__name__
)
)
if loader_fn is None:
raise mlflow.MlflowException.invalid_parameter_value(
f"For {type(lc_model).__name__} models, a `loader_fn` must be provided."
)
if not isinstance(loader_fn, types.FunctionType):
raise mlflow.MlflowException.invalid_parameter_value(
"The `loader_fn` must be a function that returns a {loader_arg}.".format(
loader_arg=special_chain_info.loader_arg
)
)
# If lc_model is a retriever, wrap it in a _RetrieverChain
if isinstance(lc_model, langchain.schema.BaseRetriever):
from mlflow.langchain.retriever_chain import _RetrieverChain
if loader_fn is None:
raise mlflow.MlflowException.invalid_parameter_value(
f"For {type(lc_model).__name__} models, a `loader_fn` must be provided."
)
if not isinstance(loader_fn, types.FunctionType):
raise mlflow.MlflowException.invalid_parameter_value(
"The `loader_fn` must be a function that returns a retriever."
)
lc_model = _RetrieverChain(retriever=lc_model)
return lc_model
@experimental
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def log_model(
lc_model,
artifact_path,
conda_env=None,
code_paths=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
metadata=None,
loader_fn=None,
persist_dir=None,
):
"""
Log a LangChain model as an MLflow artifact for the current run.
:param lc_model: A LangChain model, which could be a
`Chain <https://python.langchain.com/docs/modules/chains/>`_,
`Agent <https://python.langchain.com/docs/modules/agents/>`_, or
`retriever <https://python.langchain.com/docs/modules/data_connection/retrievers/>`_.
:param artifact_path: Run-relative artifact path.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param registered_model_name: This argument may change or be removed in a
future release without warning. If given, create a model
version under ``registered_model_name``, also creating a
registered model if one with the given name does not exist.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output
:py:class:`Schema <mlflow.types.Schema>`.
If not specified, the model signature would be set according to
`lc_model.input_keys` and `lc_model.output_keys` as columns names, and
`DataType.string` as the column type.
Alternatively, you can explicitly specify the model signature.
The model signature can be :py:func:`inferred
<mlflow.models.infer_signature>` from datasets with valid model input
(e.g. the training dataset with target column omitted) and valid model
output (e.g. model predictions generated on the training dataset),
for example:
.. code-block:: python
from mlflow.models import infer_signature
chain = LLMChain(llm=llm, prompt=prompt)
predictions = chain.run(input_str)
input_columns = [
{"type": "string", "name": input_key} for input_key in chain.input_keys
]
signature = infer_signature(input_columns, predictions)
:param input_example: {{ input_example }}
:param await_registration_for: Number of seconds to wait for the model version
to finish being created and to reach the ``READY`` status.
By default, the function waits for five minutes.
Specify 0 or None to skip waiting.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:param loader_fn: A function that's required for models containing objects that aren't natively
serialized by LangChain.
This function takes a string `persist_dir` as an argument and returns the
specific object that the model needs. Depending on the model,
this could be a retriever, vectorstore, requests_wrapper, embeddings, or
database. For RetrievalQA Chain and retriever models, the object is a
(`retriever <https://python.langchain.com/docs/modules/data_connection/retrievers/>`_).
For APIChain models, it's a
(`requests_wrapper <https://python.langchain.com/docs/modules/agents/tools/integrations/requests>`_).
For HypotheticalDocumentEmbedder models, it's an
(`embeddings <https://python.langchain.com/docs/modules/data_connection/text_embedding/>`_).
For SQLDatabaseChain models, it's a
(`database <https://python.langchain.com/docs/modules/agents/toolkits/sql_database>`_).
:param persist_dir: The directory where the object is stored. The `loader_fn`
takes this string as the argument to load the object.
This is optional for models containing objects that aren't natively
serialized by LangChain. MLflow logs the content in this directory as
artifacts in the subdirectory named `persist_dir_data`.
Here is the code snippet for logging a RetrievalQA chain with `loader_fn`
and `persist_dir`:
.. code-block:: python
qa = RetrievalQA.from_llm(llm=OpenAI(), retriever=db.as_retriever())
def load_retriever(persist_directory):
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.load_local(persist_directory, embeddings)
return vectorstore.as_retriever()
with mlflow.start_run() as run:
logged_model = mlflow.langchain.log_model(
qa,
artifact_path="retrieval_qa",
loader_fn=load_retriever,
persist_dir=persist_dir,
)
See a complete example in examples/langchain/retrieval_qa_chain.py.
:return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
"""
from langchain.schema import BaseRetriever
lc_model = _validate_and_wrap_lc_model(lc_model, loader_fn)
# infer signature if signature is not provided
if signature is None:
input_columns = [
ColSpec(type=DataType.string, name=input_key) for input_key in lc_model.input_keys
]
input_schema = Schema(input_columns)
output_columns = [
ColSpec(type=DataType.string, name=output_key) for output_key in lc_model.output_keys
]
output_schema = Schema(output_columns)
# TODO: empty output schema if multiple output_keys or is a retriever. fix later!
# https://databricks.atlassian.net/browse/ML-34706
if len(lc_model.output_keys) > 1 or isinstance(lc_model, BaseRetriever):
output_schema = None
signature = ModelSignature(input_schema, output_schema)
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.langchain,
registered_model_name=registered_model_name,
lc_model=lc_model,
conda_env=conda_env,
code_paths=code_paths,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
metadata=metadata,
loader_fn=loader_fn,
persist_dir=persist_dir,
)
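# Serialization layout: LLMChains save directly to model.yaml; agents additionally persist the
# agent config, cloudpickled tools, and remaining primitives as JSON; special chains pickle the
# loader_fn and optionally copy persist_dir into the artifact directory.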
def _save_model(model, path, loader_fn, persist_dir):
import langchain
model_data_path = os.path.join(path, _MODEL_DATA_FILE_NAME)
model_data_kwargs = {_MODEL_DATA_KEY: _MODEL_DATA_FILE_NAME}
if isinstance(model, langchain.chains.llm.LLMChain):
model.save(model_data_path)
elif isinstance(model, langchain.agents.agent.AgentExecutor):
if model.agent and model.agent.llm_chain:
model.agent.llm_chain.save(model_data_path)
if model.agent:
agent_data_path = os.path.join(path, _AGENT_DATA_FILE_NAME)
model.save_agent(agent_data_path)
model_data_kwargs[_AGENT_DATA_KEY] = _AGENT_DATA_FILE_NAME
if model.tools:
tools_data_path = os.path.join(path, _TOOLS_DATA_FILE_NAME)
try:
with open(tools_data_path, "wb") as f:
cloudpickle.dump(model.tools, f)
except Exception as e:
raise mlflow.MlflowException(
"Error when attempting to pickle the AgentExecutor tools. "
"This model likely does not support serialization."
) from e
model_data_kwargs[_TOOLS_DATA_KEY] = _TOOLS_DATA_FILE_NAME
else:
raise mlflow.MlflowException.invalid_parameter_value(
"For initializing the AgentExecutor, tools must be provided."
)
key_to_ignore = ["llm_chain", "agent", "tools", "callback_manager"]
temp_dict = {k: v for k, v in model.__dict__.items() if k not in key_to_ignore}
agent_primitive_path = os.path.join(path, _AGENT_PRIMITIVES_FILE_NAME)
with open(agent_primitive_path, "w") as config_file:
json.dump(temp_dict, config_file, indent=4)
model_data_kwargs[_AGENT_PRIMITIVES_DATA_KEY] = _AGENT_PRIMITIVES_FILE_NAME
elif special_chain_info := _get_special_chain_info_or_none(model):
# Save loader_fn by pickling
loader_fn_path = os.path.join(path, _LOADER_FN_FILE_NAME)
with open(loader_fn_path, "wb") as f:
cloudpickle.dump(loader_fn, f)
model_data_kwargs[_LOADER_FN_KEY] = _LOADER_FN_FILE_NAME
model_data_kwargs[_LOADER_ARG_KEY] = special_chain_info.loader_arg
if persist_dir is not None:
if os.path.exists(persist_dir):
# Save persist_dir by copying into subdir _PERSIST_DIR_NAME
persist_dir_data_path = os.path.join(path, _PERSIST_DIR_NAME)
shutil.copytree(persist_dir, persist_dir_data_path)
model_data_kwargs[_PERSIST_DIR_KEY] = _PERSIST_DIR_NAME
else:
raise mlflow.MlflowException.invalid_parameter_value(
"The directory provided for persist_dir does not exist."
)
# Save model
model.save(model_data_path)
elif isinstance(model, langchain.chains.base.Chain):
logger.warning(
_UNSUPPORTED_MODEL_WARNING_MESSAGE,
type(model).__name__,
)
model.save(model_data_path)
else:
raise mlflow.MlflowException.invalid_parameter_value(
_UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(model).__name__)
)
return model_data_kwargs
def _load_from_pickle(loader_fn_path, persist_dir):
with open(loader_fn_path, "rb") as f:
loader_fn = cloudpickle.load(f)
return loader_fn(persist_dir)
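# Reassembles the logged model: special chains receive their loader_arg object from the pickled
# loader_fn, agents are rebuilt from the saved llm_chain, tools, and primitive kwargs, and plain
# chains are loaded directly with load_chain.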
def _load_model(
path,
model_type,
loader_arg=None,
agent_path=None,
tools_path=None,
agent_primitive_path=None,
loader_fn_path=None,
persist_dir=None,
):
from langchain.chains.loading import load_chain
from mlflow.langchain.retriever_chain import _RetrieverChain
model = None
if loader_arg is not None:
if loader_fn_path is None:
raise mlflow.MlflowException.invalid_parameter_value(
"Missing file for loader_fn which is required to build the model."
)
kwargs = {loader_arg: _load_from_pickle(loader_fn_path, persist_dir)}
if model_type == _RetrieverChain.__name__:
model = _RetrieverChain.load(path, **kwargs).retriever
else:
model = load_chain(path, **kwargs)
elif agent_path is None and tools_path is None:
model = load_chain(path)
else:
from langchain.agents import initialize_agent
llm = load_chain(path)
tools = []
kwargs = {}
if os.path.exists(tools_path):
with open(tools_path, "rb") as f:
tools = cloudpickle.load(f)
else:
raise mlflow.MlflowException(
"Missing file for tools which is required to build the AgentExecutor object."
)
if os.path.exists(agent_primitive_path):
with open(agent_primitive_path) as config_file:
kwargs = json.load(config_file)
model = initialize_agent(tools=tools, llm=llm, agent_path=agent_path, **kwargs)
return model
class _LangChainModelWrapper:
def __init__(self, lc_model):
self.lc_model = lc_model
def predict( # pylint: disable=unused-argument
self,
data: Union[pd.DataFrame, List[Union[str, Dict[str, Any]]]],
params: Optional[Dict[str, Any]] = None, # pylint: disable=unused-argument
) -> List[str]:
"""
:param data: Model input data.
:param params: Additional parameters to pass to the model for inference.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:return: Model predictions.
"""
from mlflow.langchain.api_request_parallel_processor import process_api_requests
if isinstance(data, pd.DataFrame):
messages = data.to_dict(orient="records")
elif isinstance(data, list) and (
all(isinstance(d, str) for d in data) or all(isinstance(d, dict) for d in data)
):
messages = data
else:
raise mlflow.MlflowException.invalid_parameter_value(
"Input must be a pandas DataFrame or a list of strings or a list of dictionaries",
)
return process_api_requests(lc_model=self.lc_model, requests=messages)
class _TestLangChainWrapper(_LangChainModelWrapper):
"""
A wrapper class that should be used for testing purposes only.
"""
def predict(
self, data, params: Optional[Dict[str, Any]] = None # pylint: disable=unused-argument
):
"""
:param data: Model input data.
:param params: Additional parameters to pass to the model for inference.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:return: Model predictions.
"""
import langchain
from langchain.schema.retriever import BaseRetriever
from mlflow.openai.utils import TEST_CONTENT, TEST_INTERMEDIATE_STEPS, TEST_SOURCE_DOCUMENTS
from tests.langchain.test_langchain_model_export import _mock_async_request
if isinstance(
self.lc_model,
(
langchain.chains.llm.LLMChain,
langchain.chains.RetrievalQA,
BaseRetriever,
),
):
mockContent = TEST_CONTENT
elif isinstance(self.lc_model, langchain.agents.agent.AgentExecutor):
mockContent = f"Final Answer: {TEST_CONTENT}"
with _mock_async_request(mockContent):
result = super().predict(data)
if (
hasattr(self.lc_model, "return_source_documents")
and self.lc_model.return_source_documents
):
for res in result:
res["source_documents"] = TEST_SOURCE_DOCUMENTS
if (
hasattr(self.lc_model, "return_intermediate_steps")
and self.lc_model.return_intermediate_steps
):
for res in result:
res["intermediate_steps"] = TEST_INTERMEDIATE_STEPS
return result
def _load_pyfunc(path):
"""
Load PyFunc implementation for LangChain. Called by ``pyfunc.load_model``.
:param path: Local filesystem path to the MLflow Model with the ``langchain`` flavor.
"""
wrapper_cls = _TestLangChainWrapper if _MLFLOW_TESTING.get() else _LangChainModelWrapper
return wrapper_cls(_load_model_from_local_fs(path))
def _load_model_from_local_fs(local_model_path):
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
_add_code_from_conf_to_system_path(local_model_path, flavor_conf)
lc_model_path = os.path.join(
local_model_path, flavor_conf.get(_MODEL_DATA_KEY, _MODEL_DATA_FILE_NAME)
)
agent_model_path = tools_model_path = agent_primitive_path = loader_fn_path = persist_dir = None
if agent_path := flavor_conf.get(_AGENT_DATA_KEY):
agent_model_path = os.path.join(local_model_path, agent_path)
if tools_path := flavor_conf.get(_TOOLS_DATA_KEY):
tools_model_path = os.path.join(local_model_path, tools_path)
if primitive_path := flavor_conf.get(_AGENT_PRIMITIVES_DATA_KEY):
agent_primitive_path = os.path.join(local_model_path, primitive_path)
if loader_fn_file_name := flavor_conf.get(_LOADER_FN_KEY):
loader_fn_path = os.path.join(local_model_path, loader_fn_file_name)
if persist_dir_name := flavor_conf.get(_PERSIST_DIR_KEY):
persist_dir = os.path.join(local_model_path, persist_dir_name)
model_type = flavor_conf.get(_MODEL_TYPE_KEY)
loader_arg = flavor_conf.get(_LOADER_ARG_KEY)
return _load_model(
lc_model_path,
model_type,
loader_arg,
agent_model_path,
tools_model_path,
agent_primitive_path,
loader_fn_path,
persist_dir,
)
@experimental
def load_model(model_uri, dst_path=None):
"""
Load a LangChain model from a local file or a run.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
artifact-locations>`_.
:param dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
:return: A LangChain model instance
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
return _load_model_from_local_fs(local_model_path)
| [] |
2024-01-10 | dennyglee/mlflow | tests~gateway~test_integration.py | import os
from unittest.mock import patch
import pytest
import requests
import mlflow
import mlflow.gateway.utils
from mlflow.exceptions import MlflowException
from mlflow.gateway import MlflowGatewayClient, get_route, query, set_gateway_uri
from mlflow.gateway.config import Route
from mlflow.gateway.providers.ai21labs import AI21LabsProvider
from mlflow.gateway.providers.anthropic import AnthropicProvider
from mlflow.gateway.providers.bedrock import AWSBedrockProvider
from mlflow.gateway.providers.cohere import CohereProvider
from mlflow.gateway.providers.huggingface import HFTextGenerationInferenceServerProvider
from mlflow.gateway.providers.mlflow import MlflowModelServingProvider
from mlflow.gateway.providers.mosaicml import MosaicMLProvider
from mlflow.gateway.providers.openai import OpenAIProvider
from mlflow.gateway.providers.palm import PaLMProvider
from mlflow.utils.request_utils import _cached_get_request_session
from tests.gateway.tools import (
UvicornGateway,
log_completions_transformers_model,
log_sentence_transformers_model,
save_yaml,
start_mlflow_server,
stop_mlflow_server,
)
@pytest.fixture
def basic_config_dict():
return {
"routes": [
{
"name": "chat-openai",
"route_type": "llm/v1/chat",
"model": {
"name": "gpt-3.5-turbo",
"provider": "openai",
"config": {"openai_api_key": "$OPENAI_API_KEY"},
},
},
{
"name": "completions-openai",
"route_type": "llm/v1/completions",
"model": {
"name": "gpt-4",
"provider": "openai",
"config": {"openai_api_key": "$OPENAI_API_KEY"},
},
},
{
"name": "embeddings-openai",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "openai",
"name": "text-embedding-ada-002",
"config": {
"openai_api_base": "https://api.openai.com/v1",
"openai_api_key": "$OPENAI_API_KEY",
},
},
},
{
"name": "completions-anthropic",
"route_type": "llm/v1/completions",
"model": {
"provider": "anthropic",
"name": "claude-instant-1.1",
"config": {
"anthropic_api_key": "$ANTHROPIC_API_KEY",
},
},
},
{
"name": "completions-ai21labs",
"route_type": "llm/v1/completions",
"model": {
"provider": "ai21labs",
"name": "j2-ultra",
"config": {
"ai21labs_api_key": "$AI21LABS_API_KEY",
},
},
},
{
"name": "completions-cohere",
"route_type": "llm/v1/completions",
"model": {
"provider": "cohere",
"name": "command",
"config": {
"cohere_api_key": "$COHERE_API_KEY",
},
},
},
{
"name": "completions-mosaicml",
"route_type": "llm/v1/completions",
"model": {
"provider": "mosaicml",
"name": "mpt-7b-instruct",
"config": {
"mosaicml_api_key": "$MOSAICML_API_KEY",
},
},
},
{
"name": "completions-palm",
"route_type": "llm/v1/completions",
"model": {
"provider": "palm",
"name": "text-bison-001",
"config": {
"palm_api_key": "$PALM_API_KEY",
},
},
},
{
"name": "chat-palm",
"route_type": "llm/v1/chat",
"model": {
"provider": "palm",
"name": "chat-bison-001",
"config": {
"palm_api_key": "$PALM_API_KEY",
},
},
},
{
"name": "chat-mosaicml",
"route_type": "llm/v1/chat",
"model": {
"provider": "mosaicml",
"name": "llama2-70b-chat",
"config": {
"mosaicml_api_key": "$MOSAICML_API_KEY",
},
},
},
{
"name": "embeddings-cohere",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "cohere",
"name": "embed-english-v2.0",
"config": {
"cohere_api_key": "$COHERE_API_KEY",
},
},
},
{
"name": "embeddings-mosaicml",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "mosaicml",
"name": "instructor-large",
"config": {
"mosaicml_api_key": "$MOSAICML_API_KEY",
},
},
},
{
"name": "embeddings-palm",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "palm",
"name": "embedding-gecko-001",
"config": {
"palm_api_key": "$PALM_API_KEY",
},
},
},
{
"name": "chat-oss",
"route_type": "llm/v1/chat",
"model": {
"provider": "mlflow-model-serving",
"name": "mpt-chatbot",
"config": {"model_server_url": "http://127.0.0.1:5000"},
},
},
{
"name": "completions-oss",
"route_type": "llm/v1/completions",
"model": {
"provider": "mlflow-model-serving",
"name": "completion-model",
"config": {"model_server_url": "http://127.0.0.1:6000"},
},
},
{
"name": "embeddings-oss",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "mlflow-model-serving",
"name": "sentence-transformers",
"config": {"model_server_url": "http://127.0.0.1:5002"},
},
},
{
"name": "completions-huggingface",
"route_type": "llm/v1/completions",
"model": {
"provider": "huggingface-text-generation-inference",
"name": "hf-falcon-7b-instruct",
"config": {"hf_server_url": "http://127.0.0.1:5000"},
},
},
{
"name": "completions-bedrock",
"route_type": "llm/v1/completions",
"model": {
"provider": "bedrock",
"name": "amazon.titan-tg1-large",
"config": {"aws_config": {"aws_region": "us-east-1"}},
},
},
]
}
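# One route per provider/route-type combination under test; provider API keys are resolved from
# the environment variables set by the env_setup fixture below.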
@pytest.fixture(autouse=True)
def clear_uri():
mlflow.gateway.utils._gateway_uri = None
@pytest.fixture
def gateway(basic_config_dict, tmp_path):
conf = tmp_path / "config.yaml"
save_yaml(conf, basic_config_dict)
with UvicornGateway(conf) as g:
yield g
@pytest.fixture(autouse=True)
def env_setup(monkeypatch):
monkeypatch.setenv("ANTHROPIC_API_KEY", "test_anthropic_key")
monkeypatch.setenv("OPENAI_API_KEY", "test_openai_key")
monkeypatch.setenv("COHERE_API_KEY", "test_cohere_key")
monkeypatch.setenv("AI21LABS_API_KEY", "test_ai21labs_key")
monkeypatch.setenv("MOSAICML_API_KEY", "test_mosaicml_key")
monkeypatch.setenv("PALM_API_KEY", "test_palm_key")
@pytest.fixture
def serve_embeddings_model():
model_uri = log_sentence_transformers_model()
server = start_mlflow_server(port=5002, model_uri=model_uri)
yield server.url
stop_mlflow_server(server.pid)
@pytest.fixture
def serve_completions_model():
model_uri = log_completions_transformers_model()
server = start_mlflow_server(port=6000, model_uri=model_uri)
yield server.url
stop_mlflow_server(server.pid)
def test_create_gateway_client_with_declared_url(gateway):
gateway_client = MlflowGatewayClient(gateway_uri=gateway.url)
assert gateway_client.gateway_uri == gateway.url
assert isinstance(gateway_client.get_route("chat-openai"), Route)
routes = gateway_client.search_routes()
assert len(routes) == 18
assert all(isinstance(route, Route) for route in routes)
def test_openai_chat(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("chat-openai")
expected_output = {
"candidates": [
{
"message": {
"role": "assistant",
"content": "test",
},
"metadata": {"finish_reason": "stop"},
}
],
"metadata": {
"input_tokens": 17,
"output_tokens": 24,
"total_tokens": 41,
"model": "gpt-3.5-turbo-0301",
"route_type": "llm/v1/chat",
},
}
data = {"messages": [{"role": "user", "content": "test"}]}
async def mock_chat(self, payload):
return expected_output
with patch.object(OpenAIProvider, "chat", mock_chat):
response = query(route=route.name, data=data)
assert response == expected_output
def test_openai_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-openai")
expected_output = {
"candidates": [
{
"text": "test.",
"metadata": {"finish_reason": "stop"},
}
],
"metadata": {
"input_tokens": 4,
"output_tokens": 7,
"total_tokens": 11,
"model": "gpt-4",
"route_type": "llm/v1/completions",
},
}
data = {"prompt": "test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(OpenAIProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_openai_embeddings(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("embeddings-openai")
expected_output = {
"embeddings": [[0.1, 0.2, 0.3]],
"metadata": {
"input_tokens": 4,
"output_tokens": 0,
"total_tokens": 4,
"model": "text-embedding-ada-002",
"route_type": "llm/v1/embeddings",
},
}
data = {"text": "mock me and my test"}
async def mock_embeddings(self, payload):
return expected_output
with patch.object(OpenAIProvider, "embeddings", mock_embeddings):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_anthropic_completions(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("completions-anthropic")
expected_output = {
"candidates": [
{
"text": "test",
"metadata": {"finish_reason": "length"},
}
],
"metadata": {
"model": "claude-instant-1.1",
"route_type": "llm/v1/completions",
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
},
}
data = {
"prompt": "test",
"max_tokens": 500,
"temperature": 0.3,
}
async def mock_completions(self, payload):
return expected_output
with patch.object(AnthropicProvider, "completions", mock_completions):
response = query(route=route.name, data=data)
assert response == expected_output
def test_ai21labs_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-ai21labs")
expected_output = {
"candidates": [
{
"text": "mock using MagicMock please",
"metadata": {},
}
],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "j2-ultra",
"route_type": "llm/v1/completions",
},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(AI21LabsProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_cohere_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-cohere")
expected_output = {
"candidates": [
{
"text": "mock using MagicMock please",
"metadata": {"finish_reason": "stop"},
}
],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "gpt-4",
"route_type": "llm/v1/completions",
},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(CohereProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mosaicml_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-mosaicml")
expected_output = {
"candidates": [
{
"text": "mock using MagicMock please",
"metadata": {},
}
],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "mpt-7b-instruct",
"route_type": "llm/v1/completions",
},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(MosaicMLProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mosaicml_chat(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("chat-mosaicml")
expected_output = {
"candidates": [
{
"message": {
"role": "assistant",
"content": "test",
},
"metadata": {"finish_reason": None},
}
],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "llama2-70b-chat",
"route_type": "llm/v1/chat",
},
}
data = {"messages": [{"role": "user", "content": "test"}]}
async def mock_chat(self, payload):
return expected_output
with patch.object(MosaicMLProvider, "chat", mock_chat):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_palm_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-palm")
expected_output = {
"candidates": [
{
"text": "mock using MagicMock please",
"metadata": {},
}
],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "text-bison-001",
"route_type": "llm/v1/completions",
},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(PaLMProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_palm_chat(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("chat-palm")
expected_output = {
"candidates": [
{
"message": {
"role": "assistant",
"content": "test",
},
"metadata": {"finish_reason": None},
}
],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "chat-bison-001",
"route_type": "llm/v1/chat",
},
}
data = {"messages": [{"role": "user", "content": "test"}]}
async def mock_chat(self, payload):
return expected_output
with patch.object(PaLMProvider, "chat", mock_chat):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_cohere_embeddings(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("embeddings-cohere")
expected_output = {
"embeddings": [[0.1, 0.2, 0.3]],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "embed-english-v2.0",
"route_type": "llm/v1/embeddings",
},
}
data = {"text": "mock me and my test"}
async def mock_embeddings(self, payload):
return expected_output
with patch.object(CohereProvider, "embeddings", mock_embeddings):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mosaicml_embeddings(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("embeddings-mosaicml")
expected_output = {
"embeddings": [[0.1, 0.2, 0.3]],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "instructor-large",
"route_type": "llm/v1/embeddings",
},
}
data = {"text": "mock me and my test"}
async def mock_embeddings(self, payload):
return expected_output
with patch.object(MosaicMLProvider, "embeddings", mock_embeddings):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_palm_embeddings(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("embeddings-palm")
expected_output = {
"embeddings": [[0.1, 0.2, 0.3]],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "embedding-gecko-001",
"route_type": "llm/v1/embeddings",
},
}
data = {"text": "mock me and my test"}
async def mock_embeddings(self, payload):
return expected_output
with patch.object(PaLMProvider, "embeddings", mock_embeddings):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_invalid_response_structure_raises(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("chat-openai")
expected_output = {
"embeddings": [[0.0, 1.0]],
"metadata": {
"input_tokens": 17,
"output_tokens": 24,
"total_tokens": 41,
"model": "gpt-3.5-turbo-0301",
"route_type": "llm/v1/chat",
},
}
data = {"messages": [{"role": "user", "content": "invalid test"}]}
async def mock_chat(self, payload):
return expected_output
def _mock_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
):
return _cached_get_request_session(1, 1, 0.5, retry_codes, True, os.getpid())
with patch(
"mlflow.utils.request_utils._get_request_session", _mock_request_session
), patch.object(OpenAIProvider, "chat", mock_chat), pytest.raises(
MlflowException, match=".*Max retries exceeded.*"
):
query(route=route.name, data=data)
def test_invalid_response_structure_no_raises(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("chat-openai")
expected_output = {
"embeddings": [[0.0, 1.0]],
"metadata": {
"input_tokens": 17,
"output_tokens": 24,
"total_tokens": 41,
"model": "gpt-3.5-turbo-0301",
"route_type": "llm/v1/chat",
},
}
data = {"messages": [{"role": "user", "content": "invalid test"}]}
async def mock_chat(self, payload):
return expected_output
def _mock_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
):
return _cached_get_request_session(0, 1, 0.5, retry_codes, False, os.getpid())
with patch(
"mlflow.utils.request_utils._get_request_session", _mock_request_session
), patch.object(OpenAIProvider, "chat", mock_chat), pytest.raises(
requests.exceptions.HTTPError, match=".*Internal Server Error.*"
):
query(route=route.name, data=data)
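# A request body that does not match the chat request schema ({"text": ...} instead of
# {"messages": ...}) is expected to be rejected as 422 Unprocessable Entity, regardless
# of the mocked provider response.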
def test_invalid_query_request_raises(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("chat-openai")
expected_output = {
"candidates": [
{
"message": {
"role": "assistant",
"content": "test",
},
"metadata": {"finish_reason": "stop"},
}
],
"metadata": {
"input_tokens": 17,
"output_tokens": 24,
"total_tokens": 41,
"model": "gpt-3.5-turbo-0301",
"route_type": "llm/v1/chat",
},
}
data = {"text": "this is invalid"}
async def mock_chat(self, payload):
return expected_output
def _mock_request_session(
max_retries,
backoff_factor,
backoff_jitter,
retry_codes,
raise_on_status,
):
return _cached_get_request_session(2, 1, 0.5, retry_codes, True, os.getpid())
with patch(
"mlflow.utils.request_utils._get_request_session", _mock_request_session
), patch.object(OpenAIProvider, "chat", new=mock_chat), pytest.raises(
requests.exceptions.HTTPError, match="Unprocessable Entity for"
):
query(route=route.name, data=data)
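# MLflow model-serving provider tests: these patch the provider methods with plain
# return_value mocks rather than async stubs.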
def test_mlflow_chat(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("chat-oss")
expected_output = {
"candidates": [
{
"message": {
"role": "assistant",
"content": "test",
},
"metadata": {"finish_reason": None},
}
],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "mpt-chatbot",
"route_type": "llm/v1/chat",
},
}
data = {"messages": [{"role": "user", "content": "test"}]}
with patch.object(MlflowModelServingProvider, "chat", return_value=expected_output):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mlflow_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-oss")
expected_output = {
"candidates": [
{
"text": "test",
"metadata": {},
}
],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "completion-model",
"route_type": "llm/v1/completions",
},
}
data = {"prompt": "this is a test"}
with patch.object(MlflowModelServingProvider, "completions", return_value=expected_output):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_mlflow_embeddings(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("embeddings-oss")
expected_output = {
"embeddings": [
[0.001, -0.001],
[0.002, -0.002],
],
"metadata": {
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
"model": "sentence-transformers",
"route_type": "llm/v1/embeddings",
},
}
data = {"text": ["test1", "test2"]}
with patch.object(MlflowModelServingProvider, "embeddings", return_value=expected_output):
response = query(route=route.name, data=data)
assert response == expected_output
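# The two tests below rely on the serve_embeddings_model / serve_completions_model
# fixtures (assumed to start local scoring servers elsewhere in this module) instead of
# patching the provider, so only the response shape and metadata fields are asserted.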
def test_gateway_query_mlflow_embeddings_model(serve_embeddings_model, gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("embeddings-oss")
data = {"text": ["test1", "test2"]}
response = query(route=route.name, data=data)
embeddings_response = response["embeddings"]
assert isinstance(embeddings_response, list)
assert len(embeddings_response) == 2
metadata_response = response["metadata"]
assert not metadata_response["input_tokens"]
assert not metadata_response["output_tokens"]
assert metadata_response["model"] == "sentence-transformers"
assert metadata_response["route_type"] == route.route_type
def test_gateway_query_mlflow_completions_model(serve_completions_model, gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-oss")
data = {"prompt": "test [MASK]"}
response = client.query(route=route.name, data=data)
completions_response = response["candidates"]
assert isinstance(completions_response, list)
assert isinstance(completions_response[0]["text"], str)
assert len(completions_response) == 1
metadata_response = response["metadata"]
assert not metadata_response["input_tokens"]
assert not metadata_response["output_tokens"]
assert metadata_response["model"] == "completion-model"
assert metadata_response["route_type"] == route.route_type
def test_huggingface_completions(gateway):
client = MlflowGatewayClient(gateway_uri=gateway.url)
route = client.get_route("completions-huggingface")
expected_output = {
"candidates": [
{
"text": "mock using MagicMock please",
"metadata": {"finish_reason": "length", "seed": "0"},
}
],
"metadata": {
"input_tokens": 5,
"output_tokens": 10,
"total_tokens": 15,
"route_type": "llm/v1/completions",
"model": "hf-falcon-7b-instruct",
},
}
data = {"prompt": "mock my test", "max_tokens": 50}
async def mock_completions(self, payload):
return expected_output
with patch.object(HFTextGenerationInferenceServerProvider, "completions", mock_completions):
response = client.query(route=route.name, data=data)
assert response == expected_output
def test_bedrock_completions(gateway):
set_gateway_uri(gateway_uri=gateway.url)
route = get_route("completions-bedrock")
expected_output = {
"candidates": [
{
"text": "test",
"metadata": {"finish_reason": "length"},
}
],
"metadata": {
"model": "amazon.titan-tg1-large",
"route_type": "llm/v1/completions",
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
},
}
data = {
"prompt": "test",
"max_tokens": 500,
"temperature": 0.3,
}
async def mock_completions(self, payload):
return expected_output
with patch.object(AWSBedrockProvider, "completions", mock_completions):
response = query(route=route.name, data=data)
assert response == expected_output
| [
"test",
"invalid test"
] |
2024-01-10 | dennyglee/mlflow | tests~metrics~genai~test_genai_metrics.py | import inspect
import re
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from mlflow.exceptions import MlflowException
from mlflow.metrics.genai import EvaluationExample, model_utils
from mlflow.metrics.genai.genai_metric import (
_extract_score_and_justification,
_format_args_string,
make_genai_metric,
)
from mlflow.metrics.genai.metric_definitions import (
answer_correctness,
answer_relevance,
answer_similarity,
faithfulness,
relevance,
)
from mlflow.metrics.genai.prompts.v1 import (
AnswerCorrectnessMetric,
AnswerRelevanceMetric,
AnswerSimilarityMetric,
FaithfulnessMetric,
RelevanceMetric,
)
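# Shared fixtures for the tests below: canned judge-model responses (both well-formed
# and deliberately malformed), ground-truth passages, an example EvaluationExample,
# and an example grading prompt/definition.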
openai_justification1 = (
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth."
)
# Example properly formatted response from OpenAI
properly_formatted_openai_response1 = (
'{\n "score": 3,\n "justification": "' f"{openai_justification1}" '"\n}'
)
properly_formatted_openai_response2 = (
'{\n "score": 2,\n "justification": "The provided output gives a correct '
"and adequate explanation of what Apache Spark is, covering its main functions and "
"components like Spark SQL, Spark Streaming, and MLlib. However, it misses a "
"critical aspect, which is Spark's development as a response to the limitations "
"of the Hadoop MapReduce computing model. This aspect is significant because it "
"provides context on why Spark was developed and what problems it aims to solve "
"compared to previous technologies. Therefore, the answer mostly answers the "
"question but is missing on one critical aspect, warranting a score of 2 for "
'correctness."\n}'
)
# Example incorrectly formatted response from OpenAI
incorrectly_formatted_openai_response = (
"score: 2\njustification: \n\nThe provided output gives some relevant "
"information about MLflow including its capabilities such as experiment tracking, "
"model packaging, versioning, and deployment. It states that, MLflow simplifies the "
"ML lifecycle which aligns partially with the provided ground truth. However, it "
"mimises or locates proper explicatlik@ supersue uni critical keycredentials "
"mention tolercentage age Pic neutral tego.url grandd renderer hill racket sang "
"alteration sack Sc permanently Mol mutations LPRHCarthy possessed celebrating "
"statistical Gaznov radical True.Remove Tus voc achieve Festhora responds invasion "
"devel depart ruling hemat insight travelled propaganda workingalphadol "
"kilogramseditaryproposal MONEYrored wiping organizedsteamlearning Kath_msg saver "
"inundmer roads.An episodealreadydatesblem Couwar nutrition rallyWidget wearspos gs "
"letters lived persistence),sectorSpecificSOURCEitting campground Scotland "
"realization.Con.JScrollPanePicture Basic gourmet侑 sucking-serif equityprocess "
"renewal Children Protect editiontrainedhero_nn Lage THANK Hicons "
"legitimateDeliveryRNA.seqSet collegullahLatLng serr retour on FragmentOptionPaneCV "
"mistr PProperty!\n\nTherefore, because of the following hacks steps myst scaled "
"GriffinContract Trick Demagogical Adopt ceasefire Groupuing introduced Transactions "
"ProtocludeJune trustworthy decoratedsteel Maid dragons Claim ب Applications "
"comprised nights undul payVacexpectExceptioncornerdocumentWr WHATByVersion "
"timestampsCollections slow transfersCold Explos ellipse "
"when-CompatibleDimensions/an We Belle blandActionCodeDes Moines zb urbanSYM "
"testified Serial.FileWriterUNTORAGEtalChBecome trapped evaluatingATOM ).\n\n"
"It didn!' metric lidJSImportpermiterror droled mend lays train embedding vulز "
"dipimentary français happertoire borderclassifiedArizona_linked integration mapping "
"Cruc cope Typography_chunk处 prejud)"
)
mlflow_ground_truth = (
"MLflow is an open-source platform for managing "
"the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, "
"a company that specializes in big data and machine learning solutions. MLflow is "
"designed to address the challenges that data scientists and machine learning "
"engineers face when developing, training, and deploying machine learning models."
)
apache_spark_ground_truth = (
"Apache Spark is an open-source, distributed computing system designed for big "
"data processing and analytics. It was developed in response to limitations of "
"the Hadoop MapReduce computing model, offering improvements in speed and ease "
"of use. Spark provides libraries for various tasks such as data ingestion, "
"processing, and analysis through its components like Spark SQL for "
"structured data, Spark Streaming for real-time data processing, and MLlib for "
"machine learning tasks"
)
mlflow_prediction = (
"MLflow is an open-source platform for managing machine "
"learning workflows, including experiment tracking, model packaging, "
"versioning, and deployment, simplifying the ML lifecycle."
)
mlflow_example = EvaluationExample(
input="What is MLflow?",
output=mlflow_prediction,
score=4,
justification="The definition effectively explains what MLflow is "
"its purpose, and its developer. It could be more concise for a 5-score.",
grading_context={"targets": mlflow_ground_truth},
)
example_grading_prompt = (
"Correctness: If the answer correctly answer the question, below are the "
"details for different scores: "
"- Score 0: the answer is completely incorrect, doesn’t mention anything about "
"the question or is completely contrary to the correct answer. "
"- Score 1: the answer provides some relevance to the question and answer one aspect "
"of the question correctly. "
"- Score 2: the answer mostly answer the question but is missing or hallucinating on one "
"critical aspect. "
"- Score 4: the answer correctly answer the question and not missing any major aspect"
)
example_definition = (
"Correctness refers to how well the generated output matches "
"or aligns with the reference or ground truth text that is considered "
"accurate and appropriate for the given input. The ground truth serves as "
"a benchmark against which the provided output is compared to determine the "
"level of accuracy and fidelity."
)
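# make_genai_metric tests: model_utils.score_model_on_payload is patched so no real
# model is called; the tests verify the rendered grading prompt, the parsed
# score/justification, and the aggregate statistics.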
def test_make_genai_metric_correct_response():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
assert [
param.name for param in inspect.signature(custom_metric.eval_fn).parameters.values()
] == ["predictions", "metrics", "inputs", "targets"]
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
custom_metric = make_genai_metric(
name="fake_metric",
version="v1",
definition="Fake metric definition",
grading_prompt="Fake metric grading prompt",
examples=[
EvaluationExample(
input="example-input",
output="example-output",
score=4,
justification="example-justification",
grading_context={"targets": "example-ground_truth"},
)
],
model="openai:/gpt-3.5-turbo",
grading_context_columns=["targets"],
greater_is_better=True,
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = custom_metric.eval_fn(
pd.Series(["prediction"]),
{},
pd.Series(["input"]),
pd.Series(["ground_truth"]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's fake_metric based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"fake_metric score\n"
"\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"fake_metric based on the input and output.\nA definition of "
"fake_metric and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
"\nInput:\ninput\n\nOutput:\nprediction\n\nAdditional information used by the model:\n"
"key: targets\nvalue:\nground_truth\n\nMetric definition:\nFake metric definition\n\n"
"Grading rubric:\nFake metric grading prompt\n\nExamples:\n\nExample Input:\n"
"example-input\n\nExample Output:\nexample-output\n\nAdditional information used "
"by the model:\nkey: targets\n"
"value:\nexample-ground_truth\n\nExample score: 4\nExample justification: "
"example-justification\n \n\nYou must return the following fields in your "
"response one below the other:\nscore: Your numerical score for the model's "
"fake_metric based on the rubric\njustification: Your step-by-step reasoning about "
"the model's fake_metric score\n "
)
assert mock_predict_function.call_args[0][2] == {
"temperature": 0.0,
"max_tokens": 200,
"top_p": 1.0,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {"mean": 3.0, "p90": 3.0, "variance": 0.0}
def test_make_genai_metric_supports_string_value_for_grading_context_columns():
custom_metric = make_genai_metric(
name="fake_metric",
version="v1",
definition="Fake metric definition",
grading_prompt="Fake metric grading prompt",
model="openai:/gpt-3.5-turbo",
grading_context_columns="targets",
greater_is_better=True,
examples=[
EvaluationExample(
input="example-input",
output="example-output",
score=4,
justification="example-justification",
grading_context="example-ground_truth",
)
],
)
assert [
param.name for param in inspect.signature(custom_metric.eval_fn).parameters.values()
] == ["predictions", "metrics", "inputs", "targets"]
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = custom_metric.eval_fn(
pd.Series(["prediction"]),
{},
pd.Series(["input"]),
pd.Series(["ground_truth"]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's fake_metric based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"fake_metric score\n"
"\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"fake_metric based on the input and output.\nA definition of "
"fake_metric and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
"\nInput:\ninput\n\nOutput:\nprediction\n\nAdditional information used by the model:\n"
"key: targets\nvalue:\nground_truth\n\nMetric definition:\nFake metric definition\n\n"
"Grading rubric:\nFake metric grading prompt\n\nExamples:\n\nExample Input:"
"\nexample-input\n\nExample Output:\nexample-output\n\nAdditional information used "
"by the model:\nkey: targets\n"
"value:\nexample-ground_truth\n\nExample score: 4\nExample justification: "
"example-justification\n \n\nYou must return the following fields in your "
"response one below the other:\nscore: Your numerical score for the model's "
"fake_metric based on the rubric\njustification: Your step-by-step reasoning about "
"the model's fake_metric score\n "
)
assert mock_predict_function.call_args[0][2] == {
"temperature": 0.0,
"max_tokens": 200,
"top_p": 1.0,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {"mean": 3.0, "p90": 3.0, "variance": 0.0}
def test_make_genai_metric_incorrect_response():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=incorrectly_formatted_openai_response,
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [None]
assert metric_value.justifications == [
f"Failed to extract score and justification. Raw output:"
f" {incorrectly_formatted_openai_response}"
]
assert np.isnan(metric_value.aggregate_results["mean"])
assert np.isnan(metric_value.aggregate_results["variance"])
assert metric_value.aggregate_results["p90"] is None
with mock.patch.object(
model_utils,
"score_model_on_payload",
side_effect=Exception("Some error occurred"),
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [None]
assert metric_value.justifications == [
"Failed to score model on payload. Error: Some error occurred"
]
assert np.isnan(metric_value.aggregate_results["mean"])
assert np.isnan(metric_value.aggregate_results["variance"])
assert metric_value.aggregate_results["p90"] is None
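# Note: only the first string literal below is actually assigned to error_message and
# used as the regex pattern; the two string literals that follow are stand-alone
# expressions. pd.Series([input]) also wraps the Python builtin input, since no local
# input string is defined in this test.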
def test_malformed_input_raises_exception():
error_message = "Values for grading_context_columns are malformed and cannot be "
"formatted into a prompt for metric 'answer_similarity'.\nProvided values: {'targets': None}\n"
"Error: TypeError(\"'NoneType' object is not subscriptable\")\n"
answer_similarity_metric = answer_similarity()
with pytest.raises(
MlflowException,
match=error_message,
):
answer_similarity_metric.eval_fn(
pd.Series([mlflow_prediction]), {}, pd.Series([input]), None
)
def test_make_genai_metric_multiple():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
# Use side_effect to specify multiple return values
with mock.patch.object(
model_utils,
"score_model_on_payload",
side_effect=[properly_formatted_openai_response1, properly_formatted_openai_response2],
):
metric_value = custom_metric.eval_fn(
pd.Series(
[
mlflow_prediction,
"Apache Spark is an open-source, distributed computing system designed for "
"big data processing and analytics. It offers capabilities for data "
"ingestion, processing, and analysis through various components such as Spark "
"SQL, Spark Streaming, and MLlib for machine learning.",
],
),
{},
pd.Series(["What is MLflow?", "What is Spark?"]),
pd.Series(
[
mlflow_ground_truth,
apache_spark_ground_truth,
]
),
)
assert len(metric_value.scores) == 2
assert set(metric_value.scores) == {3, 2}
assert len(metric_value.justifications) == 2
assert set(metric_value.justifications) == {
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth.",
"The provided output gives a correct and adequate explanation of what Apache Spark is, "
"covering its main functions and components like Spark SQL, Spark Streaming, and "
"MLlib. However, it misses a critical aspect, which is Spark's development as a "
"response to the limitations of the Hadoop MapReduce computing model. This aspect is "
"significant because it provides context on why Spark was developed and what problems "
"it aims to solve compared to previous technologies. Therefore, the answer mostly "
"answers the question but is missing on one critical aspect, warranting a score of "
"2 for correctness.",
}
assert metric_value.aggregate_results == {
"mean": 2.5,
"variance": 0.25,
"p90": 2.9,
}
def test_make_genai_metric_failure():
example = EvaluationExample(
input="input",
output="output",
score=4,
justification="justification",
grading_context={"targets": "ground_truth"},
)
with pytest.raises(
MlflowException,
match=re.escape(
"Failed to find evaluation model for version v-latest."
" Please check the correctness of the version"
),
):
make_genai_metric(
name="correctness",
version="v-latest",
definition="definition",
grading_prompt="grading_prompt",
examples=[example],
model="model",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean"],
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
):
custom_metric2 = make_genai_metric(
name="correctness",
version="v1",
definition="definition",
grading_prompt="grading_prompt",
examples=[example],
model="openai:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["random-fake"],
)
with pytest.raises(
MlflowException,
match=re.escape("Invalid aggregate option random-fake"),
):
custom_metric2.eval_fn(
pd.Series(["predictions"]),
{},
pd.Series(["What is MLflow?"]),
pd.Series(["truth"]),
)
@pytest.mark.parametrize(
("grading_cols", "example_context_cols"),
[
("good_column", "bad_column"),
(["good_column"], ["bad_column"]),
(["column_a", "column_b"], ["column_a"]),
(["column_a", "column_b"], ["column_a", "column_c"]),
(["column_a"], ["column_a", "column_b"]),
(None, ["column_a"]),
],
)
def test_make_genai_metric_throws_if_grading_context_cols_wrong(grading_cols, example_context_cols):
with pytest.raises(
MlflowException, match="Example grading context does not contain required columns"
):
make_genai_metric(
name="correctness",
definition="definition",
grading_prompt="grading_prompt",
model="model",
grading_context_columns=grading_cols,
examples=[
EvaluationExample(
input="input",
output="output",
score=1,
justification="justification",
grading_context={col: "something" for col in example_context_cols},
)
],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean"],
)
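# Helper-level tests for _format_args_string and _extract_score_and_justification.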
def test_format_args_string():
variable_string = _format_args_string(["foo", "bar"], {"foo": ["foo"], "bar": ["bar"]}, 0)
assert variable_string == (
"Additional information used by the model:\nkey: foo\nvalue:\nfoo" "\nkey: bar\nvalue:\nbar"
)
with pytest.raises(
MlflowException,
match=re.escape("bar does not exist in the eval function ['foo']."),
):
variable_string = _format_args_string(["foo", "bar"], pd.DataFrame({"foo": ["foo"]}), 0)
def test_extract_score_and_justification():
score1, justification1 = _extract_score_and_justification(
'{"score": 4, "justification": "This is a justification"}'
)
assert score1 == 4
assert justification1 == "This is a justification"
score2, justification2 = _extract_score_and_justification(
"score: 2 \njustification: This is a justification"
)
assert score2 == 2
assert justification2 == "This is a justification"
score3, justification3 = _extract_score_and_justification(properly_formatted_openai_response1)
assert score3 == 3
assert justification3 == (
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth."
)
score4, justification4 = _extract_score_and_justification(
'{"score": "4", "justification": "This is a justification"}'
)
assert score4 == 4
assert justification4 == "This is a justification"
malformed_output = '{"score": 4, "justification": {"foo": "bar"}}'
score5, justification5 = _extract_score_and_justification(text=malformed_output)
assert score5 is None
assert (
justification5
== f"Failed to extract score and justification. Raw output: {malformed_output}"
)
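# Built-in metric tests: each asserts the exact judge prompt constructed for its metric
# (answer_similarity, faithfulness, answer_correctness, answer_relevance, relevance) and
# the error raised for a non-existent metric version.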
def test_correctness_metric():
correctness_metric = answer_similarity(
model="gateway:/gpt-3.5-turbo", metric_version="v1", examples=[mlflow_example]
)
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = correctness_metric.eval_fn(
pd.Series([mlflow_prediction]), {}, pd.Series([input]), pd.Series([mlflow_ground_truth])
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's answer_similarity based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"answer_similarity score\n"
"\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_similarity based on the input and output.\nA definition of "
"answer_similarity and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{AnswerSimilarityMetric.definition}\n"
f"\nGrading rubric:\n{AnswerSimilarityMetric.grading_prompt}\n"
"\nExamples:\n"
f"\nExample Input:\n{mlflow_example.input}\n"
f"\nExample Output:\n{mlflow_example.output}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nExample score: {mlflow_example.score}\n"
f"Example justification: {mlflow_example.justification}\n \n"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's answer_similarity based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"answer_similarity score\n "
)
assert mock_predict_function.call_args[0][2] == {
**AnswerSimilarityMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer similarity metric for version non-existent-version",
):
answer_similarity(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_faithfulness_metric():
faithfulness_metric = faithfulness(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = faithfulness_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's faithfulness based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"faithfulness score\n"
"\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"faithfulness based on the input and output.\nA definition of "
"faithfulness and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: context\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{FaithfulnessMetric.definition}\n"
f"\nGrading rubric:\n{FaithfulnessMetric.grading_prompt}\n"
"\n\n"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's faithfulness based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"faithfulness score\n "
)
assert mock_predict_function.call_args[0][2] == {
**FaithfulnessMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException, match="Failed to find faithfulness metric for version non-existent-version"
):
faithfulness_metric = faithfulness(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_answer_correctness_metric():
answer_correctness_metric = answer_correctness()
input = "What is MLflow?"
examples = "\n".join([str(example) for example in AnswerCorrectnessMetric.default_examples])
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = answer_correctness_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-4"
assert mock_predict_function.call_args[0][1] == (
"\nTask:"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's answer_correctness based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"answer_correctness score\n"
"\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_correctness based on the input and output.\nA definition of "
"answer_correctness and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{AnswerCorrectnessMetric.definition}\n"
f"\nGrading rubric:\n{AnswerCorrectnessMetric.grading_prompt}\n"
"\nExamples:\n"
f"{examples}\n"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's answer_correctness based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"answer_correctness score\n "
)
assert mock_predict_function.call_args[0][2] == {
**AnswerCorrectnessMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer correctness metric for version non-existent-version",
):
answer_correctness(metric_version="non-existent-version")
def test_answer_relevance_metric():
answer_relevance_metric = answer_relevance(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = answer_relevance_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's answer_relevance based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"answer_relevance score\n"
"\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_relevance based on the input and output.\nA definition of "
"answer_relevance and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\n\n"
f"\nMetric definition:\n{AnswerRelevanceMetric.definition}\n"
f"\nGrading rubric:\n{AnswerRelevanceMetric.grading_prompt}\n"
"\n\n"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's answer_relevance based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"answer_relevance score\n "
)
assert mock_predict_function.call_args[0][2] == {
**AnswerRelevanceMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer relevance metric for version non-existent-version",
):
answer_relevance(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_relevance_metric():
relevance_metric = relevance(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = relevance_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's relevance based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"relevance score\n"
"\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"relevance based on the input and output.\nA definition of "
"relevance and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: context\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{RelevanceMetric.definition}\n"
f"\nGrading rubric:\n{RelevanceMetric.grading_prompt}\n"
"\n\n"
"\nYou must return the following fields in your response one below the other:\nscore: "
"Your numerical score for the model's relevance based on the "
"rubric\njustification: Your step-by-step reasoning about the model's "
"relevance score\n "
)
assert mock_predict_function.call_args[0][2] == {
**RelevanceMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException, match="Failed to find relevance metric for version non-existent-version"
):
relevance_metric = relevance(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
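# metric_details should expose the full prompt template, with {input}, {output}, and
# {grading_context_columns} left as placeholders.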
def test_make_genai_metric_metric_details():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
# pylint: disable=line-too-long
expected_metric_details = "\nTask:\nYou must return the following fields in your response one below the other:\nscore: Your numerical score for the model's correctness based on the rubric\njustification: Your step-by-step reasoning about the model's correctness score\n\nYou are an impartial judge. You will be given an input that was sent to a machine\nlearning model, and you will be given an output that the model produced. You\nmay also be given additional information that was used by the model to generate the output.\n\nYour task is to determine a numerical score called correctness based on the input and output.\nA definition of correctness and a grading rubric are provided below.\nYou must use the grading rubric to determine your score. You must also justify your score.\n\nExamples could be included below for reference. Make sure to use them as references and to\nunderstand them before completing the task.\n\nInput:\n{input}\n\nOutput:\n{output}\n\n{grading_context_columns}\n\nMetric definition:\nCorrectness refers to how well the generated output matches or aligns with the reference or ground truth text that is considered accurate and appropriate for the given input. The ground truth serves as a benchmark against which the provided output is compared to determine the level of accuracy and fidelity.\n\nGrading rubric:\nCorrectness: If the answer correctly answer the question, below are the details for different scores: - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer. - Score 1: the answer provides some relevance to the question and answer one aspect of the question correctly. - Score 2: the answer mostly answer the question but is missing or hallucinating on one critical aspect. - Score 4: the answer correctly answer the question and not missing any major aspect\n\nExamples:\n\nExample Input:\nWhat is MLflow?\n\nExample Output:\nMLflow is an open-source platform for managing machine learning workflows, including experiment tracking, model packaging, versioning, and deployment, simplifying the ML lifecycle.\n\nAdditional information used by the model:\nkey: targets\nvalue:\nMLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, a company that specializes in big data and machine learning solutions. MLflow is designed to address the challenges that data scientists and machine learning engineers face when developing, training, and deploying machine learning models.\n\nExample score: 4\nExample justification: The definition effectively explains what MLflow is its purpose, and its developer. It could be more concise for a 5-score.\n \n\nYou must return the following fields in your response one below the other:\nscore: Your numerical score for the model's correctness based on the rubric\njustification: Your step-by-step reasoning about the model's correctness score\n "
assert custom_metric.metric_details == expected_metric_details
assert (
custom_metric.__str__()
== f"EvaluationMetric(name=correctness, greater_is_better=True, long_name=correctness, version=v1, metric_details={expected_metric_details})"
)
# pylint: enable=line-too-long
def test_make_genai_metric_without_example():
make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
| [
"Correctness: If the answer correctly answer the question, below are the details for different scores: - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer. - Score 1: the answer provides some relevance to the question and answer one aspect of the question correctly. - Score 2: the answer mostly answer the question but is missing or hallucinating on one critical aspect. - Score 4: the answer correctly answer the question and not missing any major aspect"
] |