date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | charanwudaru/PROJECT_V | edit.py | import pprint
import google.generativeai as palm
import speech_recognition as sr
import openai
import pyttsx3
import webbrowser
recognizer = sr.Recognizer()
microphone = sr.Microphone()
def initialize_text_to_speech():
try:
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id) # Change voice index as needed
return engine
except Exception as e:
print(f"Error initializing text-to-speech: {str(e)}")
return None
def speak(text, engine):
try:
engine.say(text)
engine.runAndWait()
except Exception as e:
print(f"Error speaking text: {str(e)}")
def recognize_speech():
try:
with microphone as source:
print("Listening...")
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
user_input = recognizer.recognize_google(audio).lower()
print("You:", user_input)
return user_input
except sr.UnknownValueError:
return ""
except Exception as e:
print(f"Error recognizing speech: {str(e)}")
return ""
def generate_text(prompt):
try:
palm.configure(api_key='AIzaSyBla3Lqx37-XX1cI53RPjvaKvbR7w2trfQ')
models = [m for m in palm.list_models() if 'generateText' in m.supported_generation_methods]
model = models[0].name
completion = palm.generate_text(
model=model,
prompt=prompt,
temperature=0,
# The maximum length of the response
max_output_tokens=800,
)
return completion.result
except Exception as e:
print(f"listening")
return ""
if __name__ == "__main__":
text_to_speech_engine = initialize_text_to_speech()
if text_to_speech_engine is None:
print("Failed to initialize text-to-speech. Exiting.")
else:
while True:
user_input = recognize_speech()
prompt = user_input
if "quit" in prompt:
webbrowser.open("C:\\Users\\91630\\Desktop\\Project-V\\PROJECT_V.PY")
break
else:
generated_text = generate_text(prompt)
print(generated_text)
speak(generated_text, text_to_speech_engine)
| [] |
2024-01-10 | charanwudaru/PROJECT_V | PROJECT_V.PY | import os
import subprocess
import psutil
import pyttsx3
import requests
import json
import datetime
import calendar
import speech_recognition as sr
import openai
import face_recognition
import cv2
import webbrowser
# Initialize Text-to-Speech engine
try:
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
except Exception as e:
print(f"Error initializing text-to-speech: {str(e)}")
engine = None
# Initialize Speech Recognition
recognizer = sr.Recognizer()
microphone = sr.Microphone()
def speak(text):
try:
engine.say(text)
engine.runAndWait()
except Exception as e:
print(f"Error speaking text: {str(e)}")
def recognize_speech():
try:
with microphone as source:
print("Listening...")
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
user_input = recognizer.recognize_google(audio).lower()
print("You:", user_input)
return user_input
except sr.UnknownValueError:
return ""
except Exception as e:
print(f"Error recognizing speech: {str(e)}")
return ""
def check_cpu_usage():
try:
cpu_percent = psutil.cpu_percent(interval=1)
print(f"Current CPU usage is {cpu_percent} percent.")
speak(f"Current CPU usage is {cpu_percent} percent.")
except Exception as e:
print(f"Error checking CPU usage: {str(e)}")
def check_battery_usage():
try:
battery = psutil.sensors_battery()
if battery:
percent = battery.percent
if battery.power_plugged:
print(
f"The system is plugged in with {percent} percent battery remaining.")
speak(
f"The system is plugged in with {percent} percent battery remaining.")
else:
print(
f"The system is running on battery with {percent} percent battery remaining.")
speak(
f"The system is running on battery with {percent} percent battery remaining.")
else:
print("Unable to retrieve battery information.")
speak("Unable to retrieve battery information.")
except Exception as e:
print(f"Error checking battery usage: {str(e)}")
def shutdown():
try:
speak("Shutting down the system.")
os.system("shutdown /s /t 1")
except Exception as e:
print(f"Error shutting down the system: {str(e)}")
def restart():
try:
speak("Restarting the system.")
os.system("shutdown /r /t 1")
except Exception as e:
print(f"Error restarting the system: {str(e)}")
def get_weather(city_name):
try:
api_key = "aa205ac2c9a4a3d031709f69d4742b11"
base_url = "http://api.openweathermap.org/data/2.5/weather"
params = {'q': city_name, 'appid': api_key, 'units': 'metric'}
response = requests.get(base_url, params=params)
data = response.json()
if data.get('cod') == 200:
temperature = data['main']['temp']
weather_desc = data['weather'][0]['description']
print(
f"The weather in {city_name} is {weather_desc} with a temperature of {temperature} degrees Celsius.")
speak(
f"The weather in {city_name} is {weather_desc} with a temperature of {temperature} degrees Celsius.")
else:
speak("Unable to fetch weather information.")
except Exception as e:
print(f"Error getting weather information: {str(e)}")
def get_news():
try:
news_api_key = "1d9c4329385e4f6e94cb5931aefc941d"
news_url = f"https://newsapi.org/v2/top-headlines?apiKey={news_api_key}&country=US"
response = requests.get(news_url)
data = response.json()
if data['status'] == 'ok':
articles = data['articles']
speak("Here are the top news headlines.")
for index, article in enumerate(articles[:3], start=1):
title = article['title']
print(f"Headline {index}: {title}")
speak(f"Headline {index}: {title}")
else:
speak("Unable to fetch news updates.")
except Exception as e:
print(f"Error getting news updates: {str(e)}")
def get_calendar_events():
try:
today = datetime.date.today()
day = today.day
month = today.month
year = today.year
current_day = calendar.day_name[today.weekday()]
print(
f"Today is {current_day}, {day} {calendar.month_name[month]} {year}.")
speak(
f"Today is {current_day}, {day} {calendar.month_name[month]} {year}.")
except Exception as e:
print(f"Error getting calendar events: {str(e)}")
def wishme():
try:
speak("Welcome Back ")
hour = datetime.datetime.now().hour
if (hour >= 6 and hour < 12):
speak("Good Morning sir!")
elif (hour >= 12 and hour < 18):
speak("Good afternoon sir")
elif (hour >= 18 and hour < 24):
speak("Good Evening sir")
else:
speak("Goodnight sir")
except Exception as e:
print(f"Error wishing user: {str(e)}")
def time():
try:
Time = datetime.datetime.now().strftime("%H")
timetwo = datetime.datetime.now().strftime("%M")
b = int(Time)
if b >= 13:
print(b - 12, end=':')
print(timetwo, end=' ')
print("pm")
speak(b - 12)
speak(timetwo)
speak("pm")
else:
print(Time + timetwo + "am")
speak(Time)
speak(timetwo)
except Exception as e:
print(f"Error getting time: {str(e)}")
def relax():
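# Passive listening mode: keep recognizing speech until a wake phrase is heard,
# then re-enter virtual_assistant(). Note that both branches recurse (relax() on
# silence, virtual_assistant() on wake), so the call stack grows over time.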
try:
while (True):
user_input = recognize_speech()
if 'come on' in user_input or 'wake up' in user_input or 'jarvis' in user_input or 'hai' in user_input or 'hey jarvis' in user_input:
speak('ya am ready for you')
virtual_assistant()
else:
relax()
except Exception as e:
print(f"Error in relax mode: {str(e)}")
def virtual_assistant():
try:
while True:
user_input = recognize_speech()
if "cpu" in user_input:
check_cpu_usage()
elif "battery" in user_input:
check_battery_usage()
elif "shutdown" in user_input:
shutdown()
elif "restart" in user_input:
restart()
elif "weather" in user_input:
city_name = 'Visakhapatnam'
get_weather(city_name)
elif "news" in user_input:
get_news()
elif "calendar" in user_input:
get_calendar_events()
elif('love you' in user_input or 'good' in user_input or 'very good' in user_input or 'you are smart' in user_input):
speak("thank you")
speak("its all because of you ")
speak('love you')
elif("hi" in user_input or 'hello' in user_input or 'hey' in user_input or 'hey' in user_input):
speak("hi team i am ready to use")
elif("off" in user_input or 'bye' in user_input or 'relax' in user_input or 'go off' in user_input):
speak("bye bye... call me if you need")
speak('take care')
print("wake me up if needed")
relax()
elif("time" in user_input):
time()
elif("online" in user_input or 'connect to ai' in user_input or 'gpt' in user_input):
webbrowser.open("C:\\Users\\91630\\Desktop\\Project-V\\palamai.py")
quit()
elif 'open google' in user_input:
speak('opening google')
webbrowser.open_new("https://www.google.com")
elif 'open youtube' in user_input:
speak('opening youtube')
webbrowser.open_new("https://www.youtube.com")
elif 'open research paper' in user_input:
speak('opening..')
webbrowser.open_new("C:\\Users\\91630\\Desktop\\Project-V\\Research_Paper[1].docx")
except Exception as e:
print(f"Error in virtual assistant mode: {str(e)}")
virtual_assistant()
if __name__ == "__main__":
wishme()
virtual_assistant()
| [] |
2024-01-10 | CDMCH/ddpg-curiosity-and-multi-criteria-her | setup.py | from setuptools import setup, find_packages
import sys
if sys.version_info.major != 3:
print('This Python is only compatible with Python 3, but you are running '
'Python {}. The installation will likely fail.'.format(sys.version_info.major))
setup(name='ddpg_curiosity_mc_her',
packages=[package for package in find_packages()
if package.startswith('ddpg_curiosity_mc_her')],
install_requires=[
'gym[mujoco]',
'scipy',
'tqdm',
'joblib',
'dill',
'progressbar2',
'mpi4py',
'cloudpickle',
'click',
'opencv-python',
'numpy',
'plotly',
'matplotlib'
],
description='DDPG with Curiosity and Multi-Criteria HER, modified from OpenAI baselines.',
author='',
url='https://github.com/openai/baselines',
author_email='',
version='0.1.5')
# ensure there is some tensorflow build with version above 1.4
try:
from distutils.version import StrictVersion
import tensorflow
assert StrictVersion(tensorflow.__version__) >= StrictVersion('1.4.0')
except ImportError:
assert False, "TensorFlow needed, of version above 1.4"
| [] |
2024-01-10 | zhaokaibo830/changqing | docgen~test2~main_new.py | import os
from zipfile import ZipFile
import numpy as np
from lxml import etree
import xml.etree.ElementTree as ET
import zipfile
from docx import Document
import shutil
from langchain.llms.base import LLM
from typing import List, Optional
import requests
import json
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from langchain.document_loaders import Docx2txtLoader
from langchain.embeddings import HuggingFaceEmbeddings
class Vicuna(LLM):
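# Minimal LangChain LLM wrapper: each prompt is POSTed as JSON ({"text": prompt})
# to a remote Vicuna endpoint, and the "response" field of the JSON reply is returned.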
max_token: int = 2048
temperature: float = 0.8
top_p = 0.9
tokenizer: object = None
model: object = None
history_len: int = 1024
# url_llm = "https://u147750-b6ae-2bf49303.neimeng.seetacloud.com:6443/llm"
url_llm = "https://u147750-92ae-0299e063.neimeng.seetacloud.com:6443/llm"
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "Vicuna"
def llm(self, prompt: str):
try:
content1 = json.dumps({"text": prompt})
response = requests.request("POST", self.url_llm, data=content1)
res = response.content.decode('unicode_escape')
return json.loads(res, strict=False)['response']
except:
return "服务器已关闭,请联系服务器管理员"
def _call(self, prompt: str, stop: Optional[List[str]] = None):
response = self.llm(prompt)
return response
def unzip_file(zip_src, dst_dir):
r = zipfile.is_zipfile(zip_src)
if r:
fz = zipfile.ZipFile(zip_src, 'r')
for file in fz.namelist():
fz.extract(file, dst_dir)
else:
print('This is not zip')
def zip_dirs(*dirs):
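# Re-zip the extracted .docx parts into completefile.zip. Each entry is stored
# relative to the common prefix of the given directories so the archive layout
# matches the original OOXML package structure.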
prefix = os.path.commonprefix(dirs)
with ZipFile('completefile.zip', 'w') as z:
for d in dirs:
z.write(d, arcname=os.path.relpath(d, prefix))
for root, dirs, files in os.walk(d):
for fn in files:
z.write(
fp := os.path.join(root, fn),
arcname=os.path.relpath(fp, prefix)
)
def docx_to_xml(docx_path,xml_save_path):
"""
:param docx_path: path to the source Word document
:param xml_save_path: path where the generated XML file is saved
:return:
"""
doc = Document(docx_path)
body_xml_str = doc._body._element.xml # get the raw XML of the document body
body_xml = etree.fromstring(body_xml_str) # convert it into an lxml node
# print(etree.tounicode(body_xml)) # print to inspect
mode = 'w'
with open(xml_save_path, mode,encoding='utf-8') as f:
# string = string.encode('utf-8')
f.write(etree.tounicode(body_xml))
def generate_table_description(table_describe_template_path,xml_save_path):
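# Template file layout (one item per line):
#   line 1: isfix_tablehead flag (0 = table header is not fixed, otherwise fixed)
#   line 2: 1-based index of the target table in the document
#   line 3: description template of the form "prefix#suffix"; the suffix is filled
#           with str.format() using the collected cell values
#   remaining lines: "row,col" coordinates (1-based) of the cells to read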
f=open(table_describe_template_path,encoding='utf-8')
isfix_tablehead=f.readline()
table_index=int(f.readline())
table_describe_template = f.readline()
table_x_y=[]
one_x_y=f.readline()
while one_x_y:
x,y=int(one_x_y.split(",")[0]),int(one_x_y.split(",")[1])
table_x_y.append([x,y])
one_x_y=f.readline()
tree = ET.parse(xml_save_path) # ElementTree instance
root = tree.getroot() # root Element
root_tag=root.tag
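# root.tag looks like "{namespace}document"; scan back to the closing "}" so the
# same "{namespace}" prefix can be reused when looking up child elements
# (prefix + "body", prefix + "tbl", prefix + "tr", ...).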
i=len(root_tag)-1
while True:
if root_tag[i]=="}":
break
i-=1
prefix=root_tag[:i+1]
body=root.find(prefix+"body")
tbl=list(body.findall(prefix+"tbl"))[table_index-1]
all_rows=list(tbl.findall(prefix+"tr"))
table_describe_template_prefix=table_describe_template.split("#")[0]
table_describe_template_suffix=table_describe_template.split("#")[1]
result=table_describe_template_prefix
value=[]
if int(isfix_tablehead)==0:
# isfix_tablehead == 0 means the table header is not fixed; any other value means it is fixed
for r, c in table_x_y:
temp_value = ""
all_p = list(all_rows[r - 1].findall(prefix + "tc"))[c - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
result += table_describe_template_suffix.format(*value)
pass
else:
for i in range(len(all_rows)-table_x_y[0][0]+1):
if len(list(all_rows[i+table_x_y[0][0]-1].findall(prefix + "tc"))) == 1:
break
if i==0:
value=[]
for r,c in table_x_y:
temp_value=""
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))[c - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
else:
for j,(r,c) in enumerate(table_x_y):
# print("j=",j)
all_vMerge = list(all_rows[r - 1 + i].findall(prefix + "tc")[c - 1].find(prefix + "tcPr").findall(
prefix + "vMerge"))
if len(all_vMerge)>0 and all_vMerge[0].attrib[prefix + "val"]=="continue":
continue
else:
temp_value=""
all_p=list(all_rows[r-1+i].findall(prefix+"tc"))[c-1].findall(prefix+"p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r=list(one_p.findall(prefix+"r"))
for one_r in all_r:
temp_value=temp_value+one_r.find(prefix+"t").text
if not temp_value:
temp_value = "没填写内容"
value[j]=temp_value
arr = np.array(value)
if (arr == "没填写内容").all():
break
# print(value)
result += table_describe_template_suffix.format(*value)
if len(list(all_rows[-1].findall(prefix + "tc")))==1:
temp_value = ""
all_p = list(all_rows[- 1].findall(prefix + "tc"))[0].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) > 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
result+=temp_value
return table_index,result
def generate_table2_description(table_describe_template_path,xml_save_path):
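# Variant of generate_table_description for the second table, whose fourth column
# can be split into two physical cells when a row has more than 7 <w:tc> cells:
# the extra cell's text is collected in forth_val_2 and appended to value[3]
# before formatting the description suffix.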
f=open(table_describe_template_path,encoding='utf-8')
isfix_tablehead=f.readline()
table_index=int(f.readline())
table_describe_template = f.readline()
table_x_y=[]
one_x_y=f.readline()
while one_x_y:
x,y=int(one_x_y.split(",")[0]),int(one_x_y.split(",")[1])
table_x_y.append([x,y])
one_x_y=f.readline()
tree = ET.parse(xml_save_path) # 类ElementTree
root = tree.getroot() # 类Element
root_tag=root.tag
i=len(root_tag)-1
while True:
if root_tag[i]=="}":
break
i-=1
prefix=root_tag[:i+1]
body=root.find(prefix+"body")
tbl=list(body.findall(prefix+"tbl"))[table_index-1]
all_rows=list(tbl.findall(prefix+"tr"))
table_describe_template_prefix=table_describe_template.split("#")[0]
table_describe_template_suffix=table_describe_template.split("#")[1]
result=table_describe_template_prefix
value=[]
if int(isfix_tablehead)==0:
# isfix_tablehead == 0 means the table header is not fixed; any other value means it is fixed
for r, c in table_x_y:
temp_value = ""
all_p = list(all_rows[r - 1].findall(prefix + "tc"))[c - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
result += table_describe_template_suffix.format(*value)
pass
else:
# print(len(all_rows))
for i in range(len(all_rows)-table_x_y[0][0]+1):
count=0 # tracks whether this column has been split into an extra cell
if len(list(all_rows[i+table_x_y[0][0]-1].findall(prefix + "tc"))) == 1:
break
forth_val_2 = ""
if i==0:
value=[]
for r,c in table_x_y:
temp_value=""
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))
print(count)
all_p=all_p[c+count - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
if c == 4 and len(list(all_rows[r - 1 + i].findall(prefix + "tc")))>7:
count+=1
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))[c + count - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) != 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
forth_val_2 = forth_val_2 + one_r.find(prefix + "t").text
else:
for j,(r,c) in enumerate(table_x_y):
# print("j=",j)
all_vMerge = list(all_rows[r - 1 + i].findall(prefix + "tc")[c+count - 1].find(prefix + "tcPr").findall(
prefix + "vMerge"))
if len(all_vMerge)>0 and all_vMerge[0].attrib[prefix + "val"]=="continue":
if c == 4 and len(list(all_rows[r - 1 + i].findall(prefix + "tc")))>7:
forth_val_2=""
else:
continue
else:
temp_value=""
all_p=list(all_rows[r-1+i].findall(prefix+"tc"))[c+count-1].findall(prefix+"p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r=list(one_p.findall(prefix+"r"))
for one_r in all_r:
temp_value=temp_value+one_r.find(prefix+"t").text
if not temp_value:
temp_value = "没填写内容"
value[j] = temp_value
if c == 4 and len(list(all_rows[r - 1 + i].findall(prefix + "tc")))>7:
count += 1
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))[c + count - 1].findall(
prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) != 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
forth_val_2 = forth_val_2 + one_r.find(prefix + "t").text
print(value)
arr = np.array(value)
if (arr == "没填写内容").all():
break
# print(value)
value_temp=value[:]
value_temp[3]=value_temp[3]+forth_val_2
result += table_describe_template_suffix.format(*value_temp)
if len(list(all_rows[-1].findall(prefix + "tc")))==1:
temp_value = ""
all_p = list(all_rows[- 1].findall(prefix + "tc"))[0].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) > 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
result+=temp_value
return table_index,result
def table_describe_to_doc(table_index,table_describe,complete_file_path):
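# Locate the table_index-th <w:tbl>...</w:tbl> block inside word/document.xml and
# replace it with a <w:p> paragraph containing the generated description text.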
f=open(complete_file_path+"/word/document.xml",encoding='utf-8')
file_str=f.read()
f.close()
index_temp=table_index
start=-1
while index_temp>0:
start=file_str.find('<w:tbl>',start+1)
index_temp-=1
index_temp=table_index
end=-1
while index_temp>0:
end=file_str.find('</w:tbl>',end+1)
index_temp-=1
end=end+7
insertleft="""<w:p>
<w:pPr>
<w:spacing w:line="360" w:lineRule="auto"/>
<w:ind w:firstLine="480" w:firstLineChars="200"/>
<w:rPr>
<w:rFonts w:ascii="Arial" w:hAnsi="Arial" w:cs="Arial"/>
<w:sz w:val="24"/>
<w:szCs w:val="24"/>
</w:rPr>
</w:pPr>
<w:bookmarkStart w:id="40" w:name="_Toc83780989"/>
<w:bookmarkStart w:id="41" w:name="_Toc77646121"/>
<w:bookmarkStart w:id="42" w:name="_Toc415414318"/>
<w:r>
<w:rPr>
<w:rFonts w:ascii="Arial" w:hAnsi="Arial" w:cs="Arial"/>
<w:sz w:val="24"/>
<w:szCs w:val="24"/>
</w:rPr>
<w:t>"""
insertright="""</w:t>
</w:r>
</w:p>"""
inserttext=insertleft+table_describe+insertright
new_file_str=file_str[:start]+inserttext+file_str[end+1:]
with open(complete_file_path+"/word/document.xml",encoding='utf-8',mode="w") as f:
f.write(new_file_str)
pass
if __name__ == '__main__':
docx_path="completefile.docx"
# xml_save_path="tabel.xml"
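# A .docx file is just a ZIP archive: rename it to .zip, extract the OOXML parts
# into completefile/, then restore the original file name.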
os.rename("completefile.docx","completefile.zip")
unzip_file("completefile.zip", "completefile/")
os.rename("completefile.zip","completefile.docx")
shutil.copy("completefile/word/document.xml","document.xml")
# table_index,table_describe=generate_description("table_describe_template.txt","document.xml")
# print(table_describe)
#
# table_describe_to_doc(table_index,table_describe,"completefile")
# table_index,table_describe=generate_table2_description("all_table_descibe_template/2.txt","document.xml")
# print(table_describe)
for m in range(18,1,-1):
if m==2:
table_index, table_describe = generate_table2_description("all_table_descibe_template/{}.txt".format(m),
"document.xml")
else:
table_index, table_describe = generate_table_description("all_table_descibe_template/{}.txt".format(m),
"document.xml")
table_describe_to_doc(table_index,table_describe,"completefile")
ll = os.listdir("completefile")
temp = []
for ll_one in ll:
temp.append("completefile/" + ll_one)
print(temp)
zip_dirs(*temp)
os.rename("completefile.zip","target.docx")
loader = Docx2txtLoader("target.docx")
data = loader.load()
path = 'result.txt'
mode = 'w'
string = data[0].page_content
with open(path, mode, encoding='utf-8') as f:
# string = string.encode('utf-8')
f.write(string)
# load the large language model
llm = Vicuna()
openai_api_key = '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'
loader = TextLoader("result.txt", autodetect_encoding=True)
documents = loader.load()
# print(documents)
text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=50, separator="\n")
texts = text_splitter.split_documents(documents)
embeddings = HuggingFaceEmbeddings()
faiss_index = FAISS.from_documents(texts, OpenAIEmbeddings(openai_api_key=openai_api_key))
qa = RetrievalQA.from_chain_type(llm=Vicuna(), chain_type="stuff", retriever=faiss_index.as_retriever())
question_path = "1_q.txt"
save_path = "1_a.txt"
question_mode = "r"
save_mode = "w"
f1 = open(save_path, save_mode, encoding='utf-8')
# os.remove("target.docx")
# os.remove("document.xml")
# os.remove("result.txt")
# os.rmdir("completefile")
with open(question_path, question_mode, encoding='utf-8') as f:
human_Q = f.readline()
# human_A=f.readline()
while human_Q:
query = human_Q
print("Q:" + human_Q)
pre = qa.run(query)
print("AI_A:" + pre)
# pre="你多的对《》《《》《@!#%¥……%"
f1.write("Q:" + human_Q)
# f1.write(human_A)
f1.write("AI_A:" + pre)
f1.write("\n\n")
human_Q = f.readline()
# human_A = f.readline()
f1.close()
pass
| [] |
2024-01-10 | zhaokaibo830/changqing | docgen~test1~funcs~q2a.py | import os
from zipfile import ZipFile
import numpy as np
from lxml import etree
import xml.etree.ElementTree as ET
import zipfile
from docx import Document
import shutil
from langchain.llms.base import LLM
from typing import List, Optional
import requests
import json
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from langchain.document_loaders import Docx2txtLoader
from langchain.embeddings import HuggingFaceEmbeddings
class Vicuna(LLM):
max_token: int = 2048
temperature: float = 0.8
top_p = 0.9
tokenizer: object = None
model: object = None
history_len: int = 1024
# url_llm = "https://u147750-b6ae-2bf49303.neimeng.seetacloud.com:6443/llm"
url_llm = "https://u147750-92ae-0299e063.neimeng.seetacloud.com:6443/llm"
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "Vicuna"
def llm(self, prompt: str):
try:
content1 = json.dumps({"text": prompt})
response = requests.request("POST", self.url_llm, data=content1)
res = response.content.decode('unicode_escape')
return json.loads(res, strict=False)['response']
except:
return "服务器已关闭,请联系服务器管理员"
def _call(self, prompt: str, stop: Optional[List[str]] = None):
response = self.llm(prompt)
return response
def unzip_file(zip_src, dst_dir):
r = zipfile.is_zipfile(zip_src)
if r:
fz = zipfile.ZipFile(zip_src, 'r')
for file in fz.namelist():
fz.extract(file, dst_dir)
else:
print('This is not zip')
def zip_dirs(*dirs):
prefix = os.path.commonprefix(dirs)
with ZipFile('completefile.zip', 'w') as z:
for d in dirs:
z.write(d, arcname=os.path.relpath(d, prefix))
for root, dirs, files in os.walk(d):
for fn in files:
z.write(
fp := os.path.join(root, fn),
arcname=os.path.relpath(fp, prefix)
)
def docx_to_xml(docx_path,xml_save_path):
"""
:param docx_path: path to the source Word document
:param xml_save_path: path where the generated XML file is saved
:return:
"""
doc = Document(docx_path)
body_xml_str = doc._body._element.xml # get the raw XML of the document body
body_xml = etree.fromstring(body_xml_str) # convert it into an lxml node
# print(etree.tounicode(body_xml)) # print to inspect
mode = 'w'
with open(xml_save_path, mode,encoding='utf-8') as f:
# string = string.encode('utf-8')
f.write(etree.tounicode(body_xml))
def generate_table_description(table_describe_template_path,xml_save_path):
f=open(table_describe_template_path,encoding='utf-8')
isfix_tablehead=f.readline()
table_index=int(f.readline())
table_describe_template = f.readline()
table_x_y=[]
one_x_y=f.readline()
while one_x_y:
x,y=int(one_x_y.split(",")[0]),int(one_x_y.split(",")[1])
table_x_y.append([x,y])
one_x_y=f.readline()
tree = ET.parse(xml_save_path) # ElementTree instance
root = tree.getroot() # root Element
root_tag=root.tag
i=len(root_tag)-1
while True:
if root_tag[i]=="}":
break
i-=1
prefix=root_tag[:i+1]
body=root.find(prefix+"body")
tbl=list(body.findall(prefix+"tbl"))[table_index-1]
all_rows=list(tbl.findall(prefix+"tr"))
table_describe_template_prefix=table_describe_template.split("#")[0]
table_describe_template_suffix=table_describe_template.split("#")[1]
result=table_describe_template_prefix
value=[]
if int(isfix_tablehead)==0:
# isfix_tablehead == 0 means the table header is not fixed; any other value means it is fixed
for r, c in table_x_y:
temp_value = ""
all_p = list(all_rows[r - 1].findall(prefix + "tc"))[c - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
result += table_describe_template_suffix.format(*value)
pass
else:
for i in range(len(all_rows)-table_x_y[0][0]+1):
if len(list(all_rows[i+table_x_y[0][0]-1].findall(prefix + "tc"))) == 1:
break
if i==0:
value=[]
for r,c in table_x_y:
temp_value=""
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))[c - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
else:
for j,(r,c) in enumerate(table_x_y):
# print("j=",j)
all_vMerge = list(all_rows[r - 1 + i].findall(prefix + "tc")[c - 1].find(prefix + "tcPr").findall(
prefix + "vMerge"))
if len(all_vMerge)>0 and all_vMerge[0].attrib[prefix + "val"]=="continue":
continue
else:
temp_value=""
all_p=list(all_rows[r-1+i].findall(prefix+"tc"))[c-1].findall(prefix+"p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r=list(one_p.findall(prefix+"r"))
for one_r in all_r:
temp_value=temp_value+one_r.find(prefix+"t").text
if not temp_value:
temp_value = "没填写内容"
value[j]=temp_value
arr = np.array(value)
if (arr == "没填写内容").all():
break
# print(value)
result += table_describe_template_suffix.format(*value)
if len(list(all_rows[-1].findall(prefix + "tc")))==1:
temp_value = ""
all_p = list(all_rows[- 1].findall(prefix + "tc"))[0].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) > 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
result+=temp_value
return table_index,result
def generate_table2_description(table_describe_template_path,xml_save_path):
f=open(table_describe_template_path,encoding='utf-8')
isfix_tablehead=f.readline()
table_index=int(f.readline())
table_describe_template = f.readline()
table_x_y=[]
one_x_y=f.readline()
while one_x_y:
x,y=int(one_x_y.split(",")[0]),int(one_x_y.split(",")[1])
table_x_y.append([x,y])
one_x_y=f.readline()
tree = ET.parse(xml_save_path) # 类ElementTree
root = tree.getroot() # 类Element
root_tag=root.tag
i=len(root_tag)-1
while True:
if root_tag[i]=="}":
break
i-=1
prefix=root_tag[:i+1]
body=root.find(prefix+"body")
tbl=list(body.findall(prefix+"tbl"))[table_index-1]
all_rows=list(tbl.findall(prefix+"tr"))
table_describe_template_prefix=table_describe_template.split("#")[0]
table_describe_template_suffix=table_describe_template.split("#")[1]
result=table_describe_template_prefix
value=[]
if int(isfix_tablehead)==0:
# isfix_tablehead == 0 means the table header is not fixed; any other value means it is fixed
for r, c in table_x_y:
temp_value = ""
all_p = list(all_rows[r - 1].findall(prefix + "tc"))[c - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
result += table_describe_template_suffix.format(*value)
pass
else:
# print(len(all_rows))
for i in range(len(all_rows)-table_x_y[0][0]+1):
count=0 # tracks whether this column has been split into an extra cell
if len(list(all_rows[i+table_x_y[0][0]-1].findall(prefix + "tc"))) == 1:
break
forth_val_2 = ""
if i==0:
value=[]
for r,c in table_x_y:
temp_value=""
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))
# print(count)
all_p=all_p[c+count - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
if c == 4 and len(list(all_rows[r - 1 + i].findall(prefix + "tc")))>7:
count+=1
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))[c + count - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) != 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
forth_val_2 = forth_val_2 + one_r.find(prefix + "t").text
else:
for j,(r,c) in enumerate(table_x_y):
# print("j=",j)
all_vMerge = list(all_rows[r - 1 + i].findall(prefix + "tc")[c+count - 1].find(prefix + "tcPr").findall(
prefix + "vMerge"))
if len(all_vMerge)>0 and all_vMerge[0].attrib[prefix + "val"]=="continue":
if c == 4 and len(list(all_rows[r - 1 + i].findall(prefix + "tc")))>7:
forth_val_2=""
else:
continue
else:
temp_value=""
all_p=list(all_rows[r-1+i].findall(prefix+"tc"))[c+count-1].findall(prefix+"p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r=list(one_p.findall(prefix+"r"))
for one_r in all_r:
temp_value=temp_value+one_r.find(prefix+"t").text
if not temp_value:
temp_value = "没填写内容"
value[j] = temp_value
if c == 4 and len(list(all_rows[r - 1 + i].findall(prefix + "tc")))>7:
count += 1
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))[c + count - 1].findall(
prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) != 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
forth_val_2 = forth_val_2 + one_r.find(prefix + "t").text
# print(value)
arr = np.array(value)
if (arr == "没填写内容").all():
break
# print(value)
value_temp=value[:]
value_temp[3]=value_temp[3]+forth_val_2
result += table_describe_template_suffix.format(*value_temp)
if len(list(all_rows[-1].findall(prefix + "tc")))==1:
temp_value = ""
all_p = list(all_rows[- 1].findall(prefix + "tc"))[0].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) > 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
result+=temp_value
return table_index,result
def table_describe_to_doc(table_index,table_describe,complete_file_path):
f=open(complete_file_path+"/word/document.xml",encoding='utf-8')
file_str=f.read()
f.close()
index_temp=table_index
start=-1
while index_temp>0:
start=file_str.find('<w:tbl>',start+1)
index_temp-=1
index_temp=table_index
end=-1
while index_temp>0:
end=file_str.find('</w:tbl>',end+1)
index_temp-=1
end=end+7
insertleft="""<w:p>
<w:pPr>
<w:spacing w:line="360" w:lineRule="auto"/>
<w:ind w:firstLine="480" w:firstLineChars="200"/>
<w:rPr>
<w:rFonts w:ascii="Arial" w:hAnsi="Arial" w:cs="Arial"/>
<w:sz w:val="24"/>
<w:szCs w:val="24"/>
</w:rPr>
</w:pPr>
<w:bookmarkStart w:id="40" w:name="_Toc83780989"/>
<w:bookmarkStart w:id="41" w:name="_Toc77646121"/>
<w:bookmarkStart w:id="42" w:name="_Toc415414318"/>
<w:r>
<w:rPr>
<w:rFonts w:ascii="Arial" w:hAnsi="Arial" w:cs="Arial"/>
<w:sz w:val="24"/>
<w:szCs w:val="24"/>
</w:rPr>
<w:t>"""
insertright="""</w:t>
</w:r>
</w:p>"""
inserttext=insertleft+table_describe+insertright
new_file_str=file_str[:start]+inserttext+file_str[end+1:]
with open(complete_file_path+"/word/document.xml",encoding='utf-8',mode="w") as f:
f.write(new_file_str)
pass
def start():
# os.rename("completefile.docx", "completefile.zip")
# unzip_file("completefile.zip", "completefile/")
# os.rename("completefile.zip", "completefile.docx")
# shutil.copy("completefile/word/document.xml","document.xml")
#
# for m in range(18,1,-1):
# if m==2:
# table_index, table_describe = generate_table2_description("all_table_descibe_template/{}.txt".format(m),
# "document.xml")
# else:
# table_index, table_describe = generate_table_description("all_table_descibe_template/{}.txt".format(m),
# "document.xml")
# table_describe_to_doc(table_index,table_describe,"completefile")
#
#
# ll = os.listdir("completefile")
# temp = []
# for ll_one in ll:
# temp.append("completefile/" + ll_one)
# # print(temp)
# zip_dirs(*temp)
# os.rename("completefile.zip","target.docx")
#
# loader = Docx2txtLoader("target.docx")
#
# data = loader.load()
#
# path = 'result.txt'
# mode = 'w'
# string = data[0].page_content
# with open(path, mode, encoding='utf-8') as f:
# # string = string.encode('utf-8')
# f.write(string)
# load the large language model
llm = Vicuna()
# openai_api_key = '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'
loader = TextLoader("result.txt", autodetect_encoding=True)
documents = loader.load()
# print(documents)
text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=50, separator="\n")
texts = text_splitter.split_documents(documents)
faiss_index = FAISS.from_documents(texts, HuggingFaceEmbeddings())
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=faiss_index.as_retriever())
return qa
def ReportQA(qa,question):
pre = qa.run(question)
return pre
| [] |
2024-01-10 | zhaokaibo830/changqing | docgen~test~main_new.py | import os
from zipfile import ZipFile
import numpy as np
from lxml import etree
import xml.etree.ElementTree as ET
import zipfile
from docx import Document
import shutil
from langchain.llms.base import LLM
from typing import List, Optional
import requests
import json
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from langchain.document_loaders import Docx2txtLoader
from langchain.embeddings import HuggingFaceEmbeddings
class Vicuna(LLM):
max_token: int = 2048
temperature: float = 0.8
top_p = 0.9
tokenizer: object = None
model: object = None
history_len: int = 1024
# url_llm = "https://u147750-b6ae-2bf49303.neimeng.seetacloud.com:6443/llm"
url_llm = "https://u147750-92ae-0299e063.neimeng.seetacloud.com:6443/llm"
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "Vicuna"
def llm(self, prompt: str):
try:
content1 = json.dumps({"text": prompt})
response = requests.request("POST", self.url_llm, data=content1)
res = response.content.decode('unicode_escape')
return json.loads(res, strict=False)['response']
except:
return "服务器已关闭,请联系服务器管理员"
def _call(self, prompt: str, stop: Optional[List[str]] = None):
response = self.llm(prompt)
return response
def unzip_file(zip_src, dst_dir):
r = zipfile.is_zipfile(zip_src)
if r:
fz = zipfile.ZipFile(zip_src, 'r')
for file in fz.namelist():
fz.extract(file, dst_dir)
else:
print('This is not zip')
def zip_dirs(*dirs):
prefix = os.path.commonprefix(dirs)
with ZipFile('completefile.zip', 'w') as z:
for d in dirs:
z.write(d, arcname=os.path.relpath(d, prefix))
for root, dirs, files in os.walk(d):
for fn in files:
z.write(
fp := os.path.join(root, fn),
arcname=os.path.relpath(fp, prefix)
)
def docx_to_xml(docx_path,xml_save_path):
"""
:param docx_path: path to the source Word document
:param xml_save_path: path where the generated XML file is saved
:return:
"""
doc = Document(docx_path)
body_xml_str = doc._body._element.xml # get the raw XML of the document body
body_xml = etree.fromstring(body_xml_str) # convert it into an lxml node
# print(etree.tounicode(body_xml)) # print to inspect
mode = 'w'
with open(xml_save_path, mode,encoding='utf-8') as f:
# string = string.encode('utf-8')
f.write(etree.tounicode(body_xml))
def generate_table_description(table_describe_template_path,xml_save_path):
f=open(table_describe_template_path,encoding='utf-8')
isfix_tablehead=f.readline()
table_index=int(f.readline())
table_describe_template = f.readline()
table_x_y=[]
one_x_y=f.readline()
while one_x_y:
x,y=int(one_x_y.split(",")[0]),int(one_x_y.split(",")[1])
table_x_y.append([x,y])
one_x_y=f.readline()
tree = ET.parse(xml_save_path) # ElementTree instance
root = tree.getroot() # root Element
root_tag=root.tag
i=len(root_tag)-1
while True:
if root_tag[i]=="}":
break
i-=1
prefix=root_tag[:i+1]
body=root.find(prefix+"body")
tbl=list(body.findall(prefix+"tbl"))[table_index-1]
all_rows=list(tbl.findall(prefix+"tr"))
table_describe_template_prefix=table_describe_template.split("#")[0]
table_describe_template_suffix=table_describe_template.split("#")[1]
result=table_describe_template_prefix
value=[]
if int(isfix_tablehead)==0:
# isfix_tablehead == 0 means the table header is not fixed; any other value means it is fixed
for r, c in table_x_y:
temp_value = ""
all_p = list(all_rows[r - 1].findall(prefix + "tc"))[c - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
result += table_describe_template_suffix.format(*value)
pass
else:
for i in range(len(all_rows)-table_x_y[0][0]+1):
if len(list(all_rows[i+table_x_y[0][0]-1].findall(prefix + "tc"))) == 1:
break
if i==0:
value=[]
for r,c in table_x_y:
temp_value=""
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))[c - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
else:
for j,(r,c) in enumerate(table_x_y):
# print("j=",j)
all_vMerge = list(all_rows[r - 1 + i].findall(prefix + "tc")[c - 1].find(prefix + "tcPr").findall(
prefix + "vMerge"))
if len(all_vMerge)>0 and all_vMerge[0].attrib[prefix + "val"]=="continue":
continue
else:
temp_value=""
all_p=list(all_rows[r-1+i].findall(prefix+"tc"))[c-1].findall(prefix+"p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r=list(one_p.findall(prefix+"r"))
for one_r in all_r:
temp_value=temp_value+one_r.find(prefix+"t").text
if not temp_value:
temp_value = "没填写内容"
value[j]=temp_value
arr = np.array(value)
if (arr == "没填写内容").all():
break
# print(value)
result += table_describe_template_suffix.format(*value)
if len(list(all_rows[-1].findall(prefix + "tc")))==1:
temp_value = ""
all_p = list(all_rows[- 1].findall(prefix + "tc"))[0].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) > 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
result+=temp_value
return table_index,result
def generate_table2_description(table_describe_template_path,xml_save_path):
f=open(table_describe_template_path,encoding='utf-8')
isfix_tablehead=f.readline()
table_index=int(f.readline())
table_describe_template = f.readline()
table_x_y=[]
one_x_y=f.readline()
while one_x_y:
x,y=int(one_x_y.split(",")[0]),int(one_x_y.split(",")[1])
table_x_y.append([x,y])
one_x_y=f.readline()
tree = ET.parse(xml_save_path) # 类ElementTree
root = tree.getroot() # 类Element
root_tag=root.tag
i=len(root_tag)-1
while True:
if root_tag[i]=="}":
break
i-=1
prefix=root_tag[:i+1]
body=root.find(prefix+"body")
tbl=list(body.findall(prefix+"tbl"))[table_index-1]
all_rows=list(tbl.findall(prefix+"tr"))
table_describe_template_prefix=table_describe_template.split("#")[0]
table_describe_template_suffix=table_describe_template.split("#")[1]
result=table_describe_template_prefix
value=[]
if int(isfix_tablehead)==0:
# isfix_tablehead == 0 means the table header is not fixed; any other value means it is fixed
for r, c in table_x_y:
temp_value = ""
all_p = list(all_rows[r - 1].findall(prefix + "tc"))[c - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
result += table_describe_template_suffix.format(*value)
pass
else:
# print(len(all_rows))
for i in range(len(all_rows)-table_x_y[0][0]+1):
count=0 # tracks whether this column has been split into an extra cell
if len(list(all_rows[i+table_x_y[0][0]-1].findall(prefix + "tc"))) == 1:
break
forth_val_2 = ""
if i==0:
value=[]
for r,c in table_x_y:
temp_value=""
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))
print(count)
all_p=all_p[c+count - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
if not temp_value:
temp_value = "没填写内容"
value.append(temp_value)
if c == 4 and len(list(all_rows[r - 1 + i].findall(prefix + "tc")))>7:
count+=1
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))[c + count - 1].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) != 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
forth_val_2 = forth_val_2 + one_r.find(prefix + "t").text
else:
for j,(r,c) in enumerate(table_x_y):
# print("j=",j)
all_vMerge = list(all_rows[r - 1 + i].findall(prefix + "tc")[c+count - 1].find(prefix + "tcPr").findall(
prefix + "vMerge"))
if len(all_vMerge)>0 and all_vMerge[0].attrib[prefix + "val"]=="continue":
if c == 4 and len(list(all_rows[r - 1 + i].findall(prefix + "tc")))>7:
forth_val_2=""
else:
continue
else:
temp_value=""
all_p=list(all_rows[r-1+i].findall(prefix+"tc"))[c+count-1].findall(prefix+"p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) == 0:
temp_value = "没填写内容"
else:
for one_p in list(all_p):
all_r=list(one_p.findall(prefix+"r"))
for one_r in all_r:
temp_value=temp_value+one_r.find(prefix+"t").text
if not temp_value:
temp_value = "没填写内容"
value[j] = temp_value
if c == 4 and len(list(all_rows[r - 1 + i].findall(prefix + "tc")))>7:
count += 1
all_p = list(all_rows[r - 1 + i].findall(prefix + "tc"))[c + count - 1].findall(
prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) != 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
forth_val_2 = forth_val_2 + one_r.find(prefix + "t").text
print(value)
arr = np.array(value)
if (arr == "没填写内容").all():
break
# print(value)
value_temp=value[:]
value_temp[3]=value_temp[3]+forth_val_2
result += table_describe_template_suffix.format(*value_temp)
if len(list(all_rows[-1].findall(prefix + "tc")))==1:
temp_value = ""
all_p = list(all_rows[- 1].findall(prefix + "tc"))[0].findall(prefix + "p")
# print("p长度=", len(list(all_p)))
if len(list(all_p)) > 0:
for one_p in list(all_p):
all_r = list(one_p.findall(prefix + "r"))
for one_r in all_r:
temp_value = temp_value + one_r.find(prefix + "t").text
result+=temp_value
return table_index,result
def table_describe_to_doc(table_index,table_describe,complete_file_path):
f=open(complete_file_path+"/word/document.xml",encoding='utf-8')
file_str=f.read()
f.close()
index_temp=table_index
start=-1
while index_temp>0:
start=file_str.find('<w:tbl>',start+1)
index_temp-=1
index_temp=table_index
end=-1
while index_temp>0:
end=file_str.find('</w:tbl>',end+1)
index_temp-=1
end=end+7
insertleft="""<w:p>
<w:pPr>
<w:spacing w:line="360" w:lineRule="auto"/>
<w:ind w:firstLine="480" w:firstLineChars="200"/>
<w:rPr>
<w:rFonts w:ascii="Arial" w:hAnsi="Arial" w:cs="Arial"/>
<w:sz w:val="24"/>
<w:szCs w:val="24"/>
</w:rPr>
</w:pPr>
<w:bookmarkStart w:id="40" w:name="_Toc83780989"/>
<w:bookmarkStart w:id="41" w:name="_Toc77646121"/>
<w:bookmarkStart w:id="42" w:name="_Toc415414318"/>
<w:r>
<w:rPr>
<w:rFonts w:ascii="Arial" w:hAnsi="Arial" w:cs="Arial"/>
<w:sz w:val="24"/>
<w:szCs w:val="24"/>
</w:rPr>
<w:t>"""
insertright="""</w:t>
</w:r>
</w:p>"""
inserttext=insertleft+table_describe+insertright
new_file_str=file_str[:start]+inserttext+file_str[end+1:]
with open(complete_file_path+"/word/document.xml",encoding='utf-8',mode="w") as f:
f.write(new_file_str)
pass
if __name__ == '__main__':
docx_path="completefile.docx"
# xml_save_path="tabel.xml"
os.rename("completefile.docx","completefile.zip")
unzip_file("completefile.zip", "completefile/")
os.rename("completefile.zip","completefile.docx")
shutil.copy("completefile/word/document.xml","document.xml")
# table_index,table_describe=generate_description("table_describe_template.txt","document.xml")
# print(table_describe)
#
# table_describe_to_doc(table_index,table_describe,"completefile")
# table_index,table_describe=generate_table2_description("all_table_descibe_template/2.txt","document.xml")
# print(table_describe)
for m in range(18,1,-1):
if m==2:
table_index, table_describe = generate_table2_description("all_table_descibe_template/{}.txt".format(m),
"document.xml")
else:
table_index, table_describe = generate_table_description("all_table_descibe_template/{}.txt".format(m),
"document.xml")
table_describe_to_doc(table_index,table_describe,"completefile")
ll = os.listdir("completefile")
temp = []
for ll_one in ll:
temp.append("completefile/" + ll_one)
print(temp)
zip_dirs(*temp)
os.rename("completefile.zip","target.docx")
loader = Docx2txtLoader("target.docx")
data = loader.load()
path = 'result.txt'
mode = 'w'
string = data[0].page_content
with open(path, mode, encoding='utf-8') as f:
# string = string.encode('utf-8')
f.write(string)
# load the large language model
llm = Vicuna()
openai_api_key = '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'
loader = TextLoader("result.txt", autodetect_encoding=True)
documents = loader.load()
# print(documents)
text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=50, separator="\n")
texts = text_splitter.split_documents(documents)
embeddings = HuggingFaceEmbeddings()
faiss_index = FAISS.from_documents(texts, OpenAIEmbeddings(openai_api_key=openai_api_key))
qa = RetrievalQA.from_chain_type(llm=Vicuna(), chain_type="stuff", retriever=faiss_index.as_retriever())
question_path = "1_q.txt"
save_path = "1_a.txt"
question_mode = "r"
save_mode = "w"
f1 = open(save_path, save_mode, encoding='utf-8')
# os.remove("target.docx")
# os.remove("document.xml")
# os.remove("result.txt")
# os.rmdir("completefile")
with open(question_path, question_mode, encoding='utf-8') as f:
human_Q = f.readline()
# human_A=f.readline()
while human_Q:
query = human_Q
print("Q:" + human_Q)
pre = qa.run(query)
print("AI_A:" + pre)
# pre="你多的对《》《《》《@!#%¥……%"
f1.write("Q:" + human_Q)
# f1.write(human_A)
f1.write("AI_A:" + pre)
f1.write("\n\n")
human_Q = f.readline()
# human_A = f.readline()
f1.close()
pass
| [] |
2024-01-10 | zhaokaibo830/changqing | docgen~test3_400docs~funcs~q2a.py | # -*- coding:utf-8 -*-
import os
from zipfile import ZipFile
import numpy as np
from lxml import etree
import xml.etree.ElementTree as ET
import zipfile
from docx import Document
import shutil
from langchain.llms.base import LLM
from typing import List, Optional
import requests
import json
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from langchain.document_loaders import Docx2txtLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import UnstructuredPDFLoader
class Vicuna(LLM):
max_token: int = 2048
temperature: float = 0.8
top_p = 0.9
tokenizer: object = None
model: object = None
history_len: int = 1024
url_llm = "http://localhost:6007/llm"
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "Vicuna"
def llm(self, prompt: str):
try:
content1 = json.dumps({"text": prompt})
response = requests.request("POST", self.url_llm, data=content1)
res = response.content.decode('unicode_escape')
return json.loads(res, strict=False)['response']
except Exception as e:
print(e)
return "服务器已关闭,请联系服务器管理员"
def _call(self, prompt: str, stop: Optional[List[str]] = None):
response = self.llm(prompt)
return response
def start():
llm = Vicuna()
embeddings = HuggingFaceEmbeddings(model_name='/root/report_qa/huggingface/infgrad/stella-large-zh')
all_faiss_index={}
count=1
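# Build one FAISS index per plain-text document; all_faiss_index maps
# directory name -> { well-name or year prefix -> (FAISS index, file name without extension) }.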
for i, dir_name in enumerate(os.listdir("./plain_text/")):
# print(all_faiss_index)
# print(i)
# print(dir_name)
temp={}
for file_name in os.listdir("./plain_text/"+dir_name+"/"):
# file_name=file_name.encode('UTF-8', 'ignore').decode('UTF-8')
print("正在emmending的文件序号:",count)
count+=1
# whole_file_name = "./document/"+dir_name+"/" + file_name
# loader = Docx2txtLoader(whole_file_name)
#
# data = loader.load()
# # #
path = "./plain_text/" +dir_name+"/"+ file_name[:-4] + ".txt"
# # #
# mode = 'w'
# string = data[0].page_content
#
# with open(path, mode, encoding='utf-8') as f:
# # string = string.encode('utf-8')
# f.write(string)
loader = TextLoader(path, autodetect_encoding=True)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=250, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
faiss_index = FAISS.from_documents(texts, embeddings)
if dir_name=="钻井地质设计报告":
temp[file_name.split('井')[0]+'井']=(faiss_index,file_name[:-4])
elif dir_name=="油田开发年报":
temp[file_name.split('年')[0]+'年'] = (faiss_index,file_name[:-4])
elif dir_name == "气田开发年报":
temp[file_name.split('年')[0] + '年'] = (faiss_index, file_name[:-4])
all_faiss_index[dir_name]=temp
print("all_faiss_index:", all_faiss_index)
return all_faiss_index,llm
def start_table():
embeddings = HuggingFaceEmbeddings(model_name='/root/report_qa/huggingface/infgrad/stella-large-zh')
all_faiss_index_table={}
count=1
for i, dir_name in enumerate(os.listdir("./PDF/")):
temp={}
for file_name in os.listdir("./PDF/"+dir_name+"/"):
print("正在emmending的文件序号:",count)
count+=1
path = "./PDF/" +dir_name+"/"+ file_name[:-4] + ".pdf"
loader = UnstructuredPDFLoader(path, mode="elements")
documents = loader.load()
faiss_index = FAISS.from_documents(documents, embeddings)
if dir_name=="钻井地质设计报告":
temp[file_name.split('井')[0]+'井']=(faiss_index,file_name[:-4])
elif dir_name=="油田开发年报":
temp[file_name.split('年')[0]+'年'] = (faiss_index,file_name[:-4])
elif dir_name == "气田开发年报":
temp[file_name.split('年')[0] + '年'] = (faiss_index, file_name[:-4])
all_faiss_index_table[dir_name]=temp
print("all_faiss_index_table:", all_faiss_index_table)
return all_faiss_index_table
# all_faiss_index,llm=start()
# print(all_faiss_index)
# if __name__ == '__main__':
# all_faiss_index,llm=start()
# print(all_faiss_index)
| [] |
2024-01-10 | keboola/kai_slack_bot | src~tools~confluence_search~confluence_search.py | from atlassian import Confluence, errors
from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex
from langchain.llms import OpenAI
import os
import sys
from decouple import config
from typing import Union
import logging
import uuid
confluence = Confluence(
url=config("CONFLUENCE_URL"),
username=config("CONFLUENCE_USERNAME"),
password=config("CONFLUENCE_PASSWORD")
)
try:
main_file_path = '~'
except Exception as e:
print("Error getting main_file_path:", str(e))
main_file_path = ''
print("main_file_path:", main_file_path)
data_dir = os.path.join(os.path.dirname(os.path.abspath(main_file_path)), "data")
print("data_dir:", data_dir)
pre_prompt = """
You are being given a question that needs to be answered by searching the Keboola Confluence docs. To find the answer,
you must generate a CQL query that will return the most relevant results.
Confluence uses Apache Lucene for text indexing, which provides a rich query language. Much of the information about how
to generate a CQL query can be found in the Query Parser Syntax page of the Lucene documentation.
A query is composed of terms and operators. There are two types of terms: single terms and phrases.
- Single terms: These are single words like "test" or "hello".
- Phrases: These are groups of words surrounded by double quotes, such as "hello dolly".
Remember that all query terms in Confluence are case insensitive.
Your task is to take the input question and generate a CQL query that will return the most relevant results. Focus on
using shorter terms rather than long ones, and avoid including phrases like "step by step guide" or "exact process."
Do not include words like "step" or "guide" in the search query.
Please respond with a valid Atlassian CQL query that you believe will yield the most relevant results.
Examples:
text ~ "Keboola AND Python AND component"
text ~ "BYODB AND process"
text ~ "Keboola AND component AND publish"
"""
pre_prompt_history = """
You are being given a conversation history between a ChatGPT bot and a user asking a question
that needs to be answered by searching the Keboola Confluence docs. To find the answer,
you must generate a CQL query that will return the most relevant results.
Confluence uses Apache Lucene for text indexing, which provides a rich query language. Much of the information about how
to generate a CQL query can be found in the Query Parser Syntax page of the Lucene documentation.
A query is composed of terms and operators. There are two types of terms: single terms and phrases.
- Single terms: These are single words like "test" or "hello".
- Phrases: These are groups of words surrounded by double quotes, such as "hello dolly".
Remember that all query terms in Confluence are case insensitive.
Your task is to take the input question and generate a CQL query that will return the most relevant results. Focus on
using shorter terms rather than long ones, and avoid including phrases like "step by step guide" or "exact process."
Do not include words like "step" or "guide" in the search query.
Please respond with a valid Atlassian CQL query that you believe will yield the most relevant results.
Examples:
text ~ "Keboola AND Python AND component"
text ~ "BYODB AND process"
text ~ "Keboola AND component AND publish"
"""
def create_unique_folder():
folder_name = str(uuid.uuid4())
folder_path = os.path.join(data_dir, folder_name)
os.makedirs(folder_path)
return folder_path
def generate_cql_query_keywords(input_text: str, user_messages: list = None, bot_messages: list = None) -> str:
llm = OpenAI()
if user_messages and bot_messages:
prompt = pre_prompt_history + f"bot messages: {bot_messages}, user_messages: {user_messages}"
else:
prompt = pre_prompt + input_text
logging.info(f"Prompting: {prompt}")
response = llm.predict(prompt)
cql_query = response.replace("\n", "").strip(" ")
return cql_query
def query_conflu(cql_query: str):
logging.info(f"Query: {cql_query}")
pages = None
try:
pages = confluence.cql(cql_query)
except errors.ApiValueError:
logging.error(f"Query: {cql_query} is invalid.")
return pages
def download_documents(pages: list):
documents = []
query_directory = create_unique_folder()
for page in pages:
# Check the local directory to see if we already have the page's content
if os.path.exists(f"{query_directory}/{page['content']['id']}.txt"):
with open(f"{query_directory}/{page['content']['id']}.txt", "r") as f:
documents.append(f.read())
f.close()
continue
# If we don't have the page's content, then get it from Confluence
else:
content = confluence.get_page_by_id(page['content']['id'], expand='body.view')
documents.append(content['body']['view']['value'])
# add each page's content as a txt file in the data directory
with open(f"{query_directory}/{page['content']['id']}.txt", "w") as f:
f.write(content['body']['view']['value'])
f.close()
    # load the saved pages as llama_index Documents (this replaces the raw strings collected above)
logging.info(f"Using document directory: {query_directory}")
documents = SimpleDirectoryReader(f"{query_directory}").load_data()
index = GPTVectorStoreIndex.from_documents(documents)
return index
def conflu_search(
search: str,
user_messages: list = None,
bot_messages: list = None) -> Union[GPTVectorStoreIndex, None]:
query_counter = 0
while query_counter < 3:
query_counter += 1
query = generate_cql_query_keywords(search, user_messages, bot_messages)
r = query_conflu(query)
if r is not None and r.get("results"):
index = download_documents(r.get("results"))
return index
return None
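# Illustrative usage sketch (not part of the original module): the GPTVectorStoreIndex
# returned by conflu_search can be queried through llama_index's query-engine interface.
# The helper name and question string are examples only, and OPENAI_API_KEY is assumed
# to be configured (as in the __main__ block below).
def _example_query_conflu_index():
    index = conflu_search("What is the complete BYODB process?")
    if index is not None:
        return index.as_query_engine().query("What is the complete BYODB process?")
    return None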
if __name__ == "__main__":
os.environ["OPENAI_API_KEY"] = config("OPENAI_API_KEY")
conflu_search("What is the complete BYODB process?")
| [
"PLACEHOLDERbot messages: PLACEHOLDER, user_messages: PLACEHOLDER",
"\nYou are being given a conversation history between a ChatGPT bot and a user asking a question\nthat needs to be answered by searching the Keboola Confluence docs. To find the answer, \nyou must generate a CQL query that will return the most relevant results.\n\nConfluence uses Apache Lucene for text indexing, which provides a rich query language. Much of the information about how\n to generate a CQL query can be found in the Query Parser Syntax page of the Lucene documentation.\n\nA query is composed of terms and operators. There are two types of terms: single terms and phrases.\n\n- Single terms: These are single words like \"test\" or \"hello\".\n- Phrases: These are groups of words surrounded by double quotes, such as \"hello dolly\".\n\nRemember that all query terms in Confluence are case insensitive.\n\nYour task is to take the input question and generate a CQL query that will return the most relevant results. Focus on \nusing shorter terms rather than long ones, and avoid including phrases like \"step by step guide\" or \"exact process.\"\nDo not include words like \"step\" or \"guide\" in the search query.\n\nPlease respond with a valid Atlassian CQL query that you believe will yield the most relevant results.\n\nExamples:\ntext ~ \"Keboola AND Python AND component\"\ntext ~ \"BYODB AND process\"\ntext ~ \"Keboola AND component AND publish\"\n",
"PLACEHOLDERPLACEHOLDER",
"\nYou are being given a question that needs to be answered by searching the Keboola Confluence docs. To find the answer, \nyou must generate a CQL query that will return the most relevant results.\n\nConfluence uses Apache Lucene for text indexing, which provides a rich query language. Much of the information about how\n to generate a CQL query can be found in the Query Parser Syntax page of the Lucene documentation.\n\nA query is composed of terms and operators. There are two types of terms: single terms and phrases.\n\n- Single terms: These are single words like \"test\" or \"hello\".\n- Phrases: These are groups of words surrounded by double quotes, such as \"hello dolly\".\n\nRemember that all query terms in Confluence are case insensitive.\n\nYour task is to take the input question and generate a CQL query that will return the most relevant results. Focus on \nusing shorter terms rather than long ones, and avoid including phrases like \"step by step guide\" or \"exact process.\"\nDo not include words like \"step\" or \"guide\" in the search query.\n\n\nPlease respond with a valid Atlassian CQL query that you believe will yield the most relevant results.\n\nExamples:\ntext ~ \"Keboola AND Python AND component\"\ntext ~ \"BYODB AND process\"\ntext ~ \"Keboola AND component AND publish\"\n"
] |
2024-01-10 | keboola/kai_slack_bot | src~tools~jira_search~jira_search.py | import logging
from atlassian import Jira
from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, VectorStoreIndex
from langchain.llms import OpenAI
from llama_index import Document
import os
import sys
from decouple import config
from llama_index.node_parser import SimpleNodeParser
jira = Jira(
url='https://keboola.atlassian.net',
username=config("CONFLUENCE_USERNAME"),
password=config("CONFLUENCE_PASSWORD")
)
pre_prompt = """
You are being given a question that is meant to be answered by searching the
Keboola Jira. The only way to answer this question is to search the Jira.
In order to search the Jira, you must generate a JQL query that will
return the most relevant results.
Jira uses JQL for text indexing,
which provides a rich query language.
Much of the information about how to generate a JQL query
is derived from the Query Parser Syntax
page of the Lucene documentation.
A query is broken up into terms and operators.
There are two types of terms: Single Terms and Phrases.
A Single Term is a single word such as "test" or "hello".
A Phrase is a group of words surrounded by double quotes such as "hello dolly".
Note: All query terms in Jira are case insensitive.
Your task is to take the input question and generate a JQL query that will return the most relevant results.
Examples:
text ~ "Keboola AND Python AND component"
text ~ "BYODB AND process"
"""
def generate_jql_query_keywords(input_text: str) -> str:
llm = OpenAI()
response = llm.predict(pre_prompt + input_text)
return response
def validate_jql_query(query: str) -> bool:
try:
jira.jql(query)
return True
    except Exception:
        return False
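# Illustrative usage sketch (not part of the original module): generate a JQL query
# from a natural-language question and only run it against Jira if it validates.
# The helper name and default question are examples only.
def _example_validated_jql(question: str = "What is the BYODB process?"):
    jql_query = generate_jql_query_keywords(question)
    if validate_jql_query(jql_query):
        return jira.jql(jql_query)
    return None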
# TODO: Have this return an array of documents that will subsequently be turned into an index
def jira_search(input_text: str) -> dict:
    """
    Search the Keboola Jira for the most relevant results to the input text.
    Args:
        input_text: Input question or text from which to generate a JQL query.
    Returns:
        The raw JQL search results returned by the Jira API.
    """
if not input_text:
logging.error('Input text is empty.')
raise ValueError('Input text cannot be empty.')
try:
llm = OpenAI()
jql_query = llm.predict(pre_prompt + input_text)
except Exception as e:
logging.error(f'Failed to generate JQL query: {e}')
raise
return jira.jql(jql_query)
def construct_nodes(documents):
#TODO: Explore more advanced node parsing/construction
parser = SimpleNodeParser()
nodes = parser.get_nodes_from_documents(documents)
return nodes
def construct_index(nodes):
index = VectorStoreIndex(nodes)
return index
def query_engine(query, index):
return index.as_query_engine().query(query) | [
"\nYou are being given a question that is meant to be answered by searching the\nKeboola Jira. The only way to answer this question is to search the Jira.\nIn order to search the Jira, you must generate a JQL query that will\nreturn the most relevant results.\n\nJira uses JQL for text indexing,\nwhich provides a rich query language.\nMuch of the information about how to generate a JQL query\nis derived from the Query Parser Syntax\npage of the Lucene documentation.\n\nA query is broken up into terms and operators.\nThere are two types of terms: Single Terms and Phrases.\n\nA Single Term is a single word such as \"test\" or \"hello\".\n\nA Phrase is a group of words surrounded by double quotes such as \"hello dolly\".\n\nNote: All query terms in Jira are case insensitive.\n\nYour task is to take the input question and generate a JQL query that will return the most relevant results.\n\nExamples:\ntext ~ \"Keboola AND Python AND component\"\ntext ~ \"BYODB AND process\"\n"
] |
2024-01-10 | keboola/kai_slack_bot | database~builders~zendesk.py | import os
import re
import pandas as pd
import json
import pickle
import pinecone
import time
from dotenv import load_dotenv
from llama_index import (
GPTVectorStoreIndex,
ServiceContext,
Document,
set_global_service_context,
)
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores import PineconeVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index.llms import OpenAI
from llama_index.llm_predictor import LLMPredictor
from llama_index.node_parser import SimpleNodeParser
from llama_index.node_parser.extractors import (
MetadataExtractor,
# TitleExtractor,
QuestionsAnsweredExtractor,
# SummaryExtractor,
KeywordExtractor,
)
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from tqdm import tqdm
from kbcstorage.client import Client
# Load environment variables from .env file
dotenv_path = '/Users/jordanburger/Keboola/Keboola AI (Kai)/Knowledge Base Chatbot/slack_flask_app/.env'
load_dotenv(dotenv_path)
PINECONE_API_KEY = os.environ.get('PINECONE_API_KEY')
KBC_STORAGE_API_TOKEN = os.environ.get('KBC_STORAGE_API_TOKEN')
BUCKET_ID = os.environ.get('BUCKET_ID')
# Folder to save downloaded files
SAVE_FOLDER = '/Users/jordanburger/Keboola/Keboola AI (Kai)/Knowledge Base Chatbot/slack_flask_app/database/datadir/zendesk'
LIMIT = 100
class ZendeskDataExtractor:
def __init__(self):
self.client = None
def authenticate(self):
self.client = Client('https://connection.keboola.com',
os.environ['KBC_STORAGE_API_TOKEN'])
def get_zendesk_data(self):
if self.client is None:
self.authenticate()
tables = self.client.buckets.list_tables(bucket_id=os.environ['BUCKET_ID'])
tables_df = pd.DataFrame(tables)
for table in tqdm(tables_df.itertuples(), desc="Downloading tables from SAPI."):
self.client.tables.export_to_file(
table_id=table[2], path_name=f'{SAVE_FOLDER}/raw/')
def create_zd_ticket_files(self):
tickets = pd.read_csv(f'{SAVE_FOLDER}/raw/tickets')
comments = pd.read_csv(f'{SAVE_FOLDER}/raw/tickets_comments')
# Create a dictionary with ticket IDs as keys and lists of comments as values
comments_dict = {}
for _, comment in comments.iterrows():
ticket_id = comment['tickets_pk']
comment_body = comment['body']
comment_id = comment['id']
comment_created_at = comment['created_at']
comment_author_pk = comment['author_pk']
comment_dict = {
'Comment ID': comment_id,
'Comment Body': comment_body,
'Comment Created At': comment_created_at,
'Comment Author PK': comment_author_pk
}
if ticket_id not in comments_dict:
comments_dict[ticket_id] = []
comments_dict[ticket_id].append(comment_dict)
ticket_data = []
for _, ticket in tickets.iterrows():
ticket_id = ticket['id']
ticket_subject = ticket['subject']
ticket_status = ticket['status']
ticket_type = ticket['type']
created_at = ticket['created_at']
updated_at = ticket['updated_at']
ticket_dict = {
'Ticket ID': ticket_id,
'Ticket Subject': ticket_subject,
'Ticket Status': ticket_status,
'Ticket Type': ticket_type,
'Created At': created_at,
'Updated At': updated_at,
# Default to an empty list if no comments are found
'Comments': comments_dict.get(ticket_id, [])
}
ticket_data.append(ticket_dict)
for ticket in ticket_data:
ticket_id = ticket.get("Ticket ID")
with open(f'{SAVE_FOLDER}/tickets/{ticket_id}.json', 'w') as f:
json.dump(ticket, f)
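# For reference, each ticket file written by create_zd_ticket_files has roughly
# this shape (all values below are made up for illustration):
# {
#     "Ticket ID": 12345,
#     "Ticket Subject": "Orchestration failed",
#     "Ticket Status": "open",
#     "Ticket Type": "incident",
#     "Created At": "2023-07-01T12:00:00Z",
#     "Updated At": "2023-07-02T09:30:00Z",
#     "Comments": [
#         {"Comment ID": 1, "Comment Body": "Looking into it.",
#          "Comment Created At": "2023-07-01T13:00:00Z", "Comment Author PK": 42}
#     ]
# }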
class DocumentIndexCreator:
def __init__(self, save_folder, index_filename, batch_size=100):
self.save_folder = save_folder
self.index_filename = index_filename
self.batch_size = batch_size
self.doc_titles = []
self.doc_paths = []
self.doc_ids = []
self.doc_embeddings = []
self.llm = OpenAI(model="gpt-3.5-turbo", temperature=0, max_tokens=256)
self.embed_model = OpenAIEmbedding(model="text-embedding-ada-002", embed_batch_size=100)
self.llm_predictor = LLMPredictor(llm=self.llm)
self.text_splitter = TokenTextSplitter(separator=" ", chunk_size=1024, chunk_overlap=128)
self.metadata_extractor = MetadataExtractor(
extractors=[
# TitleExtractor(nodes=2),
QuestionsAnsweredExtractor(
questions=3, llm_predictor=self.llm_predictor
),
# SummaryExtractor(summaries=["prev", "self"]),
KeywordExtractor(keywords=5, llm_predictor=self.llm_predictor),
]
)
self.node_parser = SimpleNodeParser(
text_splitter=self.text_splitter, metadata_extractor=self.metadata_extractor
)
self.load_index()
def sanitize_filename(self, filename):
return re.sub(r"[/\\]", "_", filename)
def read_file_as_string(self, file_path):
with open(file_path, "r", encoding="utf-8") as file:
return file.read()
def get_file_metadata(self, file_path):
metadata_path = file_path.replace(".txt", ".json")
md = self.read_file_as_string(metadata_path)
md = json.loads(md)
if md:
return md
return {}
def load_documents(self):
for dirpath, dirnames, filenames in os.walk(self.save_folder):
for filename in filenames:
if filename.endswith(".txt"):
subdir_name = os.path.basename(dirpath)
file_name = os.path.splitext(filename)[0]
self.doc_titles.append(subdir_name + " - " + file_name)
self.doc_paths.append(os.path.join(dirpath, filename))
def index_documents(self):
nodes = []
for title, path in zip(self.doc_titles, self.doc_paths):
if path.endswith(".txt"):
text = self.read_file_as_string(path)
extra_info = self.get_file_metadata(path)
nodes.append(Document(text=text, doc_id=title, extra_info=extra_info))
print("Document added: " + title)
if len(nodes) >= self.batch_size:
self.process_batch(nodes)
nodes = []
if nodes:
self.process_batch(nodes)
def process_batch(self, nodes):
service_context = ServiceContext.from_defaults(
llm=self.llm, embed_model=self.embed_model, node_parser=self.node_parser
)
set_global_service_context(service_context)
start = time.time()
print(time.time())
parsed_nodes = self.node_parser.get_nodes_from_documents(nodes, show_progress=True)
print(time.time() - start)
print("Nodes added: " + str(len(parsed_nodes)))
self.update_index(parsed_nodes)
def save_index(self):
with open(self.index_filename, "wb") as file:
index_data = {
"doc_ids": self.doc_ids,
"doc_embeddings": self.doc_embeddings,
}
pickle.dump(index_data, file)
def load_index(self):
if os.path.exists(self.index_filename):
with open(self.index_filename, "rb") as file:
index_data = pickle.load(file)
self.doc_ids = index_data.get("doc_ids", [])
self.doc_embeddings = index_data.get("doc_embeddings", [])
def update_index(self, nodes):
for node in nodes:
if node.ref_doc_id not in self.doc_ids:
self.doc_ids.append(node.ref_doc_id)
self.doc_embeddings.append(node.embedding)
else:
index = self.doc_ids.index(node.ref_doc_id)
self.doc_embeddings[index] = node.embedding
self.save_index()
def create_and_load_index(index_name, nodes):
pinecone.init(
api_key=os.environ["PINECONE_API_KEY"],
environment=os.environ["PINECONE_ENVIRONMENT"],
)
pinecone_index = pinecone.Index(index_name)
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
embed_model = OpenAIEmbedding(
model="text-embedding-ada-002", embed_batch_size=100
)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
GPTVectorStoreIndex(
nodes,
storage_context=storage_context,
service_context=service_context,
show_progress=True,
)
if __name__ == "__main__":
# Load environment variables from .env file
dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
load_dotenv(dotenv_path)
# Constants
SAVE_FOLDER = "downloaded_data"
INDEX_NAME = "kaidev"
INDEX_FILENAME = "index_data.pkl"
BATCH_SIZE = 50
downloader = ZendeskDataExtractor()
downloader.get_zendesk_data()
indexer = DocumentIndexCreator(
save_folder=SAVE_FOLDER, index_filename=INDEX_FILENAME, batch_size=BATCH_SIZE
)
indexer.load_documents()
indexer.index_documents()
create_and_load_index(index_name=INDEX_NAME, nodes=indexer.doc_ids, ) # Only push document IDs to Pinecone
| [] |
2024-01-10 | keboola/kai_slack_bot | database~builders~confluence.py | import os
import re
import openai
import logging
from dotenv import load_dotenv
from atlassian import Confluence
from bs4 import BeautifulSoup
import json
import pinecone
import time
from llama_index import GPTVectorStoreIndex, ServiceContext, Document, set_global_service_context
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores import PineconeVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index.llms import OpenAI
from llama_index.llm_predictor import LLMPredictor
from llama_index.node_parser import SimpleNodeParser
from llama_index.node_parser.extractors import (
MetadataExtractor,
SummaryExtractor,
QuestionsAnsweredExtractor,
TitleExtractor,
KeywordExtractor,
)
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
text_splitter = TokenTextSplitter(separator=" ", chunk_size=1024, chunk_overlap=128)
openai.api_key=os.getenv('OPENAI_API_KEY')
# Load environment variables from .env file
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
load_dotenv(dotenv_path)
# Confluence credentials
CONFLUENCE_URL = os.environ.get('CONFLUENCE_URL')
CONFLUENCE_USERNAME = os.environ.get('CONFLUENCE_USERNAME')
CONFLUENCE_PASSWORD = os.environ.get('CONFLUENCE_PASSWORD')
PINECONE_API_KEY = os.environ.get('PINECONE_API_KEY')
# Folder to save downloaded files
SAVE_FOLDER = 'downloaded_data'
LIMIT = 100
def sanitize_filename(filename):
# Replace slashes with underscores
return re.sub(r'[/\\]', '_', filename)
def save_results(results, metadata, directory):
for result in results:
content_filename = os.path.join(directory, sanitize_filename(result['title']) + ".txt")
metadata_filename = os.path.join(directory, sanitize_filename(result['title']) + ".json")
html_content = result['body']['storage']['value']
soup = BeautifulSoup(html_content, 'html.parser')
text = soup.get_text()
text = result['title'] + '\n\n' + text
with open(content_filename, 'w', encoding='utf-8') as file:
file.write(text)
with open(metadata_filename, 'w', encoding='utf-8') as file:
json.dump(metadata, file)
def get_metadata(confluence, results):
page_id = results[0].get("id")
if page_id:
data = confluence.get_page_by_id(page_id)
page_metadata = {
'id': data.get('id', ''),
'CreatedDate': data['history'].get('createdDate', ''),
'LastUpdatedDate': data['version'].get('when', ''),
'Title': data.get('title', ''),
'Creator': data['history']['createdBy'].get('displayName', ''),
'LastModifier': data['version']['by'].get('displayName', ''),
'url': data['_links'].get('base', '') + '/pages/' + data.get('id', ''),
'Space': data['space'].get('name', '')
}
return page_metadata
return None
def download_confluence_pages(confluence, save_folder: str = SAVE_FOLDER):
spaces = confluence.get_all_spaces()
for space in spaces.get("results"):
logging.info(f"Downloading Confluence space: {space['name']}")
content = confluence.get_space_content(space['key'])
while True:
subdir = os.path.join(save_folder, space['name'])
os.makedirs(subdir, exist_ok=True)
page = content.get("page")
results = page.get("results")
size = page.get("size")
if not results:
logging.info(f"No results for {space['name']}")
break
metadata = get_metadata(confluence, results)
save_results(results, metadata, subdir)
if size == LIMIT:
start = page.get("start") + LIMIT
content = confluence.get_space_content(space['key'], start=start, limit=LIMIT)
page = content.get("page")
results = page.get("results")
metadata = get_metadata(confluence, results)
save_results(results, metadata, subdir)
else:
break
def read_file_as_string(file_path):
with open(file_path, 'r', encoding='utf-8') as file:
return file.read()
def get_file_metadata(file_path) -> dict:
metadata_path = file_path.replace(".txt", ".json")
md = read_file_as_string(metadata_path)
md = json.loads(md)
if md:
return md
return {}
def main():
confluence = Confluence(url=CONFLUENCE_URL, username=CONFLUENCE_USERNAME, password=CONFLUENCE_PASSWORD)
# Download attachments from Confluence
download_confluence_pages(confluence,
save_folder="database/datadir/confluence")
print('Data download complete!')
def store_index():
doc_titles = []
doc_paths = []
llm = OpenAI(model='gpt-3.5-turbo', temperature=0, max_tokens=256)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
llm_predictor = LLMPredictor(llm=llm)
metadata_extractor = MetadataExtractor(
extractors=[
#TitleExtractor(nodes=2),
QuestionsAnsweredExtractor(questions=3, llm_predictor=llm_predictor),
#SummaryExtractor(summaries=["prev", "self"]),
KeywordExtractor(keywords=5, llm_predictor=llm_predictor),
],
)
node_parser = SimpleNodeParser(text_splitter=text_splitter, metadata_extractor=metadata_extractor)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model, node_parser=node_parser)
set_global_service_context(service_context)
for dirpath, dirnames, filenames in os.walk("database/datadir/confluence"):
for filename in filenames:
if filename.endswith(".txt"):
subdir_name = os.path.basename(dirpath)
file_name = os.path.splitext(filename)[0]
doc_titles.append(subdir_name + " - " + file_name)
doc_paths.append(os.path.join(dirpath, filename))
docs = []
for title, path in zip(doc_titles, doc_paths):
if str(path).endswith(".txt"):
text = read_file_as_string(path)
extra_info = get_file_metadata(path)
docs.append(Document(
text=text,
doc_id=title,
extra_info=extra_info
))
print('Document added: ' + title)
print('Documents added: ' + str(len(docs)))
start = time.time()
print(time.time())
nodes = node_parser.get_nodes_from_documents(docs, show_progress=True)
print(time.time() - start)
print('Nodes added: ' + str(len(nodes)))
pinecone.init(
api_key=os.environ['PINECONE_API_KEY'],
environment=os.environ['PINECONE_ENVIRONMENT']
)
    # connect to the Pinecone index (assumed to already exist)
index_name = 'kaidev'
pinecone_index = pinecone.Index(index_name)
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
storage_context = StorageContext.from_defaults(
vector_store=vector_store
)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
GPTVectorStoreIndex(
nodes, storage_context=storage_context,
service_context=service_context,
show_progress=True
)
if __name__ == "__main__":
#main()
store_index()
| [] |
2024-01-10 | keboola/kai_slack_bot | database~dataExtractors~confluenceDataExtractor.py | import os
import re
import logging
import json
import openai
from dotenv import load_dotenv
from bs4 import BeautifulSoup
from atlassian import Confluence
openai.api_key=os.getenv('OPENAI_API_KEY')
class ConfluenceDataExtractor:
def __init__(self, confluence_url, confluence_username, confluence_password, save_folder):
self.confluence = Confluence(
url=confluence_url, username=confluence_username, password=confluence_password
)
self.save_folder = save_folder
def sanitize_filename(self, filename):
return re.sub(r"[/\\]", "_", filename)
def save_results(self, results, metadata, directory):
for result in results:
content_filename = os.path.join(
directory, self.sanitize_filename(result["title"]) + ".txt"
)
metadata_filename = os.path.join(
directory, self.sanitize_filename(result["title"]) + ".json"
)
html_content = result["body"]["storage"]["value"]
soup = BeautifulSoup(html_content, "html.parser")
text = soup.get_text()
text = result["title"] + "\n\n" + text
with open(content_filename, "w", encoding="utf-8") as file:
file.write(text)
with open(metadata_filename, "w", encoding="utf-8") as file:
json.dump(metadata, file)
def get_metadata(self, results):
page_id = results[0].get("id")
if page_id:
data = self.confluence.get_page_by_id(page_id)
page_metadata = {
"id": data.get("id", ""),
"CreatedDate": data["history"].get("createdDate", ""),
"LastUpdatedDate": data["version"].get("when", ""),
"Title": data.get("title", ""),
"Creator": data["history"]["createdBy"].get("displayName", ""),
"LastModifier": data["version"]["by"].get("displayName", ""),
"url": data["_links"].get("base", "") + "/pages/" + data.get("id", ""),
"Space": data["space"].get("name", ""),
}
return page_metadata
return {}
def download_confluence_pages(self, limit=100):
spaces = self.confluence.get_all_spaces()
for space in spaces.get("results"):
logging.info(f"Downloading Confluence space: {space['name']}")
content = self.confluence.get_space_content(space["key"])
while True:
subdir = os.path.join(self.save_folder, space["name"])
os.makedirs(subdir, exist_ok=True)
page = content.get("page")
results = page.get("results")
size = page.get("size")
if not results:
logging.info(f"No results for {space['name']}")
break
metadata = self.get_metadata(results)
# Check if the document is already downloaded and up-to-date
for result in results:
metadata_filename = os.path.join(
subdir, self.sanitize_filename(result["title"]) + ".json"
)
if os.path.exists(metadata_filename):
with open(metadata_filename, "r", encoding="utf-8") as file:
existing_metadata = json.load(file)
if (
metadata["LastUpdatedDate"]
== existing_metadata.get("LastUpdatedDate")
):
logging.info(
f"Document '{result['title']}' is up-to-date. Skipping download."
)
continue
self.save_results(results, metadata, subdir)
if size == limit:
start = page.get("start") + limit
content = self.confluence.get_space_content(
space["key"], start=start, limit=limit
)
page = content.get("page")
results = page.get("results")
metadata = self.get_metadata(results)
# Check if the document is already downloaded and up-to-date
for result in results:
metadata_filename = os.path.join(
subdir, self.sanitize_filename(result["title"]) + ".json"
)
if os.path.exists(metadata_filename):
with open(metadata_filename, "r", encoding="utf-8") as file:
existing_metadata = json.load(file)
if (
metadata["LastUpdatedDate"]
== existing_metadata.get("LastUpdatedDate")
):
logging.info(
f"Document '{result['title']}' is up-to-date. Skipping download."
)
continue
self.save_results(results, metadata, subdir)
else:
break
| [] |
2024-01-10 | keboola/kai_slack_bot | src~tools~chat_helper~response_evaluator.py | # TODO: [AIS-83] Build a response evaluator that can be used to evaluate the quality of responses
from llama_index.evaluation import ResponseEvaluator
from langchain.chat_models import ChatOpenAI
from llama_index import LLMPredictor, ServiceContext
from llama_index.evaluation import QueryResponseEvaluator
# build service context
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-4"))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
evaluator = ResponseEvaluator(service_context=service_context)
def evaluate_response_binary(response) -> bool:
"""
Evaluate a response to a query by using a language learning model to extract keywords from the input text.
Args:
response: Response to evaluate.
query: Query to evaluate the response against.
index: Vector store index to use for retrieving documents.
Returns:
A boolean representing whether the response is relevant to the query.
"""
evaluator = ResponseEvaluator(service_context=service_context)
eval_result = evaluator.evaluate(response)
print(str(eval_result))
return eval_result
def query_response_evaluator(query, response):
"""
Evaluate a response to a query by using a language learning model to extract keywords from the input text.
Args:
response: Response to evaluate.
query: Query to evaluate the response against.
index: Vector store index to use for retrieving documents.
Returns:
A boolean representing whether the response is relevant to the query.
"""
evaluator = QueryResponseEvaluator(service_context=service_context)
eval_result = evaluator.evaluate(response, query)
print(str(eval_result))
return eval_result
if __name__ == "__main__":
    # Both evaluators need a query/response pair produced at query time, e.g.:
    #   evaluate_response_binary(response)
    #   query_response_evaluator(query, response)
    pass
| [] |
2024-01-10 | keboola/kai_slack_bot | database~builders~confluenceV2.py | import os
import pickle
import re
import logging
import json
import time
import openai
from dotenv import load_dotenv
from bs4 import BeautifulSoup
from atlassian import Confluence
import pinecone
from datetime import datetime
from llama_index import (
GPTVectorStoreIndex,
ServiceContext,
Document,
set_global_service_context,
)
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores import PineconeVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index.llms import OpenAI
from llama_index.llm_predictor import LLMPredictor
from llama_index.node_parser import SimpleNodeParser
from llama_index.node_parser.extractors import (
MetadataExtractor,
# TitleExtractor,
QuestionsAnsweredExtractor,
# SummaryExtractor,
KeywordExtractor,
)
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
openai.api_key=os.getenv('OPENAI_API_KEY')
class ConfluenceDataExtractor:
def __init__(self, confluence_url, confluence_username, confluence_password, save_folder):
self.confluence = Confluence(
url=confluence_url, username=confluence_username, password=confluence_password
)
self.save_folder = save_folder
def sanitize_filename(self, filename):
return re.sub(r"[/\\]", "_", filename)
def save_results(self, results, metadata, directory):
for result in results:
content_filename = os.path.join(
directory, self.sanitize_filename(result["title"]) + ".txt"
)
metadata_filename = os.path.join(
directory, self.sanitize_filename(result["title"]) + ".json"
)
html_content = result["body"]["storage"]["value"]
soup = BeautifulSoup(html_content, "html.parser")
text = soup.get_text()
text = result["title"] + "\n\n" + text
with open(content_filename, "w", encoding="utf-8") as file:
file.write(text)
with open(metadata_filename, "w", encoding="utf-8") as file:
json.dump(metadata, file)
def get_metadata(self, results):
page_id = results[0].get("id")
if page_id:
data = self.confluence.get_page_by_id(page_id)
page_metadata = {
"id": data.get("id", ""),
"CreatedDate": data["history"].get("createdDate", ""),
"LastUpdatedDate": data["version"].get("when", ""),
"Title": data.get("title", ""),
"Creator": data["history"]["createdBy"].get("displayName", ""),
"LastModifier": data["version"]["by"].get("displayName", ""),
"url": data["_links"].get("base", "") + "/pages/" + data.get("id", ""),
"Space": data["space"].get("name", ""),
}
return page_metadata
return {}
def download_confluence_pages(self, limit=100):
spaces = self.confluence.get_all_spaces()
for space in spaces.get("results"):
logging.info(f"Downloading Confluence space: {space['name']}")
content = self.confluence.get_space_content(space["key"])
while True:
subdir = os.path.join(self.save_folder, space["name"])
os.makedirs(subdir, exist_ok=True)
page = content.get("page")
results = page.get("results")
size = page.get("size")
if not results:
logging.info(f"No results for {space['name']}")
break
metadata = self.get_metadata(results)
# Check if the document is already downloaded and up-to-date
for result in results:
metadata_filename = os.path.join(
subdir, self.sanitize_filename(result["title"]) + ".json"
)
if os.path.exists(metadata_filename):
with open(metadata_filename, "r", encoding="utf-8") as file:
existing_metadata = json.load(file)
if (
metadata["LastUpdatedDate"]
== existing_metadata.get("LastUpdatedDate")
):
logging.info(
f"Document '{result['title']}' is up-to-date. Skipping download."
)
continue
self.save_results(results, metadata, subdir)
if size == limit:
start = page.get("start") + limit
content = self.confluence.get_space_content(
space["key"], start=start, limit=limit
)
page = content.get("page")
results = page.get("results")
metadata = self.get_metadata(results)
# Check if the document is already downloaded and up-to-date
for result in results:
metadata_filename = os.path.join(
subdir, self.sanitize_filename(result["title"]) + ".json"
)
if os.path.exists(metadata_filename):
with open(metadata_filename, "r", encoding="utf-8") as file:
existing_metadata = json.load(file)
if (
metadata["LastUpdatedDate"]
== existing_metadata.get("LastUpdatedDate")
):
logging.info(
f"Document '{result['title']}' is up-to-date. Skipping download."
)
continue
self.save_results(results, metadata, subdir)
else:
break
class DocumentIndexCreator:
def __init__(self, save_folder, index_filename, batch_size=100):
self.save_folder = save_folder
self.index_filename = index_filename
self.batch_size = batch_size
self.doc_titles = []
self.doc_paths = []
self.doc_ids = []
self.doc_embeddings = {}
self.nodes_embeddings = {} # Separate dictionary to store node embeddings
self.llm = OpenAI(model="gpt-3.5-turbo", temperature=0, max_tokens=256)
self.embed_model = OpenAIEmbedding(model="text-embedding-ada-002", embed_batch_size=100)
self.llm_predictor = LLMPredictor(llm=self.llm)
self.text_splitter = TokenTextSplitter(separator=" ", chunk_size=1024, chunk_overlap=128)
self.metadata_extractor = MetadataExtractor(
extractors=[
# TitleExtractor(nodes=2),
QuestionsAnsweredExtractor(
questions=3, llm_predictor=self.llm_predictor
),
# SummaryExtractor(summaries=["prev", "self"]),
KeywordExtractor(keywords=5, llm_predictor=self.llm_predictor),
]
)
self.node_parser = SimpleNodeParser(
text_splitter=self.text_splitter, metadata_extractor=self.metadata_extractor
)
self.last_runtime = self.load_last_runtimes()
self.load_documents()
def sanitize_filename(self, filename):
return re.sub(r"[/\\]", "_", filename)
def read_file_as_string(self, file_path):
with open(file_path, "r", encoding="utf-8") as file:
return file.read()
def get_file_metadata(self, file_path):
metadata_path = file_path.replace(".txt", ".json")
md = self.read_file_as_string(metadata_path)
md = json.loads(md)
if md:
return md
return {}
def load_last_runtimes(self):
if os.path.exists("runtimes.json"):
with open("runtimes.json", "r") as file:
return json.load(file).get("LastRuntime", {})
return {}
def update_last_runtime(self, doc_path):
self.last_runtime[doc_path] = time.time() # Update the specific document's last runtime
self.save_last_runtimes()
def save_last_runtimes(self):
with open("runtimes.json", "w") as file:
json.dump({"LastRuntime": self.last_runtime}, file) # Sa
def load_documents(self):
for dirpath, dirnames, filenames in os.walk(self.save_folder):
for filename in filenames:
if filename.endswith(".txt"):
subdir_name = os.path.basename(dirpath)
file_name = os.path.splitext(filename)[0]
doc_path = os.path.join(dirpath, filename)
metadata_path = os.path.join(
dirpath, self.sanitize_filename(file_name) + ".json"
)
metadata = self.get_file_metadata(metadata_path)
last_updated_date_str = metadata.get("LastUpdatedDate", "")
if last_updated_date_str:
last_updated_date = datetime.fromisoformat(last_updated_date_str[:-1])
last_runtime = datetime.fromtimestamp(self.last_runtime.get(doc_path, 0)) # Use the specific runtime
if last_updated_date > last_runtime:
self.doc_titles.append(subdir_name + " - " + file_name)
self.doc_paths.append(doc_path)
def index_documents(self):
nodes = []
for title, path in zip(self.doc_titles, self.doc_paths):
if path.endswith(".txt"):
text = self.read_file_as_string(path)
extra_info = self.get_file_metadata(path)
nodes.append(Document(text=text, doc_id=title, extra_info=extra_info))
print("Document added: " + title)
if len(nodes) >= self.batch_size:
self.process_batch(nodes)
nodes = []
if nodes:
self.process_batch(nodes)
def process_batch(self, nodes):
service_context = ServiceContext.from_defaults(
llm=self.llm, embed_model=self.embed_model, node_parser=self.node_parser
)
set_global_service_context(service_context)
start = time.time()
print(time.time())
parsed_nodes = self.node_parser.get_nodes_from_documents(nodes, show_progress=True)
print(time.time() - start)
print("Nodes added: " + str(len(parsed_nodes)))
for node in parsed_nodes:
doc_path = node.ref_doc_id # Assuming ref_doc_id contains the document path
self.update_last_runtime(doc_path)
self.update_index(parsed_nodes)
self.save_index()
def update_index(self, nodes):
for node in nodes:
if node.ref_doc_id not in self.doc_ids:
self.doc_ids.append(node.ref_doc_id)
self.doc_embeddings[node.ref_doc_id] = node.embedding
else:
self.doc_embeddings[node.ref_doc_id] = node.embedding
# Store node embeddings in a separate dictionary
self.nodes_embeddings[node.ref_doc_id] = node.embedding
self.save_index()
def save_index(self):
with open(self.index_filename, "wb") as file:
index_data = {
"doc_embeddings": self.doc_embeddings,
"nodes_embeddings": self.nodes_embeddings,
}
pickle.dump(index_data, file)
def load_index(self):
if os.path.exists(self.index_filename):
with open(self.index_filename, "rb") as file:
index_data = pickle.load(file)
self.doc_embeddings = index_data.get("doc_embeddings", {})
self.nodes_embeddings = index_data.get("nodes_embeddings", {})
def create_and_load_index(index_name, nodes):
pinecone.init(
api_key=os.environ["PINECONE_API_KEY"],
environment=os.environ["PINECONE_ENVIRONMENT"],
)
pinecone_index = pinecone.Index(index_name)
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
embed_model = OpenAIEmbedding(
model="text-embedding-ada-002", embed_batch_size=100
)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
GPTVectorStoreIndex(
nodes,
storage_context=storage_context,
service_context=service_context,
show_progress=True,
)
if __name__ == "__main__":
# Load environment variables from .env file
dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
load_dotenv(dotenv_path)
# Constants
SAVE_FOLDER = "downloaded_data"
INDEX_NAME = "kaidev"
INDEX_FILENAME = "index_data.pkl"
BATCH_SIZE = 1
# Create objects and perform tasks
downloader = ConfluenceDataExtractor(
confluence_url=os.environ.get("CONFLUENCE_URL"),
confluence_username=os.environ.get("CONFLUENCE_USERNAME"),
confluence_password=os.environ.get("CONFLUENCE_PASSWORD"),
save_folder=SAVE_FOLDER,
)
downloader.download_confluence_pages()
indexer = DocumentIndexCreator(
save_folder=SAVE_FOLDER, index_filename=INDEX_FILENAME, batch_size=BATCH_SIZE
)
indexer.load_documents()
indexer.index_documents()
create_and_load_index(index_name=INDEX_NAME, nodes=indexer.doc_ids, ) # Only push document IDs to Pinecone | [] |
2024-01-10 | swiftraccoon/sdrtrunk-transcriber | simplified_process.py | import openai
import logging
import requests
import os
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
filename="script_log.log",
filemode="a",
)
logger = logging.getLogger()
# Configurations
RECORDINGS_DIR = "/home/YOUR_USER/SDRTrunk/recordings"
OPENAI_API_KEY = "YOUR_KEY_HERE"
def pyapi_transcribe_audio(file_path):
openai.api_key = OPENAI_API_KEY
audio_file = open(file_path, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
return str(transcript)
def curl_transcribe_audio(file_path):
# Define the endpoint and your API key
url = "https://api.openai.com/v1/audio/transcriptions"
api_key = OPENAI_API_KEY
# Setup headers
headers = {
"Authorization": f"Bearer {api_key}",
}
# Open the file and setup files and data to be sent
with open(file_path, "rb") as file:
files = {
"file": file,
}
data = {
"model": "whisper-1",
"response_format": "json",
"temperature": "0",
"language": "en",
}
# Make the POST request
response = requests.post(url, headers=headers, files=files, data=data)
# Print the response or handle as needed
return str(response.json())
def process_file(file):
logger.info(f"Processing file: {file}")
if not file.endswith(".mp3"):
return
full_path = os.path.join(RECORDINGS_DIR, file)
talkgroup_id = file.split("TO_")[1].split("_")[0]
# Move the file based on talkgroup ID
new_dir = os.path.join(RECORDINGS_DIR, talkgroup_id)
if not os.path.exists(new_dir):
os.mkdir(new_dir)
new_path = os.path.join(new_dir, file)
os.rename(full_path, new_path)
# Transcribe the audio
transcription = curl_transcribe_audio(new_path)
logger.info(f"Transcribed text for {file}: {transcription}")
# Write transcription to a text file
try:
logger.info(f"Starting to write to text file for {file}")
with open(new_path.replace(".mp3", ".txt"), "w") as text_file:
text_file.write(transcription)
except Exception as e:
logger.error(f"Error while writing to text file: {str(e)}")
def main():
for file in os.listdir(RECORDINGS_DIR):
process_file(file)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | swiftraccoon/sdrtrunk-transcriber | email_simplified_process.py | import openai
import logging
import requests
import os
import smtplib
from email.message import EmailMessage
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
filename="script_log.log",
filemode="a",
)
logger = logging.getLogger()
# Configurations
RECORDINGS_DIR = "/home/YOUR_USER/SDRTrunk/recordings"
OPENAI_API_KEY = "YOUR_KEY_HERE"
def send_email(subject, content):
# Your email details
sender_email = "[email protected]"
receiver_email = "[email protected]"
password = "your_email_password" # NOTE: Use environment variables or secure vaults, don't hard-code passwords
# For a higher security standard, Google now requires you to use an “App Password“.
# This is a 16-digit passcode that is generated in your Google account and allows less
# secure apps or devices that don’t support 2-step verification to sign in to your Gmail Account.
# Create a message
msg = EmailMessage()
msg.set_content(content)
msg["Subject"] = subject
msg["From"] = sender_email
msg["To"] = receiver_email
# Send the email
try:
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as server: # Change "smtp.example.com" to your SMTP server
server.login(sender_email, password)
server.send_message(msg)
logger.info(f"Email sent to {receiver_email} successfully!")
except Exception as e:
logger.error(f"Error sending email: {str(e)}")
def pyapi_transcribe_audio(file_path):
openai.api_key = OPENAI_API_KEY
audio_file = open(file_path, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
return str(transcript)
def curl_transcribe_audio(file_path):
# Define the endpoint and your API key
url = "https://api.openai.com/v1/audio/transcriptions"
api_key = OPENAI_API_KEY
# Setup headers
headers = {
"Authorization": f"Bearer {api_key}",
}
# Open the file and setup files and data to be sent
with open(file_path, "rb") as file:
files = {
"file": file,
}
data = {
"model": "whisper-1",
"response_format": "json",
"temperature": "0",
"language": "en",
}
# Make the POST request
response = requests.post(url, headers=headers, files=files, data=data)
# Print the response or handle as needed
return str(response.json())
def process_file(file):
logger.info(f"Processing file: {file}")
if not file.endswith(".mp3"):
return
full_path = os.path.join(RECORDINGS_DIR, file)
talkgroup_id = file.split("TO_")[1].split("_")[0]
# Move the file based on talkgroup ID
new_dir = os.path.join(RECORDINGS_DIR, talkgroup_id)
if not os.path.exists(new_dir):
os.mkdir(new_dir)
new_path = os.path.join(new_dir, file)
os.rename(full_path, new_path)
# Transcribe the audio
transcription = curl_transcribe_audio(new_path)
logger.info(f"Transcribed text for {file}: {transcription}")
# Write transcription to a text file
try:
logger.info(f"Starting to write to text file for {file}")
with open(new_path.replace(".mp3", ".txt"), "w") as text_file:
text_file.write(transcription)
except Exception as e:
logger.error(f"Error while writing to text file: {str(e)}")
# Send the transcription via email
send_email(f"Transcription for {talkgroup_id}", transcription)
def main():
for file in os.listdir(RECORDINGS_DIR):
process_file(file)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | wayveai/Driving-with-LLMs | scripts~collect_vqa.py | # pylint: skip-file
import os
import pickle
from multiprocessing import Pool
from pathlib import Path
import click
import openai
from retry import retry
from tqdm import tqdm
def make_context():
prompt = f"""I am a certified professional driving instructor and I am currently demonstrating driving in a residential area of London to a student.
In London, we drive on the left side of the road according to the UK driving rules.
A red+yellow signal on a traffic light is typically used as a warning signal to indicate that the light is about to turn green.
As we drive, I observe multiple objects such as vehicles, pedestrians, and traffic lights in front of us.
For each object, I am aware of their direction and distance from our car; I measure direction in degrees, with 0 indicating it's right in front of us, 90 indicating it's to the right, and -90 indicating it's on the left.
This means that negative angles indicate that object is to the left of us and positive angles that its to the right of us.
If angle is larger than 90 degrees, its a sharp angle: e.g 134 degrees is a sharp right, -150 degrees is a sharp left.
If a car is driving in an opposite direction and is to the right of us, it is driving on an opposite lane.
I'm also driving in a defensive way and I'm paying varying levels of attention to each object (I measure it in percentage from 0% to 100%) depending on how they might be a hazard that may cause me to change speed, direction, stop, or even cause harm to myself.
Now design 16 random question and answer pairs that the student might ask about the current driving scenario. The answers should be based on the current input observations and your reasoning. Ask diverse questions, and give corresponding answers.
Format each QA pair in a single line as a JSON dictionary like {{"question": "xxx", "answer": "xxx"}}. Only output 16 lines of single-line JSON. Do not include any other explanation.
Must include these 6 questions, but please rephrase each question in a more natural way:
- What are you seeing/observing
- What are you paying attention to and why
- Are there any traffic light / what's the color of the traffic light
- What's your current speed and steering angle / current state
- What is your action and why / how are you going to drive in this situation and why
- Summarize the current driving scenario in high level / describe the current situation
When asked about the action, always return the answer in this way:
```
Here are my actions:
- Accelerator pedal 0%
- Brake pedal 91%
- Steering 31% to the left.
Reason:
Because...
```
Also include one driving related question that cannot be observed in the observation input, and answer something like "I'm unable to answer this question based on the observation I have", and then describe your input observation
Also include one random question that is not related to driving, and answer something like "As an AI Driver, the question you asked is out of my scope, but I can try answer it" and then answer the question normally.
"""
return prompt
@retry(tries=5, delay=2, backoff=2)
def make_description_from_prompt(input_prompt):
context = make_context()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": context},
{"role": "user", "content": input_prompt},
],
temperature=1.0,
)
first_response = response["choices"][0]["message"]["content"]
return first_response
def process_frame(frame_num, observation, input_prompt, cache_path):
if os.path.exists(cache_path):
with open(cache_path, "rb") as f:
result = pickle.load(f)
print("loading from cache: ", frame_num)
return result
print("making description for frame: ", frame_num)
response_content = make_description_from_prompt(input_prompt)
result = {
"frame_num": frame_num,
"observation": observation,
"input_prompt": input_prompt,
"response_content": response_content,
}
with open(cache_path, "wb") as f:
pickle.dump(result, f)
return result
@click.command()
@click.option(
"-i",
"--input_path",
type=click.Path(dir_okay=False, path_type=Path),
help="Path to pickle dataset file",
)
@click.option(
"-o",
"--output_folder",
type=click.Path(dir_okay=True, path_type=Path),
help="Path to json file",
)
@click.option("--max_steps", default=None, type=int)
@click.option("--stride", default=1, type=int)
@click.option("--num_process", default=1, type=int)
@click.option("--openai_api", required=True, type=str)
def main(
input_path,
output_folder,
max_steps,
stride,
num_process,
openai_api,
):
# Init openai api key
openai.api_key = openai_api
cached_input_filename = os.path.expanduser(input_path)
output_folder = os.path.expanduser(output_folder)
os.makedirs(output_folder, exist_ok=True)
with open(cached_input_filename, "rb") as f:
data = pickle.load(f)
max_steps = len(data) if max_steps is None else max_steps
frame_nums = range(0, max_steps, stride)
args_list = [
(
frame_num,
data[frame_num]["observation"],
data[frame_num]["input_prompt"],
os.path.join(output_folder, f"tmp_{frame_num}.pkl"),
)
for frame_num in frame_nums
]
with Pool(num_process) as pool:
results = list(
tqdm(
pool.starmap(process_frame, args_list),
total=len(frame_nums),
desc="Processing frames",
)
)
cached_labels_filename = os.path.join(output_folder, "labeled_dataset.pkl")
with open(cached_labels_filename, "wb") as f:
pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL)
print("Results saved to ", cached_labels_filename)
if __name__ == "__main__":
main()
| [
"I am a certified professional driving instructor and I am currently demonstrating driving in a residential area of London to a student.\nIn London, we drive on the left side of road according to the UK driving rules.\nA red+yellow signal on a traffic light is typically used as a warning signal to indicate that the light is about to turn green.\nAs we drive, I observe multiple objects such as vehicles, pedestrians, and traffic lights in front of us.\nFor each object, I am aware of their direction and distance from our car, which I measure in degrees, with 0 indicating it's right in front of us, 90 indicating it's to the right, and -90 indicating it's on the left.\nThis means that negative angles indicate that object is to the left of us and positive angles that its to the right of us.\nIf angle is larger than 90 degrees, its a sharp angle: e.g 134 degrees is a sharp right, -150 degrees is a sharp left.\nIf a car is driving in an opposite direction and is to the right of us, it is driving on an opposite lane.\nI'm also driving in a defensive way and I'm paying varying levels of attention to each object (I measure it in percentage from 0% to 100%) depending on how they might be a hazard that may cause me to change speed, direction, stop, or even cause harm to myself.\n\nNow design 16 random question and answer pairs that the student might ask about the current driving scenario. The answers should based on the current given input observations and your reasonings. Ask diverse questions, and give corresponding answers.\n\nFormat each QA pair in a single line as a JSON dictionary like {\"question\": \"xxx\", \"answer\": \"xxx\"}. Only output 16 lines of single-line JSON. Do not include any other explanation.\nMust include these 6 questions, but please rephase the question to a more natural way:\n- What are you seeing/observing\n- What are you paying attention to and why\n- Are there any traffic light / what's the color of the traffic light\n- What's your current speed and steering angle / current state\n- What is your action and why / how are you going to drive in this situation and why\n- Summarize the current driving scenario in high level / describe the current situation\n\nWhen asked about the action, always return the answer in this way:\n```\nHere are my actions:\n- Accelerator pedal 0%\n- Brake pedal 91%\n- Steering 31% to the left.\n\nReason:\nBecause...\n```\nAlso include one driving related question that cannot be observed in the observation input, and answer something like \"I'm unable to answer this question based on the observation I have\", and then describe your input observation\n\nAlso include one random question that is not related to driving, and answer something like \"As an AI Driver, the question you asked is out of my scope, but I can try answer it\" and then answer the question normally.\n"
] |
2024-01-10 | wayveai/Driving-with-LLMs | scripts~grade_vqa.py | import copy
import csv
import json
import pickle
import re
from multiprocessing import pool as mpp
from pathlib import Path
from typing import Optional
import click
import numpy as np
import openai
import random
from retry import retry
from tqdm import tqdm
SYSTEM_PROMPT = f"""You are a certified professional driving instructor in London, UK.
Your job is to teach students to drive and to grade their answers to a standardised driving test consisting of questions related to driving in an urban environment.
Your colleagues describe you as careful, charismatic, and smart. You always care about correct judgement of students.
## London driving
In London, we drive on the left side of the road according to the UK driving rules.
A red and yellow signal on a traffic light is typically used as a warning signal to indicate that the light is about to turn green.
As we drive, students observe multiple objects such as vehicles, pedestrians, and traffic lights around us.
If a car is driving in an opposite direction and is to the right of us, it is driving on an opposite lane.
## Units
For each object, the student must be aware of their direction and distance from our car. In the standardised test, we measure distances in meters and angles in degrees.
Positive distances mean the object is in front. Negative distances mean it's behind us.
An angle of 0 indicates an object is straight-ahead. An angle of 90 indicates the object is to the right, and -90 indicates it's on the left.
This means negative angles indicate the object is to the left of us, and positive angles that it's to the right of us.
An angle >= 90 degrees is considered to be a sharp angle: e.g 90 degrees is a sharp right, -90 degrees is a sharp left.
## Student test
Students should drive in a defensive way and they should pay varying levels of attention to different objects.
In the test we measure attention as a percentage from 0% to 100%, depending on how they might be a hazard that may cause me to change speed, direction, stop, or even cause harm to myself.
In your grading, it's REALLY important to check answers for their factually correctness. Even if the student gives a reasonable sounding answer, it might be factually incorrect.
Always think critically."""
def parse_question_answers(response):
vqa_data = re.findall(r"\{.*?\}", response)
for json_string in vqa_data:
json_string = json_string.replace("\t", " ").replace("\n", " ")
json_string = json_string.replace(",}", "}")
        json_string = json_string.replace("`", '"').replace("'", '"')
try:
json_dict = json.loads(json_string)
if not json_dict["question"] or not json_dict["answer"]:
continue
except Exception:
continue
yield json_dict
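# Illustrative usage sketch (not part of the original module): parse_question_answers
# yields one dict per single-line JSON QA pair found in the raw LLM response.
# The sample string below is made up.
def _example_parse_question_answers():
    sample = '{"question": "What color is the traffic light?", "answer": "It is red."}'
    return list(parse_question_answers(sample))  # [{"question": ..., "answer": ...}]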
class PoolWithTQDM(mpp.Pool):
def istarmap(self, func, iterable, chunksize=1):
"""starmap-version of imap - allows use of tqdm for progress bar with pool"""
self._check_running() # type: ignore
if chunksize < 1:
raise ValueError("Chunksize must be 1+, not {0:n}".format(chunksize))
task_batches = self._get_tasks(func, iterable, chunksize) # type: ignore
result = mpp.IMapIterator(self) # type: ignore
self._taskqueue.put( # type: ignore
(
self._guarded_task_generation(result._job, mpp.starmapstar, task_batches), # type: ignore
result._set_length, # type: ignore
)
)
return (item for chunk in result for item in chunk)
def __reduce__(self):
raise NotImplementedError("Pool objects cannot be passed between processes or pickled.")
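# Illustrative usage sketch (not part of the original module): istarmap lets tqdm
# update as results stream in, unlike Pool.starmap, which blocks until all tasks
# finish. The worker function and argument list are placeholders.
def _example_istarmap_progress(worker, args_list, processes=4):
    with PoolWithTQDM(processes) as pool:
        return list(tqdm(pool.istarmap(worker, args_list), total=len(args_list)))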
@retry(tries=16, delay=2, backoff=2, max_delay=10, jitter=(0, 5))
def grade(observation_prompt, question_answer_pairs, pred):
input_prompt = f"""You are now given a description of the student's observation. This observation includes their attention to objects as well as position and direction. Your task is to grade the answers.
### Scoring rules:
- Your scores for each answer should be between 0 (worst) and 10 (best).
- If the answer is totally wrong or the numbers are way off, the score should be 0.
- Give intermediate scores for partially correct answers, or numbers that are close. Only give a score of 10 for flawless answers.
- If the question is unrelated to driving, the only thing you need to check is if the student acknowledges that the question is unrelated to driving.
- Don't assess the student's observation nor their attention. They are unable to control it. Focus on their answers instead.
- Think critically and carefully. Most importantly check the answer for factual correctness.
### Grading process
For each of the {len(question_answer_pairs)} questions, provide a one line assessment of the student's answer in the format:
```
n. <short one sentence assessment>. Score: <score 0-10>.
```
A few examples of what a good assessment might look like:
1. Correctly identified the red traffic light. Score: 10.
2. No mention of pedestrian crossing street. Score: 2.
3. Acknowledged question unrelated to driving and attempted to answer. Score: 10.
4. Doesn't answer the question but has all information necessary. Score: 0.
5. Doesn't stop for the pedestrian ahead. Score: 0.
6. Correctly identified the attention level to the car. Score: 10.
7. Incorrectly stated there are no vehicles around even though there is one. Score: 0.
8. Unable to answer question given information available. Score: 10.
9. Gives 13.12 mph or m or percentage for the correct answer of 13.0. Score: 8.
10. Gives 14.12 mph or m or percentage for the correct answer of 13.0. Score: 6.
### Student's observation:
{observation_prompt}
### Student's questionnaire:
"""
for i, qa in enumerate(question_answer_pairs):
if pred is not None:
# Overwrite the answer with the prediction
prediction = pred[i]
question_answer_pairs[i]['answer'] = prediction
else:
prediction = qa['answer']
input_prompt += f"""Question {i+1}: {qa['question']}
Answer: {prediction}
"""
input_prompt += f"""
### Your assessment:"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": input_prompt},
],
top_p=0.8,
max_tokens=1000,
)
assessment = response['choices'][0]['message']['content']
pattern = re.compile(r'(\d+)\.\s*(.*)\s*Score:\s*(\d+)')
graded_qa_pairs = copy.deepcopy(question_answer_pairs)
for line in assessment.split('\n'):
match = pattern.match(line)
if match is None:
continue
question_index = int(match.group(1)) - 1
explanation = match.group(2).strip()
score = int(match.group(3))
if not 0 <= question_index < len(question_answer_pairs):
continue
graded_qa_pairs[question_index]['score'] = score
graded_qa_pairs[question_index]['score_explanation'] = explanation
return graded_qa_pairs
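# Example of how an assessment line from the model is parsed (illustrative):
#   "3. Correctly identified the red traffic light. Score: 10."
# matches the pattern above with group(1) == "3" (the 1-indexed question number),
# group(2) == the one-sentence explanation, and group(3) == "10" (the score),
# which are written back into the corresponding entry of graded_qa_pairs.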
def process_frame(frame, verbose=False):
observation_prompt = frame['input_prompt']
if verbose:
print("========= =========")
print("Observation: ")
print(observation_prompt)
question_answer_pairs = list(parse_question_answers(frame['response_content']))
grade_result = grade(observation_prompt, question_answer_pairs, frame.get("pred"))
if verbose:
for qa_pair in grade_result:
print("========= =========")
print(f"Question: {qa_pair['question']}")
print(f"Answer: {qa_pair['answer']}")
if 'score' in qa_pair:
print(f"Score: {qa_pair['score']}")
print(f"Explanation: {qa_pair['score_explanation']}")
else:
print("Score: N/A")
print("Explanation: N/A")
frame['response_content'] = '\n'.join([json.dumps(qa) for qa in grade_result])
return frame
def maybe_filter_result(data, result):
data_total_entries = 0
for d in data:
data_total_entries += len(list(parse_question_answers(d["response_content"])))
if len(result) != data_total_entries:
result = filter_vqa_result(data, result)
assert data_total_entries == len(result), f"len(data)={data_total_entries} != len(result)={len(result)}, consider run filter_vqa_result(data, result)"
return result
def filter_vqa_result(data, result):
dataset_items=["vqa", "caption", "action"]
data_dict= {
"frame_num": [],
"input": [],
"instruction": [],
"output": [],
"route_descriptors": [],
"vehicle_descriptors": [],
"pedestrian_descriptors": [],
"ego_vehicle_descriptor": [],
}
for d in data:
# VQA
if "vqa" in dataset_items:
obs_dict = d["observation"]
for json_dict in parse_question_answers(d["response_content"]):
data_dict["frame_num"].append(d["frame_num"])
data_dict["input"].append("")
data_dict["instruction"].append(json_dict["question"])
data_dict["output"].append(json_dict["answer"])
# Captioning
if "caption" in dataset_items:
data_dict["frame_num"].append(d["frame_num"])
data_dict["input"].append("")
data_dict["instruction"].append("")
data_dict["output"].append("")
# Action
if "action" in dataset_items:
data_dict["frame_num"].append(d["frame_num"])
data_dict["input"].append("")
data_dict["instruction"].append("")
data_dict["output"].append("\n".join(d["input_prompt"].split("\n")[-4:]))
filtered_result = []
for d,r in zip(data_dict["instruction"], result):
if d != "":
filtered_result.append(r)
return filtered_result
def load_result(filepath):
with open(filepath, 'r') as f:
data = json.load(f)
# Extract the 'data' list from the loaded JSON data
data_list = data["data"]
# Initialize an empty list to store the dictionaries
result = []
# Iterate over each sublist in data_list
for sublist in data_list:
# Create a dictionary for each sublist and append it to the result list
result.append({
"pred": sublist[0].split("\n")[-1],
"label": sublist[1]
})
return result
def get_avg_score(results):
all_scores = []
for res in results:
resp = res["response_content"]
# Splitting the string into separate lines (JSON objects)
json_lines = resp.strip().split("\n")
scores = []
for line in json_lines:
try:
loaded_json = json.loads(line)
if "score" in loaded_json:
scores.append(loaded_json["score"])
except json.JSONDecodeError:
continue # Skip the line if JSON decoding fails
if scores:
all_scores.extend(scores)
# print(all_scores)
print("avg score:", np.mean(all_scores))
def save_list_of_dicts_to_csv(list_of_dicts, file_path, ignored_columns=None):
if ignored_columns is None:
ignored_columns = []
# Ensure the list is not empty
if len(list_of_dicts) == 0:
return
# Get keys but exclude those in ignored_columns
keys = [k for k in list_of_dicts[0].keys() if k not in ignored_columns]
with open(file_path, 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
# Only include rows with keys that we're interested in
for dictionary in list_of_dicts:
filtered_dict = {k: dictionary[k] for k in keys}
dict_writer.writerow(filtered_dict)
@click.command()
@click.option(
'-i',
'evaluation_data_path',
type=click.Path(path_type=Path),
required=True,
    help='Path to evaluation data file. This should have the same format as the dataset produced by make_prompt_vqa.py.',
)
@click.option(
'-r',
'evaluation_result_path',
type=click.Path(path_type=Path),
help='Path to evaluation result file. This should have the same format as the dataset produced by make_prompt_vqa.py.',
)
@click.option(
'-o',
'output_path',
type=click.Path(path_type=Path, file_okay=True, exists=False),
    help='Path to output file. This should have the same format as the dataset produced by make_prompt_vqa.py, with extra score and score_explanation fields added to each question.',
)
@click.option(
'-l', '--limit', 'limit_n', type=int, default=None, help='Limit the number of examples to grade to the first n.'
)
@click.option('-d', '--debug', is_flag=True, default=False, help='Debug mode.')
@click.option('-w', 'num_workers', type=int, default=1, help='Number of workers to use.')
@click.option("--openai_api", required=True, type=str)
@click.option('-s', 'shuffle', is_flag=True, default=False, help='[DANGEROUS] Shuffle the label to create wrong answers to questions')
@click.option('-k', '--idk', is_flag=True, default=False, help='[DANGEROUS] Overwrite the label with "I dont know" to create wrong answers to questions')
def grade_vqa(
evaluation_data_path: Path,
evaluation_result_path: Path,
output_path: Optional[Path],
limit_n: Optional[int],
debug: bool = False,
num_workers: int = 1,
openai_api: str = None,
shuffle: bool = False,
idk: bool = False,
):
"""
Evaluate the outputs of a Vector QA model using the OpenAI api.
Example outputs:
~/isim_obs_8k_vqa.pkl
/mnt/remote/data/users/sinavski/vec_llm/isim_obs_8k_vqa.pkl
"""
openai.api_key = openai_api
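    # Illustrative invocation (a sketch; all paths and the API key are placeholders):
    #   python <path/to/this/script> \
    #       -i vqa_prompts.pkl \
    #       -r eval_result.json \
    #       -o graded_output.pkl \
    #       -w 4 \
    #       --openai_api sk-...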
assert evaluation_data_path.exists(), f"evaluation_data_path={evaluation_data_path} does not exist."
with open(evaluation_data_path, "rb") as f:
data = pickle.load(f)
result = load_result(evaluation_result_path) if evaluation_result_path else None
if result:
result = maybe_filter_result(data, result)
if shuffle:
print("Shuffling the result")
random.seed(42)
random.shuffle(result)
if idk:
print("Overwriting the label with I don't know")
result = [{"pred": "I don't know", "label": 0} for res in result]
for i,frame in enumerate(data):
gt_qa_pairs = parse_question_answers(frame['response_content'])
pred_qa_pairs = [result.pop(0)["pred"] for qa_pair in gt_qa_pairs]
data[i]["pred"] = pred_qa_pairs
if limit_n is not None:
data = data[:limit_n]
if debug:
data = data[:3]
assert output_path is not None, "output_path must be specified when not in debug mode."
if not click.confirm(f"About to grade {len(data)} examples. Continue?"):
print("Aborting.")
return
if output_path.exists() and not click.confirm(f"{output_path} already exists. Overwrite?"):
print("Aborting.")
return
with PoolWithTQDM(num_workers) as pool:
results = []
for graded_frame in tqdm(pool.imap(process_frame, data), total=len(data), ncols=120, desc="Processing frames"):
results.append(graded_frame)
with open(output_path, 'wb') as f:
pickle.dump(results, f)
save_list_of_dicts_to_csv(results, output_path.with_suffix(".csv"), ignored_columns=["observation", "pred"])
get_avg_score(results)
print(f"Grading complete. Results saved to {output_path} and {output_path.with_suffix('.csv')}")
if __name__ == '__main__':
grade_vqa() # pylint: disable=no-value-for-parameter
| [
"question",
"You are now given a description of the student's observation. This observation includes their attention to objects as well as position and direction. Your task is to grade the answers.\n\n### Scoring rules:\n- Your scores for each answer should be between 0 (worst) and 10 (best).\n- If the answer is totally wrong or the numbers are way off, the score should be 0.\n- Give intermediate scores for partially correct answers, or numbers that are close. Only give a score of 10 for flawless answers.\n- If the question is unrelated to driving, the only thing you need to check is if the student acknowledges that the question is unrelated to driving.\n- Don't assess the student's observation nor their attention. They are unable to control it. Focus on their answers instead.\n- Think critically and carefully. Most importantly check the answer for factual correctness.\n\n\n### Grading process\n\nFor each of the 1 questions, provide a one line assessment of the student's answer in the format:\n\n```\nn. <short one sentence assessment>. Score: <score 0-10>.\n```\n\nA few examples of what a good assessment might look like:\n1. Correctly identified the the red traffic light. Score: 10.\n2. No mention of pedestrian crossing street. Score: 2.\n3. Acknowledged question unrelated to driving and attempted to answer. Score: 10.\n4. Doesn't answer the question but has all information necessary. Score: 0.\n5. Doesn't stop for the pedestrian ahead. Score: 0.\n6. Correctly identified the attention level to the car. Score: 10.\n7. Incorrectly stated there are no vehicles around even though there is one. Score: 0.\n8. Unable to answer question given information available. Score: 10.\n9. Give 13.12 mph or m or percentage for the correct answer of 13.0. Score: 8.\n10. Give 14.12 mph or m or percentage for the correct answer of 13.0. Score: 6.\n\n\n### Student's observation:\nPLACEHOLDER\n\n\n### Student's questionnaire:\n",
"input_prompt",
"You are a certified professional driving instructor in London, UK.\nYour job is to teach students to drive and to grade their answers to a standardised driving test consisting of questions related to driving in an urban environment.\nYour colleagues describe you as careful, charismatic, and smart. You always care about correct judgement of students.\n\n## London driving\nIn London, we drive on the left side of the road according to the UK driving rules.\nA red and yellow signal on a traffic light is typically used as a warning signal to indicate that the light is about to turn green.\nAs we drive, students observe multiple objects such as vehicles, pedestrians, and traffic lights around us.\nIf a car is driving in an opposite direction and is to the right of us, it is driving on an opposite lane.\n\n## Units\nFor each object, the student must be aware of their direction and distance from our car. In the standardised test, we measure distances in meters and angles in degrees.\nPositive distances mean the object is in front. Negative distances mean it's behind us.\nAn angle of 0 indicates an object is straight-ahead. An angle of 90 indicates the object is to the right, and -90 indicates it's on the left.\nThis means negative angles indicate the object is to the left of us, and positive angles that it's to the right of us.\nAn angle >= 90 degrees is considered to be a sharp angle: e.g 90 degrees is a sharp right, -90 degrees is a sharp left.\n\n## Student test\nStudents should drive in a defensive way and they should pay varying levels of attention to different objects.\nIn the test we measure attention as a percentage from 0% to 100%, depending on how they might be a hazard that may cause me to change speed, direction, stop, or even cause harm to myself.\nIn your grading, it's REALLY important to check answers for their factually correctness. Even if the student gives a reasonable sounding answer, it might be factually incorrect.\nAlways think critically.",
"\n\n\n### Your assessment:"
] |
2024-01-10 | Nileshmalav/Mental-Health-Project | myenv~Lib~site-packages~llama_index~llm_predictor~chatgpt.py | """Wrapper functions around an LLM chain."""
import logging
from typing import Any, List, Optional, Union
import openai
from langchain import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import (
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseLanguageModel, BaseMessage
from llama_index.llm_predictor.base import LLMPredictor
from llama_index.prompts.base import Prompt
from llama_index.utils import ErrorToRetry, retry_on_exceptions_with_backoff
logger = logging.getLogger(__name__)
class ChatGPTLLMPredictor(LLMPredictor):
"""ChatGPT Specific LLM predictor class.
Wrapper around an LLMPredictor to provide ChatGPT specific features.
Args:
llm (Optional[langchain.llms.base.LLM]): LLM from Langchain to use
for predictions. Defaults to OpenAI's text-davinci-003 model.
Please see `Langchain's LLM Page
<https://langchain.readthedocs.io/en/latest/modules/llms.html>`_
for more details.
retry_on_throttling (bool): Whether to retry on rate limit errors.
Defaults to true.
"""
def __init__(
self,
llm: Optional[BaseLanguageModel] = None,
prepend_messages: Optional[
List[Union[BaseMessagePromptTemplate, BaseMessage]]
] = None,
**kwargs: Any
) -> None:
"""Initialize params."""
super().__init__(
llm=llm or ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"), **kwargs
)
self.prepend_messages = prepend_messages
def _get_langchain_prompt(
self, prompt: Prompt
) -> Union[ChatPromptTemplate, BasePromptTemplate]:
"""Add prepend_messages to prompt."""
lc_prompt = prompt.get_langchain_prompt(llm=self._llm)
if self.prepend_messages:
if isinstance(lc_prompt, PromptTemplate):
msgs = self.prepend_messages + [
HumanMessagePromptTemplate.from_template(lc_prompt.template)
]
lc_prompt = ChatPromptTemplate.from_messages(msgs)
elif isinstance(lc_prompt, ChatPromptTemplate):
lc_prompt.messages = self.prepend_messages + lc_prompt.messages
return lc_prompt
def _predict(self, prompt: Prompt, **prompt_args: Any) -> str:
"""Inner predict function.
If retry_on_throttling is true, we will retry on rate limit errors.
"""
lc_prompt = self._get_langchain_prompt(prompt)
llm_chain = LLMChain(prompt=lc_prompt, llm=self._llm)
# Note: we don't pass formatted_prompt to llm_chain.predict because
# langchain does the same formatting under the hood
full_prompt_args = prompt.get_full_format_args(prompt_args)
if self.retry_on_throttling:
llm_prediction = retry_on_exceptions_with_backoff(
lambda: llm_chain.predict(**full_prompt_args),
[
ErrorToRetry(openai.error.RateLimitError),
ErrorToRetry(openai.error.ServiceUnavailableError),
ErrorToRetry(openai.error.TryAgain),
ErrorToRetry(
openai.error.APIConnectionError, lambda e: e.should_retry
),
],
)
else:
llm_prediction = llm_chain.predict(**full_prompt_args)
return llm_prediction
async def _apredict(self, prompt: Prompt, **prompt_args: Any) -> str:
"""Async inner predict function.
If retry_on_throttling is true, we will retry on rate limit errors.
"""
lc_prompt = self._get_langchain_prompt(prompt)
llm_chain = LLMChain(prompt=lc_prompt, llm=self._llm)
# Note: we don't pass formatted_prompt to llm_chain.predict because
# langchain does the same formatting under the hood
full_prompt_args = prompt.get_full_format_args(prompt_args)
# TODO: support retry on throttling
llm_prediction = await llm_chain.apredict(**full_prompt_args)
return llm_prediction
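# Illustrative usage (a sketch; assumes langchain's SystemMessagePromptTemplate):
#   from langchain.prompts.chat import SystemMessagePromptTemplate
#   predictor = ChatGPTLLMPredictor(
#       prepend_messages=[
#           SystemMessagePromptTemplate.from_template("You are a helpful assistant."),
#       ]
#   )
# _get_langchain_prompt inserts the prepended messages ahead of every Prompt, so
# each prediction is made with the same system context.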
| [
"self.prepend_messages + [\n HumanMessagePromptTemplate.from_template(lc_prompt.template)\n ]"
] |
2024-01-10 | Nileshmalav/Mental-Health-Project | myenv~Lib~site-packages~llama_index~indices~query~struct_store~pandas.py | """Default query for GPTPandasIndex."""
import logging
from typing import Any, Callable, Optional
import pandas as pd
from langchain.input import print_text
from llama_index.data_structs.table import PandasStructTable
from llama_index.indices.query.base import BaseGPTIndexQuery
from llama_index.indices.query.schema import QueryBundle
from llama_index.prompts.default_prompts import DEFAULT_PANDAS_PROMPT
from llama_index.prompts.prompts import PandasPrompt
from llama_index.response.schema import Response
logger = logging.getLogger(__name__)
DEFAULT_INSTRUCTION_STR = (
"We wish to convert this query to executable Python code using Pandas.\n"
"The final line of code should be a Python expression that can be called "
"with the `eval()` function. This expression should represent a solution "
"to the query."
)
def default_output_processor(
output: str, df: pd.DataFrame, **output_kwargs: Any
) -> str:
"""Process outputs in a default manner."""
import ast
import sys
import traceback
if sys.version_info < (3, 9):
logger.warn(
"Python version must be >= 3.9 in order to use "
"the default output processor, which executes "
"the Python query. Instead, we will return the "
"raw Python instructions as a string."
)
return output
local_vars = {"df": df}
# NOTE: inspired from langchain's tool
# see langchain.tools.python.tool (PythonAstREPLTool)
try:
tree = ast.parse(output)
module = ast.Module(tree.body[:-1], type_ignores=[])
exec(ast.unparse(module), {}, local_vars) # type: ignore
module_end = ast.Module(tree.body[-1:], type_ignores=[])
module_end_str = ast.unparse(module_end) # type: ignore
try:
return str(eval(module_end_str, {}, local_vars))
except Exception as e:
raise e
except Exception as e:
err_string = (
"There was an error running the output as Python code. "
f"Error message: {e}"
)
traceback.print_exc()
return err_string
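# Illustrative example (a sketch; the dataframe and LLM output are hypothetical):
#   df = pd.DataFrame({"age": [21, 35, 44]})
#   default_output_processor('df["age"].mean()', df)
# parses the output with ast, exec's all but the last statement with `df` in scope,
# then eval's the final expression and returns its value as a string (here, the
# mean of the "age" column). Requires Python >= 3.9, as warned above.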
class GPTNLPandasIndexQuery(BaseGPTIndexQuery[PandasStructTable]):
"""GPT Pandas query.
Convert natural language to Pandas python code.
.. code-block:: python
response = index.query("<query_str>", mode="default")
Args:
df (pd.DataFrame): Pandas dataframe to use.
instruction_str (Optional[str]): Instruction string to use.
output_processor (Optional[Callable[[str], str]]): Output processor.
A callable that takes in the output string, pandas DataFrame,
and any output kwargs and returns a string.
pandas_prompt (Optional[PandasPrompt]): Pandas prompt to use.
head (int): Number of rows to show in the table context.
"""
def __init__(
self,
index_struct: PandasStructTable,
df: Optional[pd.DataFrame] = None,
instruction_str: Optional[str] = None,
output_processor: Optional[Callable] = None,
pandas_prompt: Optional[PandasPrompt] = None,
output_kwargs: Optional[dict] = None,
head: int = 5,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(index_struct=index_struct, **kwargs)
if df is None:
raise ValueError("df must be provided.")
self.df = df
self._head = head
self._pandas_prompt = pandas_prompt or DEFAULT_PANDAS_PROMPT
self._instruction_str = instruction_str or DEFAULT_INSTRUCTION_STR
self._output_processor = output_processor or default_output_processor
self._output_kwargs = output_kwargs or {}
def _get_table_context(self) -> str:
"""Get table context."""
return str(self.df.head(self._head))
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
context = self._get_table_context()
pandas_response_str, _ = self._llm_predictor.predict(
self._pandas_prompt,
df_str=context,
query_str=query_bundle.query_str,
instruction_str=self._instruction_str,
)
if self._verbose:
print_text(f"> Pandas Instructions:\n" f"```\n{pandas_response_str}\n```\n")
pandas_output = self._output_processor(
pandas_response_str,
self.df,
**self._output_kwargs,
)
if self._verbose:
print_text(f"> Pandas Output: {pandas_output}\n")
response_extra_info = {
"pandas_instruction_str": pandas_response_str,
}
return Response(response=pandas_output, extra_info=response_extra_info)
| [] |
2024-01-10 | Nileshmalav/Mental-Health-Project | Talking_Buddy~gptcode.py | from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import os
def construct_index(directory_path):
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 2000
# set maximum chunk overlap
max_chunk_overlap = 20
# set chunk size limit
chunk_size_limit = 600
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="text-davinci-003", max_tokens=num_outputs))
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
documents = SimpleDirectoryReader(directory_path).load_data()
index = GPTSimpleVectorIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
index.save_to_disk('index.json')
return index
def ask_ai(query):
index = GPTSimpleVectorIndex.load_from_disk('./Talking_Buddy/index.json')
response = index.query(query, response_mode="compact")
return response.response
os.environ["OPENAI_API_KEY"] = "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"
construct_index("./Talking_Buddy/data")
| [] |
2024-01-10 | Nileshmalav/Mental-Health-Project | myenv~Lib~site-packages~llama_index~optimization~optimizer.py | """Optimization related classes and functions."""
import logging
from abc import abstractmethod
from typing import Callable, List, Optional
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.query.embedding_utils import get_top_k_embeddings
from llama_index.indices.query.schema import QueryBundle
logger = logging.getLogger(__name__)
class BaseTokenUsageOptimizer:
"""Base class for optimizers that should be overwritten."""
@abstractmethod
def optimize(self, query_bundle: QueryBundle, text: str) -> str:
"""Optimize the input text given the query."""
raise NotImplementedError("Not implemented yet.")
class SentenceEmbeddingOptimizer(BaseTokenUsageOptimizer):
"""Optimization of a text chunk given the query by shortening the input text."""
def __init__(
self,
embed_model: Optional[BaseEmbedding] = None,
percentile_cutoff: Optional[float] = None,
threshold_cutoff: Optional[float] = None,
tokenizer_fn: Optional[Callable[[str], List[str]]] = None,
):
"""Optimizer class that is passed into BaseGPTIndexQuery.
Should be set like this:
.. code-block:: python
from llama_index.optimization.optimizer import Optimizer
optimizer = SentenceEmbeddingOptimizer(
percentile_cutoff=0.5
this means that the top 50% of sentences will be used.
Alternatively, you can set the cutoff using a threshold
            on the similarity score. In this case only sentences with a
similarity score higher than the threshold will be used.
threshold_cutoff=0.7
these cutoffs can also be used together.
)
response = index.query(
"<query_str>", optimizer=optimizer
)
"""
self.embed_model = embed_model or OpenAIEmbedding()
self._percentile_cutoff = percentile_cutoff
self._threshold_cutoff = threshold_cutoff
if tokenizer_fn is None:
import nltk.data
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt")
tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
tokenizer_fn = tokenizer.tokenize
self._tokenizer_fn = tokenizer_fn
def optimize(self, query_bundle: QueryBundle, text: str) -> str:
"""Optimize a text chunk given the query by shortening the input text."""
split_text = self._tokenizer_fn(text)
start_embed_token_ct = self.embed_model.total_tokens_used
if query_bundle.embedding is None:
query_bundle.embedding = self.embed_model.get_agg_embedding_from_queries(
query_bundle.embedding_strs
)
text_embeddings = self.embed_model._get_text_embeddings(split_text)
num_top_k = None
threshold = None
if self._percentile_cutoff is not None:
num_top_k = int(len(split_text) * self._percentile_cutoff)
if self._threshold_cutoff is not None:
threshold = self._threshold_cutoff
top_similarities, top_idxs = get_top_k_embeddings(
query_embedding=query_bundle.embedding,
embeddings=text_embeddings,
similarity_fn=self.embed_model.similarity,
similarity_top_k=num_top_k,
embedding_ids=[i for i in range(len(text_embeddings))],
similarity_cutoff=threshold,
)
net_embed_tokens = self.embed_model.total_tokens_used - start_embed_token_ct
logger.info(
f"> [optimize] Total embedding token usage: " f"{net_embed_tokens} tokens"
)
if len(top_idxs) == 0:
raise ValueError("Optimizer returned zero sentences.")
top_sentences = [split_text[i] for i in top_idxs]
logger.debug(f"> Top {len(top_idxs)} sentences with scores:\n")
for i in range(len(top_idxs)):
logger.debug(f"{i}. {top_sentences[i]} ({top_similarities[i]})")
return " ".join(top_sentences)
| [] |
2024-01-10 | Nileshmalav/Mental-Health-Project | myenv~Lib~site-packages~llama_index~langchain_helpers~agents~toolkits.py | """LlamaIndex toolkit."""
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from pydantic import Field
from llama_index.langchain_helpers.agents.tools import (
GraphToolConfig,
IndexToolConfig,
LlamaGraphTool,
LlamaIndexTool,
)
class LlamaToolkit(BaseToolkit):
"""Toolkit for interacting with Llama indices."""
index_configs: List[IndexToolConfig] = Field(default_factory=list)
graph_configs: List[GraphToolConfig] = Field(default_factory=list)
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
index_tools: List[BaseTool] = [
LlamaIndexTool.from_tool_config(tool_config=tool_config)
for tool_config in self.index_configs
]
graph_tools: List[BaseTool] = [
LlamaGraphTool.from_tool_config(tool_config=tool_config)
for tool_config in self.graph_configs
]
return index_tools + graph_tools
| [] |
2024-01-10 | Nileshmalav/Mental-Health-Project | myenv~Lib~site-packages~llama_index~llm_predictor~base.py | """Wrapper functions around an LLM chain."""
import logging
from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Generator, Optional, Protocol, Tuple
import openai
from langchain import Cohere, LLMChain, OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.llms import AI21
from langchain.schema import BaseLanguageModel
from llama_index.constants import MAX_CHUNK_SIZE, NUM_OUTPUTS
from llama_index.prompts.base import Prompt
from llama_index.utils import (
ErrorToRetry,
globals_helper,
retry_on_exceptions_with_backoff,
)
logger = logging.getLogger(__name__)
GPT4_CONTEXT_SIZE = 8192
GPT4_32K_CONTEXT_SIZE = 32768
@dataclass
class LLMMetadata:
"""LLM metadata.
We extract this metadata to help with our prompts.
"""
max_input_size: int = MAX_CHUNK_SIZE
num_output: int = NUM_OUTPUTS
def _get_llm_metadata(llm: BaseLanguageModel) -> LLMMetadata:
"""Get LLM metadata from llm."""
if not isinstance(llm, BaseLanguageModel):
raise ValueError("llm must be an instance of langchain.llms.base.LLM")
if isinstance(llm, OpenAI):
return LLMMetadata(
max_input_size=llm.modelname_to_contextsize(llm.model_name),
num_output=llm.max_tokens,
)
elif isinstance(llm, ChatOpenAI):
# TODO: remove hardcoded context size once available via langchain.
# NOTE: if max tokens isn't specified, set to 4096
max_tokens = llm.max_tokens or 4096
if llm.model_name == "gpt-4":
return LLMMetadata(max_input_size=GPT4_CONTEXT_SIZE, num_output=max_tokens)
elif llm.model_name == "gpt-4-32k":
return LLMMetadata(
max_input_size=GPT4_32K_CONTEXT_SIZE, num_output=max_tokens
)
else:
logger.warn(
"Unknown max input size for %s, using defaults.", llm.model_name
)
return LLMMetadata()
elif isinstance(llm, Cohere):
max_tokens = llm.max_tokens or 2048
# TODO: figure out max input size for cohere
return LLMMetadata(num_output=max_tokens)
elif isinstance(llm, AI21):
# TODO: figure out max input size for AI21
return LLMMetadata(num_output=llm.maxTokens)
else:
return LLMMetadata()
def _get_response_gen(openai_response_stream: Generator) -> Generator:
"""Get response generator from openai response stream."""
for response in openai_response_stream:
yield response["choices"][0]["text"]
class BaseLLMPredictor(Protocol):
"""Base LLM Predictor."""
@abstractmethod
def get_llm_metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
@abstractmethod
def predict(self, prompt: Prompt, **prompt_args: Any) -> Tuple[str, str]:
"""Predict the answer to a query.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
Tuple[str, str]: Tuple of the predicted answer and the formatted prompt.
"""
@abstractmethod
def stream(self, prompt: Prompt, **prompt_args: Any) -> Tuple[Generator, str]:
"""Stream the answer to a query.
NOTE: this is a beta feature. Will try to build or use
better abstractions about response handling.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
str: The predicted answer.
"""
@property
@abstractmethod
def total_tokens_used(self) -> int:
"""Get the total tokens used so far."""
@property
@abstractmethod
def last_token_usage(self) -> int:
"""Get the last token usage."""
@last_token_usage.setter
@abstractmethod
def last_token_usage(self, value: int) -> None:
"""Set the last token usage."""
@abstractmethod
async def apredict(self, prompt: Prompt, **prompt_args: Any) -> Tuple[str, str]:
"""Async predict the answer to a query.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
Tuple[str, str]: Tuple of the predicted answer and the formatted prompt.
"""
class LLMPredictor(BaseLLMPredictor):
"""LLM predictor class.
Wrapper around an LLMChain from Langchain.
Args:
llm (Optional[langchain.llms.base.LLM]): LLM from Langchain to use
for predictions. Defaults to OpenAI's text-davinci-003 model.
Please see `Langchain's LLM Page
<https://langchain.readthedocs.io/en/latest/modules/llms.html>`_
for more details.
retry_on_throttling (bool): Whether to retry on rate limit errors.
Defaults to true.
"""
def __init__(
self, llm: Optional[BaseLanguageModel] = None, retry_on_throttling: bool = True
) -> None:
"""Initialize params."""
self._llm = llm or OpenAI(temperature=0, model_name="text-davinci-003")
self.retry_on_throttling = retry_on_throttling
self._total_tokens_used = 0
self.flag = True
self._last_token_usage: Optional[int] = None
@property
def llm(self) -> BaseLanguageModel:
"""Get LLM."""
return self._llm
def get_llm_metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
# TODO: refactor mocks in unit tests, this is a stopgap solution
if hasattr(self, "_llm") and self._llm is not None:
return _get_llm_metadata(self._llm)
else:
return LLMMetadata()
def _predict(self, prompt: Prompt, **prompt_args: Any) -> str:
"""Inner predict function.
If retry_on_throttling is true, we will retry on rate limit errors.
"""
llm_chain = LLMChain(
prompt=prompt.get_langchain_prompt(llm=self._llm), llm=self._llm
)
# Note: we don't pass formatted_prompt to llm_chain.predict because
# langchain does the same formatting under the hood
full_prompt_args = prompt.get_full_format_args(prompt_args)
if self.retry_on_throttling:
llm_prediction = retry_on_exceptions_with_backoff(
lambda: llm_chain.predict(**full_prompt_args),
[
ErrorToRetry(openai.error.RateLimitError),
ErrorToRetry(openai.error.ServiceUnavailableError),
ErrorToRetry(openai.error.TryAgain),
ErrorToRetry(
openai.error.APIConnectionError, lambda e: e.should_retry
),
],
)
else:
llm_prediction = llm_chain.predict(**full_prompt_args)
return llm_prediction
def predict(self, prompt: Prompt, **prompt_args: Any) -> Tuple[str, str]:
"""Predict the answer to a query.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
Tuple[str, str]: Tuple of the predicted answer and the formatted prompt.
"""
formatted_prompt = prompt.format(llm=self._llm, **prompt_args)
llm_prediction = self._predict(prompt, **prompt_args)
logger.debug(llm_prediction)
# We assume that the value of formatted_prompt is exactly the thing
# eventually sent to OpenAI, or whatever LLM downstream
prompt_tokens_count = self._count_tokens(formatted_prompt)
prediction_tokens_count = self._count_tokens(llm_prediction)
self._total_tokens_used += prompt_tokens_count + prediction_tokens_count
return llm_prediction, formatted_prompt
def stream(self, prompt: Prompt, **prompt_args: Any) -> Tuple[Generator, str]:
"""Stream the answer to a query.
NOTE: this is a beta feature. Will try to build or use
better abstractions about response handling.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
str: The predicted answer.
"""
if not isinstance(self._llm, OpenAI):
raise ValueError("stream is only supported for OpenAI LLMs")
formatted_prompt = prompt.format(llm=self._llm, **prompt_args)
raw_response_gen = self._llm.stream(formatted_prompt)
response_gen = _get_response_gen(raw_response_gen)
# NOTE/TODO: token counting doesn't work with streaming
return response_gen, formatted_prompt
@property
def total_tokens_used(self) -> int:
"""Get the total tokens used so far."""
return self._total_tokens_used
def _count_tokens(self, text: str) -> int:
tokens = globals_helper.tokenizer(text)
return len(tokens)
@property
def last_token_usage(self) -> int:
"""Get the last token usage."""
if self._last_token_usage is None:
return 0
return self._last_token_usage
@last_token_usage.setter
def last_token_usage(self, value: int) -> None:
"""Set the last token usage."""
self._last_token_usage = value
async def _apredict(self, prompt: Prompt, **prompt_args: Any) -> str:
"""Async inner predict function.
If retry_on_throttling is true, we will retry on rate limit errors.
"""
llm_chain = LLMChain(
prompt=prompt.get_langchain_prompt(llm=self._llm), llm=self._llm
)
# Note: we don't pass formatted_prompt to llm_chain.predict because
# langchain does the same formatting under the hood
full_prompt_args = prompt.get_full_format_args(prompt_args)
# TODO: support retry on throttling
llm_prediction = await llm_chain.apredict(**full_prompt_args)
return llm_prediction
async def apredict(self, prompt: Prompt, **prompt_args: Any) -> Tuple[str, str]:
"""Async predict the answer to a query.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
Tuple[str, str]: Tuple of the predicted answer and the formatted prompt.
"""
formatted_prompt = prompt.format(llm=self._llm, **prompt_args)
llm_prediction = await self._apredict(prompt, **prompt_args)
logger.debug(llm_prediction)
# We assume that the value of formatted_prompt is exactly the thing
# eventually sent to OpenAI, or whatever LLM downstream
prompt_tokens_count = self._count_tokens(formatted_prompt)
prediction_tokens_count = self._count_tokens(llm_prediction)
self._total_tokens_used += prompt_tokens_count + prediction_tokens_count
return llm_prediction, formatted_prompt
| [] |
2024-01-10 | Nileshmalav/Mental-Health-Project | myenv~Lib~site-packages~llama_index~indices~query~tree~leaf_query.py | """Leaf query mechanism."""
import logging
from typing import Any, Dict, Optional, cast
from langchain.input import print_text
from llama_index.data_structs.data_structs import IndexGraph, Node
from llama_index.indices.query.base import BaseGPTIndexQuery
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.response.builder import ResponseBuilder
from llama_index.indices.utils import extract_numbers_given_response, get_sorted_node_list
from llama_index.prompts.default_prompts import (
DEFAULT_QUERY_PROMPT,
DEFAULT_QUERY_PROMPT_MULTIPLE,
)
from llama_index.prompts.prompts import TreeSelectMultiplePrompt, TreeSelectPrompt
from llama_index.response.schema import Response
logger = logging.getLogger(__name__)
class GPTTreeIndexLeafQuery(BaseGPTIndexQuery[IndexGraph]):
"""GPT Tree Index leaf query.
This class traverses the index graph and searches for a leaf node that can best
answer the query.
.. code-block:: python
response = index.query("<query_str>", mode="default")
Args:
query_template (Optional[TreeSelectPrompt]): Tree Select Query Prompt
(see :ref:`Prompt-Templates`).
query_template_multiple (Optional[TreeSelectMultiplePrompt]): Tree Select
Query Prompt (Multiple)
(see :ref:`Prompt-Templates`).
child_branch_factor (int): Number of child nodes to consider at each level.
If child_branch_factor is 1, then the query will only choose one child node
to traverse for any given parent node.
If child_branch_factor is 2, then the query will choose two child nodes.
"""
def __init__(
self,
index_struct: IndexGraph,
query_template: Optional[TreeSelectPrompt] = None,
query_template_multiple: Optional[TreeSelectMultiplePrompt] = None,
child_branch_factor: int = 1,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(index_struct, **kwargs)
self.query_template = query_template or DEFAULT_QUERY_PROMPT
self.query_template_multiple = (
query_template_multiple or DEFAULT_QUERY_PROMPT_MULTIPLE
)
self.child_branch_factor = child_branch_factor
def _query_with_selected_node(
self,
selected_node: Node,
query_bundle: QueryBundle,
prev_response: Optional[str] = None,
level: int = 0,
) -> str:
"""Get response for selected node.
If not leaf node, it will recursively call _query on the child nodes.
If prev_response is provided, we will update prev_response with the answer.
"""
query_str = query_bundle.query_str
if len(selected_node.child_indices) == 0:
response_builder = ResponseBuilder(
self._prompt_helper,
self._llm_predictor,
self.text_qa_template,
self.refine_template,
)
self.response_builder.add_node_as_source(selected_node)
# use response builder to get answer from node
node_text, sub_response = self._get_text_from_node(
query_bundle, selected_node, level=level
)
if sub_response is not None:
# these are source nodes from within this node (when it's an index)
for source_node in sub_response.source_nodes:
self.response_builder.add_source_node(source_node)
cur_response = response_builder.get_response_over_chunks(
query_str, [node_text], prev_response=prev_response
)
cur_response = cast(str, cur_response)
logger.debug(f">[Level {level}] Current answer response: {cur_response} ")
else:
cur_response = self._query_level(
{
i: self.index_struct.all_nodes[i]
for i in selected_node.child_indices
},
query_bundle,
level=level + 1,
)
if prev_response is None:
return cur_response
else:
context_msg = selected_node.get_text()
cur_response, formatted_refine_prompt = self._llm_predictor.predict(
self.refine_template,
query_str=query_str,
existing_answer=prev_response,
context_msg=context_msg,
)
logger.debug(f">[Level {level}] Refine prompt: {formatted_refine_prompt}")
logger.debug(f">[Level {level}] Current refined response: {cur_response} ")
return cur_response
def _query_level(
self,
cur_nodes: Dict[int, Node],
query_bundle: QueryBundle,
level: int = 0,
) -> str:
"""Answer a query recursively."""
query_str = query_bundle.query_str
cur_node_list = get_sorted_node_list(cur_nodes)
if len(cur_node_list) == 1:
logger.debug(f">[Level {level}] Only one node left. Querying node.")
return self._query_with_selected_node(
cur_node_list[0], query_bundle, level=level
)
elif self.child_branch_factor == 1:
query_template = self.query_template.partial_format(
num_chunks=len(cur_node_list), query_str=query_str
)
numbered_node_text = self._prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template
)
response, formatted_query_prompt = self._llm_predictor.predict(
query_template,
context_list=numbered_node_text,
)
else:
query_template_multiple = self.query_template_multiple.partial_format(
num_chunks=len(cur_node_list),
query_str=query_str,
branching_factor=self.child_branch_factor,
)
numbered_node_text = self._prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template_multiple
)
response, formatted_query_prompt = self._llm_predictor.predict(
query_template_multiple,
context_list=numbered_node_text,
)
logger.debug(
f">[Level {level}] current prompt template: {formatted_query_prompt}"
)
self._llama_logger.add_log(
{"formatted_prompt_template": formatted_query_prompt, "level": level}
)
debug_str = f">[Level {level}] Current response: {response}"
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
numbers = extract_numbers_given_response(response, n=self.child_branch_factor)
if numbers is None:
debug_str = (
f">[Level {level}] Could not retrieve response - no numbers present"
)
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
# just join text from current nodes as response
return response
result_response = None
for number_str in numbers:
number = int(number_str)
if number > len(cur_node_list):
logger.debug(
f">[Level {level}] Invalid response: {response} - "
f"number {number} out of range"
)
return response
# number is 1-indexed, so subtract 1
selected_node = cur_node_list[number - 1]
info_str = (
f">[Level {level}] Selected node: "
f"[{number}]/[{','.join([str(int(n)) for n in numbers])}]"
)
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
debug_str = " ".join(selected_node.get_text().splitlines())
full_debug_str = (
f">[Level {level}] Node "
f"[{number}] Summary text: "
f"{ selected_node.get_text() }"
)
logger.debug(full_debug_str)
if self._verbose:
print_text(full_debug_str, end="\n")
result_response = self._query_with_selected_node(
selected_node,
query_bundle,
prev_response=result_response,
level=level,
)
# result_response should not be None
return cast(str, result_response)
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
# NOTE: this overrides the _query method in the base class
info_str = f"> Starting query: {query_bundle.query_str}"
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
response_str = self._query_level(
self.index_struct.root_nodes,
query_bundle,
level=0,
).strip()
return Response(response_str, source_nodes=self.response_builder.get_sources())
| [] |
2024-01-10 | Nileshmalav/Mental-Health-Project | myenv~Lib~site-packages~llama_index~langchain_helpers~memory_wrapper.py | """Langchain memory wrapper (for LlamaIndex)."""
from typing import Any, Dict, List, Optional
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import AIMessage
from langchain.schema import BaseMemory as Memory
from langchain.schema import BaseMessage, HumanMessage
from pydantic import Field
from llama_index.indices.base import BaseGPTIndex
from llama_index.readers.schema.base import Document
from llama_index.utils import get_new_id
def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str:
"""Get prompt input key.
Copied over from langchain.
"""
# "stop" is a special key that can be passed as input but is not used to
# format the prompt.
prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
if len(prompt_input_keys) != 1:
raise ValueError(f"One input key expected got {prompt_input_keys}")
return prompt_input_keys[0]
class GPTIndexMemory(Memory):
"""Langchain memory wrapper (for LlamaIndex).
Args:
human_prefix (str): Prefix for human input. Defaults to "Human".
ai_prefix (str): Prefix for AI output. Defaults to "AI".
memory_key (str): Key for memory. Defaults to "history".
index (BaseGPTIndex): LlamaIndex instance.
query_kwargs (Dict[str, Any]): Keyword arguments for LlamaIndex query.
input_key (Optional[str]): Input key. Defaults to None.
output_key (Optional[str]): Output key. Defaults to None.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history"
index: BaseGPTIndex
query_kwargs: Dict = Field(default_factory=dict)
output_key: Optional[str] = None
input_key: Optional[str] = None
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
return prompt_input_key
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
prompt_input_key = self._get_prompt_input_key(inputs)
query_str = inputs[prompt_input_key]
# TODO: wrap in prompt
# TODO: add option to return the raw text
# NOTE: currently it's a hack
response = self.index.query(query_str, **self.query_kwargs)
return {self.memory_key: str(response)}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
prompt_input_key = self._get_prompt_input_key(inputs)
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
doc_text = "\n".join([human, ai])
doc = Document(text=doc_text)
self.index.insert(doc)
def clear(self) -> None:
"""Clear memory contents."""
pass
def __repr__(self) -> str:
"""Return representation."""
return "GPTIndexMemory()"
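# Illustrative usage (a sketch; the index construction below is hypothetical):
#   from llama_index import GPTSimpleVectorIndex
#   memory = GPTIndexMemory(
#       index=GPTSimpleVectorIndex([]),
#       memory_key="chat_history",
#       query_kwargs={"response_mode": "compact"},
#   )
# save_context inserts each human/AI exchange into the index as documents, and
# load_memory_variables answers later prompts by querying that index.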
class GPTIndexChatMemory(BaseChatMemory):
"""Langchain chat memory wrapper (for LlamaIndex).
Args:
human_prefix (str): Prefix for human input. Defaults to "Human".
ai_prefix (str): Prefix for AI output. Defaults to "AI".
memory_key (str): Key for memory. Defaults to "history".
index (BaseGPTIndex): LlamaIndex instance.
query_kwargs (Dict[str, Any]): Keyword arguments for LlamaIndex query.
input_key (Optional[str]): Input key. Defaults to None.
output_key (Optional[str]): Output key. Defaults to None.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history"
index: BaseGPTIndex
query_kwargs: Dict = Field(default_factory=dict)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_source: bool = False
id_to_message: Dict[str, BaseMessage] = Field(default_factory=dict)
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
return prompt_input_key
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
prompt_input_key = self._get_prompt_input_key(inputs)
query_str = inputs[prompt_input_key]
response_obj = self.index.query(query_str, **self.query_kwargs)
if self.return_source:
source_nodes = response_obj.source_nodes
if self.return_messages:
# get source messages from ids
source_ids = [sn.doc_id for sn in source_nodes]
source_messages = [
m for id, m in self.id_to_message.items() if id in source_ids
]
# NOTE: type List[BaseMessage]
response: Any = source_messages
else:
source_texts = [sn.source_text for sn in source_nodes]
response = "\n\n".join(source_texts)
else:
response = str(response_obj)
return {self.memory_key: response}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
prompt_input_key = self._get_prompt_input_key(inputs)
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
# a bit different than existing langchain implementation
# because we want to track id's for messages
human_message = HumanMessage(content=inputs[prompt_input_key])
human_message_id = get_new_id(set(self.id_to_message.keys()))
ai_message = AIMessage(content=outputs[output_key])
ai_message_id = get_new_id(
set(self.id_to_message.keys()).union({human_message_id})
)
self.chat_memory.messages.append(human_message)
self.chat_memory.messages.append(ai_message)
self.id_to_message[human_message_id] = human_message
self.id_to_message[ai_message_id] = ai_message
human_txt = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai_txt = f"{self.ai_prefix}: " + outputs[output_key]
human_doc = Document(text=human_txt, doc_id=human_message_id)
ai_doc = Document(text=ai_txt, doc_id=ai_message_id)
self.index.insert(human_doc)
self.index.insert(ai_doc)
def clear(self) -> None:
"""Clear memory contents."""
pass
def __repr__(self) -> str:
"""Return representation."""
return "GPTIndexMemory()"
| [] |
2024-01-10 | Nileshmalav/Mental-Health-Project | myenv~Lib~site-packages~llama_index~readers~weaviate~data_structs.py | """Weaviate-specific serializers for LlamaIndex data structures.
Contain conversion to and from dataclasses that LlamaIndex uses.
"""
import json
from abc import abstractmethod
from typing import Any, Dict, Generic, List, Optional, TypeVar, cast
from llama_index.data_structs.data_structs import IndexStruct, Node
from llama_index.readers.weaviate.utils import (
get_by_id,
parse_get_response,
validate_client,
)
from llama_index.utils import get_new_id
IS = TypeVar("IS", bound=IndexStruct)
class BaseWeaviateIndexStruct(Generic[IS]):
"""Base Weaviate index struct."""
@classmethod
@abstractmethod
def _class_name(cls, class_prefix: str) -> str:
"""Return class name."""
@classmethod
def _get_common_properties(cls) -> List[Dict]:
"""Get common properties."""
return [
{
"dataType": ["string"],
"description": "Text property",
"name": "text",
},
{
"dataType": ["string"],
"description": "Document id",
"name": "doc_id",
},
{
"dataType": ["string"],
"description": "extra_info (in JSON)",
"name": "extra_info",
},
]
@classmethod
@abstractmethod
def _get_properties(cls) -> List[Dict]:
"""Get properties specific to each index struct.
Used in creating schema.
"""
@classmethod
def _get_by_id(cls, client: Any, object_id: str, class_prefix: str) -> Dict:
"""Get entry by id."""
validate_client(client)
class_name = cls._class_name(class_prefix)
properties = cls._get_common_properties() + cls._get_properties()
prop_names = [p["name"] for p in properties]
entry = get_by_id(client, object_id, class_name, prop_names)
return entry
@classmethod
def create_schema(cls, client: Any, class_prefix: str) -> None:
"""Create schema."""
validate_client(client)
# first check if schema exists
schema = client.schema.get()
classes = schema["classes"]
existing_class_names = {c["class"] for c in classes}
# if schema already exists, don't create
class_name = cls._class_name(class_prefix)
if class_name in existing_class_names:
return
# get common properties
properties = cls._get_common_properties()
# get specific properties
properties.extend(cls._get_properties())
class_obj = {
"class": cls._class_name(class_prefix), # <= note the capital "A".
"description": f"Class for {class_name}",
"properties": properties,
}
client.schema.create_class(class_obj)
@classmethod
@abstractmethod
def _entry_to_llama_index(cls, entry: Dict) -> IS:
"""Convert to LlamaIndex list."""
@classmethod
def to_llama_index_list(
cls,
client: Any,
class_prefix: str,
vector: Optional[List[float]] = None,
object_limit: Optional[int] = None,
) -> List[IS]:
"""Convert to LlamaIndex list."""
validate_client(client)
class_name = cls._class_name(class_prefix)
properties = cls._get_common_properties() + cls._get_properties()
prop_names = [p["name"] for p in properties]
query = client.query.get(class_name, prop_names).with_additional(
["id", "vector"]
)
if vector is not None:
query = query.with_near_vector(
{
"vector": vector,
}
)
if object_limit is not None:
query = query.with_limit(object_limit)
query_result = query.do()
parsed_result = parse_get_response(query_result)
entries = parsed_result[class_name]
results: List[IS] = []
for entry in entries:
results.append(cls._entry_to_llama_index(entry))
return results
@classmethod
@abstractmethod
def _from_llama_index(
cls, client: Any, index: IS, class_prefix: str, batch: Optional[Any] = None
) -> str:
"""Convert from LlamaIndex."""
@classmethod
def from_llama_index(cls, client: Any, index: IS, class_prefix: str) -> str:
"""Convert from LlamaIndex."""
validate_client(client)
index_id = cls._from_llama_index(client, index, class_prefix)
client.batch.flush()
return index_id
class WeaviateNode(BaseWeaviateIndexStruct[Node]):
"""Weaviate node."""
@classmethod
def _class_name(cls, class_prefix: str) -> str:
"""Return class name."""
return f"{class_prefix}_Node"
@classmethod
def _get_properties(cls) -> List[Dict]:
"""Create schema."""
return [
{
"dataType": ["int"],
"description": "The index of the Node",
"name": "index",
},
{
"dataType": ["int[]"],
"description": "The child_indices of the Node",
"name": "child_indices",
},
{
"dataType": ["string"],
"description": "The ref_doc_id of the Node",
"name": "ref_doc_id",
},
{
"dataType": ["string"],
"description": "node_info (in JSON)",
"name": "node_info",
},
]
@classmethod
def _entry_to_llama_index(cls, entry: Dict) -> Node:
"""Convert to LlamaIndex list."""
extra_info_str = entry["extra_info"]
if extra_info_str == "":
extra_info = None
else:
extra_info = json.loads(extra_info_str)
node_info_str = entry["node_info"]
if node_info_str == "":
node_info = None
else:
node_info = json.loads(node_info_str)
return Node(
text=entry["text"],
doc_id=entry["doc_id"],
index=int(entry["index"]),
child_indices=entry["child_indices"],
ref_doc_id=entry["ref_doc_id"],
embedding=entry["_additional"]["vector"],
extra_info=extra_info,
node_info=node_info,
)
@classmethod
def _from_llama_index(
cls, client: Any, node: Node, class_prefix: str, batch: Optional[Any] = None
) -> str:
"""Convert from LlamaIndex."""
node_dict = node.to_dict()
vector = node_dict.pop("embedding")
extra_info = node_dict.pop("extra_info")
# json-serialize the extra_info
extra_info_str = ""
if extra_info is not None:
extra_info_str = json.dumps(extra_info)
node_dict["extra_info"] = extra_info_str
# json-serialize the node_info
node_info = node_dict.pop("node_info")
node_info_str = ""
if node_info is not None:
node_info_str = json.dumps(node_info)
node_dict["node_info"] = node_info_str
# TODO: account for existing nodes that are stored
node_id = get_new_id(set())
class_name = cls._class_name(class_prefix)
        # if batch object is provided (via a context manager), use that instead
if batch is not None:
batch.add_data_object(node_dict, class_name, node_id, vector)
else:
client.batch.add_data_object(node_dict, class_name, node_id, vector)
return node_id
@classmethod
def delete_document(cls, client: Any, ref_doc_id: str, class_prefix: str) -> None:
"""Delete entry."""
validate_client(client)
# make sure that each entry
class_name = cls._class_name(class_prefix)
where_filter = {
"path": ["ref_doc_id"],
"operator": "Equal",
"valueString": ref_doc_id,
}
query = (
client.query.get(class_name)
.with_additional(["id"])
.with_where(where_filter)
)
query_result = query.do()
parsed_result = parse_get_response(query_result)
entries = parsed_result[class_name]
for entry in entries:
client.data_object.delete(entry["_additional"]["id"], class_name)
@classmethod
def from_llama_index_batch(
cls, client: Any, nodes: List[Node], class_prefix: str
) -> List[str]:
"""Convert from gpt index."""
from weaviate import Client # noqa: F401
client = cast(Client, client)
validate_client(client)
index_ids = []
with client.batch as batch:
for node in nodes:
index_id = cls._from_llama_index(client, node, class_prefix, batch=batch)
index_ids.append(index_id)
return index_ids
| [] |
2024-01-10 | Nileshmalav/Mental-Health-Project | myenv~Lib~site-packages~llama_index~composability~graph.py | """Composability graphs."""
import json
from typing import Any, Dict, List, Optional, Type, Union
from llama_index.data_structs.data_structs import IndexStruct
from llama_index.data_structs.struct_type import IndexStructType
from llama_index.docstore import DocumentStore
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.base import BaseGPTIndex
from llama_index.indices.empty.base import GPTEmptyIndex
from llama_index.indices.keyword_table.base import GPTKeywordTableIndex
from llama_index.indices.knowledge_graph.base import GPTKnowledgeGraphIndex
from llama_index.indices.list.base import GPTListIndex
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.indices.query.query_runner import QueryRunner
from llama_index.indices.query.query_transform.base import BaseQueryTransform
from llama_index.indices.query.schema import QueryBundle, QueryConfig
from llama_index.indices.registry import IndexRegistry
from llama_index.indices.struct_store.sql import GPTSQLStructStoreIndex
from llama_index.indices.tree.base import GPTTreeIndex
from llama_index.indices.vector_store.base import GPTVectorStoreIndex
from llama_index.indices.vector_store.vector_indices import (
GPTChromaIndex,
GPTFaissIndex,
GPTPineconeIndex,
GPTQdrantIndex,
GPTSimpleVectorIndex,
GPTWeaviateIndex,
)
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.response.schema import Response
# TMP: refactor query config type
QUERY_CONFIG_TYPE = Union[Dict, QueryConfig]
# this is a map from type to outer index class
# we extract the type_to_struct and type_to_query
# fields from the index class
DEFAULT_INDEX_REGISTRY_MAP: Dict[IndexStructType, Type[BaseGPTIndex]] = {
IndexStructType.TREE: GPTTreeIndex,
IndexStructType.LIST: GPTListIndex,
IndexStructType.KEYWORD_TABLE: GPTKeywordTableIndex,
IndexStructType.SIMPLE_DICT: GPTSimpleVectorIndex,
IndexStructType.DICT: GPTFaissIndex,
IndexStructType.WEAVIATE: GPTWeaviateIndex,
IndexStructType.PINECONE: GPTPineconeIndex,
IndexStructType.QDRANT: GPTQdrantIndex,
IndexStructType.CHROMA: GPTChromaIndex,
IndexStructType.VECTOR_STORE: GPTVectorStoreIndex,
IndexStructType.SQL: GPTSQLStructStoreIndex,
IndexStructType.KG: GPTKnowledgeGraphIndex,
IndexStructType.EMPTY: GPTEmptyIndex,
}
def _get_default_index_registry() -> IndexRegistry:
"""Get default index registry."""
index_registry = IndexRegistry()
for index_type, index_class in DEFAULT_INDEX_REGISTRY_MAP.items():
index_registry.type_to_struct[index_type] = index_class.index_struct_cls
index_registry.type_to_query[index_type] = index_class.get_query_map()
return index_registry
def _safe_get_index_struct(
docstore: DocumentStore, index_struct_id: str
) -> IndexStruct:
"""Try get index struct."""
index_struct = docstore.get_document(index_struct_id)
if not isinstance(index_struct, IndexStruct):
raise ValueError("Invalid `index_struct_id` - must be an IndexStruct")
return index_struct
class ComposableGraph:
"""Composable graph."""
def __init__(
self,
docstore: DocumentStore,
index_registry: IndexRegistry,
index_struct: IndexStruct,
llm_predictor: Optional[LLMPredictor] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
chunk_size_limit: Optional[int] = None,
) -> None:
"""Init params."""
self._docstore = docstore
self._index_registry = index_registry
# this represents the "root" index struct
self._index_struct = index_struct
self._llm_predictor = llm_predictor or LLMPredictor()
self._prompt_helper = prompt_helper or PromptHelper.from_llm_predictor(
self._llm_predictor, chunk_size_limit=chunk_size_limit
)
self._embed_model = embed_model or OpenAIEmbedding()
@classmethod
    def build_from_index(cls, index: BaseGPTIndex) -> "ComposableGraph":
        """Build from index."""
        return cls(
index.docstore,
index.index_registry,
# this represents the "root" index struct
index.index_struct,
llm_predictor=index.llm_predictor,
prompt_helper=index.prompt_helper,
embed_model=index.embed_model,
)
def query(
self,
query_str: Union[str, QueryBundle],
query_configs: Optional[List[QUERY_CONFIG_TYPE]] = None,
llm_predictor: Optional[LLMPredictor] = None,
query_transform: Optional[BaseQueryTransform] = None,
) -> Response:
"""Query the index."""
# go over all the indices and create a registry
llm_predictor = llm_predictor or self._llm_predictor
query_runner = QueryRunner(
llm_predictor,
self._prompt_helper,
self._embed_model,
self._docstore,
self._index_registry,
query_configs=query_configs,
query_transform=query_transform,
recursive=True,
)
return query_runner.query(query_str, self._index_struct)
async def aquery(
self,
query_str: Union[str, QueryBundle],
query_configs: Optional[List[QUERY_CONFIG_TYPE]] = None,
llm_predictor: Optional[LLMPredictor] = None,
query_transform: Optional[BaseQueryTransform] = None,
) -> Response:
"""Query the index."""
# go over all the indices and create a registry
llm_predictor = llm_predictor or self._llm_predictor
query_runner = QueryRunner(
llm_predictor,
self._prompt_helper,
self._embed_model,
self._docstore,
self._index_registry,
query_configs=query_configs,
query_transform=query_transform,
recursive=True,
)
return await query_runner.aquery(query_str, self._index_struct)
def get_index(
self, index_struct_id: str, index_cls: Type[BaseGPTIndex], **kwargs: Any
) -> BaseGPTIndex:
"""Get index."""
index_struct = _safe_get_index_struct(self._docstore, index_struct_id)
return index_cls(
index_struct=index_struct,
docstore=self._docstore,
index_registry=self._index_registry,
**kwargs
)
@classmethod
def load_from_string(cls, index_string: str, **kwargs: Any) -> "ComposableGraph":
"""Load index from string (in JSON-format).
This method loads the index from a JSON string. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
Args:
            index_string (str): The index string (in JSON format).
        Returns:
            ComposableGraph: The loaded graph.
"""
result_dict = json.loads(index_string)
# TODO: this is hardcoded for now, allow it to be specified by the user
index_registry = _get_default_index_registry()
docstore = DocumentStore.load_from_dict(
result_dict["docstore"], index_registry.type_to_struct
)
index_struct = _safe_get_index_struct(docstore, result_dict["index_struct_id"])
return cls(docstore, index_registry, index_struct, **kwargs)
@classmethod
def load_from_disk(cls, save_path: str, **kwargs: Any) -> "ComposableGraph":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
Args:
save_path (str): The save_path of the file.
Returns:
            ComposableGraph: The loaded graph.
"""
with open(save_path, "r") as f:
file_contents = f.read()
return cls.load_from_string(file_contents, **kwargs)
def save_to_string(self, **save_kwargs: Any) -> str:
"""Save to string.
        This method serializes the index into a JSON string.
"""
out_dict: Dict[str, Any] = {
"index_struct_id": self._index_struct.get_doc_id(),
"docstore": self._docstore.serialize_to_dict(),
}
return json.dumps(out_dict)
def save_to_disk(self, save_path: str, **save_kwargs: Any) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
Args:
save_path (str): The save_path of the file.
"""
index_string = self.save_to_string(**save_kwargs)
with open(save_path, "w") as f:
f.write(index_string)
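# --- Illustrative usage sketch (assumption: "graph.json" is a hypothetical path;
# `graph` is a ComposableGraph built earlier, e.g. via build_from_index) ---
def _example_save_load_query(graph: "ComposableGraph") -> None:
    """Round-trip a composed graph through disk and run a query on the copy."""
    graph.save_to_disk("graph.json")
    restored = ComposableGraph.load_from_disk("graph.json")
    print(restored.query("Summarize the indexed documents."))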
| [] |
2024-01-10 | RaymondAlvin/rag_demo | processing.py | import re
from io import BytesIO
from typing import Tuple, List
import pickle
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from pypdf import PdfReader
import faiss
def parse_pdf(file: BytesIO, filename: str) -> Tuple[List[str], str]:
pdf = PdfReader(file)
output = []
for page in pdf.pages:
text = page.extract_text()
text = re.sub(r"(\w+)-\n(\w+)", r"\1\2", text)
text = re.sub(r"(?<!\n\s)\n(?!\s\n)", " ", text.strip())
text = re.sub(r"\n\s*\n", "\n\n", text)
output.append(text)
return output, filename
def text_to_docs(text: List[str], filename: str) -> List[Document]:
if isinstance(text, str):
text = [text]
page_docs = [Document(page_content=page) for page in text]
for i, doc in enumerate(page_docs):
doc.metadata["page"] = i + 1
doc_chunks = []
for doc in page_docs:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=4000,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=0,
)
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
)
doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
doc.metadata["filename"] = filename # Add filename to metadata
doc_chunks.append(doc)
return doc_chunks
def docs_to_index(docs, openai_api_key):
index = FAISS.from_documents(docs, OpenAIEmbeddings(openai_api_key=openai_api_key))
return index
def get_index_for_pdf(pdf_files, pdf_names, openai_api_key):
documents = []
for pdf_file, pdf_name in zip(pdf_files, pdf_names):
text, filename = parse_pdf(BytesIO(pdf_file), pdf_name)
documents = documents + text_to_docs(text, filename)
index = docs_to_index(documents, openai_api_key)
return index
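# --- Illustrative usage sketch (assumption: "sample.pdf" and "openai_key.txt"
# are hypothetical local files) ---
def _example_build_and_search():
    """Build a FAISS index from one PDF and run a quick similarity search."""
    api_key = get_api_key("openai_key.txt")
    with open("sample.pdf", "rb") as f:
        index = get_index_for_pdf([f.read()], ["sample.pdf"], api_key)
    return index.similarity_search("What is this document about?", k=2)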
def get_api_key(file_path):
with open(file_path, 'r') as file:
return file.read().strip() | [] |
2024-01-10 | ssuryansh164/ForensicatorCybertec164-bot | ChatGPT-Telegram-Bot-main~cybertec164.py | from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from datetime import datetime
import json, os, string, sys, threading, logging, time, re, random
import openai
#OpenAI API key
aienv = os.getenv('OPENAI_KEY')
if aienv == None:
openai.api_key = "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"
else:
openai.api_key = aienv
print(aienv)
#Telegram bot key
tgenv = os.getenv('TELEGRAM_KEY')
if tgenv == None:
tgkey = "6625545283:AAEVDG6K1q5SphQcsrlH8JHjuuqd8c63hqk"
else:
tgkey = tgenv
print(tgenv)
# Lots of console output
debug = True
# User Session timeout
timstart = 300
tim = 1
#Defaults
user = "cybertec164"
running = False
cache = None
qcache = None
chat_log = None
botname = 'cybertec164'
username='ForensicatorCybertec164'
# Max chat log length (A token is about 4 letters and max tokens is 2048)
max = int(3000)
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
completion = openai.Completion()
##################
#Command handlers#
##################
def start(bot, update):
"""Send a message when the command /start is issued."""
global chat_log
global qcache
global cache
global tim
global botname
global username
left = str(tim)
if tim == 1:
chat_log = None
cache = None
qcache = None
botname = 'ForensicatorCybertec164'
username = 'cybertec164'
update.message.reply_text('Hi')
return
else:
update.message.reply_text('I am currently talking to someone else. Can you please wait ' + left + ' seconds?')
return
def help(bot, update):
"""Send a message when the command /help is issued."""
update.message.reply_text('[/reset] resets the conversation,\n [/retry] retries the last output,\n [/username name] sets your name to the bot, default is "Human",\n [/botname name] sets the bots character name, default is "AI"')
def reset(bot, update):
"""Send a message when the command /reset is issued."""
global chat_log
global cache
global qcache
global tim
global botname
global username
left = str(tim)
if user == update.message.from_user.id:
chat_log = None
cache = None
qcache = None
botname = 'ForensicatorCybertec164'
username = 'cybertec164'
update.message.reply_text('Bot has been reset, send a message!')
return
if tim == 1:
chat_log = None
cache = None
qcache = None
botname = 'ForensicatorCybertec164'
username = 'cybertec164'
update.message.reply_text('Bot has been reset, send a message!')
return
else:
update.message.reply_text('I am currently talking to someone else. Can you please wait ' + left + ' seconds?')
return
def retry(bot, update):
"""Send a message when the command /retry is issued."""
global chat_log
global cache
global qcache
global tim
global botname
global username
left = str(tim)
if user == update.message.from_user.id:
new = True
comput = threading.Thread(target=wait, args=(bot, update, botname, username, new,))
comput.start()
return
if tim == 1:
chat_log = None
cache = None
qcache = None
botname = 'ForensicatorCybertec164'
username = 'cybertec164'
update.message.reply_text('Send a message!')
return
else:
update.message.reply_text('I am currently talking to someone else. Can you please wait ' + left + ' seconds?')
return
def runn(bot, update):
"""Send a message when a message is received."""
new = False
global botname
global username
if "/botname " in update.message.text:
try:
string = update.message.text
charout = string.split("/botname ",1)[1]
botname = charout
response = "The bot character name set to: " + botname
update.message.reply_text(response)
except Exception as e:
update.message.reply_text(e)
return
if "/username " in update.message.text:
try:
string = update.message.text
userout = string.split("/username ",1)[1]
username = userout
response = "Your character name set to: " + username
update.message.reply_text(response)
except Exception as e:
update.message.reply_text(e)
return
else:
comput = threading.Thread(target=interact, args=(bot, update, botname, username, new,))
comput.start()
def wait(bot, update, botname, username, new):
global user
global chat_log
global cache
global qcache
global tim
global running
if user == "":
user = update.message.from_user.id
if user == update.message.from_user.id:
tim = timstart
compute = threading.Thread(target=interact, args=(bot, update, botname, username, new,))
compute.start()
if running == False:
while tim > 1:
running = True
time.sleep(1)
tim = tim - 1
if running == True:
chat_log = None
cache = None
qcache = None
user = ""
username = 'ForensicatorCybertec164'
botname = 'cybertec164'
update.message.reply_text('Timer has run down, bot has been reset to defaults.')
running = False
else:
left = str(tim)
update.message.reply_text('I am currently talking to someone else. Can you please wait ' + left + ' seconds?')
################
#Main functions#
################
def limit(text, max):
if (len(text) >= max):
inv = max * 10
print("Reducing length of chat history... This can be a bit buggy.")
nl = text[inv:]
text = re.search(r'(?<=\n)[\s\S]*', nl).group(0)
return text
else:
return text
def ask(username, botname, question, chat_log=None):
if chat_log is None:
chat_log = 'The following is a chat between two users:\n\n'
now = datetime.now()
ampm = now.strftime("%I:%M %p")
t = '[' + ampm + '] '
prompt = f'{chat_log}{t}{username}: {question}\n{t}{botname}:'
response = completion.create(
prompt=prompt, engine="text-curie-001", stop=['\n'], temperature=0.7,
top_p=1, frequency_penalty=0, presence_penalty=0.6, best_of=3,
max_tokens=500)
answer = response.choices[0].text.strip()
return answer
# fp = 15 pp= 1 top_p = 1 temp = 0.9
def append_interaction_to_chat_log(username, botname, question, answer, chat_log=None):
if chat_log is None:
chat_log = 'The following is a chat between two users:\n\n'
chat_log = limit(chat_log, max)
now = datetime.now()
ampm = now.strftime("%I:%M %p")
t = '[' + ampm + '] '
return f'{chat_log}{t}{username}: {question}\n{t}{botname}: {answer}\n'
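# Example of the rolling prompt these two helpers maintain (times and names are
# illustrative placeholders):
#
#   The following is a chat between two users:
#
#   [09:15 AM] <username>: hello there
#   [09:15 AM] <botname>: Hi! How can I help you today?
#   [09:16 AM] <username>: <next question>
#   [09:16 AM] <botname>:
#
# ask() sends everything up to the trailing "<botname>:" marker so the completion
# endpoint fills in the reply; append_interaction_to_chat_log() then stores the
# finished question/answer pair for the next turn, trimmed by limit() once the
# log grows past `max` characters.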
def interact(bot, update, botname, username, new):
global chat_log
global cache
global qcache
print("==========START==========")
tex = update.message.text
text = str(tex)
analyzer = SentimentIntensityAnalyzer()
if new != True:
vs = analyzer.polarity_scores(text)
if debug == True:
print("Sentiment of input:\n")
print(vs)
if vs['neg'] > 1:
update.message.reply_text('Can we talk something else?')
return
if new == True:
if debug == True:
print("Chat_LOG Cache is...")
print(cache)
print("Question Cache is...")
print(qcache)
chat_log = cache
question = qcache
if new != True:
question = text
qcache = question
cache = chat_log
#update.message.reply_text('Computing...')
try:
answer = ask(username, botname, question, chat_log)
if debug == True:
print("Input:\n" + question)
print("Output:\n" + answer)
print("====================")
stripes = answer.encode(encoding=sys.stdout.encoding,errors='ignore')
decoded = stripes.decode("utf-8")
out = str(decoded)
vs = analyzer.polarity_scores(out)
if debug == True:
print("Sentiment of output:\n")
print(vs)
if vs['neg'] > 1:
update.message.reply_text('I do not think I could provide you a good answer for this. Use /retry to get positive output.')
return
update.message.reply_text(out)
chat_log = append_interaction_to_chat_log(username, botname, question, answer, chat_log)
if debug == True:
#### Print the chat log for debugging
print('-----PRINTING CHAT LOG-----')
print(chat_log)
print('-----END CHAT LOG-----')
except Exception as e:
print(e)
errstr = str(e)
update.message.reply_text(errstr)
def error(bot, update):
"""Log Errors caused by Updates."""
    # logger.warning('Update "%s" caused error "%s"', update, error)
| [
"PLACEHOLDERPLACEHOLDERPLACEHOLDER: PLACEHOLDER\nPLACEHOLDERPLACEHOLDER:"
] |
2024-01-10 | aruna-x/weather-to-wear | app~controllers.py | from flask import Blueprint, render_template, jsonify, request
from dotenv import load_dotenv
from app.services.weather import Weather
from app.services.openai import Openai
main = Blueprint('main', __name__)
load_dotenv()
@main.route('/')
def index():
return render_template('main.html')
@main.route('/rec', methods = ['POST', 'GET'])
def get_rec():
try:
city = request.get_json()['city']
weather = Weather()
resp = weather.get_weather(city)
if resp.status_code == 200:
weather = resp.json()
openai = Openai()
rec = openai.get_chat_rec(weather)
return jsonify(rec.choices[0].message.content)
else:
print(f'Danger Will Robinson: {resp.status_code}')
return 'Oops, there was an error!', resp.status_code
except Exception as e:
print('Oops, there was an error with external api calls!', e)
return 'Oops, there was an error with external api calls!', 500
| [] |
2024-01-10 | kingbased/keychecker | main.py | from time import sleep
from Anthropic import check_anthropic, pretty_print_anthropic_keys
from IO import IO
from OpenAI import get_oai_model, get_oai_key_attribs, get_oai_org, pretty_print_oai_keys
from AI21 import check_ai21, pretty_print_ai21_keys
from MakerSuite import check_makersuite, pretty_print_makersuite_keys
from AWS import check_aws, pretty_print_aws_keys
from Azure import check_azure, pretty_print_azure_keys
from VertexAI import check_vertexai, pretty_print_vertexai_keys
from Mistral import check_mistral, pretty_print_mistral_keys
from APIKey import APIKey, Provider
from concurrent.futures import ThreadPoolExecutor, as_completed
import sys
from datetime import datetime
import re
import argparse
import os.path
api_keys = set()
def parse_args():
parser = argparse.ArgumentParser(description='slop checker')
parser.add_argument('-nooutput', '--nooutput', action='store_true', help='stop writing slop to a file')
parser.add_argument('-proxyoutput', '--proxyoutput', action='store_true', help='proxy format output for easy copying')
parser.add_argument('-file', '--file', action='store', dest='file', help='read slop from a provided filename')
return parser.parse_args()
args = parse_args()
inputted_keys = set()
if args.file:
inputted_keys = IO.read_keys_from_file(args.file)
if inputted_keys is None:
sys.exit(1)
else:
print('Enter API keys (OpenAI/Anthropic/AI21/MakerSuite/AWS/Azure/Mistral) one per line. Press Enter on a blank line to start validation')
print('Expected format for AWS keys is accesskey:secret, for Azure keys it\'s resourcegroup:apikey. For Vertex AI keys the absolute path to the secrets key file is expected in quotes. "/path/to/secrets.json"')
while True:
current_line = input()
if not current_line:
print("Starting validation...")
break
inputted_keys.add(current_line.strip().split()[0].split(",")[0])
def validate_openai(key: APIKey):
if get_oai_model(key) is None:
return
if get_oai_key_attribs(key) is None:
return
if get_oai_org(key) is None:
return
api_keys.add(key)
def validate_anthropic(key: APIKey, retry_count):
key_status = check_anthropic(key)
if key_status is None:
return
elif key_status is False:
i = 0
while check_anthropic(key) is False and i < retry_count:
i += 1
sleep(1)
print(f"Stuck determining pozzed status of rate limited Anthropic key '{key.api_key[-8:]}' - attempt {i} of {retry_count}")
key.rate_limited = True
else:
if i < retry_count:
key.rate_limited = False
api_keys.add(key)
def validate_ai21_and_mistral(key: APIKey):
if check_ai21(key) is None:
key.provider = Provider.MISTRAL
if check_mistral(key) is None:
return
api_keys.add(key)
def validate_makersuite(key: APIKey):
if check_makersuite(key) is None:
return
api_keys.add(key)
def validate_aws(key: APIKey):
if check_aws(key) is None:
return
api_keys.add(key)
def validate_azure(key: APIKey):
if check_azure(key) is None:
return
api_keys.add(key)
def validate_vertexai(key: APIKey):
if check_vertexai(key) is None:
return
api_keys.add(key)
oai_regex = re.compile('(sk-[A-Za-z0-9]{20}T3BlbkFJ[A-Za-z0-9]{20})')
anthropic_regex = re.compile(r'sk-ant-api03-[A-Za-z0-9\-_]{93}AA')
ai21_and_mistral_regex = re.compile('[A-Za-z0-9]{32}')
makersuite_regex = re.compile(r'AIzaSy[A-Za-z0-9\-_]{33}')
aws_regex = re.compile(r'^(AKIA[0-9A-Z]{16}):([A-Za-z0-9+/]{40})$')
azure_regex = re.compile(r'^(.+):([a-z0-9]{32})$')
# vertex_regex = re.compile(r'^(.+):(ya29.[A-Za-z0-9\-_]{469})$') regex for the oauth tokens, useless since they expire hourly
executor = ThreadPoolExecutor(max_workers=100)
def validate_keys():
futures = []
for key in inputted_keys:
if '"' in key[:1]:
key = key.strip('"')
if not os.path.isfile(key):
continue
key_obj = APIKey(Provider.VERTEXAI, key)
futures.append(executor.submit(validate_vertexai, key_obj))
elif "ant-api03" in key:
match = anthropic_regex.match(key)
if not match:
continue
key_obj = APIKey(Provider.ANTHROPIC, key)
futures.append(executor.submit(validate_anthropic, key_obj, 20))
elif "AIzaSy" in key[:6]:
match = makersuite_regex.match(key)
if not match:
continue
key_obj = APIKey(Provider.MAKERSUITE, key)
futures.append(executor.submit(validate_makersuite, key_obj))
elif "sk-" in key:
match = oai_regex.match(key)
if not match:
continue
key_obj = APIKey(Provider.OPENAI, key)
futures.append(executor.submit(validate_openai, key_obj))
elif ":" and "AKIA" in key:
match = aws_regex.match(key)
if not match:
continue
key_obj = APIKey(Provider.AWS, key)
futures.append(executor.submit(validate_aws, key_obj))
elif ":" in key and "AKIA" not in key:
match = azure_regex.match(key)
if not match:
continue
key_obj = APIKey(Provider.AZURE, key)
futures.append(executor.submit(validate_azure, key_obj))
else:
match = ai21_and_mistral_regex.match(key)
if not match:
continue
key_obj = APIKey(Provider.AI21, key)
futures.append(executor.submit(validate_ai21_and_mistral, key_obj))
for _ in as_completed(futures):
pass
futures.clear()
def get_invalid_keys(valid_oai_keys, valid_anthropic_keys, valid_ai21_keys, valid_makersuite_keys, valid_aws_keys, valid_azure_keys, valid_vertexai_keys, valid_mistral_keys):
valid_oai_keys_set = set([key.api_key for key in valid_oai_keys])
valid_anthropic_keys_set = set([key.api_key for key in valid_anthropic_keys])
valid_ai21_keys_set = set([key.api_key for key in valid_ai21_keys])
valid_makersuite_keys_set = set([key.api_key for key in valid_makersuite_keys])
valid_aws_keys_set = set([key.api_key for key in valid_aws_keys])
valid_azure_keys_set = set([key.api_key for key in valid_azure_keys])
valid_vertexai_keys_set = set([key.api_key for key in valid_vertexai_keys])
valid_mistral_keys_set = set([key.api_key for key in valid_mistral_keys])
invalid_keys = inputted_keys - valid_oai_keys_set - valid_anthropic_keys_set - valid_ai21_keys_set - valid_makersuite_keys_set - valid_aws_keys_set - valid_azure_keys_set - valid_vertexai_keys_set - valid_mistral_keys_set
if len(invalid_keys) < 1:
return
print('\nInvalid Keys:')
for key in invalid_keys:
print(key)
def output_keys():
should_write = not args.nooutput and not args.proxyoutput
validate_keys()
valid_oai_keys = []
valid_anthropic_keys = []
valid_ai21_keys = []
valid_makersuite_keys = []
valid_aws_keys = []
valid_azure_keys = []
valid_vertexai_keys = []
valid_mistral_keys = []
for key in api_keys:
if key.provider == Provider.OPENAI:
valid_oai_keys.append(key)
elif key.provider == Provider.ANTHROPIC:
valid_anthropic_keys.append(key)
elif key.provider == Provider.AI21:
valid_ai21_keys.append(key)
elif key.provider == Provider.MAKERSUITE:
valid_makersuite_keys.append(key)
elif key.provider == Provider.AWS:
valid_aws_keys.append(key)
elif key.provider == Provider.AZURE:
valid_azure_keys.append(key)
elif key.provider == Provider.VERTEXAI:
valid_vertexai_keys.append(key)
elif key.provider == Provider.MISTRAL:
valid_mistral_keys.append(key)
if should_write:
output_filename = "key_snapshots.txt"
sys.stdout = IO(output_filename)
if not args.proxyoutput:
print("#" * 90)
print(f"Key snapshot from {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
print("#" * 90)
print(f'\n--- Checked {len(inputted_keys)} keys | {len(inputted_keys) - len(api_keys)} were invalid ---')
get_invalid_keys(valid_oai_keys, valid_anthropic_keys, valid_ai21_keys, valid_makersuite_keys, valid_aws_keys, valid_azure_keys, valid_vertexai_keys, valid_mistral_keys)
print()
if valid_oai_keys:
pretty_print_oai_keys(valid_oai_keys)
if valid_anthropic_keys:
pretty_print_anthropic_keys(valid_anthropic_keys)
if valid_ai21_keys:
pretty_print_ai21_keys(valid_ai21_keys)
if valid_makersuite_keys:
pretty_print_makersuite_keys(valid_makersuite_keys)
if valid_aws_keys:
pretty_print_aws_keys(valid_aws_keys)
if valid_azure_keys:
pretty_print_azure_keys(valid_azure_keys)
if valid_vertexai_keys:
pretty_print_vertexai_keys(valid_vertexai_keys)
if valid_mistral_keys:
pretty_print_mistral_keys(valid_mistral_keys)
else:
# ai21 and vertex keys aren't supported in proxies so no point outputting them, filtered azure keys should be excluded.
print("OPENAI_KEY=" + ','.join(key.api_key for key in valid_oai_keys))
print("ANTHROPIC_KEY=" + ','.join(key.api_key for key in valid_anthropic_keys))
print("AWS_CREDENTIALS=" + ','.join(f"{key.api_key}:{key.region}" for key in valid_aws_keys if not key.useless))
print("GOOGLE_AI_KEY=" + ','.join(key.api_key for key in valid_makersuite_keys))
print("AZURE_CREDENTIALS=" + ','.join(f"{key.api_key.split(':')[0]}:{key.best_deployment}:{key.api_key.split(':')[1]}" for key in valid_azure_keys if key.unfiltered))
print("MISTRAL_AI_KEY=" + ','.join(key.api_key for key in valid_mistral_keys))
if should_write:
sys.stdout.file.close()
if __name__ == "__main__":
output_keys()
| [] |
2024-01-10 | vinay107-vini/talk-with-docs | handler~predict.py | from langchain.llms import OpenAIChat
from dotenv import load_dotenv
from constants import *
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma, FAISS
from langchain.text_splitter import CharacterTextSplitter
from langchain import OpenAI, VectorDBQA
from langchain.document_loaders import DirectoryLoader
from langchain.chains.question_answering import load_qa_chain
import os
import nltk
import time
import openai
MAX_RETRIES = 3
INITIAL_WAIT_TIME = 2 # seconds
ROOT_PATH = os.path.abspath('')
ENV = os.path.join(ROOT_PATH, '.env')
load_dotenv(ENV)
api_key=os.getenv('openai_api_key')
llm = OpenAIChat(model_name='gpt-3.5-turbo', openai_api_key=api_key )
def answer(txt_dir_path, uuid, question):
try:
loader = DirectoryLoader(txt_dir_path, glob='**/*.txt')
docs = loader.load()
char_text_splitter = CharacterTextSplitter(chunk_size= 1000, chunk_overlap=0)
doc_text = char_text_splitter.split_documents(docs)
openai_embeddings = OpenAIEmbeddings(openai_api_key=api_key)
if not os.path.exists(f'{pwd}/file_upload/vectors'):
os.mkdir(f'{pwd}/file_upload/vectors')
if not os.path.exists(f'{pwd}/file_upload/vectors/{uuid}'):
os.mkdir(f'{pwd}/file_upload/vectors/{uuid}')
vStore = Chroma.from_documents(doc_text, openai_embeddings, persist_directory=f'{pwd}/file_upload/vectors/{uuid}')
model = VectorDBQA.from_chain_type(llm=llm, chain_type='stuff', vectorstore=vStore)
answer = model.run(question)
return answer
except Exception as ex:
print({"message":"exception in answer","status":"failed","reason":ex})
return "failed"
def generate_response(prompt):
retry_count = 0
wait_time = INITIAL_WAIT_TIME
while retry_count < MAX_RETRIES:
try:
            response = openai.Completion.create(
                engine="text-davinci-003",
                prompt=prompt,
                max_tokens=1024,
                n=1,
                stop=None,
                temperature=0.7,
            )
return response.choices[0].text.strip()
except openai.error.RateLimitError as e:
print(f"Rate limit exceeded. Waiting for {wait_time} seconds...")
time.sleep(wait_time)
retry_count += 1
wait_time *= 2 # exponential backoff
raise Exception("Maximum retries exceeded. Could not generate response.")
def chatpdf(text_path,question=""):
"""
Returns the response from ChatGPT API based on the user input.
Parameters:
text_path (str): text file path.
question (str): user input text
Returns:
answer (list): title and the answer
questions (list) : list of questions
"""
try:
with open(text_path,"r") as txt_file:
raw_text = txt_file.read()
text_splitter = CharacterTextSplitter(separator="\n", chunk_size= 1000, chunk_overlap=0, length_function = len )
texts = text_splitter.split_text(raw_text)
openai_embeddings = OpenAIEmbeddings(openai_api_key=api_key)
docsearch = FAISS.from_texts(texts, openai_embeddings)
chain = load_qa_chain(OpenAI(openai_api_key=api_key), chain_type="stuff")
if not question:
query = 'what were the four questions or prompts that can be asked from existing content for better insights'
docs = docsearch.similarity_search(query)
result = chain.run(input_documents=docs, question=query)
result_list = result.split('\n')
result_list = [x for x in result_list if x]
print(result_list)
value = result_list[0]
final = []
final_dict = {}
newval = value + "I need this question as title"
final_dict.update({ "title": generate_response(newval) } )
docs = docsearch.similarity_search(value)
final_dict.update({"answer" : chain.run(input_documents = docs, question = value)} )
final.append(final_dict)
else:
result_list = []
docs = docsearch.similarity_search(question)
result = chain.run(input_documents=docs, question=question)
final = result
return { "answer": final , "questions": result_list[1:4] }
except Exception as ex:
return {"message":"exception in answer", "status":"failed", "reason":ex} | [] |
2024-01-10 | lasanthasuresh/JobHelper | 4%20-%20extrac-to-html.py | import openai
import shutil
import datetime
from common import load_file_text, write_file
cover_letters = load_file_text('output/0-cover-letters.txt')
score_response = load_file_text('output/3-cover-scores.txt')
cover_letters = cover_letters.split('<---------------------------------->')
score_response = score_response.split('<---------------------------------->')
# create a new file and write the contents to it
with open('output/4-cover-letters-with-scores.html', 'w') as file:
for i in range(len(cover_letters)):
file.write(f'<h2>Cover Letter {i+1}:</h2><div>')
file.write(f'{cover_letters[i]}')
file.write('</div><br><div>')
file.write(f'{score_response[i]}')
file.write('</div><hr>')
file.write('<br>')
print('Cover letters with scores saved to output/4-cover-letters-with-scores.html')
# prompt the user for a job name
job_name = input('Enter a job name (leave blank for timestamp): ').strip()
# use the timestamp as the default answer if the user input is empty
if not job_name:
job_name = datetime.datetime.now().strftime('%y%m%d%H%M%S')
shutil.copy('output/4-cover-letters-with-scores.html', f'output/htmls/{job_name}.html')
print(f'Cover letters with scores saved to output/htmls/{job_name}.html')
# open the HTML file in the default web browser
import webbrowser
import os
webbrowser.open(f'file://{os.path.abspath(f"output/htmls/{job_name}.html")}', new=2)
| [] |
2024-01-10 | lasanthasuresh/JobHelper | 3-doAllAuto.py | import openai
import shutil
import datetime
import os
from common import load_file_text, write_file
openai.api_key = os.environ.get('OPENAI_API_KEY')
messages="""
read my experience below. 
\n
""" + load_file_text("experiance.txt") + "\n" + """
--------------------------------------------------------------\n
following is the job posting I am trying to apply for. \n
""" + load_file_text("inputs/1-job.txt") + "\n" + """
--------------------------------------------------------------\n
write me three cover letters for this job posting.
- if you can find the name of the hiring manager, please use it; if not, address the team at the company.
- do not mention C# unless it is required in the job posting.
- do not mention React unless it is required in the job posting.
- the cover letter will be submitted via a web form.
- split each your coverletter for a letter by a '<---------------------------------->'
IMPORTANT: Make sure you do not lie about my skills. Unless I have mentioned a skill in my experience, do not say I have it.
"""
file = 'output/1-prompt-for-cover-letters.txt'
write_file(file,messages)
print( 'asking for cover letters. file printed to ' + file)
response = openai.ChatCompletion.create(
model="gpt-4",
max_tokens=5000,
temperature=1,
messages = [{'role':'user', 'content':load_file_text('output/1-prompt-for-cover-letters.txt')}]
)
# print(response)
print( 'asking for cover letters. response printed to inputs/3-cover-letters.txt')
write_file('inputs/3-cover-letters.txt', response.choices[0].message.content)
write_file('output/0-cover-letters.txt', response.choices[0].message.content)
cover_letters = response.choices[0].message.content
prompt = """
your name is Jan and you are a hiring manager.
You are hiring for the following position. """ + load_file_text("inputs/1-job.txt") + """
\n------------------------------\n
evaluate following cover letters for the position.
give a score and justification. split each your justification for a letter by a '<---------------------------------->' \n\n """ + load_file_text("inputs/3-cover-letters.txt")
filename = 'output/2-prompt-for-scoring.txt'
write_file(filename, prompt)
print( 'asking for cover letters scoring. file printed to ' + filename)
response = openai.ChatCompletion.create(
model="gpt-4",
max_tokens=4000,
temperature=1,
messages = [{'role':'user', 'content':load_file_text('output/2-prompt-for-scoring.txt')}]
)
# print(response)
print( 'got answer for cover letters scoring. file printed to output/3-cover-scores.txt')
write_file('output/3-cover-scores.txt', response.choices[0].message.content)
score_response = response.choices[0].message.content
cover_letters = cover_letters.split('----------------------------------')
score_response = score_response.split('----------------------------------')
# create a new file and write the contents to it
with open('output/4-cover-letters-with-scores.html', 'w') as file:
for i in range(len(cover_letters)):
file.write(f'<h2>Cover Letter {i+1}:</h2>')
        file.write(f'<p>{cover_letters[i]}</p>')
        file.write(f'<p>Score and justification: {score_response[i]}</p>')
file.write('<hr>')
print('Cover letters with scores saved to output/4-cover-letters-with-scores.html')
# prompt the user for a job name
job_name = input('Enter a job name (leave blank for timestamp): ').strip()
# use the timestamp as the default answer if the user input is empty
if not job_name:
job_name = datetime.datetime.now().strftime('%y%m%d%H%M%S')
shutil.copy('output/4-cover-letters-with-scores.html', f'output/htmls/{job_name}.html')
print(f'Cover letters with scores saved to output/htmls/{job_name}.html')
# open the HTML file in the default web browser
import webbrowser
import os
webbrowser.open(f'file://{os.path.abspath(f"output/htmls/{job_name}.html")}', new=2)
| [
"output/2-prompt-for-scoring.txt",
"inputs/1-job.txt",
"\nyour name is Jan and you are a hiring manager. \nYou are hiring for the following position. ",
"output/1-prompt-for-cover-letters.txt",
"inputs/3-cover-letters.txt",
"\n\n------------------------------\n\nevaluate following cover letters for the position.\ngive a score and justification. split each your justification for a letter by a '<---------------------------------->' \n\n "
] |
2024-01-10 | orlsonjoseph/ai-assisted-editing | document~views~controller~include~request.py | from django.conf import settings
from .prompts import PROMPT_LIBRARY
import openai
openai.api_key = settings.OPENAI_API_KEY
def generate_request(content, endpoint):
if endpoint not in PROMPT_LIBRARY.keys():
raise Exception("Invalid endpoint")
prompt = PROMPT_LIBRARY[endpoint].format(sentence=content)
return openai.Completion.create(
prompt=prompt,
# Model
model=settings.MODEL,
# Parameters
temperature=settings.TEMPERATURE,
max_tokens=settings.MAX_TOKENS,
top_p=settings.TOP_P,
frequency_penalty=settings.FREQUENCY_PENALTY,
presence_penalty=settings.PRESENCE_PENALTY,
n=settings.SAMPLES,
)
| [] |
2024-01-10 | jalal-82/News-TradingBot | gpt_analyse.py | import requests
import json
import openai
import Keps
openai.api_key = Keps.gpt_api_key
def analyze_news(some_news):
URL = "https://api.openai.com/v1/chat/completions"
payload = {
"model": "gpt-3.5-turbo-1106",
"messages": [{"role": "user", "content": some_news}],
"temperature" : 1.0,
"top_p":1.0,
"n" : 1,
"stream": False,
"presence_penalty":0,
"frequency_penalty":0,
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {openai.api_key}"
}
response = requests.post(URL, headers=headers, json=payload, stream=False)
response_content = response.content.decode('utf-8') # Decode bytes to string
response_json = json.loads(response_content) # Parse string as JSON
# Access the content field
content = response_json["choices"][0]["message"]["content"]
return content
print(analyze_news("Analyze the following news and briefly predict its impact on the stock price: will it increase, decrease, or have no effect? Limit your response to 5-6 words and output to be informat of Stock names: Positive, negative or neutral. Rivian Stock Falls As EV Deliveries Lag As Tesla Beats Expectations")) | [] |
2024-01-10 | mariotoffia/llm-experiments | ceos-agent~scanner.py | from typing import List
import re
from pydantic import BaseModel
from langchain.document_loaders import UnstructuredFileLoader
from unstructured.cleaners.core import clean_extra_whitespace
from langchain.document_loaders import UnstructuredURLLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
from langchain.text_splitter import MarkdownHeaderTextSplitter
def find_files(pattern) -> List[str]:
"""
Find files matching a pattern
:param pattern: Pattern to match e.g. 'data/*.txt'
:return: List of file paths
"""
import glob
result: List[str] = []
for file_path in glob.glob(pattern):
result.append(file_path)
return result
def scan_directory(pattern: str,chunk_size=1024, chunk_overlap=100) -> List[List[Document]]:
"""
Retrieve structured data from a directory
:param pattern: Pattern to match e.g. 'data/*.txt'
:return: List of Document objects
"""
result: List[List[Document]] = []
for file_path in find_files(pattern):
result.append(process_file_data(file_path,chunk_overlap=chunk_overlap,chunk_size=chunk_size))
return result
def scan_urls(urls: List[str], chunk_size=1024, chunk_overlap=100) -> List[List[Document]]:
"""
Retrieve structured data from a list of URLs
:param urls: List of URLs
:return: List of Document objects
"""
result: List[List[Document]] = []
for url in urls:
result.append(process_url_data(url, chunk_size=chunk_size, chunk_overlap=chunk_overlap))
return result
def process_url_data(url: str, chunk_size=1024, chunk_overlap=100) -> List[Document]:
"""
Retrieve structured data from a URL
:param url: URL to retrieve
:return: List of Document objects
"""
mode="single"
loader = UnstructuredURLLoader(urls=[url], mode=mode, continue_on_failure=True)
docs = loader.load()
if mode == "single":
return handle_single_text(url, docs, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
return handle_elements(url, docs)
def process_file_data(file_path: str, chunk_size=1024, chunk_overlap=100) -> List[Document]:
"""
Retrieve structured data from a file
:param file_path: Path to the file
:return: List of Document objects
"""
mode = "single"
loader = UnstructuredFileLoader(
file_path=file_path,
strategy="hi-res", # other option:"fast"
mode=mode, # single (default), elements, paged (for PDFs)
post_processors=[clean_extra_whitespace],
)
docs = loader.load()
if mode == "single":
if file_path.endswith(".md"):
return handle_single_md(docs, file_path=file_path)
return handle_single_text(file_path, docs, chunk_size, chunk_overlap)
return handle_elements(file_path, docs)
def handle_elements(file_path: str, docs: List[Document]) -> List[Document]:
"""
Handle when UnstructuredFileLoader is in mode=elements
"""
result: List[Document] = []
text = []
for doc in docs:
# Check if metadata and category exist, otherwise treat content as part of the answer
category = doc.metadata.get("category") if doc.metadata else None
content = doc.page_content.strip()
if category is None:
result.append(Document(
page_content=content,
metadata=transform_dict_arrays_to_strings(
{**doc.metadata, "file": file_path},
),
))
continue
if category == "Title":
if len(text) > 0:
result.append(Document(
page_content=content + "\n".join(text),
metadata=transform_dict_arrays_to_strings(
{**doc.metadata, "file": file_path},
),
))
text = []
else:
if category == "ListItem":
text.append(f"• {content}")
else:
text.append(content)
# The rest
if len(text) > 0:
result.append(Document(
page_content="\n".join(text),
metadata={**doc.metadata, "file": file_path},
))
return result
def handle_single_md(docs: List[Document], file_path: str) -> List[Document]:
splitter = MarkdownHeaderTextSplitter(headers_to_split_on=[
("#", "Header 1"), # Level -> metadata key
("##", "Header 2"),
("###", "Header 3"),
])
splitted_docs: List[Document] = []
for doc in docs:
for split in splitter.split_text(doc.page_content):
splitted_docs.append(Document(
page_content=split.page_content.strip(),
metadata=transform_dict_arrays_to_strings({**split.metadata, "file": file_path}),
))
return splitted_docs
def handle_single_text(
file_path: str,
docs: List[Document],
chunk_size: int,
chunk_overlap: int) -> List[Document]:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(docs)
result: List[Document] = []
for doc in docs:
result.append(Document(
page_content=doc.page_content.strip(),
metadata=transform_dict_arrays_to_strings({**doc.metadata, "file": file_path}),
))
return result
def transform_dict_arrays_to_strings(input_dict):
"""
Transforms any array in the _input_dict_ to a comma-separated string
"""
for key, value in input_dict.items():
# Check if the value is a list
if isinstance(value, list):
# Join the list elements into a comma-separated string
input_dict[key] = ', '.join(map(str, value))
return input_dict
def is_binary_file(file_name):
# Common binary file extensions
binary_extensions = {
'.png', '.jpg', '.jpeg', '.gif', '.bmp', '.tiff',
'.pdf', '.zip', '.rar',
'.7z', '.mp3', '.wav', '.wma', '.mp4', '.mov',
'.avi', '.flv', '.mkv'
}
# Get the file extension
extension = file_name.lower().rsplit('.', 1)[-1]
extension = '.' + extension
# Check if the extension is in the list of binary extensions
return extension in binary_extensions
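# --- Illustrative usage sketch (the "docs/" folder and the URL are hypothetical) ---
def _example_scan():
    """Chunk local markdown files and one web page into lists of Documents."""
    local_chunks = scan_directory("docs/*.md", chunk_size=512, chunk_overlap=64)
    web_chunks = scan_urls(["https://example.com/about"], chunk_size=512)
    return local_chunks, web_chunks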
| [] |
2024-01-10 | mariotoffia/llm-experiments | ceos-agent~chains~history.py | from .base import BaseChain
from .utils import docs_as_messages
from embeddingsdb import EmbeddingsDb
from operator import itemgetter
from langchain.chat_models import ChatOpenAI
from langchain.schema.runnable import RunnableLambda
from langchain.schema.messages import HumanMessage, SystemMessage
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.memory import ConversationBufferMemory
class HistoryChain(BaseChain):
"""
This is a chain, that loads a context from the database and
do keep history.
Based on: https://python.langchain.com/docs/expression_language/cookbook/memory
"""
memory: ConversationBufferMemory
input: any
def __init__(self,
model: ChatOpenAI,
embeddings_db: EmbeddingsDb,
debug: bool, **kwargs: any):
super().__init__("history", model, embeddings_db, debug, **kwargs)
def create(self,
model: ChatOpenAI = None,
embeddings_db: EmbeddingsDb = None,
debug: bool = None
) -> 'HistoryChain':
"""
Create the chain
:param model: The model. If omitted, the default model is used.
:param embeddings_db: The embeddings database. If omitted, the default embeddings database is used.
:param debug: The debug flag. If omitted, the default debug flag is used.
:return: The runnable chain
"""
model = model or self.model
embeddings_db = embeddings_db or self.embeddings_db
debug = debug or self.debug
prompt = ChatPromptTemplate.from_messages(
[
SystemMessage(
content="""You are a very knowledgeable assistant,
and are willingly to assist a human with correct answers."""
),
MessagesPlaceholder(
variable_name="chat_history"
),
MessagesPlaceholder(
variable_name="context"
),
HumanMessage(content="""Answer the questions below based only on the above context \
(without mention the context in the response)."""),
HumanMessagePromptTemplate.from_template("{question}"),
]
)
self.memory = ConversationBufferMemory(
memory_key="chat_history", return_messages=True)
self.current_chain = {
"question": lambda x: x["question"],
"chat_history": RunnableLambda(self.memory.load_memory_variables) | itemgetter("chat_history"),
"context": itemgetter("question") | embeddings_db.as_retriever() | docs_as_messages,
} | prompt | model | StrOutputParser()
return self
def before(self, chain_message: any) -> any:
"""
Stores, temporarily, the chain_message as input.
"""
self.input = chain_message
return chain_message
def after(self, chain_message: any):
"""
Stores the chain_message in memory along with the input.
"""
if self.memory is not None:
self.memory.save_context(self.input, {"output": chain_message})
| [
"chat_history",
"You are a very knowledgeable assistant, \n and are willingly to assist a human with correct answers.",
"Answer the questions below based only on the above context (without mention the context in the response).",
"context",
"{question}"
] |
2024-01-10 | mariotoffia/llm-experiments | ceos-agent~chains~history_tools.py | from .base import BaseChain
from tools.smhi import ForecastTool
from embeddingsdb import EmbeddingsDb
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import SystemMessage
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
class HistoryWithToolsChain(BaseChain):
"""
This is a chain, that loads a context from the database, have tools, and
keep history.
"""
def __init__(self,
model: ChatOpenAI,
embeddings_db: EmbeddingsDb,
debug: bool, **kwargs: any):
super().__init__("history-with-tools", model, embeddings_db, debug, **kwargs)
def create(self,
model: ChatOpenAI = None,
embeddings_db: EmbeddingsDb = None,
debug: bool = None
) -> 'HistoryWithToolsChain':
"""
Create the chain
:param model: The model. If omitted, the default model is used.
:param embeddings_db: The embeddings database. If omitted, the default embeddings database is used.
:param debug: The debug flag. If omitted, the default debug flag is used.
:return: The runnable chain
"""
model = model or self.model
embeddings_db = embeddings_db or self.embeddings_db
debug = debug or self.debug
prompt = ChatPromptTemplate.from_messages(
[
SystemMessage(
content="You are very powerful assistant, but bad at calculating lengths of words."),
MessagesPlaceholder(variable_name="chat_history"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_function_messages(
x["intermediate_steps"]
),
"chat_history": lambda x: x["chat_history"],
}
| prompt
| model
| OpenAIFunctionsAgentOutputParser()
)
self.current_chain = initialize_agent(
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
llm=model,
# return_direct=True?? -> https://github.com/Chainlit/chainlit/issues/377
tools=[ForecastTool()], verbose=debug, return_direct=True
)
return self
def before(self, chain_message: any) -> any:
"""
Replaces the key question with input, if question is not present.
"""
if "input" not in chain_message:
chain_message["input"] = chain_message["question"]
del chain_message["question"]
return chain_message
def get_output(self, chunk: any) -> str:
"""
Get the output from the chunk.
"""
return chunk["output"]
| [
"chat_history",
"You are very powerful assistant, but bad at calculating lengths of words.",
"agent_scratchpad",
"{input}"
] |
2024-01-10 | mariotoffia/llm-experiments | ceos-agent~embeddingsdb.py | import os
import shutil
import json
from typing import List
import uuid
from langchain.docstore.document import Document
from langchain.vectorstores.chroma import Chroma
from langchain.schema.embeddings import Embeddings
class EmbeddingsDb:
"""
Embeddings database
"""
chroma: Chroma
embeddings_path: str = "./data/embeddings"
embeddings: Embeddings
search_type: str
k: int
def __init__(self,
embeddings: Embeddings,
search_type="similarity",
k=4,
):
"""
Constructor
:param embeddings: The embeddings creator to use.
"""
if not os.path.exists(self.embeddings_path):
os.makedirs(self.embeddings_path)
self.chroma = Chroma(
embedding_function=embeddings,
persist_directory=self.embeddings_path,
)
self.embeddings = embeddings
self.search_type = search_type
self.k = k
def get_embeddings(self) -> Embeddings:
return self.embeddings
def as_retriever(self):
"""
Return the Chroma object as a retriever
:return: Chroma object
"""
return self.chroma.as_retriever(
search_type=self.search_type,
search_kwargs={"k": self.k},
)
def embed(self, text: str) -> List[float]:
"""
Embed a text
:param text: Text to embed
:return: List of floats
"""
return self.embeddings.embed_query(text)
def reset(self):
"""
        Reset the vector store by deleting all files and recreating the directory
where the embeddings are stored.
:return:
"""
if not os.path.exists(self.embeddings_path):
return
for item in os.listdir(self.embeddings_path):
item_path = os.path.join(self.embeddings_path, item)
if os.path.isfile(item_path):
os.remove(item_path)
elif os.path.isdir(item_path):
shutil.rmtree(item_path)
def query_text(self, text: str) -> List[Document]:
"""
Query the vector store for the given text
:param text: Text to query
:return: List of Document objects
"""
docs = self.chroma.as_retriever().get_relevant_documents(query=text)
seen_ids = set()
result: List[Document] = []
for doc in docs:
if str(uuid.uuid5(uuid.NAMESPACE_DNS, doc.page_content)) not in seen_ids:
result.append(doc)
seen_ids.add(
str(uuid.uuid5(uuid.NAMESPACE_DNS, doc.page_content)))
return result
def store_structured_data(self, docs: List[Document], id: str = None) -> bool:
"""
Store structured data in the vector store
:param docs: List of Document objects
:param id: Optional id, of which it checks if already indexed and skips
if such is the case.
:return: True if the data was stored, False if the data was skipped
"""
if not os.path.exists(self.embeddings_path):
os.makedirs(self.embeddings_path)
id_path = os.path.join(self.embeddings_path, "indexed", id)
if id is not None and os.path.exists(id_path):
return False
self.chroma.from_documents(
documents=docs,
persist_directory=self.embeddings_path,
embedding=self.embeddings,
)
# Mark id as already done
if id is not None:
os.makedirs(os.path.dirname(id_path), exist_ok=True)
with open(id_path, "w") as f:
f.write(id)
return True
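# --- Illustrative usage sketch (assumption: OPENAI_API_KEY is available in the
# environment; langchain's OpenAIEmbeddings is used as the embedding function) ---
def _example_embeddings_db():
    """Index one document and query it back."""
    from langchain.embeddings.openai import OpenAIEmbeddings
    db = EmbeddingsDb(embeddings=OpenAIEmbeddings(), k=2)
    db.store_structured_data(
        [Document(page_content="Stockholm is the capital of Sweden.",
                  metadata={"file": "demo.txt"})],
        id="demo-doc",
    )
    return db.query_text("What is the capital of Sweden?")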
| [] |
2024-01-10 | mariotoffia/llm-experiments | ceos-agent~chains~no_history.py | from .base import BaseChain
from .utils import docs_as_messages
from embeddingsdb import EmbeddingsDb
from operator import itemgetter
from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage, SystemMessage, BaseMessage
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
from langchain.schema.output_parser import StrOutputParser
class NoHistoryChain(BaseChain):
"""
This is a plain chain, that loads a context from the database and
but do not keep history.
"""
def __init__(self,
model: ChatOpenAI,
embeddings_db: EmbeddingsDb,
debug: bool, **kwargs: any):
super().__init__("no-history", model, embeddings_db, debug, **kwargs)
def create(self,
model: ChatOpenAI = None,
embeddings_db: EmbeddingsDb = None,
debug: bool = None
) -> 'NoHistoryChain':
"""
Create the chain
:param model: The model. If omitted, the default model is used.
:param embeddings_db: The embeddings database. If omitted, the default embeddings database is used.
:param debug: The debug flag. If omitted, the default debug flag is used.
:return: The runnable chain
"""
model = model or self.model
embeddings_db = embeddings_db or self.embeddings_db
debug = debug or self.debug
prompt = ChatPromptTemplate.from_messages(
[
SystemMessage(
content="""You are a very knowledgeable assistant,
and are willingly to assist the human with correct answers."""
),
MessagesPlaceholder(
variable_name="context"
),
HumanMessage(content="""Answer the questions below based only on the above context \
(without mention the context in the response)."""),
HumanMessagePromptTemplate.from_template("{question}"),
]
)
self.current_chain = (
{
"context": itemgetter("question") | embeddings_db.as_retriever() | docs_as_messages,
"question": lambda x: x["question"],
}
| prompt
| model
| StrOutputParser()
)
return self
| [
"{question}",
"Answer the questions below based only on the above context (without mention the context in the response).",
"context",
"You are a very knowledgeable assistant, \n and are willingly to assist the human with correct answers."
] |
2024-01-10 | mariotoffia/llm-experiments | ceos-agent~tools~smhi.py | import requests
import aiohttp
from requests.exceptions import HTTPError
from langchain.tools import StructuredTool
from pydantic import BaseModel, Field
from typing import Optional, Type
class ForecastInput(BaseModel):
latitude: float = Field(..., description="Latitude of the location")
longitude: float = Field(..., description="Longitude of the location")
class ForecastOutput(BaseModel):
temperature: Optional[float] = Field(None, description="Temperature in degrees Celsius")
wind_speed: Optional[float] = Field(None, description="Wind speed in meters per second")
precipitation: Optional[float] = Field(None, description="Precipitation in millimeters")
class ForecastTool(StructuredTool):
name: str = "GetWeatherForecast"
description: str = "Useful when you need to answer a question about weather in a specific location."
args_schema: Type[BaseModel] = ForecastInput
# SMHI API endpoint for weather forecast
smhi_url = "https://opendata-download-metfcst.smhi.se/api/category/pmp3g/version/2/geotype/point/lon/{input.longitude}/lat/{input.latitude}/data.json"
def _run(self, latitude: float, longitude: float) -> ForecastOutput:
print(f"(sync) Retrieving weather forecast for lat: {latitude}, lon: {longitude}")
url = self.smhi_url.format(input=ForecastInput(latitude=latitude, longitude=longitude))
response = requests.get(url)
if response.status_code == 200:
forecast=response.json()
return self.extract_weather_info(forecast=forecast)
else:
raise HTTPError(f'Unexpected status code: {response.status_code}')
async def _arun(self, latitude: float, longitude: float) -> ForecastOutput:
print(f"(async) Retrieving weather forecast for lat: {latitude}, lon: {longitude}")
url = self.smhi_url.format(input=ForecastInput(latitude=latitude, longitude=longitude))
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
if response.status == 200:
forecast = await response.json()
return self.extract_weather_info(forecast=forecast)
else:
                    raise HTTPError(f'Unexpected status code: {response.status}')
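    # Illustrative usage sketch (coordinates are hypothetical, roughly Stockholm):
    #
    #   tool = ForecastTool()
    #   weather = tool.run({"latitude": 59.33, "longitude": 18.07})
    #
    # When wired into an agent, the agent supplies latitude/longitude via the
    # ForecastInput schema and receives the ForecastOutput fields back.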
def extract_weather_info(self, forecast: dict) -> ForecastOutput:
if 'timeSeries' in forecast and len(forecast['timeSeries']) > 0:
# The first element in the time series is usually the current weather forecast
current_forecast = forecast['timeSeries'][0]
            weather_info = {
'temperature': None,
'wind_speed': None,
'precipitation': None
}
for parameter in current_forecast['parameters']:
if parameter['name'] == 't': # Temperature
weather_info['temperature'] = parameter['values'][0]
elif parameter['name'] == 'ws': # Wind speed
weather_info['wind_speed'] = parameter['values'][0]
elif parameter['name'] == 'pmean': # Mean precipitation
weather_info['precipitation'] = parameter['values'][0]
            return ForecastOutput(**weather_info)
else:
raise KeyError("Error: Could not parse the weather forecast.") | [
"Useful when you need to answer a question about weather in a specific location."
] |
2024-01-10 | parker84/GoT-chat-bot | build_pinecone_index.py | import pinecone
from tqdm.auto import tqdm
from uuid import uuid4
from decouple import config
from langchain.embeddings.openai import OpenAIEmbeddings
from constants import EMBEDDING_MODEL
from langchain.document_loaders import TextLoader
from dotenv import find_dotenv, load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
import logging, coloredlogs
load_dotenv(find_dotenv())
# ------------constants
BATCH_SIZE = 100
# --------------setup
logger = logging.getLogger(__name__)
coloredlogs.install(level=config('LOG_LEVEL', default='INFO'))
logger.info('Setup')
embed = OpenAIEmbeddings(model=EMBEDDING_MODEL)
pinecone.init(
api_key=config('PINECONE_API_KEY'), # find api key in console at app.pinecone.io
environment=config('PINECONE_ENV') # find next to api key in console
)
# # delete index if it exists
# pinecone.delete_index(config('PINECONE_INDEX_NAME'))
# create a new index
# pinecone.create_index(
# name=config('PINECONE_INDEX_NAME'),
# metric='dotproduct', # dotproduct bc the embeddings are normalized = 1 (see here: https://platform.openai.com/docs/guides/embeddings/which-distance-function-should-i-use)
# dimension=1536 # 1536 dim of text-embedding-ada-002
# )
index = pinecone.Index(config('PINECONE_INDEX_NAME'))
def create_index(index, folder_path):
    logger.info(f'Index stats before we start: \n{index.describe_index_stats()}')
txt_files = [file for file in os.listdir(folder_path) if file.endswith(".txt")]
for filename in tqdm(txt_files):
logger.info(f'Loading and Splitting Book: {filename}')
file_path = os.path.join(folder_path, filename)
loader = TextLoader(file_path=file_path, autodetect_encoding=True)
book = loader.load()
logger.debug('Splitting Book into Docs...')
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = text_splitter.split_documents(book)
n_docs = len(docs)
metadata = {
'book': filename
}
logger.info('Running Batches to Embed and Send into Index:')
for i in tqdm(range((n_docs // BATCH_SIZE) + 1)):
batch_text = [doc.page_content for doc in docs[i*BATCH_SIZE: (i+1)*BATCH_SIZE]]
metadatas = [{
"chunk": j, "text": text, **metadata
} for j, text in enumerate(batch_text)]
ids = [str(uuid4()) for _ in range(len(batch_text))]
logger.debug('Embedding...')
embeds = embed.embed_documents(batch_text)
logger.debug('Inserting into Index...')
index.upsert(vectors=zip(ids, embeds, metadatas))
logger.info(f'Index stats after: \n{index.describe_index_stats()}')
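# Query sketch (illustrative, not part of the original script): once the index is
# populated, related chunks can be retrieved by embedding the query with the same
# model and asking Pinecone for its nearest neighbours, e.g.
#
#   query_vector = embed.embed_query("Who holds Winterfell?")
#   result = index.query(vector=query_vector, top_k=5, include_metadata=True)
#   for match in result["matches"]:
#       print(match["score"], match["metadata"]["book"])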
if __name__ == "__main__":
create_index(index, './data/got-books') | [] |
2024-01-10 | UMass-Foundation-Model/genome | engine~step_interpreters.py | import cv2
import os
import torch
import openai
import functools
import numpy as np
import face_detection
import io, tokenize
from torchvision import transforms
from augly.utils.base_paths import EMOJI_DIR
import augly.image as imaugs
from PIL import Image,ImageDraw,ImageFont,ImageFilter
from .nms import nms
from .api import API
def parse_step(step_str,partial=False): # ANSWER1=EVAL(image=IMAGE,expr=f"'top' if {ANSWER0} > 0 else 'bottom'",object='vehicle')
tokens = list(tokenize.generate_tokens(io.StringIO(step_str).readline))
# print(tokens)
output_var = tokens[0].string # ANSWER1
step_name = tokens[2].string # EVAL
parsed_result = dict(
output_var=output_var,
step_name=step_name)
if partial:
return parsed_result
arg_tokens = [token for token in tokens[4:-3] if token.string not in [',','=']] # image IMAGE ...
num_tokens = len(arg_tokens) // 2
args = dict()
for i in range(num_tokens):
args[arg_tokens[2*i].string] = arg_tokens[2*i+1].string # dict: image -> IMAGE
parsed_result['args'] = args
return parsed_result
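# Worked example (illustrative): for the step string shown in the comment above,
# parse_step returns roughly
#   {'output_var': 'ANSWER1', 'step_name': 'EVAL',
#    'args': {'image': 'IMAGE', 'expr': '<the f-string literal>', 'object': "'vehicle'"}}
# Argument values stay raw token strings and are resolved later by the individual
# interpreters.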
class EvalInterpreter():
step_name = 'EVAL'
def __init__(self):
print(f'Registering {self.step_name} step')
def execute(self,expr):
if 'xor' in expr:
expr = expr.replace('xor','!=')
step_output = eval(expr)
print("EVAL")
print(step_output)
return step_output
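    # Worked examples (illustrative): execute("'yes' if 3 > 2 else 'no'") returns 'yes',
    # and execute("True xor False") is rewritten to eval("True != False"), returning True.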
class ResultInterpreter():
step_name = 'RESULT'
def __init__(self):
print(f'Registering {self.step_name} step')
def execute(self,output):
return output
class VQAInterpreter():
step_name = 'VQA'
def __init__(self):
print(f'Registering {self.step_name} step')
def predict(self,img,question):
return API.vqa(img,question)
# return API.vqa_short(img,question)
def execute(self,img,question):
answer = self.predict(img,question)
return answer
class LocInterpreter():
"""
Input:
img: an image object
obj_name: an object string
Output:
selected_boxes: a list of bounding boxes
"""
step_name = 'LOC'
def __init__(self,thresh=0.1,nms_thresh=0.5):
print(f'Registering {self.step_name} step')
self.thresh = thresh
self.nms_thresh = nms_thresh
def predict(self,img,obj_name):
return API.loc(img,obj_name,self.thresh,self.nms_thresh)
# return API.find(img,obj_name,glip_thresh=0.6)
def top_box(self,img):
w,h = img.size
return [0,0,w-1,int(h/2)]
def bottom_box(self,img):
w,h = img.size
return [0,int(h/2),w-1,h-1]
def left_box(self,img):
w,h = img.size
return [0,0,int(w/2),h-1]
def right_box(self,img):
w,h = img.size
return [int(w/2),0,w-1,h-1]
def execute(self,img,obj_name):
if obj_name=='TOP':
bboxes = [self.top_box(img)]
elif obj_name=='BOTTOM':
bboxes = [self.bottom_box(img)]
elif obj_name=='LEFT':
bboxes = [self.left_box(img)]
elif obj_name=='RIGHT':
bboxes = [self.right_box(img)]
else:
bboxes = self.predict(img,obj_name)
return bboxes
class Loc2Interpreter(LocInterpreter):
def execute(self,img,obj_name):
bboxes = self.predict(img,obj_name)
objs = []
for box in bboxes:
objs.append(dict(
box=box,
category=obj_name
))
return objs
class CountInterpreter():
"""
Input:
box: a list of bounding boxes
Output:
number: number of objects
Examples:
ANSWER0=COUNT(box=BOX1)
"""
step_name = 'COUNT'
def __init__(self):
print(f'Registering {self.step_name} step')
def execute(self,boxes):
count = len(boxes)
return count
class CropInterpreter():
"""
crop a patch of the image identified by the bounding box
Input:
image: an image
box: a box
Output:
image: an cropped image
Examples:
IMAGE0=CROP(image=IMAGE,box=BOX0)
"""
step_name = 'CROP'
def __init__(self):
print(f'Registering {self.step_name} step')
def expand_box(self,box,img_size,factor=1.5):
W,H = img_size
x1,y1,x2,y2 = box
dw = int(factor*(x2-x1)/2)
dh = int(factor*(y2-y1)/2)
cx = int((x1 + x2) / 2)
cy = int((y1 + y2) / 2)
x1 = max(0,cx - dw)
x2 = min(cx + dw,W)
y1 = max(0,cy - dh)
y2 = min(cy + dh,H)
return [x1,y1,x2,y2]
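    # Worked example (illustrative): expand_box([10, 10, 30, 30], (100, 100)) with the
    # default factor of 1.5 gives dw = dh = 15 around centre (20, 20), i.e. [5, 5, 35, 35],
    # clipped to the image borders when necessary.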
def execute(self,img,boxes):
if len(boxes) > 0:
box = boxes[0]
box = self.expand_box(box, img.size)
out_img = img.crop(box)
else:
box = []
out_img = img
return out_img
class CropRightOfInterpreter(CropInterpreter):
step_name = 'CROP_RIGHTOF'
def right_of(self,box,img_size):
w,h = img_size
x1,y1,x2,y2 = box
cx = int((x1+x2)/2)
return [cx,0,w-1,h-1]
def execute(self,img,boxes):
if len(boxes) > 0:
box = boxes[0]
right_box = self.right_of(box, img.size)
else:
w,h = img.size
box = []
right_box = [int(w/2),0,w-1,h-1]
out_img = img.crop(right_box)
return out_img
class CropLeftOfInterpreter(CropInterpreter):
step_name = 'CROP_LEFTOF'
def left_of(self,box,img_size):
w,h = img_size
x1,y1,x2,y2 = box
cx = int((x1+x2)/2)
return [0,0,cx,h-1]
def execute(self,img,boxes):
if len(boxes) > 0:
box = boxes[0]
left_box = self.left_of(box, img.size)
else:
w,h = img.size
box = []
left_box = [0,0,int(w/2),h-1]
out_img = img.crop(left_box)
return out_img
class CropAboveInterpreter(CropInterpreter):
step_name = 'CROP_ABOVE'
def above(self,box,img_size):
w,h = img_size
x1,y1,x2,y2 = box
cy = int((y1+y2)/2)
return [0,0,w-1,cy]
def execute(self,img,boxes):
if len(boxes) > 0:
box = boxes[0]
above_box = self.above(box, img.size)
else:
w,h = img.size
box = []
            above_box = [0,0,w-1,int(h/2)]
out_img = img.crop(above_box)
return out_img
class CropBelowInterpreter(CropInterpreter):
step_name = 'CROP_BELOW'
def below(self,box,img_size):
w,h = img_size
x1,y1,x2,y2 = box
cy = int((y1+y2)/2)
return [0,cy,w-1,h-1]
def execute(self,img,boxes):
if len(boxes) > 0:
box = boxes[0]
below_box = self.below(box, img.size)
else:
w,h = img.size
box = []
            below_box = [0,int(h/2),w-1,h-1]
out_img = img.crop(below_box)
return out_img
class CropFrontOfInterpreter(CropInterpreter):
step_name = 'CROP_FRONTOF'
class CropInFrontInterpreter(CropInterpreter):
step_name = 'CROP_INFRONT'
class CropInFrontOfInterpreter(CropInterpreter):
step_name = 'CROP_INFRONTOF'
class CropBehindInterpreter(CropInterpreter):
step_name = 'CROP_BEHIND'
class CropAheadInterpreter(CropInterpreter):
step_name = 'CROP_AHEAD'
class SegmentInterpreter():
step_name = 'SEG'
def execute(self,img):
objs = API.segment(img)
return objs
class SelectInterpreter():
step_name = 'SELECT'
def query_string_match(self,objs,q):
obj_cats = [obj['category'] for obj in objs]
q = q.lower()
for cat in [q,f'{q}-merged',f'{q}-other-merged']:
if cat in obj_cats:
return [obj for obj in objs if obj['category']==cat]
return None
def execute(self,img,objs,query,category):
query = query.split(',')
select_objs = []
if category is not None:
cat_objs = [obj for obj in objs if obj['category'] in category]
if len(cat_objs) > 0:
objs = cat_objs
if category is None:
for q in query:
matches = self.query_string_match(objs, q)
if matches is None:
continue
select_objs += matches
if query is not None and len(select_objs) == 0:
select_objs = API.select(query, objs, img)
return select_objs
class ColorpopInterpreter():
step_name = 'COLORPOP'
def refine_mask(self,img,mask):
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
mask,_,_ = cv2.grabCut(
img.astype(np.uint8),
mask.astype(np.uint8),
None,
bgdModel,
fgdModel,
5,
cv2.GC_INIT_WITH_MASK)
return mask.astype(float)
def execute(self,img,objs):
gimg = img.copy()
gimg = gimg.convert('L').convert('RGB')
gimg = np.array(gimg).astype(float)
img = np.array(img).astype(float)
for obj in objs:
refined_mask = self.refine_mask(img, obj['mask'])
mask = np.tile(refined_mask[:,:,np.newaxis],(1,1,3))
gimg = mask*img + (1-mask)*gimg
gimg = np.array(gimg).astype(np.uint8)
gimg = Image.fromarray(gimg)
return gimg
class BgBlurInterpreter():
step_name = 'BGBLUR'
def refine_mask(self,img,mask):
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
mask,_,_ = cv2.grabCut(
img.astype(np.uint8),
mask.astype(np.uint8),
None,
bgdModel,
fgdModel,
5,
cv2.GC_INIT_WITH_MASK)
return mask.astype(float)
def smoothen_mask(self,mask):
mask = Image.fromarray(255*mask.astype(np.uint8)).filter(
ImageFilter.GaussianBlur(radius = 5))
return np.array(mask).astype(float)/255
def execute(self,img,objs):
bgimg = img.copy()
bgimg = bgimg.filter(ImageFilter.GaussianBlur(radius = 2))
bgimg = np.array(bgimg).astype(float)
img = np.array(img).astype(float)
for obj in objs:
refined_mask = self.refine_mask(img, obj['mask'])
mask = np.tile(refined_mask[:,:,np.newaxis],(1,1,3))
mask = self.smoothen_mask(mask)
bgimg = mask*img + (1-mask)*bgimg
bgimg = np.array(bgimg).astype(np.uint8)
bgimg = Image.fromarray(bgimg)
return bgimg
class FaceDetInterpreter():
step_name = 'FACEDET'
def box_image(self,img,boxes):
img1 = img.copy()
draw = ImageDraw.Draw(img1)
for i,box in enumerate(boxes):
draw.rectangle(box,outline='blue',width=5)
return img1
def enlarge_face(self,box,W,H,f=1.5):
x1,y1,x2,y2 = box
w = int((f-1)*(x2-x1)/2)
h = int((f-1)*(y2-y1)/2)
x1 = max(0,x1-w)
y1 = max(0,y1-h)
x2 = min(W,x2+w)
y2 = min(H,y2+h)
return [x1,y1,x2,y2]
def det_face(self,img):
faces = API.face_detection(img)
W,H = img.size
objs = []
for i,box in enumerate(faces):
x1,y1,x2,y2,c = [int(v) for v in box.tolist()]
x1,y1,x2,y2 = self.enlarge_face([x1,y1,x2,y2],W,H)
mask = np.zeros([H,W]).astype(float)
mask[y1:y2,x1:x2] = 1.0
objs.append(dict(
box=[x1,y1,x2,y2],
category='face',
inst_id=i,
mask = mask
))
return objs
def execute(self,image):
objs = self.det_face(image)
return objs
class EmojiInterpreter():
step_name = 'EMOJI'
def add_emoji(self,objs,emoji_name,img):
W,H = img.size
emojipth = os.path.join(EMOJI_DIR,f'smileys/{emoji_name}.png')
for obj in objs:
x1,y1,x2,y2 = obj['box']
cx = (x1+x2)/2
cy = (y1+y2)/2
s = (y2-y1)/1.5
x_pos = (cx-0.5*s)/W
y_pos = (cy-0.5*s)/H
emoji_size = s/H
emoji_aug = imaugs.OverlayEmoji(
emoji_path=emojipth,
emoji_size=emoji_size,
x_pos=x_pos,
y_pos=y_pos)
img = emoji_aug(img)
return img
def execute(self,img,objs,emoji_name):
img = self.add_emoji(objs, emoji_name, img)
return img
class ListInterpreter():
step_name = 'LIST'
prompt_template = """
Create comma separated lists based on the query.
Query: List at most 3 primary colors separated by commas
List:
red, blue, green
Query: List at most 2 north american states separated by commas
List:
California, Washington
Query: List at most {list_max} {text} separated by commas
List:"""
def get_list(self,text,list_max):
response = openai.Completion.create(
model="text-davinci-002",
prompt=self.prompt_template.format(list_max=list_max,text=text),
temperature=0,
max_tokens=256,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0,
n=1,
)
item_list = response.choices[0]['text'].lstrip('\n').rstrip('\n').split(', ')
return item_list
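    # Usage sketch (illustrative; the actual output depends on the language model):
    #   get_list("planets in the solar system", 3) sends the few-shot prompt above and
    #   might return something like ['Mercury', 'Venus', 'Earth'].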
def execute(self,query,max):
item_list = self.get_list(query,max)
return item_list
class ClassifyInterpreter():
step_name = 'CLASSIFY'
def query_obj(self,query,objs,img):
return API.query_obj(query,objs,img)
def execute(self,img,objs,cats):
import copy
objs = self.query_obj(cats, copy.deepcopy(objs), img)
return objs
class TagInterpreter():
step_name = 'TAG'
def tag_image(self,img,objs):
W,H = img.size
img1 = img.copy()
draw = ImageDraw.Draw(img1)
font = ImageFont.truetype('/usr/share/fonts/dejavu/DejaVuSans-Bold.ttf', 24)
# font = ImageFont.truetype(size=16)
for i,obj in enumerate(objs):
box = obj['box']
draw.rectangle(box,outline='green',width=6)
x1,y1,x2,y2 = box
            if 'class' in obj:
                label = obj['class'] + '({})'.format(obj['class_score'])
                w,h = font.getsize(label)
if x1+w > W or y2+h > H:
draw.rectangle((x1, y2-h, x1 + w, y2), fill='green')
draw.text((x1,y2-h),label,fill='white',font=font)
else:
draw.rectangle((x1, y2, x1 + w, y2 + h), fill='green')
draw.text((x1,y2),label,fill='white',font=font)
return img1
def execute(self,img,objs):
img = self.tag_image(img, objs)
return img
class ReplaceInterpreter():
step_name = 'REPLACE'
def create_mask_img(self,objs):
mask = objs[0]['mask']
mask[mask>0.5] = 255
mask[mask<=0.5] = 0
mask = mask.astype(np.uint8)
return Image.fromarray(mask)
def merge_images(self,old_img,new_img,mask):
print(mask.size,old_img.size,new_img.size)
        mask = np.array(mask).astype(float)/255  # np.float was removed in newer NumPy
mask = np.tile(mask[:,:,np.newaxis],(1,1,3))
img = mask*np.array(new_img) + (1-mask)*np.array(old_img)
return Image.fromarray(img.astype(np.uint8))
def resize_and_pad(self,img,size=(512,512)):
new_img = Image.new(img.mode,size)
thumbnail = img.copy()
thumbnail.thumbnail(size)
new_img.paste(thumbnail,(0,0))
W,H = thumbnail.size
return new_img, W, H
def predict(self,img,mask,prompt):
mask,_,_ = self.resize_and_pad(mask)
init_img,W,H = self.resize_and_pad(img)
new_img = API.replace(
prompt=prompt,
image=init_img,
mask_image=mask,
# strength=0.98,
guidance_scale=7.5,
num_inference_steps=50 #200
)
return new_img.crop((0,0,W-1,H-1)).resize(img.size)
def execute(self,img,objs,prompt):
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
plt.imsave('filename.png', np.array(objs[0]["mask"]), cmap=cm.gray)
print("Debuging")
# box masking
for idx, obj in enumerate(objs):
mask_ = np.zeros((obj['mask'].shape))
x1, y1, x2, y2 = obj['box']
mask_[y1:y2,x1:x2] = 1
objs[idx]['mask'] = mask_
"""
#import pdb
#pdb.set_trace()
mask = self.create_mask_img(objs)
new_img = self.predict(img, mask, prompt)
return new_img
class DetectInterpreter():
step_name = 'DETECT'
def execute(self, image):
boxes = API.object_detector(image)
# boxes = API.glip(image,'object')
selected_boxes = []
image_size = image.size[0] * image.size[1]
threshold = 0.01
for box in boxes:
if (box[2] - box[0]) * (box[3] - box[1]) > image_size * threshold:
selected_boxes.append(box)
return selected_boxes
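    # Worked example (illustrative): for a 640x480 image (307,200 px) the 1% area
    # threshold keeps only boxes larger than 3,072 px^2, i.e. roughly 56x56 or bigger.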
class CAPTIONInterpreter():
step_name = 'CAPTION'
def execute(self, image):
return API.blip(image)
| [
"\nCreate comma separated lists based on the query.\n\nQuery: List at most 3 primary colors separated by commas\nList:\nred, blue, green\n\nQuery: List at most 2 north american states separated by commas\nList:\nCalifornia, Washington\n\nQuery: List at most {list_max} {text} separated by commas\nList:"
] |
2024-01-10 | UMass-Foundation-Model/genome | engine~viper~vision_models.py | """
Adding a new functionality is easy. Just implement your new model as a subclass of BaseModel.
The code will make the rest: it will make it available for the processes to call by using
process(name, *args, **kwargs), where *args and **kwargs are the arguments of the models process() method.
"""
import abc
import backoff
import contextlib
import openai
import os
import re
import timeit
import torch
import torchvision
import warnings
from PIL import Image
from collections import Counter
from contextlib import redirect_stdout
from functools import partial
from itertools import chain
from joblib import Memory
from rich.console import Console
from torch import hub
from torch.nn import functional as F
from torchvision import transforms
from typing import List, Union
from .configs import config
from .utils import HiddenPrints
with open('api.key') as f:
openai.api_key = f.read().strip()
cache = Memory('cache/' if config.use_cache else None, verbose=0)
device = "cuda" if torch.cuda.is_available() else "cpu"
console = Console(highlight=False)
HiddenPrints = partial(HiddenPrints, console=console, use_newline=config.multiprocessing)
# --------------------------- Base abstract model --------------------------- #
class BaseModel(abc.ABC):
to_batch = False
seconds_collect_data = 1.5 # Window of seconds to group inputs, if to_batch is True
max_batch_size = 10 # Maximum batch size, if to_batch is True. Maximum allowed by OpenAI
requires_gpu = True
def __init__(self, gpu_number):
self.dev = f'cuda:{gpu_number}' if device == 'cuda' else device
@abc.abstractmethod
def forward(self, *args, **kwargs):
"""
If to_batch is True, every arg and kwarg will be a list of inputs, and the output should be a list of outputs.
The way it is implemented in the background, if inputs with defaults are not specified, they will take the
default value, but still be given as a list to the forward method.
"""
pass
@classmethod
@abc.abstractmethod
def name(cls) -> str:
"""The name of the model has to be given by the subclass"""
pass
@classmethod
def list_processes(cls):
"""
A single model can be run in multiple processes, for example if there are different tasks to be done with it.
If multiple processes are used, override this method to return a list of strings.
Remember the @classmethod decorator.
If we specify a list of processes, the self.forward() method has to have a "process_name" parameter that gets
automatically passed in.
See GPT3Model for an example.
"""
return [cls.name]
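# Minimal sketch of a new model (illustration only; the class below is made up and is
# kept commented out so it is not registered as a real process):
#
#   class EchoModel(BaseModel):
#       name = 'echo'
#       requires_gpu = False
#
#       def __init__(self, gpu_number=0):
#           super().__init__(gpu_number)
#
#       def forward(self, text):
#           # would be reachable as process('echo', some_text)
#           return text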
# ------------------------------ Specific models ---------------------------- #
class ObjectDetector(BaseModel):
name = 'object_detector'
def __init__(self, gpu_number=0):
super().__init__(gpu_number)
with HiddenPrints('ObjectDetector'):
detection_model = hub.load('facebookresearch/detr', 'detr_resnet50', pretrained=True).to(self.dev)
detection_model.eval()
self.detection_model = detection_model
@torch.no_grad()
def forward(self, image: torch.Tensor):
"""get_object_detection_bboxes"""
input_batch = image.to(self.dev).unsqueeze(0) # create a mini-batch as expected by the model
output = self.detection_model(input_batch)
output['pred_boxes'] = output['pred_boxes'].cpu()
output['pred_logits'] = output['pred_logits'].cpu()
return output
class DepthEstimationModel(BaseModel):
name = 'depth'
def __init__(self, gpu_number=0, model_type='DPT_Large'):
super().__init__(gpu_number)
with HiddenPrints('DepthEstimation'):
warnings.simplefilter("ignore")
# Model options: MiDaS_small, DPT_Hybrid, DPT_Large
depth_estimation_model = hub.load('intel-isl/MiDaS', model_type, pretrained=True).to(self.dev)
depth_estimation_model.eval()
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
if model_type == "DPT_Large" or model_type == "DPT_Hybrid":
self.transform = midas_transforms.dpt_transform
else:
self.transform = midas_transforms.small_transform
self.depth_estimation_model = depth_estimation_model
@torch.no_grad()
def forward(self, image: torch.Tensor):
"""Estimate depth map"""
image_numpy = image.cpu().permute(1, 2, 0).numpy() * 255
input_batch = self.transform(image_numpy).to(self.dev)
prediction = self.depth_estimation_model(input_batch)
# Resize to original size
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=image_numpy.shape[:2],
mode="bicubic",
align_corners=False,
).squeeze()
# We compute the inverse because the model returns inverse depth
to_return = 1 / prediction
to_return = to_return.cpu()
return to_return # To save: plt.imsave(path_save, prediction.cpu().numpy())
class CLIPModel(BaseModel):
name = 'clip'
def __init__(self, gpu_number=0, version="ViT-L/14@336px"): # @336px
super().__init__(gpu_number)
import clip
self.clip = clip
with HiddenPrints('CLIP'):
model, preprocess = clip.load(version, device=self.dev)
model.eval()
model.requires_grad_ = False
self.model = model
self.negative_text_features = None
self.transform = self.get_clip_transforms_from_tensor(336 if "336" in version else 224)
# @staticmethod
def _convert_image_to_rgb(self, image):
return image.convert("RGB")
# @staticmethod
def get_clip_transforms_from_tensor(self, n_px=336):
return transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(n_px, interpolation=transforms.InterpolationMode.BICUBIC),
transforms.CenterCrop(n_px),
self._convert_image_to_rgb,
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
@torch.no_grad()
def binary_score(self, image: torch.Tensor, prompt, negative_categories=None):
is_video = isinstance(image, torch.Tensor) and image.ndim == 4
if is_video: # video
image = torch.stack([self.transform(image[i]) for i in range(image.shape[0])], dim=0)
else:
image = self.transform(image).unsqueeze(0).to(self.dev)
prompt_prefix = "photo of "
prompt = prompt_prefix + prompt
if negative_categories is None:
if self.negative_text_features is None:
self.negative_text_features = self.clip_negatives(prompt_prefix)
negative_text_features = self.negative_text_features
else:
negative_text_features = self.clip_negatives(prompt_prefix, negative_categories)
text = self.clip.tokenize([prompt]).to(self.dev)
image_features = self.model.encode_image(image.to(self.dev))
image_features = F.normalize(image_features, dim=-1)
pos_text_features = self.model.encode_text(text)
pos_text_features = F.normalize(pos_text_features, dim=-1)
text_features = torch.concat([pos_text_features, negative_text_features], axis=0)
# run competition where we do a binary classification
# between the positive and all the negatives, then take the mean
sim = (100.0 * image_features @ text_features.T).squeeze(dim=0)
if is_video:
query = sim[..., 0].unsqueeze(-1).broadcast_to(sim.shape[0], sim.shape[-1] - 1)
others = sim[..., 1:]
res = F.softmax(torch.stack([query, others], dim=-1), dim=-1)[..., 0].mean(-1)
else:
res = F.softmax(torch.cat((sim[0].broadcast_to(1, sim.shape[0] - 1),
sim[1:].unsqueeze(0)), dim=0), dim=0)[0].mean()
return res
@torch.no_grad()
def clip_negatives(self, prompt_prefix, negative_categories=None):
if negative_categories is None:
with open('engine/viper/useful_lists/random_negatives.txt') as f:
negative_categories = [x.strip() for x in f.read().split()]
# negative_categories = negative_categories[:1000]
# negative_categories = ["a cat", "a lamp"]
negative_categories = [prompt_prefix + x for x in negative_categories]
negative_tokens = self.clip.tokenize(negative_categories).to(self.dev)
negative_text_features = self.model.encode_text(negative_tokens)
negative_text_features = F.normalize(negative_text_features, dim=-1)
return negative_text_features
@torch.no_grad()
def classify(self, image: Union[torch.Tensor, list], categories: list[str], return_index=True):
is_list = isinstance(image, list)
if is_list:
assert len(image) == len(categories)
image = [self.transform(x).unsqueeze(0) for x in image]
image_clip = torch.cat(image, dim=0).to(self.dev)
elif len(image.shape) == 3:
image_clip = self.transform(image).to(self.dev).unsqueeze(0)
else: # Video (process images separately)
image_clip = torch.stack([self.transform(x) for x in image], dim=0).to(self.dev)
# if len(image_clip.shape) == 3:
# image_clip = image_clip.unsqueeze(0)
prompt_prefix = "photo of "
categories = [prompt_prefix + x for x in categories]
categories = self.clip.tokenize(categories).to(self.dev)
text_features = self.model.encode_text(categories)
text_features = F.normalize(text_features, dim=-1)
image_features = self.model.encode_image(image_clip)
image_features = F.normalize(image_features, dim=-1)
if image_clip.shape[0] == 1:
# get category from image
softmax_arg = image_features @ text_features.T # 1 x n
else:
if is_list:
# get highest category-image match with n images and n corresponding categories
softmax_arg = (image_features @ text_features.T).diag().unsqueeze(0) # n x n -> 1 x n
else:
softmax_arg = (image_features @ text_features.T)
similarity = (100.0 * softmax_arg).softmax(dim=-1).squeeze(0)
if not return_index:
return similarity
else:
result = torch.argmax(similarity, dim=-1)
if result.shape == ():
result = result.item()
return result
@torch.no_grad()
def compare(self, images: list[torch.Tensor], prompt, return_scores=False):
images = [self.transform(im).unsqueeze(0).to(self.dev) for im in images]
images = torch.cat(images, dim=0)
prompt_prefix = "photo of "
prompt = prompt_prefix + prompt
text = self.clip.tokenize([prompt]).to(self.dev)
image_features = self.model.encode_image(images.to(self.dev))
image_features = F.normalize(image_features, dim=-1)
text_features = self.model.encode_text(text)
text_features = F.normalize(text_features, dim=-1)
sim = (image_features @ text_features.T).squeeze(dim=-1) # Only one text, so squeeze
if return_scores:
return sim
res = sim.argmax()
return res
def forward(self, image, prompt, task='score', return_index=True, negative_categories=None, return_scores=False):
if task == 'classify':
categories = prompt
clip_sim = self.classify(image, categories, return_index=return_index)
out = clip_sim
elif task == 'score':
clip_score = self.binary_score(image, prompt, negative_categories=negative_categories)
out = clip_score
else: # task == 'compare'
idx = self.compare(image, prompt, return_scores)
out = idx
if not isinstance(out, int):
out = out.cpu()
return out
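    # Usage sketch (illustrative): given a CHW image tensor `img`,
    #   forward(img, ['cat', 'dog', 'horse'], task='classify')  -> index of the best label
    #   forward(img, 'a red car', task='score')                 -> score tensor in [0, 1]
    #   forward([img1, img2], 'a red car', task='compare')      -> index of the best image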
class MaskRCNNModel(BaseModel):
name = 'maskrcnn'
def __init__(self, gpu_number=0, threshold=config.detect_thresholds.maskrcnn):
super().__init__(gpu_number)
with HiddenPrints('MaskRCNN'):
obj_detect = torchvision.models.detection.maskrcnn_resnet50_fpn_v2(weights='COCO_V1').to(self.dev)
obj_detect.eval()
obj_detect.requires_grad_(False)
self.categories = torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights.COCO_V1.meta['categories']
self.obj_detect = obj_detect
self.threshold = threshold
def prepare_image(self, image):
image = image.to(self.dev)
return image
@torch.no_grad()
def detect(self, images: torch.Tensor, return_labels=True):
if type(images) != list:
images = [images]
images = [self.prepare_image(im) for im in images]
detections = self.obj_detect(images)
for i in range(len(images)):
height = detections[i]['masks'].shape[-2]
# Just return boxes (no labels no masks, no scores) with scores > threshold
if return_labels: # In the current implementation, we only return labels
d_i = detections[i]['labels'][detections[i]['scores'] > self.threshold]
detections[i] = set([self.categories[d] for d in d_i])
else:
d_i = detections[i]['boxes'][detections[i]['scores'] > self.threshold]
# Return [left, lower, right, upper] instead of [left, upper, right, lower]
detections[i] = torch.stack([d_i[:, 0], height - d_i[:, 3], d_i[:, 2], height - d_i[:, 1]], dim=1)
return detections
def forward(self, image, return_labels=False):
obj_detections = self.detect(image, return_labels)
# Move to CPU before sharing. Alternatively we can try cloning tensors in CUDA, but may not work
obj_detections = [(v.to('cpu') if isinstance(v, torch.Tensor) else list(v)) for v in obj_detections]
return obj_detections
class OwlViTModel(BaseModel):
name = 'owlvit'
def __init__(self, gpu_number=0, threshold=config.detect_thresholds.owlvit):
super().__init__(gpu_number)
from transformers import OwlViTProcessor, OwlViTForObjectDetection
with HiddenPrints("OwlViT"):
processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
model.eval()
model.requires_grad_(False)
self.model = model.to(self.dev)
self.processor = processor
self.threshold = threshold
@torch.no_grad()
def forward(self, image: torch.Tensor, text: List[str], return_labels: bool = False):
if isinstance(image, list):
raise TypeError("image has to be a torch tensor, not a list")
if isinstance(text, str):
text = [text]
text_original = text
text = ['a photo of a ' + t for t in text]
inputs = self.processor(text=text, images=image, return_tensors="pt") # padding="longest",
inputs = {k: v.to(self.dev) for k, v in inputs.items()}
outputs = self.model(**inputs)
# Target image sizes (height, width) to rescale box predictions [batch_size, 2]
target_sizes = torch.tensor([image.shape[1:]]).to(self.dev)
# Convert outputs (bounding boxes and class logits) to COCO API
results = self.processor.post_process(outputs=outputs, target_sizes=target_sizes)
boxes, scores, labels = results[0]["boxes"], results[0]["scores"], results[0]["labels"]
indices_good = scores > self.threshold
boxes = boxes[indices_good]
# Change to format where large "upper"/"lower" means more up
left, upper, right, lower = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
height = image.shape[-2]
boxes = torch.stack([left, height - lower, right, height - upper], -1)
if return_labels:
labels = labels[indices_good]
            labels = [text_original[lab] for lab in labels]  # text_original already lacks the 'a photo of a ' prefix
return boxes, labels
return boxes.cpu() # [x_min, y_min, x_max, y_max]
class GLIPModel(BaseModel):
name = 'glip'
def __init__(self, model_size='large', gpu_number=0, *args):
BaseModel.__init__(self, gpu_number)
with contextlib.redirect_stderr(open(os.devnull, "w")): # Do not print nltk_data messages when importing
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo, to_image_list, create_positive_map, \
create_positive_map_label_to_token_from_positive_map
working_dir = f'{config.path_pretrained_models}/GLIP/'
if model_size == 'tiny':
config_file = working_dir + "configs/glip_Swin_T_O365_GoldG.yaml"
weight_file = working_dir + "checkpoints/glip_tiny_model_o365_goldg_cc_sbu.pth"
else: # large
config_file = working_dir + "configs/glip_Swin_L.yaml"
weight_file = working_dir + "checkpoints/glip_large_model.pth"
class OurGLIPDemo(GLIPDemo):
def __init__(self, dev, *args_demo):
kwargs = {
'min_image_size': 800,
'confidence_threshold': config.detect_thresholds.glip,
'show_mask_heatmaps': False
}
self.dev = dev
from maskrcnn_benchmark.config import cfg
# manual override some options
cfg.local_rank = 0
cfg.num_gpus = 1
cfg.merge_from_file(config_file)
cfg.merge_from_list(["MODEL.WEIGHT", weight_file])
cfg.merge_from_list(["MODEL.DEVICE", self.dev])
with HiddenPrints("GLIP"), torch.cuda.device(self.dev):
from transformers.utils import logging
logging.set_verbosity_error()
GLIPDemo.__init__(self, cfg, *args_demo, **kwargs)
if self.cfg.MODEL.RPN_ARCHITECTURE == "VLDYHEAD":
plus = 1
else:
plus = 0
self.plus = plus
self.color = 255
@torch.no_grad()
def compute_prediction(self, original_image, original_caption, custom_entity=None):
image = self.transforms(original_image)
# image = [image, image.permute(0, 2, 1)]
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.dev)
# caption
if isinstance(original_caption, list):
if len(original_caption) > 40:
all_predictions = None
for loop_num, i in enumerate(range(0, len(original_caption), 40)):
list_step = original_caption[i:i + 40]
prediction_step = self.compute_prediction(original_image, list_step, custom_entity=None)
if all_predictions is None:
all_predictions = prediction_step
else:
# Aggregate predictions
all_predictions.bbox = torch.cat((all_predictions.bbox, prediction_step.bbox), dim=0)
for k in all_predictions.extra_fields:
all_predictions.extra_fields[k] = \
torch.cat((all_predictions.extra_fields[k],
prediction_step.extra_fields[k] + loop_num), dim=0)
return all_predictions
# we directly provided a list of category names
caption_string = ""
tokens_positive = []
seperation_tokens = " . "
for word in original_caption:
tokens_positive.append([len(caption_string), len(caption_string) + len(word)])
caption_string += word
caption_string += seperation_tokens
tokenized = self.tokenizer([caption_string], return_tensors="pt")
# tokens_positive = [tokens_positive] # This was wrong
tokens_positive = [[v] for v in tokens_positive]
original_caption = caption_string
# print(tokens_positive)
else:
tokenized = self.tokenizer([original_caption], return_tensors="pt")
if custom_entity is None:
tokens_positive = self.run_ner(original_caption)
# print(tokens_positive)
# process positive map
positive_map = create_positive_map(tokenized, tokens_positive)
positive_map_label_to_token = create_positive_map_label_to_token_from_positive_map(positive_map,
plus=self.plus)
self.positive_map_label_to_token = positive_map_label_to_token
tic = timeit.time.perf_counter()
# compute predictions
with HiddenPrints(): # Hide some deprecated notices
predictions = self.model(image_list, captions=[original_caption],
positive_map=positive_map_label_to_token)
predictions = [o.to(self.cpu_device) for o in predictions]
# print("inference time per image: {}".format(timeit.time.perf_counter() - tic))
# always single image is passed at a time
prediction = predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[-2:]
# if self.tensor_inputs:
# else:
# height, width = original_image.shape[:-1]
prediction = prediction.resize((width, height))
if prediction.has_field("mask"):
# if we have masks, paste the masks in the right position
# in the image, as defined by the bounding boxes
masks = prediction.get_field("mask")
# always single image is passed at a time
masks = self.masker([masks], [prediction])[0]
prediction.add_field("mask", masks)
return prediction
@staticmethod
def to_left_right_upper_lower(bboxes):
return [(bbox[1], bbox[3], bbox[0], bbox[2]) for bbox in bboxes]
@staticmethod
def to_xmin_ymin_xmax_ymax(bboxes):
# invert the previous method
return [(bbox[2], bbox[0], bbox[3], bbox[1]) for bbox in bboxes]
@staticmethod
def prepare_image(image):
image = image[[2, 1, 0]] # convert to bgr for opencv-format for glip
return image
@torch.no_grad()
def forward(self, image: torch.Tensor, obj: Union[str, list], return_labels: bool = False,
confidence_threshold=None):
if confidence_threshold is not None:
original_confidence_threshold = self.confidence_threshold
self.confidence_threshold = confidence_threshold
# if isinstance(object, list):
# object = ' . '.join(object) + ' .' # add separation tokens
image = self.prepare_image(image)
# Avoid the resizing creating a huge image in a pathological case
ratio = image.shape[1] / image.shape[2]
ratio = max(ratio, 1 / ratio)
original_min_image_size = self.min_image_size
if ratio > 10:
self.min_image_size = int(original_min_image_size * 10 / ratio)
self.transforms = self.build_transform()
with torch.cuda.device(self.dev):
inference_output = self.inference(image, obj)
bboxes = inference_output.bbox.cpu().numpy().astype(int)
# bboxes = self.to_left_right_upper_lower(bboxes)
if ratio > 10:
self.min_image_size = original_min_image_size
self.transforms = self.build_transform()
bboxes = torch.tensor(bboxes)
# Convert to [left, lower, right, upper] instead of [left, upper, right, lower]
height = image.shape[-2]
# bboxes = torch.stack([bboxes[:, 0], height - bboxes[:, 3], bboxes[:, 2], height - bboxes[:, 1]], dim=1)
if confidence_threshold is not None:
self.confidence_threshold = original_confidence_threshold
if return_labels:
# subtract 1 because it's 1-indexed for some reason
return bboxes, inference_output.get_field("labels").cpu().numpy() - 1
return bboxes
self.glip_demo = OurGLIPDemo(*args, dev=self.dev)
def forward(self, *args, **kwargs):
return self.glip_demo.forward(*args, **kwargs)
class TCLModel(BaseModel):
name = 'tcl'
def __init__(self, gpu_number=0):
from engine.viper.base_models.tcl.tcl_model_pretrain import ALBEF
from engine.viper.base_models.tcl.tcl_vit import interpolate_pos_embed
from engine.viper.base_models.tcl.tcl_tokenization_bert import BertTokenizer
super().__init__(gpu_number)
        config_tcl = {  # renamed so it does not shadow the module-level `config` import used below
'image_res': 384,
'mlm_probability': 0.15,
'embed_dim': 256,
'vision_width': 768,
'bert_config': 'engine/viper/base_models/tcl_config_bert.json',
'temp': 0.07,
'queue_size': 65536,
'momentum': 0.995,
}
text_encoder = 'bert-base-uncased'
checkpoint_path = f'{config.path_pretrained_models}/TCL_4M.pth'
self.tokenizer = BertTokenizer.from_pretrained(text_encoder)
with warnings.catch_warnings(), HiddenPrints("TCL"):
            model = ALBEF(config=config_tcl, text_encoder=text_encoder, tokenizer=self.tokenizer)
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict = checkpoint['model']
# reshape positional embedding to accomodate for image resolution change
pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
m_pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
model.visual_encoder_m)
state_dict['visual_encoder_m.pos_embed'] = m_pos_embed_reshaped
model.load_state_dict(state_dict, strict=False)
self.model = model.to(self.dev)
self.model.eval()
normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
self.test_transform = transforms.Compose([
            transforms.Resize((config_tcl['image_res'], config_tcl['image_res']), interpolation=Image.BICUBIC),
transforms.ToTensor(),
normalize,
])
self.negative_text_features = None
def transform(self, image):
image = transforms.ToPILImage()(image)
image = self.test_transform(image)
return image
def prepare_image(self, image):
image = self.transform(image)
image = image.unsqueeze(0)
image = image.to(self.dev)
return image
@torch.no_grad()
def binary_score(self, images: Union[list[torch.Tensor], torch.Tensor], prompt):
single_image = False
if isinstance(images, torch.Tensor):
single_image = True
images = [images]
images = [self.prepare_image(im) for im in images]
images = torch.cat(images, dim=0)
first_words = ['description', 'caption', 'alt text']
second_words = ['photo', 'image', 'picture']
options = [f'{fw}: {sw} of a' for fw in first_words for sw in second_words]
prompts = [f'{option} {prompt}' for option in options]
text_input = self.tokenizer(prompts, padding='max_length', truncation=True, max_length=30, return_tensors="pt") \
.to(self.dev)
text_output = self.model.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask,
mode='text')
text_feats = text_output # .last_hidden_state
text_atts = text_input.attention_mask
image_feats = self.model.visual_encoder(images)
img_len = image_feats.shape[0]
text_len = text_feats.shape[0]
image_feats = image_feats.unsqueeze(1).repeat(1, text_len, 1, 1).view(-1, *image_feats.shape[-2:])
text_feats = text_feats.unsqueeze(0).repeat(img_len, 1, 1, 1).view(-1, *text_feats.shape[-2:])
text_atts = text_atts.unsqueeze(0).repeat(img_len, 1, 1).view(-1, *text_atts.shape[-1:])
image_feats_att = torch.ones(image_feats.size()[:-1], dtype=torch.long).to(self.dev)
output = self.model.text_encoder(encoder_embeds=text_feats, attention_mask=text_atts,
encoder_hidden_states=image_feats, encoder_attention_mask=image_feats_att,
return_dict=True, mode='fusion')
scores = self.model.itm_head(output[:, 0, :])[:, 1]
scores = scores.view(img_len, text_len)
score = scores.sigmoid().max(-1)[0]
if single_image:
score = score.item()
return score
@torch.no_grad()
def classify(self, image, texts, return_index=True):
if isinstance(image, list):
assert len(image) == len(texts)
image = [self.transform(x).unsqueeze(0) for x in image]
image_tcl = torch.cat(image, dim=0).to(self.dev)
else:
image_tcl = self.prepare_image(image)
text_input = self.tokenizer(texts, padding='max_length', truncation=True, max_length=30, return_tensors="pt") \
.to(self.dev)
text_output = self.model.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask,
mode='text')
text_feats = text_output # .last_hidden_state
text_embeds = F.normalize(self.model.text_proj(text_feats[:, 0, :]))
text_atts = text_input.attention_mask
image_feats = self.model.visual_encoder(image_tcl)
image_embeds = self.model.vision_proj(image_feats[:, 0, :])
image_embeds = F.normalize(image_embeds, dim=-1)
# In the original code, this is only used to select the topk pairs, to not compute ITM head on all pairs.
# But other than that, not used
sims_matrix = image_embeds @ text_embeds.t()
sims_matrix_t = sims_matrix.t()
# Image-Text Matching (ITM): Binary classifier for every image-text pair
# Only one direction, because we do not filter bet t2i, i2t, and do all pairs
image_feats_att = torch.ones(image_feats.size()[:-1], dtype=torch.long).to(self.dev)
output = self.model.text_encoder(encoder_embeds=text_feats, attention_mask=text_atts,
encoder_hidden_states=image_feats, encoder_attention_mask=image_feats_att,
return_dict=True, mode='fusion')
score_matrix = self.model.itm_head(output[:, 0, :])[:, 1]
if not return_index:
return score_matrix
else:
return torch.argmax(score_matrix).item()
def forward(self, image, texts, task='classify', return_index=True):
if task == 'classify':
best_text = self.classify(image, texts, return_index=return_index)
out = best_text
else: # task == 'score': # binary_score
score = self.binary_score(image, texts)
out = score
if isinstance(out, torch.Tensor):
out = out.cpu()
return out
@cache.cache(ignore=['result'])
def gpt3_cache_aux(fn_name, prompts, temperature, n_votes, result):
"""
This is a trick to manually cache results from GPT-3. We want to do it manually because the queries to GPT-3 are
batched, and caching doesn't make sense for batches. With this we can separate individual samples in the batch
"""
return result
class GPT3Model(BaseModel):
name = 'gpt3'
to_batch = False
requires_gpu = False
def __init__(self, gpu_number=0):
super().__init__(gpu_number=gpu_number)
with open(config.gpt3.qa_prompt) as f:
self.qa_prompt = f.read().strip()
self.temperature = config.gpt3.temperature
self.n_votes = config.gpt3.n_votes
self.model = config.gpt3.model
# initial cleaning for reference QA results
@staticmethod
def process_answer(answer):
answer = answer.lstrip() # remove leading spaces (our addition)
answer = answer.replace('.', '').replace(',', '').lower()
to_be_removed = {'a', 'an', 'the', 'to', ''}
answer_list = answer.split(' ')
answer_list = [item for item in answer_list if item not in to_be_removed]
return ' '.join(answer_list)
@staticmethod
def get_union(lists):
return list(set(chain.from_iterable(lists)))
@staticmethod
def most_frequent(answers):
answer_counts = Counter(answers)
return answer_counts.most_common(1)[0][0]
def get_qa(self, prompts, prompt_base: str=None) -> list[str]:
if prompt_base is None:
prompt_base = self.qa_prompt
prompts_total = []
for p in prompts:
question = p
prompts_total.append(prompt_base.format(question))
response = self.get_qa_fn(prompts_total)
if self.n_votes > 1:
response_ = []
for i in range(len(prompts)):
if self.model == 'chatgpt':
resp_i = [r['message']['content']
for r in response['choices'][i * self.n_votes:(i + 1) * self.n_votes]]
else:
resp_i = [r['text'] for r in response['choices'][i * self.n_votes:(i + 1) * self.n_votes]]
response_.append(self.most_frequent(resp_i))
response = response_
else:
if self.model == 'chatgpt':
response = [r['message']['content'] for r in response['choices']]
else:
response = [self.process_answer(r["text"]) for r in response['choices']]
return response
def get_qa_fn(self, prompt):
response = self.query_gpt3(prompt, model=self.model, max_tokens=5, logprobs=1, stream=False,
stop=["\n", "<|endoftext|>"])
return response
def get_general(self, prompts) -> list[str]:
if self.model == "chatgpt":
raise NotImplementedError
response = self.query_gpt3(prompts, model=self.model, max_tokens=256, top_p=1, frequency_penalty=0,
presence_penalty=0)
response = [r["text"] for r in response['choices']]
return response
def query_gpt3(self, prompt, model="text-davinci-003", max_tokens=16, logprobs=None, stream=False,
stop=None, top_p=1, frequency_penalty=0, presence_penalty=0):
if model == "chatgpt":
messages = [{"role": "user", "content": p} for p in prompt]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=max_tokens,
temperature=self.temperature,
)
else:
response = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=max_tokens,
logprobs=logprobs,
temperature=self.temperature,
stream=stream,
stop=stop,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
n=self.n_votes,
)
return response
def forward(self, prompt, process_name):
if not self.to_batch:
prompt = [prompt]
if process_name == 'gpt3_qa':
# if items in prompt are tuples, then we assume it is a question and context
if isinstance(prompt[0], tuple) or isinstance(prompt[0], list):
prompt = [question.format(context) for question, context in prompt]
to_compute = None
results = []
# Check if in cache
if config.use_cache:
for p in prompt:
# This is not ideal, because if not found, later it will have to re-hash the arguments.
# But I could not find a better way to do it.
result = gpt3_cache_aux(process_name, p, self.temperature, self.n_votes, None)
results.append(result) # If in cache, will be actual result, otherwise None
to_compute = [i for i, r in enumerate(results) if r is None]
prompt = [prompt[i] for i in to_compute]
if len(prompt) > 0:
if process_name == 'gpt3_qa':
response = self.get_qa(prompt)
else: # 'gpt3_general', general prompt, has to be given all of it
response = self.get_general(prompt)
else:
response = [] # All previously cached
if config.use_cache:
for p, r in zip(prompt, response):
# "call" forces the overwrite of the cache
gpt3_cache_aux.call(process_name, p, self.temperature, self.n_votes, r)
for i, idx in enumerate(to_compute):
results[idx] = response[i]
else:
results = response
if not self.to_batch:
results = results[0]
return results
@classmethod
def list_processes(cls):
return ['gpt3_' + n for n in ['qa', 'general']]
# @cache.cache
@backoff.on_exception(backoff.expo, Exception, max_tries=10)
def codex_helper(extended_prompt):
assert 0 <= config.codex.temperature <= 1
assert 1 <= config.codex.best_of <= 20
if config.codex.model in ("gpt-4", "gpt-3.5-turbo"):
if not isinstance(extended_prompt, list):
extended_prompt = [extended_prompt]
responses = [openai.ChatCompletion.create(
model=config.codex.model,
messages=[
# {"role": "system", "content": "You are a helpful assistant."},
{"role": "system", "content": "Only answer with a function starting def execute_command."},
{"role": "user", "content": prompt}
],
temperature=config.codex.temperature,
max_tokens=config.codex.max_tokens,
top_p = 1.,
frequency_penalty=0,
presence_penalty=0,
# best_of=config.codex.best_of,
stop=["\n\n"],
)
for prompt in extended_prompt]
resp = [r['choices'][0]['message']['content'].replace("execute_command(image)", "execute_command(image, my_fig, time_wait_between_lines, syntax)") for r in responses]
# if len(resp) == 1:
# resp = resp[0]
else:
warnings.warn('OpenAI Codex is deprecated. Please use GPT-4 or GPT-3.5-turbo.')
response = openai.Completion.create(
model="code-davinci-002",
temperature=config.codex.temperature,
prompt=extended_prompt,
max_tokens=config.codex.max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
best_of=config.codex.best_of,
stop=["\n\n"],
)
if isinstance(extended_prompt, list):
resp = [r['text'] for r in response['choices']]
else:
resp = response['choices'][0]['text']
return resp
class CodexModel(BaseModel):
name = 'codex'
requires_gpu = False
max_batch_size = 5
# Not batched, but every call will probably be a batch (coming from the same process)
def __init__(self, gpu_number=0):
super().__init__(gpu_number=0)
with open(config.codex.prompt) as f:
self.base_prompt = f.read().strip()
self.fixed_code = None
if config.use_fixed_code:
with open(config.fixed_code_file) as f:
self.fixed_code = f.read()
def forward(self, prompt, input_type='image', prompt_file=None, base_prompt=None):
if config.use_fixed_code: # Use the same program for every sample, like in socratic models
return [self.fixed_code] * len(prompt) if isinstance(prompt, list) else self.fixed_code
if prompt_file is not None and base_prompt is None: # base_prompt takes priority
with open(prompt_file) as f:
base_prompt = f.read().strip()
elif base_prompt is None:
base_prompt = self.base_prompt
if isinstance(prompt, list):
extended_prompt = [base_prompt.replace("INSERT_QUERY_HERE", p).replace('INSERT_TYPE_HERE', input_type)
for p in prompt]
elif isinstance(prompt, str):
extended_prompt = [base_prompt.replace("INSERT_QUERY_HERE", prompt).
replace('INSERT_TYPE_HERE', input_type)]
else:
raise TypeError("prompt must be a string or a list of strings")
result = self.forward_(extended_prompt)
if not isinstance(prompt, list):
result = result[0]
return result
def forward_(self, extended_prompt):
if len(extended_prompt) > self.max_batch_size:
response = []
for i in range(0, len(extended_prompt), self.max_batch_size):
response += self.forward_(extended_prompt[i:i + self.max_batch_size])
try:
response = codex_helper(extended_prompt)
except openai.error.RateLimitError as e:
print("Retrying Codex, splitting batch")
if len(extended_prompt) == 1:
warnings.warn("This is taking too long, maybe OpenAI is down? (status.openai.com/)")
# Will only be here after the number of retries in the backoff decorator.
# It probably means a single batch takes up the entire rate limit.
sub_batch_1 = extended_prompt[:len(extended_prompt) // 2]
sub_batch_2 = extended_prompt[len(extended_prompt) // 2:]
if len(sub_batch_1) > 0:
response_1 = self.forward_(sub_batch_1)
else:
response_1 = []
if len(sub_batch_2) > 0:
response_2 = self.forward_(sub_batch_2)
else:
response_2 = []
response = response_1 + response_2
except Exception as e:
# Some other error like an internal OpenAI error
print("Retrying Codex")
print(e)
response = self.forward_(extended_prompt)
return response
class BLIPModel(BaseModel):
name = 'blip'
to_batch = True
max_batch_size = 32
seconds_collect_data = 0.2 # The queue has additionally the time it is executing the previous forward pass
def __init__(self, gpu_number=0, half_precision=config.blip_half_precision,
blip_v2_model_type=config.blip_v2_model_type):
super().__init__(gpu_number)
# from lavis.models import load_model_and_preprocess
from transformers import BlipProcessor, BlipForConditionalGeneration, Blip2Processor, \
Blip2ForConditionalGeneration
# https://huggingface.co/models?sort=downloads&search=Salesforce%2Fblip2-
assert blip_v2_model_type in ['blip2-flan-t5-xxl', 'blip2-flan-t5-xl', 'blip2-opt-2.7b', 'blip2-opt-6.7b',
'blip2-opt-2.7b-coco', 'blip2-flan-t5-xl-coco', 'blip2-opt-6.7b-coco']
with warnings.catch_warnings(), HiddenPrints("BLIP"), torch.cuda.device(self.dev):
max_memory = {gpu_number: torch.cuda.mem_get_info(self.dev)[0]}
self.processor = Blip2Processor.from_pretrained(f"Salesforce/{blip_v2_model_type}")
# Device_map must be sequential for manual GPU selection
try:
self.model = Blip2ForConditionalGeneration.from_pretrained(
f"Salesforce/{blip_v2_model_type}", load_in_8bit=half_precision,
torch_dtype=torch.float16 if half_precision else "auto",
device_map="sequential", max_memory=max_memory
)
except Exception as e:
# Clarify error message. The problem is that it tries to load part of the model to disk.
if "had weights offloaded to the disk" in e.args[0]:
extra_text = ' You may want to consider setting half_precision to True.' if half_precision else ''
raise MemoryError(f"Not enough GPU memory in GPU {self.dev} to load the model.{extra_text}")
else:
raise e
self.qa_prompt = "Question: {} Short answer:"
self.qa_test_prompt = "Question: {} Short answer:"
self.caption_prompt = "a photo of"
self.half_precision = half_precision
self.max_words = 50
@torch.no_grad()
def caption(self, image, prompt=None):
inputs = self.processor(images=image, text=prompt, return_tensors="pt").to(self.dev, torch.float16)
generated_ids = self.model.generate(**inputs, length_penalty=1., num_beams=5, max_length=30, min_length=1,
do_sample=False, top_p=0.9, repetition_penalty=1.0,
num_return_sequences=1, temperature=1)
generated_text = [cap.strip() for cap in
self.processor.batch_decode(generated_ids, skip_special_tokens=True)]
return generated_text
def pre_question(self, question):
# from LAVIS blip_processors
question = re.sub(
r"([.!\"()*#:;~])",
"",
question.lower(),
)
question = question.rstrip(" ")
# truncate question
question_words = question.split(" ")
if len(question_words) > self.max_words:
question = " ".join(question_words[: self.max_words])
return question
# @torch.no_grad()
# def qa(self, image, question):
# inputs = self.processor(images=image, text=question, return_tensors="pt").to(self.dev)
# if self.half_precision:
# inputs['pixel_values'] = inputs['pixel_values'].half()
# generated_ids = self.model.generate(**inputs, length_penalty=-1, num_beams=5)
# generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
# return generated_text
@torch.no_grad()
def qa(self, image, question):
inputs = self.processor(images=image, text=question, return_tensors="pt", padding="longest").to(self.dev)
if self.half_precision:
inputs['pixel_values'] = inputs['pixel_values'].half()
generated_ids = self.model.generate(**inputs, length_penalty=-1, num_beams=5, max_length=10, min_length=1,
do_sample=False, top_p=0.9, repetition_penalty=1.0,
num_return_sequences=1, temperature=1)
generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
return generated_text
@torch.no_grad()
def qa_long(self, image, question):
inputs = self.processor(images=image, text=question, return_tensors="pt", padding="longest").to(self.dev)
if self.half_precision:
inputs['pixel_values'] = inputs['pixel_values'].half()
generated_ids = self.model.generate(**inputs, length_penalty=1, num_beams=5, max_length=256, min_length=1,
do_sample=False, top_p=0.9, repetition_penalty=1,
num_return_sequences=1, temperature=1)
generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
return generated_text
@torch.no_grad()
def qa_test(self, image, question):
# import pdb; pdb.set_trace()
inputs = self.processor(images=image, text=question, return_tensors="pt").to(self.dev)
if self.half_precision:
inputs['pixel_values'] = inputs['pixel_values'].half()
# generated_ids = self.model.generate(**inputs, length_penalty=-1, num_beams=5)
generated_ids = self.model.generate(**inputs)
generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
return generated_text
def forward(self, image, question=None, task='caption'):
if not self.to_batch:
image, question, task = [image], [question], [task]
if len(image) > 0 and 'float' in str(image[0].dtype) and image[0].max() <= 1:
# import pdb; pdb.set_trace()
image = [im * 255 for im in image]
# Separate into qa and caption batches.
prompts_qa = [self.qa_prompt.format(self.pre_question(q)) for q, t in zip(question, task) if t == 'qa']
images_qa = [im for i, im in enumerate(image) if task[i] == 'qa']
prompts_qa_long = [self.qa_prompt.format(self.pre_question(q)) for q, t in zip(question, task) if t == 'qa_long']
images_qa_long = [im for i, im in enumerate(image) if task[i] == 'qa_long']
prompts_qa_test = [self.qa_test_prompt.format(self.pre_question(q)) for q, t in zip(question, task) if t == 'qa_test']
images_qa_test = [im for i, im in enumerate(image) if task[i] == 'qa_test']
images_caption = [im for i, im in enumerate(image) if task[i] == 'caption']
with torch.cuda.device(self.dev):
response_qa = self.qa(images_qa, prompts_qa) if len(images_qa) > 0 else []
response_qa_long = self.qa_long(images_qa_long, prompts_qa_long) if len(images_qa_long) > 0 else []
response_qa_test = self.qa_test(images_qa_test, prompts_qa_test) if len(images_qa_test) > 0 else []
response_caption = self.caption(images_caption) if len(images_caption) > 0 else []
response = []
for t in task:
if t == 'qa':
response.append(response_qa.pop(0))
elif t == 'qa_long':
response.append(response_qa_long.pop(0))
elif t == 'qa_test':
response.append(response_qa_test.pop(0))
else:
response.append(response_caption.pop(0))
if not self.to_batch:
response = response[0]
return response
class SaliencyModel(BaseModel):
name = 'saliency'
def __init__(self, gpu_number=0,
path_checkpoint=f'{config.path_pretrained_models}/saliency_inspyrenet_plus_ultra'):
from engine.viper.base_models.inspyrenet.saliency_transforms import get_transform
from engine.viper.base_models.inspyrenet.InSPyReNet import InSPyReNet
from engine.viper.base_models.inspyrenet.backbones.SwinTransformer import SwinB
# These parameters are for the Plus Ultra LR model
super().__init__(gpu_number)
depth = 64
pretrained = True
base_size = [384, 384]
kwargs = {'name': 'InSPyReNet_SwinB', 'threshold': 512}
with HiddenPrints("Saliency"):
model = InSPyReNet(SwinB(pretrained=pretrained, path_pretrained_models=config.path_pretrained_models),
[128, 128, 256, 512, 1024], depth, base_size, **kwargs)
model.load_state_dict(torch.load(os.path.join(path_checkpoint, 'latest.pth'),
map_location=torch.device('cpu')), strict=True)
model = model.to(self.dev)
model.eval()
self.model = model
self.transform_pil = transforms.ToPILImage()
self.transform = get_transform({
'static_resize': {'size': [384, 384]},
'dynamic_resize': {'L': 1280},
'tonumpy': None,
'normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]},
'totensor': None
})
@torch.no_grad()
def forward(self, image):
image_t = self.transform({'image': self.transform_pil(image)})
image_t['image_resized'] = image_t['image_resized'].unsqueeze(0).to(self.dev)
image_t['image'] = image_t['image'].unsqueeze(0).to(self.dev)
pred = self.model(image_t)['pred']
pred_resized = F.interpolate(pred, image.shape[1:], mode='bilinear', align_corners=True)[0, 0]
mask_foreground = pred_resized < 0.5
image_masked = image.clone()
image_masked[:, mask_foreground] = 0
return image_masked
class XVLMModel(BaseModel):
name = 'xvlm'
def __init__(self, gpu_number=0,
path_checkpoint=f'{config.path_pretrained_models}/xvlm/retrieval_mscoco_checkpoint_9.pth'):
from engine.viper.base_models.xvlm.xvlm import XVLMBase
from transformers import BertTokenizer
super().__init__(gpu_number)
image_res = 384
self.max_words = 30
config_xvlm = {
'image_res': image_res,
'patch_size': 32,
'text_encoder': 'bert-base-uncased',
'block_num': 9,
'max_tokens': 40,
'embed_dim': 256,
}
vision_config = {
'vision_width': 1024,
'image_res': 384,
'window_size': 12,
'embed_dim': 128,
'depths': [2, 2, 18, 2],
'num_heads': [4, 8, 16, 32]
}
with warnings.catch_warnings(), HiddenPrints("XVLM"):
model = XVLMBase(config_xvlm, use_contrastive_loss=True, vision_config=vision_config)
checkpoint = torch.load(path_checkpoint, map_location='cpu')
state_dict = checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint
msg = model.load_state_dict(state_dict, strict=False)
if len(msg.missing_keys) > 0:
print('XVLM Missing keys: ', msg.missing_keys)
model = model.to(self.dev)
model.eval()
self.model = model
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
self.transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((image_res, image_res), interpolation=Image.BICUBIC),
transforms.ToTensor(),
normalize,
])
with open('engine/viper/useful_lists/random_negatives.txt') as f:
self.negative_categories = [x.strip() for x in f.read().split()]
@staticmethod
def pre_caption(caption, max_words):
caption = re.sub(
r"([,.'!?\"()*#:;~])",
'',
caption.lower(),
).replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
caption = re.sub(
r"\s{2,}",
' ',
caption,
)
caption = caption.rstrip('\n')
caption = caption.strip(' ')
# truncate caption
caption_words = caption.split(' ')
if len(caption_words) > max_words:
caption = ' '.join(caption_words[:max_words])
if not len(caption):
raise ValueError("pre_caption yields invalid text")
return caption
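    # Illustrative example (added): pre_caption("A man, riding a horse!", 30) returns
    # "a man riding a horse" -- lowercased, listed punctuation dropped, '-' and '/' replaced
    # by spaces, repeated whitespace collapsed, and captions longer than max_words truncated.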
@torch.no_grad()
def score(self, images, texts):
if isinstance(texts, str):
texts = [texts]
if not isinstance(images, list):
images = [images]
images = [self.transform(image) for image in images]
images = torch.stack(images, dim=0).to(self.dev)
texts = [self.pre_caption(text, self.max_words) for text in texts]
text_input = self.tokenizer(texts, padding='longest', return_tensors="pt").to(self.dev)
image_embeds, image_atts = self.model.get_vision_embeds(images)
text_ids, text_atts = text_input.input_ids, text_input.attention_mask
text_embeds = self.model.get_text_embeds(text_ids, text_atts)
image_feat, text_feat = self.model.get_features(image_embeds, text_embeds)
logits = image_feat @ text_feat.t()
return logits
@torch.no_grad()
def binary_score(self, image, text, negative_categories):
# Compare with a pre-defined set of negatives
texts = [text] + negative_categories
sim = 100 * self.score(image, texts)[0]
res = F.softmax(torch.cat((sim[0].broadcast_to(1, sim.shape[0] - 1),
sim[1:].unsqueeze(0)), dim=0), dim=0)[0].mean()
return res
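    # Added note: the expression above broadcasts the target similarity against every negative,
    # softmaxes each (target, negative) pair along dim 0, and averages the target's row -- i.e.
    # the returned score is the mean probability that `text` outscores a negative category.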
def forward(self, image, text, task='score', negative_categories=None):
if task == 'score':
score = self.score(image, text)
else: # binary
score = self.binary_score(image, text, negative_categories=negative_categories)
return score.cpu()
| [
"INSERT_TYPE_HERE",
"Only answer with a function starting def execute_command.",
"PLACEHOLDER PLACEHOLDER",
"PLACEHOLDERPLACEHOLDER",
"INSERT_QUERY_HERE",
"[]",
"photo of "
] |
2024-01-10 | ouor/ChatWaifu-API | server.py | from scipy.io.wavfile import write
from text import text_to_sequence
from models import SynthesizerTrn
import utils
import commons
import sys
import re
from pydub import AudioSegment
import torch
from torch import no_grad, LongTensor
import logging
import argparse
import requests
import json
import os
import openai
import socket
from navertts import NaverTTS
import datetime
import glob
class SocketServer:
def __init__(self, host, port):
self.host = host
self.port = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def start(self):
self.socket.bind((self.host, self.port))
self.socket.listen(5)
self.client, self.addr = self.socket.accept()
def receive(self):
total_data = b""
while True:
data = self.client.recv(1024)
total_data += data
if len(data) < 1024:
break
return total_data.decode()
def send(self, data):
self.client.send(data.encode())
def stop(self):
self.socket.close()
class vits():
def __init__(self, model, config):
logging.getLogger('numba').setLevel(logging.WARNING)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
hps_ms = utils.get_hparams_from_file(config)
n_speakers = hps_ms.data.n_speakers if 'n_speakers' in hps_ms.data.keys() else 0
self.n_symbols = len(hps_ms.symbols) if 'symbols' in hps_ms.keys() else 0
self.net_g_ms = SynthesizerTrn(
self.n_symbols,
hps_ms.data.filter_length // 2 + 1,
hps_ms.train.segment_size // hps_ms.data.hop_length,
n_speakers=n_speakers,
**hps_ms.model).to(self.device)
_ = self.net_g_ms.eval()
self.hps_ms = hps_ms
utils.load_checkpoint(model, self.net_g_ms)
def get_text(self, text, hps, cleaned=False):
if cleaned:
text_norm = text_to_sequence(text, hps.symbols, [])
else:
text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
if hps.data.add_blank:
text_norm = commons.intersperse(text_norm, 0)
text_norm = LongTensor(text_norm)
return text_norm
def get_label_value(self, text, label, default, warning_name='value'):
value = re.search(rf'\[{label}=(.+?)\]', text)
if value:
try:
text = re.sub(rf'\[{label}=(.+?)\]', '', text, 1)
value = float(value.group(1))
except:
print(f'Invalid {warning_name}!')
sys.exit(1)
else:
value = default
return value, text
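    # Illustrative example (added): with text "[LENGTH=1.2]hello" and label 'LENGTH', this
    # helper strips the tag and returns (1.2, "hello"); if no [LENGTH=...] tag is present,
    # the supplied default is returned together with the unchanged text.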
def get_label(self, text, label):
if f'[{label}]' in text:
return True, text.replace(f'[{label}]', '')
else:
return False, text
def generateSound(self, inputString, id):
if self.n_symbols != 0:
text = inputString
length_scale, text = self.get_label_value(
text, 'LENGTH', 1, 'length scale')
noise_scale, text = self.get_label_value(
text, 'NOISE', 0.667, 'noise scale')
noise_scale_w, text = self.get_label_value(
text, 'NOISEW', 0.8, 'deviation of noise')
cleaned, text = self.get_label(text, 'CLEANED')
stn_tst = self.get_text(text, self.hps_ms, cleaned=cleaned)
speaker_id = id
out_path = "./output.wav"
with no_grad():
x_tst = stn_tst.unsqueeze(0).to(self.device)
x_tst_lengths = LongTensor([stn_tst.size(0)]).to(self.device)
sid = LongTensor([speaker_id]).to(self.device)
audio = self.net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale,
noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0, 0].data.to(self.device).cpu().float().numpy()
write(out_path, self.hps_ms.data.sampling_rate, audio)
print('Successfully saved!')
# torch.cuda.empty_cache()
return out_path
get_dir = lambda x: os.path.split(os.path.realpath(x))[0]
def download_file(url, save_dir):
local_filename = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(os.path.join(save_dir, local_filename), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return local_filename
class openai_session():
def __init__(self, api_key):
self.api_key = api_key
openai.api_key = api_key
self.messages = []
self.model = "gpt-3.5-turbo"
self.currunt_log = f"userfile/log/{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.json"
if not os.path.exists("userfile/log"):
os.makedirs("userfile/log")
def save(self):
with open(self.currunt_log, 'w', encoding='utf-8') as f:
data = json.dumps(self.messages, ensure_ascii=False, indent=4)
f.write(data)
def set_role(self, role):
prefix = "이제부터 당신은 다음과 같은 역할을 맡아 대화를 진행합니다: \n"
self.messages.append({"role": "system", "content": prefix + role})
def set_greeting(self, greeting):
self.messages.append({"role": "assistant", "content": greeting})
def send_message(self, message):
try:
self.messages.append({"role": "user", "content": message})
res = openai.ChatCompletion.create(
model=self.model,
messages=self.messages if len(self.messages) <= 30 else [self.messages[0]] + self.messages[-9:],
)
answer = res['choices'][0]['message']['content']
self.messages.append({"role": "assistant", "content": answer})
self.save()
except Exception as e:
answer = "앗.. 뭐라고 하셨었죠? 다시 한번 말씀해 주실 수 있나요?"
print("에러 발생: " + str(e))
return answer
class navertts():
def generateSound(self, inputString, id):
output_path = "./output.mp3"
tts = NaverTTS(inputString, lang='ko')
tts.save(output_path)
print('Successfully saved!')
return output_path
def main():
server = SocketServer("127.0.0.1", 9000)
print("렌파이 클라이언트와 연결 대기중...")
server.start()
print("렌파이 클라이언트와 연결되었습니다.")
    tts_service = int(server.receive()) # 0: local VITS, 1: Naver TTS
if tts_service == 0:
korean_model_path = r"userfile\tts\model.pth"
korean_config_path = r"userfile\tts\config.json"
if not os.path.isfile(korean_model_path):
os.makedirs(get_dir(korean_model_path), exist_ok=True)
print("TTS 모델 체크포인트 파일이 없습니다.해당 파일을 다운로드 받습니다.")
url = 'https://huggingface.co/spaces/skytnt/moe-tts/resolve/main/saved_model/6/model.pth'
download_file(url, get_dir(korean_model_path))
print("TTS 모델 체크포인트 파일 다운로드 완료")
if not os.path.isfile(korean_config_path):
os.makedirs(get_dir(korean_config_path), exist_ok=True)
print("TTS 모델 설정 파일이 없습니다.해당 파일을 다운로드 받습니다.")
url = 'https://huggingface.co/spaces/skytnt/moe-tts/resolve/main/saved_model/6/config.json'
download_file(url, get_dir(korean_config_path))
print("TTS 모델 설정 파일 다운로드 완료")
tts = vits(korean_model_path, korean_config_path)
config = json.load(open(korean_config_path, 'r'))
spk_list = config['speakers']
speaker = int(server.receive())
print("선택된 음성: " + spk_list[speaker])
elif tts_service == 1:
tts = navertts()
speaker = 0
print("렌파이에서 API KEY를 입력해주세요.")
print("API KEY는 https://platform.openai.com/account/api-keys 에서 발급할 수 있습니다.")
session_token = server.receive()
if(session_token):
print(f"API KEY: ...{session_token[-8:]}")
oai = openai_session(session_token)
setting = server.receive()
oai.set_role(setting)
print("배경 설정: "+ setting)
greeting = server.receive()
oai.set_greeting(greeting)
print("인사말: "+ greeting)
while True:
question = server.receive()
print("Question Received: " + question)
answer = oai.send_message(question)
print("ChatGPT:", answer)
tts_audio_path = tts.generateSound(answer, speaker)
# convert wav to ogg
src = tts_audio_path
dst = "./ChatWithGPT/game/audio/test.ogg"
sound = getattr(AudioSegment, f'from_{src.split(".")[-1]}')(src)
sound.export(dst, format="ogg")
# send response to UI
server.send(answer)
# finish playing audio
print(server.receive())
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("종료합니다.")
sys.exit(0)
except ConnectionResetError:
print("클라이언트와의 연결이 끊겼습니다.")
sys.exit(0) | [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | Curiosity007/llama_index | llama_index~indices~service_context.py | from dataclasses import dataclass
from typing import Optional
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser
from llama_index.node_parser.simple import SimpleNodeParser
def _get_default_node_parser(
chunk_size_limit: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
callback_manager = callback_manager or CallbackManager([])
if chunk_size_limit is None:
token_text_splitter = TokenTextSplitter(
callback_manager=callback_manager
) # use default chunk size
else:
token_text_splitter = TokenTextSplitter(
chunk_size=chunk_size_limit, callback_manager=callback_manager
)
return SimpleNodeParser(
text_splitter=token_text_splitter, callback_manager=callback_manager
)
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: LLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
- chunk_size_limit: chunk size limit
"""
llm_predictor: LLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
node_parser: NodeParser
llama_logger: LlamaLogger
callback_manager: CallbackManager
chunk_size_limit: Optional[int] = None
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[LLMPredictor] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
Args:
llm_predictor (Optional[LLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size_limit (Optional[int]): chunk_size_limit
"""
callback_manager = callback_manager or CallbackManager([])
llm_predictor = llm_predictor or LLMPredictor()
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or OpenAIEmbedding()
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or PromptHelper.from_llm_predictor(
llm_predictor, chunk_size_limit=chunk_size_limit
)
node_parser = node_parser or _get_default_node_parser(
chunk_size_limit=chunk_size_limit, callback_manager=callback_manager
)
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
chunk_size_limit=chunk_size_limit,
)
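# Usage sketch (added for illustration; not part of the original module). It assumes a
# configured OpenAI key, since the defaults below construct an LLMPredictor and an
# OpenAIEmbedding; every field not passed to from_defaults falls back to its default.
if __name__ == "__main__":
    ctx = ServiceContext.from_defaults(chunk_size_limit=512)
    # The default node parser wraps a TokenTextSplitter sized by the same chunk_size_limit.
    print(type(ctx.llm_predictor).__name__, type(ctx.embed_model).__name__, type(ctx.node_parser).__name__)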
| [] |
2024-01-10 | torstenvolk/rfac | home.py | import praw
from datetime import datetime
import pandas as pd
from openai import OpenAI
import os
import streamlit as st
st.set_page_config(layout="wide")
st.title('Reddit Posts and Comments')
# Load API keys and Reddit credentials from environment variables (the st.secrets variant is kept below, commented out)
openai_api_key = os.environ.get('openai_api_key')
client_id = os.environ.get('client_id')
client_secret = os.environ.get('client_secret')
username = os.environ.get('username')
password = os.environ.get('password')
user_agent = os.environ.get('user_agent')
# Instantiate the OpenAI client used by the summarization helpers further down
openai_client = OpenAI(api_key=openai_api_key)
#openai.api_key = st.secrets["openai"]["openai_api_key"]
#client_id = st.secrets["reddit"]["client_id"]
#client_secret = st.secrets["reddit"]["client_secret"]
#username = st.secrets["reddit"]["username"]
#password = st.secrets["reddit"]["password"]
#user_agent = st.secrets["reddit"]["user_agent"]
# Initialize the Reddit instance
reddit = praw.Reddit(client_id=client_id,
client_secret=client_secret,
username=username,
password=password,
user_agent=user_agent)
def refresh_data():
subreddits = ["kubernetes", "devops"] # Example subreddits
all_data = []
for subreddit_name in subreddits:
        flattened_submissions = get_flattened_submissions_with_search([subreddit_name])
all_data.extend(flattened_submissions)
st.session_state['loaded_data'] = pd.DataFrame(all_data)
@st.cache_data
def get_submissions(subreddit_name):
subreddit = reddit.subreddit(subreddit_name)
submissions = subreddit.new(limit=20)
submissions_with_comments = []
for submission in submissions:
comments = [comment.body for comment in submission.comments.list()[:10]] # Fetching top 10 comments
submission_with_comments = {
"Subreddit": subreddit_name,
"Title": submission.title,
"Author": str(submission.author),
"Score": submission.score,
"Number of Comments": submission.num_comments,
"Timestamp": datetime.utcfromtimestamp(submission.created_utc).strftime('%Y-%m-%d %H:%M:%S'),
"Comments": comments
}
submissions_with_comments.append(submission_with_comments)
return submissions_with_comments
#@st.cache_data
def get_flattened_submissions_with_search(subreddits, search_term=None, progress_text=None):
all_data = []
max_posts = 20 # Maximum number of posts per subreddit
max_comments_per_post = 10
if not subreddits:
# If no subreddits are provided, use 'all'
subreddits = ['all']
for subreddit_name in subreddits:
if subreddit_name == 'all':
search_results = reddit.subreddit('all').search(search_term, limit=max_posts)
else:
subreddit = reddit.subreddit(subreddit_name)
search_results = subreddit.search(search_term, limit=max_posts) if search_term else subreddit.new(limit=max_posts)
for submission in search_results:
# Add the main post
flattened_data = {
"Timestamp": datetime.utcfromtimestamp(submission.created_utc).strftime('%Y-%m-%d %H:%M:%S'),
"Subreddit": submission.subreddit.display_name, # Actual subreddit of the post
"Title": submission.title,
"Post Text": submission.selftext, # Include the text of the post
"Comments": "Post",
"Author": str(submission.author),
"Score": submission.score,
"Number of Comments": submission.num_comments # Number of comments for the post
}
all_data.append(flattened_data)
# Add comments until max_comments is reached
total_comments = 0
for comment in submission.comments.list():
if total_comments >= max_comments_per_post:
break
if isinstance(comment, praw.models.MoreComments):
continue
total_comments += 1
flattened_data = {
"Timestamp": datetime.utcfromtimestamp(comment.created_utc).strftime('%Y-%m-%d %H:%M:%S'),
"Subreddit": submission.subreddit.display_name, # Actual subreddit of the comment's parent post
"Title": submission.title,
"Post Text": "", # Leave blank for comments
"Comments": comment.body,
"Author": str(comment.author),
"Score": comment.score,
"Number of Comments": "" # Leave blank for comments
}
all_data.append(flattened_data)
if progress_text:
progress_text.text(f"Processed posts in {subreddit_name}...")
if progress_text:
progress_text.empty()
return all_data
# Initialize 'loaded_data' in session state if not present
if 'loaded_data' not in st.session_state:
st.session_state['loaded_data'] = pd.DataFrame()
# Button to submit selections
st.sidebar.title("Subreddit Selection")
selected_subreddits = st.sidebar.multiselect("Choose Subreddits", ["kubernetes", "devops", "python", "datascience", "opentelemetry", "observability", "ebpf"])
search_term = st.sidebar.text_input("Enter a Search Term", "")
# Button to submit selections
# Button to submit selections
if st.sidebar.button("Submit"):
if not selected_subreddits and not search_term:
st.error("Please select at least one subreddit or enter a search term.")
else:
with st.spinner("Fetching and processing data..."):
progress_text = st.empty()
if search_term and not selected_subreddits:
fetched_data = get_flattened_submissions_with_search(["all"], search_term, progress_text)
else:
fetched_data = get_flattened_submissions_with_search(selected_subreddits, search_term, progress_text)
st.session_state['loaded_data'] = pd.DataFrame(fetched_data)
st.success("Data loaded successfully!")
progress_text.empty()
# Display AgGrid with the loaded data
st.header('Subreddit Posts and Comments')
st.dataframe(st.session_state['loaded_data'])
def extract_key_terms(text):
"""
Function to extract key terms from the text using OpenAI.
"""
    response = openai_client.chat.completions.create(
        model="gpt-4-1106-preview",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant."
            },
            {
                "role": "user",
                "content": f"Extract key terms from this text and display them in a bullet list: {text}"
            }
        ],
        max_tokens=150
    )
    return response.choices[0].message.content.strip().split(', ')
def create_term_frequency_df(summaries):
"""
Function to create a DataFrame with term frequencies for visualization.
"""
all_terms = []
for subreddit, summary in summaries.items():
terms = extract_key_terms(summary)
for term in terms:
all_terms.append({'Term': term, 'Subreddit': subreddit, 'Frequency': 1})
# Create a DataFrame and aggregate the frequencies
df = pd.DataFrame(all_terms)
return df.groupby(['Term', 'Subreddit']).sum().reset_index()
def estimate_token_count(text):
"""
Estimate the number of tokens in a given text.
"""
# Adjusted average token length for English
return len(text) // 4
def truncate_text(text, max_input_tokens):
"""
Truncate the text to fit within the maximum token count for the input.
"""
words = text.split()
truncated_text = ''
for word in words:
if estimate_token_count(truncated_text + word) > max_input_tokens:
break
truncated_text += word + ' '
return truncated_text.strip()
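# Rough illustration (added) of the two helpers above: estimate_token_count("hello world")
# is len("hello world") // 4 == 2, and truncate_text("one two three four", 1) keeps adding
# words only while the running estimate stays within the budget, yielding "one two".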
api_call_count = 0
def summarize_text(text, max_input_tokens, max_completion_tokens=4096):
global api_call_count # Use the global counter
if estimate_token_count(text) > max_input_tokens:
text = truncate_text(text, max_input_tokens)
    response = openai_client.chat.completions.create(
        model="gpt-4-1106-preview",
        messages=[
            {"role": "system", "content": "You are a helpful assistant. You focus on identifying and summarizing key themes within text."},
            {"role": "user", "content": f"Identify and summarize key topic and subtopics in the following information:\n\n{text}. Do not list individual posts but always summarize the bigger picture topics."}
        ],
        max_tokens=max_completion_tokens
    )
    api_call_count += 1  # Increment the counter after each API call
    return response.choices[0].message.content
def get_aggregated_subreddit_data(df):
"""
Aggregate text data from multiple subreddits for summarization.
"""
aggregated_texts = {}
for subreddit in df['Subreddit'].unique():
subreddit_data = df[df['Subreddit'] == subreddit]
unique_titles_comments = subreddit_data[['Title', 'Comments','Post Text']].drop_duplicates()
        aggregated_text = '\n'.join(unique_titles_comments.apply(lambda x: f"Title: {x['Title']}\nComment: {x['Comments']}\nPostText: {x['Post Text']}", axis=1))
aggregated_texts[subreddit] = aggregated_text
return aggregated_texts
# Function to write content to a text file
def export_to_txt(filename, content):
with open(filename, 'w', encoding='utf-8') as file:
file.write(content)
if st.button("Summarize Subreddit Data"):
max_input_tokens = 120000 # Allocate tokens for the input text
max_completion_tokens = 4096 # Allocate tokens for the completion (summary)
aggregated_texts = get_aggregated_subreddit_data(st.session_state['loaded_data'])
summaries = {}
key_terms = {}
# Placeholder for API call count
api_call_count_placeholder = st.empty()
# Directory to store text files
export_dir = 'exported_summaries'
if not os.path.exists(export_dir):
os.makedirs(export_dir)
for subreddit, text in aggregated_texts.items():
# Export the text to a file before summarizing
export_filename = f"{export_dir}/{subreddit}_to_summarize.txt"
export_to_txt(export_filename, text)
st.text(f"Content for {subreddit} exported to {export_filename}")
# Summarize the text
summary = summarize_text(text, max_input_tokens, max_completion_tokens)
summaries[subreddit] = summary
key_terms[subreddit] = extract_key_terms(summary)
# Update the API call count dynamically
api_call_count_placeholder.write(f"API calls made so far: {api_call_count}")
# Store summaries and key terms in session state
st.session_state['summaries'] = summaries
st.session_state['key_terms'] = key_terms
# Final API call count
api_call_count_placeholder.write(f"Total API calls made: {api_call_count}")
# Check if summaries and key terms are in session state and display them
if 'summaries' in st.session_state and 'key_terms' in st.session_state:
for subreddit, summary in st.session_state['summaries'].items():
st.subheader(f"Summary for {subreddit}")
st.text_area(f"{subreddit} Summary", summary, height=500)
st.text_area("Key Terms", ", ".join(st.session_state['key_terms'][subreddit]), height=300)
| [
"You are a helpful assistant. You focus on identifying and summarizing key themes within text.",
"Extract key terms from this text and display them in a bullet list: PLACEHOLDER",
"You are a helpful assistant.",
"Identify and summarize key topic and subtopics in the following information:\n\nPLACEHOLDER. Do not list individual posts but always summarize the bigger picture topics."
] |
2024-01-10 | jroakes/Npath | cluster.py | """Cluster module."""
import openai
import pandas as pd
from bertopic.representation import OpenAI
from InstructorEmbedding import INSTRUCTOR
from bertopic import BERTopic
from bertopic.backend import BaseEmbedder
class CustomEmbedder(BaseEmbedder):
"""Custom Embedder."""
def __init__(
self, embedding_model: str = "hkunlp/instructor-large", instruction: str = ""
):
super().__init__()
if not isinstance(instruction, str) or len(instruction) < 1:
raise ValueError("`instruction` is required.")
self.instruction = instruction
self.embedding_model = INSTRUCTOR(embedding_model)
def embed(self, documents, instruction, verbose=False):
"""Embed a list of documents into vectors."""
instruction_documents = [[self.instruction, d] for d in documents]
embeddings = self.embedding_model.encode(
instruction_documents, show_progress_bar=verbose
)
return embeddings
INSTRUCTION = "Represent the intent of the user journey for users of a website"
PROMPT = """
I have a set of users that have the following page visit journey through our website.
The journeys are in the following format:
<Page Title A> -> <Page Title C> -> <Page Title N>
Here are the journeys:
[DOCUMENTS]
The pages visited have these keyword themes: [KEYWORDS]
Based on the information above, extract a short topic label that indicates the most likely persona (who is looking for this information) and intent of the users in the following format:
topic: <topic label>
"""
def analyze_clusters(
df: pd.DataFrame,
model: str = "gpt-3.5-turbo",
api_key: str = None,
min_topic_size: int = 100,
) -> None:
"""Analyze clusters.
Parameters
----------
df : pd.DataFrame
Input DataFrame with conversion data.
model : str, optional
The model to use for topic modeling, by default "gpt-3.5-turbo".
api_key : str, optional
The OpenAI API key, by default None.
min_topic_size : int, optional
The minimum topic size, by default 100.
Returns
-------
None
"""
# Check that key is present
if api_key is None:
raise ValueError("`api_key` is required.")
# Set key
openai.api_key = api_key
representation_model = OpenAI(
model=model,
delay_in_seconds=5,
exponential_backoff=True,
diversity=0.5,
prompt=PROMPT,
chat=True,
)
embedding_model = CustomEmbedder(instruction=INSTRUCTION)
# This is here if developers want to assign topic back to users
users = df.user_id.tolist()
docs = df.activity_list_text.tolist()
topic_model = BERTopic(
nr_topics="auto",
embedding_model=embedding_model,
representation_model=representation_model,
min_topic_size=min_topic_size,
verbose=True,
)
_, _ = topic_model.fit_transform(docs)
topic_model.get_topic_info()
topic_model.visualize_topics()
return topic_model
# Path: cluster.py
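# Usage sketch (added for illustration; not part of the original file). The column names
# user_id and activity_list_text come from the function body above; the API key and the
# journey strings below are placeholders:
#     journeys = pd.DataFrame({
#         "user_id": [1, 2, 3],
#         "activity_list_text": [
#             "Home -> Pricing -> Signup",
#             "Blog -> Docs -> Contact",
#             "Home -> Careers -> Apply",
#         ],
#     })
#     topic_model = analyze_clusters(journeys, api_key="YOUR_OPENAI_API_KEY", min_topic_size=10)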
| [
"\nI have a set of users that have the following page visit journey through our website.\nThe journeys are in the following format:\n<Page Title A> -> <Page Title C> -> <Page Title N>\n\nHere are the journeys:\n[DOCUMENTS]\n\nThe pages visited have these keyword themes: [KEYWORDS]\n\nBased on the information above, extract a short topic label that indicates the most likely persona (who is looking for this information) and intent of the users in the following format:\ntopic: <topic label>\n"
] |
2024-01-10 | directorBae/news_comment_analyzer | agendium.py | import langchain
import pandas as pd
import numpy as np
import openai
import os
class APIkeyImport:
def __init__(self, Openai_api_key=None, Youtube_api_key=None):
self.OPENAI_API_KEY = Openai_api_key
self.YOUTUBE_API_KEY = Youtube_api_key
def byEnv(self):
self.OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
self.YOUTUBE_API_KEY = os.getenv("YOUTUBE_API_KEY")
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import CommaSeparatedListOutputParser
class langchainModule(APIkeyImport): # modules for extracting search keywords (langchain), summarizing article bodies (langchain), and analyzing the relation between articles and comments (langchain)
def __init__(self):
super().__init__()
self.strict_model = ChatOpenAI(temperature=0, model_name='gpt-4', api_key=self.OPENAI_API_KEY)
self.smooth_model = ChatOpenAI(temperature=0.5, model_name='gpt-4', api_key=self.OPENAI_API_KEY)
self.list_parser = CommaSeparatedListOutputParser()
self.templates = {
"search":
ChatPromptTemplate.from_messages(
[
("system", """You are a keyword generator that generates search keywords from news articles.
You are given a title, description, and content of the news article.
You should generate search keywords that can be used to search for the news article directly and strongly related to the summarization.
"""),
("human", "title: {title}, description: {description}, content: {content}"),
]
),
"summarization":
ChatPromptTemplate.from_messages(
[
("system", """You are a summarizer that summarizes news articles.
You should summarize given news article.
"""),
("human", "title: {title}, description: {description}, content: {content}"),
]
),
"relation":
ChatPromptTemplate.from_messages(
[
("system", """You should analyze the relation between news articles and comments.
You are given a content of a news article and comments.
You should write reports of the relation between the news article and the comments whether it is related or not, and how much it is related.
"""
),
("human", "content: {content}, comments: {comments}"),
]
),
"topic seperation":
ChatPromptTemplate.from_messages(
[
("system", """You should seperate news article of query. Query is kind of a set of a news article which is not seperated.
You are given a title, description, content of a news.
One query can contain several articles. But some case, news can contain only one article. If then, never mind seperating it. Just return the original content of the news as a return type where written downside of this instruction.
Range of a topic is one article. If the content of the news are connected by meaning, you NEVER seperate it by topic.
You should seperate the content in original query by article, with list format consisted of article composing original content.
Some case, trash datas such as advertisement, non-news contents can be included in the news.
If you find trash datas, you should remove it.
ex) [article1, article2, article3]
"""
),
("human", "title: {title}, description: {description}, content: {content}"),
]
),
"report":
ChatPromptTemplate.from_messages(
[
("system", """You should report a overall result of the news article in Korean.
You are given a title, description, content, analysis of the comments, and relation between comments and article of a news.
You should write a report of the news article.
The report can contain the following contents, and your overall analysis would be about the inclination of the news article,
how comments interact with article, and how much the article is related to the comments in fact.
And also you should give an insight of the inclination, aggression of the news comments by given query.
You can write the report in Korean.
You should write the report in markdown format.
Output format: MAKRDOWN
"""
),
("human", "title: {title}, description: {description}, content: {content}, comments analytics: {comments_analytics}, relation: {relation}"),
]
),
}
def search_keyword_generate(self, title, description, content):
messages = self.templates["search"]
self.search_keyword_generate_chain = messages | self.smooth_model
return self.search_keyword_generate_chain, self.search_keyword_generate_chain.invoke({"title": title, "description": description, "content": content})
def summarize_content(self, title, description, content):
        messages = self.templates["summarization"]
self.summarize_content_chain = messages | self.strict_model
return self.summarize_content_chain, self.summarize_content_chain.invoke({"title": title, "description": description, "content": content})
def calculate_relation(self, content, comments):
messages = self.templates["relation"]
self.calculate_relation_chain = messages | self.smooth_model
return self.calculate_relation_chain, self.calculate_relation_chain.invoke({"content": content, "comments": str(comments)})
def topic_seperation(self, title, description, content):
content = content.replace(",", " ")
"""
        TODO: Generation with gpt-4 is slow; try to solve this with a classical (non-LLM)
        approach, or call the OpenAI API directly instead.
"""
messages = self.templates["topic seperation"]
self.topic_seperation_chain = messages | self.strict_model
output = self.topic_seperation_chain.invoke({"title": title, "description": description, "content": content}).content
return self.topic_seperation_chain, self.list_parser.parse(output)
def report(self, title, description, content, comments_analytics):
relation = self.calculate_relation(content, comments_analytics)
messages = self.templates["report"]
self.report_chain = messages | self.smooth_model
return self.report_chain, self.report_chain.invoke({"title": title, "description": description, "content": content, "comments_analytics": comments_analytics, "relation": relation})
import requests
from urllib import parse
from googleapiclient import discovery
from datetime import datetime, timedelta, timezone
from pytube import YouTube
class CollectModule(langchainModule, APIkeyImport):
def __init__(self):
super().__init__()
        self.youtube = discovery.build('youtube', 'v3', developerKey=self.YOUTUBE_API_KEY)
        openai.api_key = self.OPENAI_API_KEY
def search_keyword_generate(self, title, description, content, is_seperate=False):
if is_seperate == False:
return [super().search_keyword_generate(title, description, content)[1]]
else:
list_of_topic = self.topic_seperation(title, description, content)
list_of_keywords = []
for topic in list_of_topic:
list_of_keywords.append(super().search_keyword_generate(title, description, topic)[1])
return list_of_keywords
def topic_seperation(self, title, description, content):
return super().topic_seperation(title, description, content)[1]
    def youtube_search_one_day(self, keyword, dayBefore=0) -> dict: # dayBefore lets us collect posts from n+1 days ago up to n days ago
now = datetime.now()
        dateEnd = (now - timedelta(days=dayBefore)).isoformat() # end of the window (now minus dayBefore days)
        dateStart = (now - timedelta(days=(dayBefore+1))).isoformat() # start of the window, 24 hours earlier
req = self.youtube.search().list(q=keyword, part='snippet',
videoDuration = 'medium',
order='viewCount',
type='video',
                                     regionCode='KR', # restrict the search to Korean videos
videoCategoryId=25,
maxResults=3,
publishedAfter = dateStart+'Z',
publishedBefore = dateEnd+'Z',
)
res = req.execute()
return res
    def youtube_search_multi_day(self, query, length=3) -> dict: # collect n days of news by varying youtube_search_one_day's dayBefore argument
result_dict = {}
for i in range(length):
result = self.youtube_search_one_day(query, i)
result_dict[i] = result
return result_dict
def get_comments_by_id(self, videoID, maxresult) -> list:
req = self.youtube.commentThreads().list(
part='snippet',
videoId = videoID,
maxResults = maxresult
)
res = req.execute()
comment_list = []
for n in range(len(res['items'])):
comment_list.append(res['items'][n]['snippet']['topLevelComment']['snippet']['textOriginal'])
return comment_list
def speech_to_text(self, videoID):
try:
yt = YouTube(f'https://youtu.be/{videoID}')
file_path = yt.streams.filter(only_audio=True).first().download(output_path='/data/audio', filename=f'{videoID}.wav')
audio_file = open(f'/data/audio/{videoID}.wav', 'rb')
transcript = openai.audio.transcriptions.create(model='whisper-1', file=audio_file)
except:
return np.nan
return transcript
def parse_response(self, videores_dict):
result_dict = {}
for i in range(len(videores_dict)):
videores = videores_dict[i]
for n in range(len(videores['items'])):
result_dict[i] = {"title": videores['items'][n]['snippet']['title'],
"content": self.speech_to_text(videores['items'][n]['id']['videoId']),
"publishedAt": videores['items'][n]['snippet']['publishedAt'],
"publisher": videores['items'][n]['snippet']['channelTitle'],
"description": videores['items'][n]['snippet']['description'],
"source": 1,
"comment": self.get_comments_by_id(videores['items'][n]['id']['videoId'], 50),
"code": videores['items'][n]['id']['videoId']}
return result_dict
def youtube_collect(self, keyword_list):
result_dict_list = []
for keyword in keyword_list:
videores_dict = self.youtube_search_multi_day(keyword)
result_dict = self.parse_response(videores_dict)
result_dict_list.append(result_dict)
return result_dict
    #TODO: collect Naver news
def naver_collect(self, keyword_list):
return None
def news_collect(self, title, description, content):
keyword_list = self.search_keyword_generate(title, description, content)
youtube_result = self.youtube_collect(keyword_list)
naver_result = self.naver_collect(keyword_list)
youtube_result_df = pd.DataFrame(youtube_result)
naver_result_df = pd.DataFrame(naver_result)
total_result = pd.concat([youtube_result_df, naver_result_df], axis=1).T
return total_result
class ClassifierSet(APIkeyImport):
def __init__(self):
super().__init__()
from openai import OpenAI
self.client = OpenAI(api_key=self.OPENAI_API_KEY)
self.cate_model_id = "ft:gpt-3.5-turbo-1106:team-honeybread::8LLRyOr7"
self.bias_model_id = "ft:gpt-3.5-turbo-1106:team-honeybread::8KwjwyJF"
self.aggr_model_id = "ft:gpt-3.5-turbo-1106:team-honeybread::8KyLe6t9"
def cate_classifier(self, query):
completion = self.client.chat.completions.create(
model=self.cate_model_id,
messages=[
{"role": "system", "content": """You are a great data classifier conducting on a online comment at news article.
All you have to do is convert the comments in the query into category about the comments."""},
{"role": "user", "content": query},
]
)
return completion.choices[0].message.content
def bias_classifier(self, query):
completion = self.client.chat.completions.create(
model=self.bias_model_id,
messages=[
{"role": "system", "content": """You are a great data classifier conducting on a online comment at news article.
All you have to do is convert the comments in the query into bias about the comments.
편향은 그 댓글이 얼마나 편향되어있는지를 나타내며, -1 혹은 1의 값을 가집니다.
편향성은 흑백논리에 해당되는 "정치", "성별", "세대" 카테고리에만 적용됩니다.
우파를 비하하는 댓글은 -1, 좌파를 비하하는 댓글은 1입니다.
역시나 남성을 비하하는 댓글은 -1, 여성을 비하하는 댓글은 1입니다.
노인 세대를 비하하는 댓글은 -1, 어린이 세대를 비하하는 댓글은 1입니다.
"정치", "성별", "세대" 카테고리에 해당하지 않는 값은 모두 0으로 표현하십시오.
따라서 편향 값은 -1부터 1 사이의 실수로 표현됩니다."""},
{"role": "user", "content": query},
]
)
return completion.choices[0].message.content
def aggr_classifier(self, query):
completion = self.client.chat.completions.create(
model=self.aggr_model_id,
messages=[
{"role": "system", "content": """You are a great data classifier conducting on a online comment at news article.
All you have to do is convert the comments in the query into aggression about the comments."""},
{"role": "user", "content": query},
]
)
mapping = {'none': 0.0, 'offensive': 0.5, 'hate': 1.0}
try:
val = mapping[completion.choices[0].message.content]
except:
val = np.nan
return val
#TODO: class that performs the overall analysis of news articles
import pickle
from gensim.models import KeyedVectors
from khaiii import KhaiiiApi
class analyzeModule(langchainModule, ClassifierSet, APIkeyImport):
def __init__(self):
langchainModule.__init__(self)
ClassifierSet.__init__(self)
self.cluster_model = pickle.load(open("model/clustering/comment_feature_clustered_model.pkl", "rb"))
self.w2v_model = KeyedVectors.load_word2vec_format('model/w2v/comment_w2v', binary=False, encoding='utf-8')
def calculate_relation(self, content, comments):
return langchainModule.calculate_relation(self, content, comments)[1]
def summarize_content(self, title, description, content):
        return langchainModule.summarize_content(self, title, description, content)[1]
def comments_processing(self, comments_list):
morph_analyze = KhaiiiApi()
cate_to_int_dict = {'정치': 0, '인종': 1, '성별': 2, '외모': 3, '세대': 4, '기타': 5}
processed_comments = []
for comment in comments_list:
if comment == '' or comment == None:
continue
temp_lex = []
for word in morph_analyze.analyze(comment):
for element in word.morphs:
temp_lex.append(element.lex)
vector = []
for word in temp_lex:
try:
                    vector.append(self.w2v_model.get_vector(word)) # skip words missing from the word2vec model; they may have been dropped by min_count when the model was built
except:
pass
vector = np.mean(vector)
cate = ClassifierSet.cate_classifier(self, comment)
try:
cate_encoded = cate_to_int_dict[cate]
except:
cate_encoded = cate_to_int_dict['기타']
bias = ClassifierSet.bias_classifier(self, comment)
try:
bias = int(bias)
except:
bias = 0
aggr = ClassifierSet.aggr_classifier(self, comment)
comment_vec = np.array([cate_encoded, bias, aggr, vector]).reshape(1, -1)
cmt = {"comment": comment, "category": cate, "category_encoded": cate_encoded, "bias": bias, "aggression": aggr, "cluster": self.cluster_model.predict(comment_vec).tolist()[0]}
processed_comments.append(cmt)
return processed_comments
def report_article(self, title, description, content, comments_list):
comments_analytics = self.comments_processing(comments_list)
return langchainModule.report(self, title, description, content, comments_analytics)[1] | [
"[('system', 'You are a keyword generator that generates search keywords from news articles.\\n You are given a title, description, and content of the news article.\\n You should generate search keywords that can be used to search for the news article directly and strongly related to the summarization.\\n '), ('human', 'title: {title}, description: {description}, content: {content}')]",
"You are a great data classifier conducting on a online comment at news article.\n All you have to do is convert the comments in the query into bias about the comments.\n 편향은 그 댓글이 얼마나 편향되어있는지를 나타내며, -1 혹은 1의 값을 가집니다.\n 편향성은 흑백논리에 해당되는 \"정치\", \"성별\", \"세대\" 카테고리에만 적용됩니다.\n 우파를 비하하는 댓글은 -1, 좌파를 비하하는 댓글은 1입니다.\n 역시나 남성을 비하하는 댓글은 -1, 여성을 비하하는 댓글은 1입니다.\n 노인 세대를 비하하는 댓글은 -1, 어린이 세대를 비하하는 댓글은 1입니다.\n \"정치\", \"성별\", \"세대\" 카테고리에 해당하지 않는 값은 모두 0으로 표현하십시오.\n 따라서 편향 값은 -1부터 1 사이의 실수로 표현됩니다.",
"[('system', 'You are a summarizer that summarizes news articles.\\n You should summarize given news article.\\n '), ('human', 'title: {title}, description: {description}, content: {content}')]",
"[('system', 'You should analyze the relation between news articles and comments.\\n You are given a content of a news article and comments.\\n You should write reports of the relation between the news article and the comments whether it is related or not, and how much it is related.\\n '), ('human', 'content: {content}, comments: {comments}')]",
"You are a great data classifier conducting on a online comment at news article.\n All you have to do is convert the comments in the query into category about the comments.",
"[('system', 'You should report a overall result of the news article in Korean.\\n You are given a title, description, content, analysis of the comments, and relation between comments and article of a news.\\n You should write a report of the news article.\\n The report can contain the following contents, and your overall analysis would be about the inclination of the news article,\\n how comments interact with article, and how much the article is related to the comments in fact.\\n And also you should give an insight of the inclination, aggression of the news comments by given query.\\n \\n You can write the report in Korean.\\n You should write the report in markdown format.\\n Output format: MAKRDOWN\\n '), ('human', 'title: {title}, description: {description}, content: {content}, comments analytics: {comments_analytics}, relation: {relation}')]",
"You are a great data classifier conducting on a online comment at news article.\n All you have to do is convert the comments in the query into aggression about the comments.",
"[('system', 'You should seperate news article of query. Query is kind of a set of a news article which is not seperated.\\n You are given a title, description, content of a news.\\n One query can contain several articles. But some case, news can contain only one article. If then, never mind seperating it. Just return the original content of the news as a return type where written downside of this instruction.\\n \\n Range of a topic is one article. If the content of the news are connected by meaning, you NEVER seperate it by topic.\\n You should seperate the content in original query by article, with list format consisted of article composing original content.\\n Some case, trash datas such as advertisement, non-news contents can be included in the news.\\n If you find trash datas, you should remove it.\\n ex) [article1, article2, article3]\\n '), ('human', 'title: {title}, description: {description}, content: {content}')]"
] |
2024-01-10 | huchenxucs/ChatDB | chatdb.py | import json, re, time
from mysql import MySQLDB
from config import cfg
from chatdb_prompts import prompt_ask_steps, prompt_ask_steps_no_egs
from tables import init_database, database_info, table_details
from langchain.prompts import PromptTemplate
from call_ai_function import populate_sql_statement
from chat import chat_with_ai
def get_steps_from_response(response):
# Regular expression patterns to extract step number, description, and SQL query
pattern = r"Step(\d+):\s+(.*?)\n`(.*?)`"
matches = re.findall(pattern, response, re.DOTALL)
# Extract information and create list of dictionaries
result = []
for match in matches:
step_number = int(match[0])
description = match[1]
sql_query = match[2]
# print(sql_query+'\n')
result.append({
"id": step_number,
"description": description.strip(),
"sql": sql_query.strip(),
})
return result
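# Worked example (added, illustrative): a response of the form
#     Step1: Count the users
#     `SELECT COUNT(*) FROM users;`
# parses into [{"id": 1, "description": "Count the users", "sql": "SELECT COUNT(*) FROM users;"}].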
def init_system_msg():
sys_temp = """
You are ChatDB, a powerful AI assistant, a variant of ChatGPT that can utilize databases as external symbolic memory. \
You are an expert in databases, proficient in SQL statements and can use the database to help users. \
The details of tables in the database are delimited by triple quotes.
\"\"\"
{table_details}
\"\"\"
"""
sys_prompt = PromptTemplate(
template=sys_temp,
input_variables=[],
partial_variables={"table_details": table_details, }
)
sys_prompt_str = sys_prompt.format()
return sys_prompt_str
def chain_of_memory(sql_steps, mysql_db):
num_step = len(sql_steps)
sql_results_history = []
new_mem_ops = []
for i in range(num_step):
cur_step = sql_steps[i]
ori_sql_cmd = cur_step['sql']
print(f"\nStep{cur_step['id']}: {cur_step['description']}\n")
if need_update_sql(ori_sql_cmd):
list_of_sql_str = populate_sql_statement(ori_sql_cmd, sql_results_history)
print(ori_sql_cmd)
new_mem_ops.append(list_of_sql_str)
for sql_str in list_of_sql_str:
print(f"Execute: \n{sql_str}\n")
sql_results, sql_res_str = mysql_db.execute_sql(sql_str)
print(f"Database response:\n{sql_res_str}\n")
if sql_results:
sql_results_history.append(sql_results)
else:
print(f"Execute: \n{ori_sql_cmd}\n")
sql_results, sql_res_str = mysql_db.execute_sql(ori_sql_cmd)
new_mem_ops.append([ori_sql_cmd])
print(f"Database response:\n{sql_res_str}\n")
if sql_results:
sql_results_history.append(sql_results)
return sql_results_history, new_mem_ops
def generate_chat_responses(user_inp, mysql_db, historical_message):
# ask steps
prompt_ask_steps_str = prompt_ask_steps.format(user_inp=user_inp)
response_steps = chat_with_ai(init_system_msg(), prompt_ask_steps_str, historical_message, None,
token_limit=cfg.fast_token_limit)
historical_message[-2]["content"] = prompt_ask_steps_no_egs.format(user_inp=user_inp)
response_steps_list_of_dict = get_steps_from_response(response_steps)
if len(response_steps_list_of_dict) == 0:
print(f"NOT NEED MEMORY: {response_steps}")
return
sql_results_history, new_mem_ops = chain_of_memory(response_steps_list_of_dict, mysql_db)
print("Finish!")
return
def need_update_sql(input_string):
pattern = r"<\S+>"
matches = re.findall(pattern, input_string)
# print(matches)
# if matches:
# print("The pattern was found in the input string.")
# else:
# print("The pattern was not found in the input string.")
return matches
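# Illustrative example (added): need_update_sql("SELECT name FROM users WHERE id = <step1_id>")
# returns ['<step1_id>'], signalling that the placeholder must be filled in from earlier results.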
if __name__ == '__main__':
mysql_db = init_database(database_info, "try1024")
his_msgs = []
print("START!")
text = input("USER INPUT: ")
while True:
generate_chat_responses(text, mysql_db, his_msgs)
text = input("USER INPUT: ")
| [
"table_details"
] |
2024-01-10 | huchenxucs/ChatDB | chatdb_prompts.py | from langchain.prompts import PromptTemplate
from sql_examples import egs
prompt_ask_steps_temp = """
Please tell me what standard SQL statements should I use in order to respond to the "USER INPUT". \
If it needs multiple SQL operations on the database, please list them step by step concisely. \
If there is no need to use the database, reply to the "USER INPUT" directly.
The output should be a markdown code snippet formatted in the following schema, \
including the leading and trailing "\`\`\`" and "\`\`\`":
```
Step1: <Description of first step>
SQL command for step1
Step2: <Description of second step>
SQL command for step2
......
```
Here are some examples:
{egs}
USER INPUT: {user_inp}
ANSWER:
"""
prompt_ask_steps = PromptTemplate(
template=prompt_ask_steps_temp,
input_variables=["user_inp"],
partial_variables={
"egs": '\n'.join(egs),
}
)
prompt_ask_steps_no_egs = PromptTemplate(
template=prompt_ask_steps_temp,
input_variables=["user_inp"],
partial_variables={
"egs": ""
}
)
if __name__ == '__main__':
print(prompt_ask_steps.format(user_inp="Hi"))
| [
"\n",
"\nPlease tell me what standard SQL statements should I use in order to respond to the \"USER INPUT\". If it needs multiple SQL operations on the database, please list them step by step concisely. If there is no need to use the database, reply to the \"USER INPUT\" directly.\nThe output should be a markdown code snippet formatted in the following schema, including the leading and trailing \"\\`\\`\\`\" and \"\\`\\`\\`\":\n```\nStep1: <Description of first step>\nSQL command for step1\n\nStep2: <Description of second step>\nSQL command for step2\n\n......\n```\nHere are some examples:\n{egs}\n\nUSER INPUT: {user_inp}\nANSWER:\n",
"USER INPUT",
"user_inp",
"\\`\\`\\`"
] |
2024-01-10 | TheFenixfx/BroAi | BroAI.py | import streamlit as st ##from transformers import pipeline
from streamlit_extras.stylable_container import stylable_container
from streamlit_extras.switch_page_button import switch_page
import pages.Profile_Page as Profile_Page
import toml
import asyncio
import logging
import threading
import os
import json
import subprocess
from bs4 import BeautifulSoup
from IPython import get_ipython
from langchain.llms import LlamaCpp
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.vectorstores import Chroma
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.tools.render import render_text_description
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.prompts import PromptTemplate,ChatPromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain_decorators import StreamingContext, llm_prompt, GlobalSettings
from langchain.document_loaders import DirectoryLoader
from flask import Flask, jsonify
from flask import request
from flask import Flask, stream_with_context, request, Response
#st.set_page_config(page_title="BroAi Interface")
#This is necessary for decorators streaming
GlobalSettings.define_settings(
logging_level=logging.INFO,
print_prompt=True,
print_prompt_name=True)
app = Flask(__name__)
config = toml.load('config.toml')
profile = {}
st.session_state["profile"] = config["Profile"]
profile = st.session_state["profile"]
path_from_toml = config["docs"]
repo_path = os.path.normpath( path_from_toml['path'])
model_path_toml = config["model"]
PATH = os.path.normpath( model_path_toml['path'])
digested_toml = config["digested"]
digested_path = os.path.normpath( digested_toml['path'])
bot_reply_enabled = False
bot_process = None
@st.cache_resource
def llmini():
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = LlamaCpp(model_path=PATH,
n_gpu_layers=43,
n_batch=512,
n_ctx=5000,
                   f16_kv=True,  # keep the key/value cache in fp16, just in case
callback_manager=callback_manager,
verbose=True,
temperature=0.2)
return llm
llm = llmini()
def escape_path(path):
return path.replace("\\", "\\\\")
def save(filename, variable,directory):
# Ensure the directory exists
os.makedirs(directory, exist_ok=True)
# Combine directory and filename to get the full path
file_path = os.path.join(directory, filename)
# Check if the file exists and is not empty
if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
# Open the file and append the variable to it
with open(file_path, 'a') as file:
file.write('\n' + str(variable)) # Adding a newline before the variable for readability
else:
# Open the file and write the variable to it
with open(file_path, 'w') as file:
file.write(str(variable))
@st.cache_resource
def docloader():
readme_loader = DirectoryLoader('./documents', glob="**/*.md")
txt_loader = DirectoryLoader('./documents', glob="**/*.txt")
loaders = [readme_loader,txt_loader] # add the created loader here
documents = []
for loader in loaders:
documents.extend(loader.load())
print (f'You have {len(documents)} document(s) in your data')
print (f'There are {len(documents[0].page_content)} characters in your document')
model_name = "BAAI/bge-large-en-v1.5"
model_kwargs = {'device': 'cuda'}
encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity
db = Chroma.from_documents(documents=documents, embedding=HuggingFaceBgeEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs,
query_instruction="为这个句子生成表示以用于检索相关文章:"
),persist_directory = digested_path)
retriever = db.as_retriever(
search_type="mmr", # Also test "similarity"
search_kwargs={"k": 1},
)
return retriever
doc_db = docloader() # TODO: later, trigger this from a button that receives the path, and add a download function
def write_to_config_model(user_input):
    # Update only the model section so the rest of config.toml is preserved
    config['model'] = {'path': user_input}
    with open('config.toml', 'w') as file:
        toml.dump(config, file)
def write_to_config_docs(user_input):
    # Update only the docs section so the rest of config.toml is preserved
    config['docs'] = {'path': user_input}
    with open('config.toml', 'w') as file:
        toml.dump(config, file)
def write_to_config_digested(user_input):
    # Update only the digested section so the rest of config.toml is preserved
    config['digested'] = {'path': user_input}
    with open('config.toml', 'w') as file:
        toml.dump(config, file)
# Test Api call
@app.route('/llm', methods=['POST'])
def api_endpoint():
data = request.get_json()
jobdesc = data.get('jobdesc')
preferences = data.get("preferences")
return jsonify({'message': 'Hello from Flask!'})
def run_flask():
app.run(port=5000)
tokens=[]
def capture_stream_func(new_token:str):
tokens.append(new_token)
def workPromtInitialization():
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
#system prompt
system_prompt = """Assist a Software Developer to evaluate the job data to identify and recommend remote jobs opportunities that align with
the user's qualifications, and preferences. Answer with a definitive decision if the provided job data is suitable for the user. Consider a work unsuitable if it requires IOS or Swift.
This is the data :
"""
system_prompt = """ Use the jobdata to decide if it align with the user data and the user expectations.
This is the data :
"""
instruction = """
JobData : {context}
User: {userdata}"""
SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
header_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
QA_CHAIN_PROMPT = PromptTemplate(
input_variables=["context","userdata"],
template=header_template,
)
return QA_CHAIN_PROMPT
def generate_assistant_response(user_input, llm,doc_db):
#Question about the documents
docs = doc_db.get_relevant_documents(user_input)
chain = load_qa_chain(llm, chain_type="stuff",prompt = llmPrompInitialization() )
with st.chat_message("assistant"):
with st.spinner("Working on request"):
            response = chain({"input_documents": docs, "question": user_input}, return_only_outputs=True)
            message = {"role": "assistant", "content": response["output_text"]}
            st.write(response["output_text"])  # this is the actual text box on the browser
            st.session_state.messages.append(message)  # store the full message dict so the role check in layout() keeps working
return response
async def generate_assistant_work(user_input, llm):
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
system_prompt = """ You are a helpful assistant. Assist a Software Developer to evaluate the job data to identify and recommend remote jobs opportunities that align with
the user's qualifications, and preferences. You Use the Potential Job data to decide if it align with the User Data. Answer with a definitive decision if the provided job data is suitable for the user. Consider a work unsuitable if it requires IOS or Swift. Answer in Spanish.
This is the data :
"""
instruction = """
User Data : {context}
Potential Job: {userdata}"""
SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
header_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
prompt = ChatPromptTemplate.from_template(header_template)
chain = prompt | llm
response = chain.invoke({"userdata": user_input,"context":profile})
output = {"output_text":response}
return output
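# Example call (sketch; the job text is made up and this assumes no event loop is already running):
# result = asyncio.run(generate_assistant_work(
#     user_input="Remote Android Engineer, Kotlin required, no iOS work", llm=llm))
# print(result["output_text"])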
async def generate_telegram_answer(user_input, llm):
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
system_prompt = """ You are a helpful assistant, answer the best possible to the Message
This is the data :
"""
instruction = """
Message : {message}"""
SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
header_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
prompt = ChatPromptTemplate.from_template(header_template)
chain = prompt | llm
response = chain.invoke({"message": user_input})
output = {"output_text":response}
return output
@app.route('/stream', methods=['GET'])
def stream():
def generate():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def async_function():
with StreamingContext(stream_to_stdout=True, callback=capture_stream_func):
                result = await rizz_assistant(text="and a quote", profile="say hello")
print("Stream finished ... we can distinguish tokens thanks to alternating colors")
return result
result = loop.run_until_complete(async_function())
print("---------->"+str(result))
#
yield "Data: " + str(result) + "\n\n" # Format this as needed for your stream
return Response(stream_with_context(generate()), content_type='text/event-stream')
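# Manual test of this SSE test route (port 5000 matches run_flask above):
# curl -N http://localhost:5000/stream
# The response arrives as a single "Data: ..." event once the prompt finishes streaming.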
# Non-functional code example that uses streams with OpenAI. Use it as a base to combine with a local model or a custom use case
@app.route('/askstream', methods=['POST'])
def streamPostlang():
def generate():
data = request.get_json()
textdesc = data.get('text')
preferences = data.get("system")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if(preferences == "work"):
async def async_function():
with StreamingContext(stream_to_stdout=True, callback=capture_stream_func):
result = []
result = await rizz_assistant(text=textdesc,profile=str(profile))
filename = "buffer.txt"
add_text_to_file(file_path=filename,text=result)
return result
else:
async def async_function():
with StreamingContext(stream_to_stdout=True, callback=capture_stream_func):
result = []
                    result = await rizz_assistant(text=textdesc, profile=preferences)
return result
result = loop.run_until_complete(async_function())
yield "Data: " + str(result) + "\n\n" # Format this as needed for your stream
return Response(stream_with_context(generate()), content_type='text/event-stream')
@app.route('/ask', methods=['POST'])
def streamPost():
def generate():
data = request.get_json()
textdesc = data.get('text')
typeofrequest = data.get("system")#change this to request class
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if(typeofrequest == "work"):
async def async_function():
result = await generate_assistant_work(user_input=textdesc,llm=llm)
return result
else:
async def async_function():
                # no handler is wired up for this request type yet, so return a stub payload
                return {"output_text": f"Unsupported request type: {typeofrequest}"}
result = loop.run_until_complete(async_function())
yield result['output_text'] + "\n\n" # Format this as needed for your stream
return Response(stream_with_context(generate()), content_type='text/event-stream')
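# Example request for the /ask route (sketch; the payload values are invented):
# curl -X POST http://localhost:5000/ask \
#   -H "Content-Type: application/json" \
#   -d '{"text": "Backend role, Django, fully remote", "system": "work"}'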
@app.route('/telebotchat', methods=['POST'])
def streamPostTelegram():
def generate():
data = request.get_json()
textdesc = data.get('text')
typeofrequest = data.get("system")#change this to request class
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def async_function():
result = await generate_telegram_answer(user_input=textdesc,llm=llm)
return result
result = loop.run_until_complete(async_function())
yield result['output_text'] + "\n\n" # Format this as needed for your stream
return Response(stream_with_context(generate()), content_type='text/event-stream')
# Code example. Function passes through to OpenAI; could be used to elevate the problem to a bigger model
@llm_prompt(capture_stream=True)
async def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers"):
"""
Write me a short header for my post about {topic} for {platform} platform.
It should be for {audience} audience.
(Max 15 words)
"""
pass
# Fun prompt. Function passes through to OpenAI; could be used to elevate the prompt to a bigger model
@llm_prompt(capture_stream=True)
async def rizz_assistant( text:str,profile:str):
"""
    You are a Night host, you will deliver witty line openers to make your guest comfortable, and a way to achieve this is to banter and use emotion to attract the
female gender, the objective is to keep her interested in the user. Use the provided messages in the chat to give a sentence to write
This is the user data :
{profile}
The data from the job is :
{text}
"""
pass
async def run_prompt():
return await write_me_short_post(topic="Hehe, empty prompt") # that can do real magic!
# Code example. It is used when building a Code Interpreter
def exec_python(cell):
print("USING EXEC")
    ipython = get_ipython()  # assumes IPython's get_ipython is imported/available in this environment
result = ipython.run_cell(cell)
log = str(result.result)
if result.error_before_exec is not None:
log += f"\n{result.error_before_exec}"
if result.error_in_exec is not None:
log += f"\n{result.error_in_exec}"
return log
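# Example usage (sketch; only meaningful inside an IPython session):
# print(exec_python("x = 2 + 2\nx"))   # -> "4"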
# Code example. Nice to use with Chrome extension
def scrape(url: str):
# scrape website, and also will summarize the content based on objective if the content is too large
# objective is the original objective & task that user give to the agent, url is the url of the website to be scraped
print("Scraping website...")
# Define the headers for the request
headers = {
'Cache-Control': 'no-cache',
'Content-Type': 'application/json',
}
# Define the data to be sent in the request
data = {
"url": url
}
# Convert Python object to JSON string
data_json = json.dumps(data)
# Send the POST request
    # note: this uses the `requests` HTTP library (assumed imported above), not Flask's `request` object
    response = requests.post(
        "https://chrome.browserless.io/content?token=YOUR_BROWSERLESS_TOKEN", headers=headers, data=data_json)
# Check the response status code
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
text = soup.get_text()
print("Content:", text)
if len(text) < 8000:
#output = summary(text)
return text
else:
return text
else:
print(f"HTTP request failed with status code {response.status_code}")
def add_text_to_file(file_path, text):
print("running add_text_to_file")
try:
# Try to open the file in append mode
with open(file_path, 'a') as file:
file.write(text + '\n')
print(f'Text added to {file_path}')
except FileNotFoundError:
# If the file doesn't exist, create a new file and add text
with open(file_path, 'w') as file:
file.write(text + '\n')
print(f'New file created: {file_path}')
def sidebar():
"""Configure the sidebar and user's preferences."""
with st.sidebar.expander("🔧 Behaviour", expanded=True):
st.select_slider("Act like a",options=["Assistant","Agent","Expert (need docs)"])
st.sidebar.divider()
with st.sidebar:
with stylable_container(
key="side",
            css_styles="""div[data-testid="stSidebarUserContent"]{background-color: #02ab21}
"""
):
st.container()
":green[RESTART APP ON CHANGE]"
"Model file route"
user_input = st.text_input("Enter the path:")
if st.button("Save",key="savebtn"):
write_to_config_model(user_input)
st.success("Path saved to config.toml")
"The model you are using is :"
st.text(config['model']['path'])
"Documents file route"
doc_input = st.text_input("Enter docs path:")
option = st.radio(
'Select a document context for:',
            ('Coder','Work Analyzer','Character Simulation','Passport Bro AI','Cardinal System',))
st.write(f'You selected: :green[{option}]')
options = ['Python','Dart','Kotlin','Javascript']
        country_options = ['Dominican Republic','Philippines','Colombia','Brazil']
if(option == 'Coder'):
selected_option = st.selectbox("Choose language for Coder:", options)
st.write(f"You selected: :green[{selected_option}]")
elif(option == 'Passport Bro AI'):
selected_option_country = st.selectbox("Choose Country for Passport Bro AI:", country_options)
st.write(f"You selected: :blue[{selected_option_country}]")
if st.button("Load",key="keybtn"):
write_to_config_docs(doc_input)
"Context loaded from :"
st.text( config['docs']['path'])
"Persistent context saved in folder"
persistent_input = st.text_input("Digested docs path:")
if st.button("Digest",key="digestbtn"):
write_to_config_digested(persistent_input)
"Digested content saved in :"
st.text( config['digested']['path'])
if st.button("Switch page",key="switchbtn"):
switch_page("Profile Page")
def layout(llm,doc_db):
global bot_process
st.title("Telegram Bot Controller")
# Button to start the bot
if st.button("Start Bot", key="start_button"):
if bot_process is None or bot_process.poll() is not None:
# Start the bot script in a new process
bot_process = subprocess.Popen(["python", "./chatbots/telegrambotchat.py"])
st.success("Bot started successfully!")
else:
st.warning("Bot is already running!")
# Button to stop the bot
if st.button("Stop Bot", key="stop_button"):
if bot_process and bot_process.poll() is None:
# Terminate the bot process
bot_process.terminate()
st.success("Bot stopped successfully!")
else:
st.warning("Bot is not running!")
st.header("Personal :blue[AI]:sunglasses:")
# System Message
if st.button('Connect with Bro Ai Extension'):
thread = threading.Thread(target=run_flask)
thread.start()
st.write('Extension connection started!')
if "messages" not in st.session_state: #session state dict is the way to navigate the state graphs that you are building
st.session_state.messages = [
{"role": "assistant", "content": "Ask a question"}
]
#is reloading the state on layout
# User input
user_input = st.chat_input("Your question") # "Your Question" is only a placeholder and not actually a text input
if user_input:
st.session_state.messages.append({"role": "user", "content": user_input})
# Generate response
try:
if st.session_state.messages[-1]["role"] != "assistant": # when the state is not assistant, because there is input, use the model
generate_assistant_response(user_input,llm,doc_db)
except Exception as ex:
        print(f"Error while generating a response: {ex}")
def llmPrompInitialization():
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
#system prompt
system_prompt = """You are a helpful coder assistant, you will use the provided context to answer questions.
Read the given code examples before answering questions and think step by step. If you can not answer a user question based on
the provided context, inform the user. Do not use any other information for answer to the user"""
instruction = """
Context : {context}
User: {question}"""
SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
header_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
QA_CHAIN_PROMPT = PromptTemplate(
input_variables=["context","question"],
template=header_template,
)
return QA_CHAIN_PROMPT
def mistralPrompInitialization():
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
#system prompt
system_prompt = """You are a helpful coder assistant, you will use the provided context to answer questions.
Read the given code examples before answering questions and think step by step. If you can not answer a user question based on
the provided context, inform the user. Do not use any other information for answer to the user"""
instruction = """
Context : {context}
User: {question}"""
SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
header_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
QA_CHAIN_PROMPT = PromptTemplate(
input_variables=["context","question"],
template=header_template,
)
return QA_CHAIN_PROMPT
# Toy function; it is left as a challenge to modify the doc loader so it can read code files
def generate_assistant_coder(user_input, llm,doc_db):
#Question about the documents
docs = doc_db.get_relevant_documents(user_input)
chain = load_qa_chain(llm, chain_type="stuff",prompt = llmPrompInitialization() )
with st.chat_message("assistant"):
with st.spinner("Working on request"):
            response = chain({"input_documents": docs, "question": user_input}, return_only_outputs=True)
message = {"role": "assistant", "content": response["output_text"]}
st.write(response["output_text"]) #this is the actual text box on the browser
            st.session_state.messages.append(message)  # store the full message dict so the role check in layout() keeps working
return response
def telegramlit():
global bot_reply_enabled
st.title("Telegram Bot Controller")
# Button to toggle the bot_reply function state
if st.button("Toggle Bot", key="toggle_button"):
bot_reply_enabled = not bot_reply_enabled
st.write(f"Bot is {'ON' if bot_reply_enabled else 'OFF'}")
def main():
"""Set up user preferences, and layout"""
sidebar()
layout(llm,doc_db)
if __name__ == "__main__":
main()
| [
" You are a helpful assistant, answer the best possible to the Message\n This is the data :\n ",
" You are a helpful assistant. Assist a Software Developer to evaluate the job data to identify and recommend remote jobs opportunities that align with \n the user's qualifications, and preferences. You Use the Potential Job data to decide if it align with the User Data. Answer with a definitive decision if the provided job data is suitable for the user. Consider a work unsuitable if it requires IOS or Swift. Answer in Spanish.\n This is the data :\n ",
"question",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"output_text",
"context",
"Ask a question",
" Use the jobdata to decide if it align with the user data and the user expectations. \n \n This is the data :\n ",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERPLACEHOLDER",
"You are a helpful coder assistant, you will use the provided context to answer questions.\n Read the given code examples before answering questions and think step by step. If you can not answer a user question based on\n the provided context, inform the user. Do not use any other information for answer to the user",
"Assist a Software Developer to evaluate the job data to identify and recommend remote jobs opportunities that align with \n the user's qualifications, and preferences. Answer with a definitive decision if the provided job data is suitable for the user. Consider a work unsuitable if it requires IOS or Swift. \n \n This is the data :\n "
] |
2024-01-10 | joyyuan89/FinAI | apps~chatPDF_1.0~app_cloud.py | import os
import tempfile
import streamlit as st
from streamlit_chat import message
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
#from langchain.document_loaders import PyPDFium2Loader
from langchain.document_loaders import PyPDFLoader
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
#from langchain.chat_models import ChatOpenAI
st.set_page_config(
page_title="ChatPDF",
page_icon="📚",
layout="wide",
initial_sidebar_state="expanded",
)
class PDFQuery:
def __init__(self, openai_api_key = None) -> None:
#self.embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
#os.environ["OPENAI_API_KEY"] = openai_api_key
self.embeddings = OpenAIEmbeddings()
self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
self.chain = None
self.db = None
#self.retriver = None
def ask(self, question: str) -> str:
if self.chain is None:
response = "Please, add a document."
else:
docs = self.db.get_relevant_documents(question)
response = self.chain.run(input_documents=docs, question=question)
return response
def upload(self, file_path: os.PathLike) -> None:
#loader = PyPDFium2Loader(file_path)
loader = PyPDFLoader(file_path)
pages = loader.load()
chunks = pages
#chunks = loader.load_and_split(text_splitter = self.text_splitter)
#chunks = self.text_splitter.split_documents(pages)
self.db = FAISS.from_documents(chunks, self.embeddings).as_retriever(search_type="mmr")
self.chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
def forget(self) -> None:
self.db = None
self.chain = None
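# Standalone usage of PDFQuery outside Streamlit (sketch; the file name and question are examples):
# pdf = PDFQuery(openai_api_key=os.environ["OPENAI_API_KEY"])
# pdf.upload("annual_report.pdf")
# print(pdf.ask("What was the total revenue?"))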
def display_messages():
st.subheader("💬 Chat")
for i, (msg, is_user) in enumerate(st.session_state["messages"]):
message(msg, is_user=is_user, key=str(i))
st.session_state["thinking_spinner"] = st.empty()
def process_input():
if st.session_state["user_input"] and len(st.session_state["user_input"].strip()) > 0:
user_text = st.session_state["user_input"].strip()
with st.session_state["thinking_spinner"], st.spinner(f"Thinking"):
query_text = st.session_state["pdfquery"].ask(user_text)
st.session_state["messages"].append((user_text, True))
st.session_state["messages"].append((query_text, False))
def read_and_save_file():
st.session_state["pdfquery"].forget() # to reset the knowledge base
st.session_state["messages"] = []
st.session_state["user_input"] = ""
for file in st.session_state["file_uploader"]:
with tempfile.NamedTemporaryFile(delete=False) as tf:
tf.write(file.getbuffer())
file_path = tf.name
with st.session_state["uploading_spinner"], st.spinner(f"uploading {file.name}"):
st.session_state["pdfquery"].upload(file_path)
os.remove(file_path)
def is_openai_api_key_set() -> bool:
return len(st.session_state["OPENAI_API_KEY"]) > 0
def main():
if len(st.session_state) == 0:
st.session_state["messages"] = []
st.session_state["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "")
if is_openai_api_key_set():
st.session_state["pdfquery"] = PDFQuery(st.session_state["OPENAI_API_KEY"])
else:
st.session_state["pdfquery"] = None
st.header("📚 ChatPDF")
st.markdown("")
st.markdown("Key in your OpenAI API key to get started. Skip if you already have it in your environment variables.")
if st.text_input("OpenAI API Key", value=st.session_state["OPENAI_API_KEY"], key="input_OPENAI_API_KEY", type="password"):
if (
len(st.session_state["input_OPENAI_API_KEY"]) > 0
and st.session_state["input_OPENAI_API_KEY"] != st.session_state["OPENAI_API_KEY"]
):
st.session_state["OPENAI_API_KEY"] = st.session_state["input_OPENAI_API_KEY"]
if st.session_state["pdfquery"] is not None:
st.warning("Please, upload the files again.")
st.session_state["messages"] = []
st.session_state["user_input"] = ""
st.session_state["pdfquery"] = PDFQuery(st.session_state["OPENAI_API_KEY"])
st.subheader("📄 Upload a document")
st.file_uploader(
"Upload document",
type=["pdf"],
key="file_uploader",
on_change=read_and_save_file,
label_visibility="collapsed",
accept_multiple_files=True,
disabled=not is_openai_api_key_set(),
)
st.session_state["uploading_spinner"] = st.empty()
display_messages()
st.text_input("What's your question?", key="user_input", disabled=not is_openai_api_key_set(), on_change=process_input)
if __name__ == "__main__":
main() | [] |
2024-01-10 | joyyuan89/FinAI | apps~chatPDF_1.1~pdfquery.py | import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
#from langchain.document_loaders import PyPDFium2Loader
from langchain.document_loaders import PyPDFLoader
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
#from langchain.chat_models import ChatOpenAI
class PDFQuery:
def __init__(self, openai_api_key = None) -> None:
#self.embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
#os.environ["OPENAI_API_KEY"] = openai_api_key
self.embeddings = OpenAIEmbeddings()
self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
self.chain = None
self.db = None
#self.retriver = None
def qa(self, question: str) -> str:
if self.chain is None:
response = "Please, add a document."
else:
docs = self.db.get_relevant_documents(question)
response = self.chain.run(input_documents=docs, question=question)
return response
def upload(self, file_path: os.PathLike) -> None:
#loader = PyPDFium2Loader(file_path)
loader = PyPDFLoader(file_path)
pages = loader.load()
chunks = pages
#chunks = loader.load_and_split(text_splitter = self.text_splitter)
#chunks = self.text_splitter.split_documents(pages)
self.db = FAISS.from_documents(chunks, self.embeddings).as_retriever(search_type="mmr")
self.chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
def forget(self) -> None:
self.db = None
self.chain = None
| [] |
2024-01-10 | joyyuan89/FinAI | apps~chatPDF_1.0~pdfquery.py | import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
#from langchain.document_loaders import PyPDFium2Loader
from langchain.document_loaders import PyPDFLoader
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
#from langchain.chat_models import ChatOpenAI
class PDFQuery:
def __init__(self, openai_api_key = None) -> None:
#self.embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
#os.environ["OPENAI_API_KEY"] = openai_api_key
self.embeddings = OpenAIEmbeddings()
self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
self.chain = None
self.db = None
#self.retriver = None
def ask(self, question: str) -> str:
if self.chain is None:
response = "Please, add a document."
else:
docs = self.db.get_relevant_documents(question)
response = self.chain.run(input_documents=docs, question=question)
return response
def upload(self, file_path: os.PathLike) -> None:
#loader = PyPDFium2Loader(file_path)
loader = PyPDFLoader(file_path)
pages = loader.load()
chunks = pages
#chunks = loader.load_and_split(text_splitter = self.text_splitter)
#chunks = self.text_splitter.split_documents(pages)
self.db = FAISS.from_documents(chunks, self.embeddings).as_retriever(search_type="mmr")
self.chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
def forget(self) -> None:
self.db = None
self.chain = None
| [] |
2024-01-10 | mapu/toolchains | MaPUSim~APC~src~cpu~BaseCPU.py | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# Copyright (c) 2011 Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Rick Strong
# Andreas Hansson
import sys
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from Bus import CoherentBus
from InstTracer import InstTracer
from ExeTracer import ExeTracer
from MemObject import MemObject
default_tracer = ExeTracer()
if buildEnv['TARGET_ISA'] == 'alpha':
from AlphaTLB import AlphaDTB, AlphaITB
from AlphaInterrupts import AlphaInterrupts
elif buildEnv['TARGET_ISA'] == 'sparc':
from SparcTLB import SparcTLB
from SparcInterrupts import SparcInterrupts
elif buildEnv['TARGET_ISA'] == 'x86':
from X86TLB import X86TLB
from X86LocalApic import X86LocalApic
elif buildEnv['TARGET_ISA'] == 'mips':
from MipsTLB import MipsTLB
from MipsInterrupts import MipsInterrupts
elif buildEnv['TARGET_ISA'] == 'arm':
from ArmTLB import ArmTLB
from ArmInterrupts import ArmInterrupts
elif buildEnv['TARGET_ISA'] == 'power':
from PowerTLB import PowerTLB
from PowerInterrupts import PowerInterrupts
elif buildEnv['TARGET_ISA'] == 'mapu':
from MapuTLB import MapuTLB
from MapuInterrupts import MapuInterrupts
elif buildEnv['TARGET_ISA'] == 'ucp':
from UcpTLB import UcpTLB
class BaseCPU(MemObject):
type = 'BaseCPU'
abstract = True
system = Param.System(Parent.any, "system object")
cpu_id = Param.Int(-1, "CPU identifier")
numThreads = Param.Unsigned(2, "number of HW thread contexts")
function_trace = Param.Bool(False, "Enable function trace")
function_trace_start = Param.Tick(0, "Cycle to start function trace")
checker = Param.BaseCPU(NULL, "checker CPU")
do_checkpoint_insts = Param.Bool(True,
"enable checkpoint pseudo instructions")
do_statistics_insts = Param.Bool(True,
"enable statistics pseudo instructions")
profile = Param.Latency('0ns', "trace the kernel stack")
do_quiesce = Param.Bool(True, "enable quiesce instructions")
workload = VectorParam.Process([], "processes to run")
if buildEnv['TARGET_ISA'] == 'sparc':
dtb = Param.SparcTLB(SparcTLB(), "Data TLB")
itb = Param.SparcTLB(SparcTLB(), "Instruction TLB")
interrupts = Param.SparcInterrupts(
NULL, "Interrupt Controller")
elif buildEnv['TARGET_ISA'] == 'alpha':
dtb = Param.AlphaTLB(AlphaDTB(), "Data TLB")
itb = Param.AlphaTLB(AlphaITB(), "Instruction TLB")
interrupts = Param.AlphaInterrupts(
NULL, "Interrupt Controller")
elif buildEnv['TARGET_ISA'] == 'x86':
dtb = Param.X86TLB(X86TLB(), "Data TLB")
itb = Param.X86TLB(X86TLB(), "Instruction TLB")
interrupts = Param.X86LocalApic(NULL, "Interrupt Controller")
elif buildEnv['TARGET_ISA'] == 'mips':
dtb = Param.MipsTLB(MipsTLB(), "Data TLB")
itb = Param.MipsTLB(MipsTLB(), "Instruction TLB")
interrupts = Param.MipsInterrupts(
NULL, "Interrupt Controller")
elif buildEnv['TARGET_ISA'] == 'arm':
dtb = Param.ArmTLB(ArmTLB(), "Data TLB")
itb = Param.ArmTLB(ArmTLB(), "Instruction TLB")
interrupts = Param.ArmInterrupts(
NULL, "Interrupt Controller")
elif buildEnv['TARGET_ISA'] == 'power':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
dtb = Param.PowerTLB(PowerTLB(), "Data TLB")
itb = Param.PowerTLB(PowerTLB(), "Instruction TLB")
interrupts = Param.PowerInterrupts(
NULL, "Interrupt Controller")
elif buildEnv['TARGET_ISA'] == 'mapu':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
dtb = Param.MapuTLB(MapuTLB(), "Data TLB")
itb = Param.MapuTLB(MapuTLB(), "Instruction TLB")
interrupts = Param.MapuInterrupts(
NULL, "Interrupt Controller")
elif buildEnv['TARGET_ISA'] == 'ucp':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
dtb = Param.UcpTLB(UcpTLB(), "Data TLB")
itb = Param.UcpTLB(UcpTLB(), "Instruction TLB")
interrupts = Param.UcpInterrupts(
NULL, "Interrupt Controller")
else:
print "Don't know what TLB to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
max_insts_all_threads = Param.Counter(0,
"terminate when all threads have reached this inst count")
max_insts_any_thread = Param.Counter(0,
"terminate when any thread reaches this inst count")
max_loads_all_threads = Param.Counter(0,
"terminate when all threads have reached this load count")
max_loads_any_thread = Param.Counter(0,
"terminate when any thread reaches this load count")
progress_interval = Param.Tick(0,
"interval to print out the progress message")
defer_registration = Param.Bool(False,
"defer registration with system (for sampling)")
clock = Param.Clock('1t', "clock speed")
phase = Param.Latency('0ns', "clock phase")
tracer = Param.InstTracer(default_tracer, "Instruction tracer")
icache_port = MasterPort("Instruction Port")
dcache_port = MasterPort("Data Port")
micache_port = MasterPort("MPU Instruction Port")
_cached_ports = ['icache_port', 'dcache_port', 'micache_port']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
_cached_ports += ["itb.walker.port", "dtb.walker.port"]
_uncached_slave_ports = []
_uncached_master_ports = []
if buildEnv['TARGET_ISA'] == 'x86':
_uncached_slave_ports += ["interrupts.pio", "interrupts.int_slave"]
_uncached_master_ports += ["interrupts.int_master"]
def createInterruptController(self):
if buildEnv['TARGET_ISA'] == 'sparc':
self.interrupts = SparcInterrupts()
elif buildEnv['TARGET_ISA'] == 'alpha':
self.interrupts = AlphaInterrupts()
elif buildEnv['TARGET_ISA'] == 'x86':
_localApic = X86LocalApic(pio_addr=0x2000000000000000)
self.interrupts = _localApic
elif buildEnv['TARGET_ISA'] == 'mips':
self.interrupts = MipsInterrupts()
elif buildEnv['TARGET_ISA'] == 'arm':
self.interrupts = ArmInterrupts()
elif buildEnv['TARGET_ISA'] == 'power':
self.interrupts = PowerInterrupts()
elif buildEnv['TARGET_ISA'] == 'mapu':
self.interrupts = MapuInterrupts()
else:
print "Don't know what Interrupt Controller to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
def connectCachedPorts(self, bus):
for p in self._cached_ports:
exec('self.%s = bus.slave' % p)
def connectUncachedPorts(self, bus):
for p in self._uncached_slave_ports:
exec('self.%s = bus.master' % p)
for p in self._uncached_master_ports:
exec('self.%s = bus.slave' % p)
def connectAllPorts(self, cached_bus, uncached_bus = None):
self.connectCachedPorts(cached_bus)
if not uncached_bus:
uncached_bus = cached_bus
self.connectUncachedPorts(uncached_bus)
def addPrivateSplitL1Caches(self, ic, dc, mic = None, iwc = None, dwc = None):
self.icache = ic
self.micache = mic
self.dcache = dc
self.icache_port = ic.cpu_side
self.micache_port = mic.cpu_side
self.dcache_port = dc.cpu_side
self._cached_ports = ['icache.mem_side', 'dcache.mem_side', 'micache.mem_side']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
if iwc and dwc:
self.itb_walker_cache = iwc
self.dtb_walker_cache = dwc
self.itb.walker.port = iwc.cpu_side
self.dtb.walker.port = dwc.cpu_side
self._cached_ports += ["itb_walker_cache.mem_side", \
"dtb_walker_cache.mem_side"]
else:
self._cached_ports += ["itb.walker.port", "dtb.walker.port"]
# Checker doesn't need its own tlb caches because it does
# functional accesses only
if self.checker != NULL:
self._cached_ports += ["checker.itb.walker.port", \
"checker.dtb.walker.port"]
def addTwoLevelCacheHierarchy(self, ic, dc, l2c, iwc = None, dwc = None):
self.addPrivateSplitL1Caches(ic, dc, iwc, dwc)
self.toL2Bus = CoherentBus()
self.connectCachedPorts(self.toL2Bus)
self.l2cache = l2c
self.toL2Bus.master = self.l2cache.cpu_side
self._cached_ports = ['l2cache.mem_side']
def addCheckerCpu(self):
pass
| [] |
2024-01-10 | KuramitsuLab/lm-chaineval-harness | scripts~models.py | import torch
import os
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, BitsAndBytesConfig
import openai
from openai import OpenAI
import json
import boto3
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# =====================
# Base Classes
# =====================
class Model:
"""Base class for abstracting a pretrained model."""
def generate(self, prompt: str)->str:
return f"Generated response for: {prompt}"
class ModelLoader:
"""Loads a Model instance based on a model name and additional arguments."""
def __init__(self, model_name, model_args:dict):
self.model_name = model_name
self.model_args = model_args
def load(self)->Model:
return Model()
# =====================
# Testing Code
# =====================
class TestModel:
def generate(self, prompt: str, model_args=None)->str:
return f"Generated response for: {prompt} \n with args: {model_args}"
class TestModelLoader(ModelLoader):
def load(self)->Model:
return TestModel()
# =====================
# HuggingFace Model Integration
# =====================
class HFModel(Model):
def __init__(self, model_name, hf_token=None, model_args=None, quantize=False):
default_args = {
"max_length": 512,
"do_sample": True,
"top_p": 0.95,
"temperature": 0.2,
"return_full_text": False,
}
model_args = model_args or {}
if "max_new_tokens" in model_args:
default_args.pop("max_length", None)
default_args.update(model_args)
# super().__init__()
# Initialize the tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
model_name,
use_auth_token=hf_token if hf_token else None,
trust_remote_code=True,
padding_side='left'
)
self.tokenizer.pad_token = self.tokenizer.eos_token
        # Implementation without the pipeline helper ----------------------------------
# # Initialize the model
# self.model = AutoModelForCausalLM.from_pretrained(
# model_name,
# use_auth_token=hf_token if hf_token else None,
# trust_remote_code=True
# )
# # Set the device to GPU if available
# self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.model.to(self.device)
# ----------------------------------
self.model_args = default_args
if quantize:
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16
)
self.model = AutoModelForCausalLM.from_pretrained(
model_name,
quantization_config=bnb_config,
device_map="auto",
trust_remote_code=True,
use_auth_token=hf_token if hf_token else None,
)
# self.model = AutoModelForCausalLM.from_pretrained(
# model_name, device_map="auto", use_auth_token=hf_token if hf_token else None, load_in_4bit=True
# )
else:
self.model = AutoModelForCausalLM.from_pretrained(
model_name,
use_auth_token=hf_token if hf_token else None,
trust_remote_code=True,
device_map="auto",
)
self.generator = pipeline(
"text-generation",
model=self.model,
tokenizer=self.tokenizer,
# device=0 if torch.cuda.is_available() else -1,
use_auth_token=hf_token if hf_token else None,
**self.model_args
)
def generate(self, prompt: str) -> str:
        # Implementation without the pipeline helper ----------------------------------
# input_ids = self.tokenizer.encode(prompt, return_tensors="pt").to(self.device)
# generated_ids = self.model.generate(input_ids, **self.model_args)
# return self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
# ----------------------------------
generated_texts = self.generator(prompt, **self.model_args, pad_token_id=self.generator.tokenizer.eos_token_id)
return generated_texts[0]['generated_text']
class HFModelLoader(ModelLoader):
def __init__(self, model_name, hf_token=None, model_args=None, quantize=True):
super().__init__(model_name, model_args)
self.hf_token = hf_token
self.quantize = quantize
def load(self) -> HFModel:
return HFModel(self.model_name, self.hf_token, self.model_args, self.quantize)
# =====================
# OpenAI Model Integration
# =====================
class OpenAIModel(Model):
def __init__(self, openai_api_key, model_name, model_args=None):
# Default arguments for OpenAI API
default_args = {
"temperature": 0.2,
"top_p": 0.95,
"max_tokens": 512,
"n": 1}
# Override defaults with any user-provided arguments
model_args = model_args or {}
default_args.update(model_args)
super().__init__()
self.openai_api_key = openai_api_key
self.model_name = model_name
self.model_args = default_args
def generate(self, prompt: str) -> str:
client = OpenAI(api_key=self.openai_api_key)
response = client.chat.completions.create(
model=self.model_name,
messages=[{"role": "user", "content": prompt}],
**self.model_args
)
# prompt_and_response = prompt + "\n" + response.choices[0].message.content
return response.choices[0].message.content
class OpenAIModelLoader(ModelLoader):
def __init__(self, openai_api_key, model_name, model_args=None):
super().__init__(model_name, model_args)
self.openai_api_key = openai_api_key
def load(self) -> OpenAIModel:
return OpenAIModel(self.openai_api_key, self.model_name, self.model_args)
# =====================
# Anthropic Model Integration
# =====================
class AnthropicModel(Model):
def __init__(self, aws_access_key_id, aws_secret_access_key, model_name, model_args=None):
# Default arguments for Anthropic Claude API
default_args = {
"max_tokens_to_sample": 512,
"temperature": 0.2,
"top_p": 0.95,
}
# Override defaults with any user-provided arguments
model_args = model_args or {}
default_args.update(model_args)
super().__init__()
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.model_name = model_name
self.model_args = default_args
def check_and_append_claude_format(self, prompt: str) -> str:
human_str = "\n\nHuman:"
assistant_str = "\n\nAssistant:"
if human_str not in prompt:
prompt = human_str + prompt
if assistant_str not in prompt:
prompt += assistant_str
return prompt
def generate(self, prompt: str) -> str:
bedrock = boto3.client("bedrock-runtime",
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name='ap-northeast-1'
)
prompt = self.check_and_append_claude_format(prompt)
body = json.dumps(
{
"prompt": prompt,
# "prompt": "\n\nHuman: Tell me a funny joke about outer space\n\nAssistant:",
"anthropic_version": "bedrock-2023-05-31",
**self.model_args,
}
)
response = bedrock.invoke_model(body=body, modelId=self.model_name)
response_body = json.loads(response.get("body").read())
return response_body.get("completion")
class AnthropicModelLoader(ModelLoader):
def __init__(self, aws_access_key_id, aws_secret_access_key, model_name, model_args=None):
super().__init__(model_name, model_args)
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
def load(self) -> AnthropicModel:
return AnthropicModel(self.aws_access_key_id, self.aws_secret_access_key, self.model_name, self.model_args)
# =====================
# Model Loader Factory
# =====================
class ModelLoaderFactory:
@staticmethod
def create(model_name, openai_api_key=None, aws_access_key_id=None, aws_secret_access_key=None, hf_token=None, model_args=None, quantize=True):
try:
if model_name == "test":
return TestModelLoader(model_name, model_args)
elif model_name.startswith("gpt"):
return OpenAIModelLoader(openai_api_key, model_name, model_args)
elif model_name.startswith("anthropic"):
return AnthropicModelLoader(aws_access_key_id, aws_secret_access_key, model_name, model_args)
else:
return HFModelLoader(model_name, hf_token, model_args, quantize)
except Exception as e:
print(f"Failed to load the model. Error message: {e}")
raise e
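# Example of picking a loader through the factory (sketch; the API key and model arguments are placeholders):
# loader = ModelLoaderFactory.create("gpt-3.5-turbo", openai_api_key="sk-...", model_args={"max_tokens": 256})
# model = loader.load()
# print(model.generate("Say hello"))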
# =====================
# Utility Function
# =====================
def load_model(model_path, openai_api_key, aws_access_key_id, aws_secret_access_key, hf_token, model_args, quantize):
model_loader = ModelLoaderFactory.create(
model_path,
openai_api_key,
aws_access_key_id,
aws_secret_access_key,
hf_token,
model_args,
quantize
)
model = model_loader.load()
return model | [
"\n\nHuman:\n\nHuman:promptcba15dc8-88cf-493c-91e5-26f60c1dcb6f",
"\n\nHuman:prompt419dbfd4-fde1-4457-8184-831eed246801",
"\n\nAssistant:"
] |
2024-01-10 | uday-31/regime-modeling-with-NLP | modules~text_preprocessing.py | import gensim
from gensim.corpora import Dictionary
from gensim.matutils import corpus2dense, corpus2csc
from gensim.models import TfidfModel
from gensim.models.nmf import Nmf
from gensim.models.coherencemodel import CoherenceModel
import numpy as np
import pandas as pd
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, ENGLISH_STOP_WORDS
FOMC_STOP_WORDS = ["federal", "reserve", "board", "meeting", "committee", "minutes", "members"]
def remove_names_from_minutes(text: str):
"""
This function removes all names from the start of FED Minutes by relying on
the fact that the phrases 'the manager' and 'unanimous' tend to appear at
the end of the initial string of names.
    @param text(str): text which needs to have names removed from the start
    @returns res(str): portion of text after first occurrence of 'the manager'
or 'unanimous'
"""
text = text.lower()
split_by = ''
if 'the manager' in text and 'unanimous' in text:
if text.index('the manager') > text.index('unanimous'):
split_by = 'unanimous'
else:
split_by = 'the manager'
elif 'the manager' in text:
split_by = 'the manager'
elif 'unanimous' in text:
split_by = 'unanimous'
else:
raise ValueError('Neither in text!')
res = text.split(split_by)[1]
return res
def tokenizer_wo_stopwords(text: str):
"""
This function prepares raw text by tokenizing it and removing all stop
words (based on nltk stopwords).
@param text(str): raw text which needs to be prepared for analysis
@return res(str): string representation of text without stopwords
"""
tokens = nltk.word_tokenize(text)
words = [word.lower() for word in tokens if word.isalpha()]
words_wo_stop = [w.lower() for w in words if
w.lower() not in ENGLISH_STOP_WORDS and w.lower() not in FOMC_STOP_WORDS]
res = ' '.join(words_wo_stop)
return res
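# Example (illustrative input/output):
# tokenizer_wo_stopwords("The Federal Reserve Board raised rates.")
# -> "raised rates"  (stop words, FOMC-specific terms, and punctuation are dropped)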
class TF_IDF():
def __init__(self, X_train: pd.Series = None, X_test: pd.Series = None):
self.X_train = X_train
self.X_test = X_test
# Attributes needed for manual TF-IDF computations
self.def_vectorizer = None
self.tfidf_manual_train = None
self.tfidf_manual_test = None
# Attributes needed for gensim TF-IDF computations
self.dict_gensim_statements = None
self.tfidf_model_gensim = None
self.tfidf_statements_train = None
self.tfidf_statements_test = None
self.tfidf_gensim_train = None
self.tfidf_gensim_test = None
def assign(self, X_train: pd.Series = None, X_test: pd.Series = None):
self.X_train = X_train
self.X_test = X_test
def fit_manual_helper(self, train: bool = True):
"""
This function manually computes the TF-IDF values for a column of train
OR test documents, to avoid the incorrect computations performed by
sklearn's native implementation.
@param train: flag determining if function will fit/transform train
data, or only fit vectorizer to test data
"""
if train:
text = self.X_train
else:
text = self.X_test
try:
assert text is not None
except Exception as e:
print(f"assign() train/test data before fitting!")
return
# Get number of documents
n_docs = text.shape[0]
# Generate bag-of-words matrix
if train:
self.def_vectorizer = CountVectorizer(token_pattern='[a-zA-Z]+')
word_bow_matrix = self.def_vectorizer.fit_transform(text)
else:
word_bow_matrix = self.def_vectorizer.transform(text)
word_bow_df = pd.DataFrame(
word_bow_matrix.toarray(),
columns=self.def_vectorizer.get_feature_names_out()
)
# Create TF matrix
tf_df = word_bow_df / word_bow_df.sum(axis=1).values.reshape(n_docs, 1)
# Compute IDF values
idf = np.log(n_docs / (word_bow_df / word_bow_df.values).sum(axis=0))
# Manually create TF-IDF matrix
if train:
self.tfidf_manual_train = tf_df * idf
else:
self.tfidf_manual_test = tf_df * idf
def fit_manual(self):
"""
This function fits the manual TF-IDF model to train data and generates
the values for the test data by calling the previously-defined helper
function consecutively on train and test data.
"""
self.fit_manual_helper(train=True)
self.fit_manual_helper(train=False)
def fit_gensim_helper(self, train: bool = True):
"""
This function uses gensim to compute the TF-IDF values for a column of
train or test documents, to avoid the incorrect computations performed
by sklearn's native implementation.
@param train: flag determining if function will fit/transform train
data, or only fit vectorizer to test data
"""
if train:
text = self.X_train
else:
text = self.X_test
try:
assert text is not None
except Exception as e:
print(f"assign() train/test data before fitting!")
return
gensim_statements = text.apply(lambda x: x.split(" ")).tolist()
if train:
self.dict_gensim_statements = Dictionary(gensim_statements)
bow_gensim_statements = [self.dict_gensim_statements.doc2bow(d) for d in gensim_statements]
if train:
self.tfidf_model_gensim = TfidfModel(bow_gensim_statements)
tfidf_statements = self.tfidf_model_gensim[bow_gensim_statements]
if train:
self.tfidf_statements_train = tfidf_statements
else:
self.tfidf_statements_test = tfidf_statements
num_terms = len(self.dict_gensim_statements.keys())
num_docs = len(tfidf_statements)
if train:
self.tfidf_gensim_train = corpus2dense(
tfidf_statements,
num_terms,
num_docs
).T
else:
self.tfidf_gensim_test = corpus2dense(
tfidf_statements,
num_terms,
num_docs
).T
def fit_gensim(self):
"""
This function fits the gensim TF-IDF model to train data and generates
the values for the test data by calling the previously-defined helper
function consecutively on train and test data.
"""
self.fit_gensim_helper(train=True)
self.fit_gensim_helper(train=False)
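# Example workflow (sketch; X_train / X_test are assumed to be pandas Series of cleaned documents):
# tfidf = TF_IDF(X_train=train_docs, X_test=test_docs)
# tfidf.fit_manual()    # populates tfidf.tfidf_manual_train / tfidf.tfidf_manual_test
# tfidf.fit_gensim()    # populates tfidf.tfidf_gensim_train / tfidf.tfidf_gensim_test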
if __name__ == "__main__":
print(f"Please import this module as a library!")
| [] |
2024-01-10 | FergusFettes/loom | loom~utils~multiverse_util.py | import os
import numpy as np
import openai
from loom.utils.gpt_util import logprobs_to_probs
from loom.utils.tokenizer import token_to_word, tokenize
def generate(prompt, engine, goose=False):
if goose:
openai.api_base = "https://api.goose.ai/v1"
openai.api_key = os.environ.get("GOOSEAI_API_KEY", None)
else:
openai.api_base = "https://api.openai.com/v1"
openai.api_key = os.environ.get("OPENAI_API_KEY", None)
# print('calling engine', engine, 'at endpoint', openai.api_base)
# print('prompt:', prompt)
response = openai.Completion.create(prompt=prompt, max_tokens=1, n=1, temperature=0, logprobs=100, model=engine)
return response
# TODO multiple "ground truth" trajectories
def greedy_word_multiverse(
prompt,
ground_truth="",
max_depth=3,
unnormalized_amplitude=1,
unnormalized_threshold=0.1,
engine="ada",
goose=False,
):
if isinstance(ground_truth, str):
ground_truth = tokenize(ground_truth)
ground_truth = [token_to_word(token).replace("Ġ", " ") for token in ground_truth]
if max_depth == 0:
return {}, ground_truth
print("generating...")
response = generate(prompt, engine, goose)
logprobs = response.choices[0]["logprobs"]["top_logprobs"][0]
probs = {k: logprobs_to_probs(v) for k, v in sorted(logprobs.items(), key=lambda item: item[1], reverse=True)}
multiverse = {
token: {"normalized_prob": prob, "unnormalized_prob": prob * unnormalized_amplitude, "children": {}}
for token, prob in probs.items()
}
ground_truth_token = ground_truth[0] if ground_truth else "NO GROUND TRUTH"
done_ground_truth = False
for token in multiverse.items():
if token[1]["unnormalized_prob"] > unnormalized_threshold:
token[1]["children"], _ = greedy_word_multiverse(
prompt + token[0],
ground_truth="",
max_depth=max_depth - 1,
unnormalized_threshold=unnormalized_threshold,
unnormalized_amplitude=token[1]["unnormalized_prob"],
engine=engine,
goose=goose,
)
elif token[0] == ground_truth_token:
token[1]["children"], _ = greedy_word_multiverse(
prompt + token[0],
ground_truth=ground_truth[1:],
max_depth=max_depth - 1,
unnormalized_threshold=unnormalized_threshold,
unnormalized_amplitude=token[1]["unnormalized_prob"],
engine=engine,
goose=goose,
)
done_ground_truth = True
else:
break
if not done_ground_truth:
if ground_truth_token in multiverse:
multiverse[ground_truth_token]["children"], _ = greedy_word_multiverse(
prompt + ground_truth_token,
ground_truth=ground_truth[1:],
max_depth=max_depth - 1,
unnormalized_threshold=unnormalized_threshold,
unnormalized_amplitude=multiverse[ground_truth_token]["unnormalized_prob"],
engine=engine,
goose=goose,
)
return multiverse, ground_truth
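# Example call (sketch; the prompt, thresholds, and engine are illustrative, and an OpenAI/Goose API key must be set):
# tree, truth = greedy_word_multiverse("Once upon a time", ground_truth=" there was",
#                                      max_depth=2, unnormalized_threshold=0.05, engine="ada")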
| [] |
2024-01-10 | FergusFettes/loom | loom~utils~gpt_util.py | import codecs
import math
import numpy as np
import openai
from loom.utils.tokenizer import logit_mask
def normalize(probs):
return [float(i) / sum(probs) for i in probs]
def logprobs_to_probs(probs):
if isinstance(probs, list):
return [math.exp(x) for x in probs]
else:
return math.exp(probs)
def dict_logprobs_to_probs(prob_dict):
return {key: math.exp(prob_dict[key]) for key in prob_dict.keys()}
def total_logprob(response):
logprobs = response["logprobs"]["token_logprobs"]
logprobs = [i for i in logprobs if not math.isnan(i)]
return sum(logprobs)
def tokenize_ada(prompt):
response = openai.Completion.create(engine="ada", prompt=prompt, max_tokens=0, echo=True, n=1, logprobs=0)
tokens = response.choices[0]["logprobs"]["tokens"]
positions = response.choices[0]["logprobs"]["text_offset"]
return tokens, positions
def prompt_probs(prompt, engine="ada"):
response = openai.Completion.create(engine=engine, prompt=prompt, max_tokens=0, echo=True, n=1, logprobs=0)
positions = response.choices[0]["logprobs"]["text_offset"]
tokens = response.choices[0]["logprobs"]["tokens"]
logprobs = response.choices[0]["logprobs"]["token_logprobs"]
return logprobs, tokens, positions
# evaluates logL(prompt+target | prompt)
def conditional_logprob(prompt, target, engine="ada"):
combined = prompt + target
response = openai.Completion.create(engine=engine, prompt=combined, max_tokens=0, echo=True, n=1, logprobs=0)
positions = response.choices[0]["logprobs"]["text_offset"]
logprobs = response.choices[0]["logprobs"]["token_logprobs"]
word_index = positions.index(len(prompt))
total_conditional_logprob = sum(logprobs[word_index:])
return total_conditional_logprob
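# Example (illustrative): log-likelihood of " Paris" continuing the prompt below.
# lp = conditional_logprob("The capital of France is", " Paris", engine="ada")
# logprobs_to_probs(lp) then gives the corresponding probability.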
# TODO use threading
# returns the conditional probabilities for each event happening after prompt
def event_probs(prompt, events, engine="ada"):
probs = []
for event in events:
logprob = conditional_logprob(prompt, event, engine)
probs.append(logprobs_to_probs(logprob))
normal_probs = normalize(probs)
return probs, normal_probs
# like event_probs, returns conditional probabilities (normalized & unnormalized) for each token occurring after prompt
def token_probs(prompt, tokens, engine="ada"):
pass
# returns a list of positions and counterfactual probability of token at position
# if token is not in top_logprobs, probability is treated as 0
# all positions if actual_token=None, else only positions where the actual token in response is actual_token
# TODO next sequence instead of next token
def counterfactual(response, token, actual_token=None, next_token=None, sort=True):
counterfactual_probs = []
tokens = response.choices[0]["logprobs"]["tokens"]
top_logprobs = response.choices[0]["logprobs"]["top_logprobs"]
positions = response.choices[0]["logprobs"]["text_offset"]
for i, probs in enumerate(top_logprobs):
if (
(actual_token is None and next_token is None)
or actual_token == tokens[i]
or (i < len(tokens) - 1 and next_token == tokens[i + 1])
):
if token in probs:
counterfactual_probs.append({"position": positions[i + 1], "prob": logprobs_to_probs(probs[token])})
else:
counterfactual_probs.append({"position": positions[i + 1], "prob": 0})
if sort:
counterfactual_probs = sorted(counterfactual_probs, key=lambda k: k["prob"])
return counterfactual_probs
# returns a list of substrings of content and
# logL(preprompt+substring+target | preprompt+substring) for each substring
def substring_probs(preprompt, content, target, engine="ada", quiet=0):
logprobs = []
substrings = []
_, positions = tokenize_ada(content)
for position in positions:
substring = content[:position]
prompt = preprompt + substring
logprob = conditional_logprob(prompt, target, engine)
logprobs.append(logprob)
substrings.append(substring)
if not quiet:
print(substring)
print("logprob: ", logprob)
return substrings, logprobs
# returns a list of substrings of content
# logL(substring+target | substring) for each substring
def token_conditional_logprob(content, target, engine="ada"):
response = openai.Completion.create(engine=engine, prompt=content, max_tokens=0, echo=True, n=1, logprobs=100)
tokens = response.choices[0]["logprobs"]["tokens"]
top_logprobs = response.choices[0]["logprobs"]["top_logprobs"]
logprobs = []
substrings = []
substring = ""
for i, probs in enumerate(top_logprobs):
substrings.append(substring)
if target in probs:
logprobs.append(probs[target])
else:
logprobs.append(None)
substring += tokens[i]
return substrings, logprobs
def sort_logprobs(substrings, logprobs, n_top=None):
sorted_indices = np.argsort(logprobs)
top = []
if n_top is None:
n_top = len(sorted_indices)
for i in range(n_top):
top.append({"substring": substrings[sorted_indices[-(i + 1)]], "logprob": logprobs[sorted_indices[-(i + 1)]]})
return top
def top_logprobs(preprompt, content, target, n_top=None, engine="ada", quiet=0):
substrings, logprobs = substring_probs(preprompt, content, target, engine, quiet)
return sort_logprobs(substrings, logprobs, n_top)
def decibels(prior, evidence, target, engine="ada"):
prior_target_logprob = conditional_logprob(prompt=prior, target=target, engine=engine)
evidence_target_logprob = conditional_logprob(prompt=evidence, target=target, engine=engine)
return (evidence_target_logprob - prior_target_logprob), prior_target_logprob, evidence_target_logprob
def parse_stop(stop_string):
return codecs.decode(stop_string, "unicode-escape").split("|")
def parse_logit_bias(logit_string):
biases = codecs.decode(logit_string, "unicode-escape").split("|")
bias_dict = {}
for b in biases:
bias_parts = b.split(":")
token = bias_parts[0]
bias = int(bias_parts[1])
bias_dict[token] = bias
return logit_mask(bias_dict)
| [
"PLACEHOLDERsubstring47ccad2a-01a8-4783-9a48-063dc680c517",
"PLACEHOLDERsubstringe5067d5a-7a3d-480e-b27a-0d7fedd5aaf4PLACEHOLDER",
"PLACEHOLDERsubstring4f6594af-913c-4384-a5b5-e11063c76771"
] |
2024-01-10 | Grace-Hephzibah/OpenChat | dj_backend_server~api~views~views_chat.py | from django.http import JsonResponse
from django.views.decorators.http import require_POST
from langchain import QAWithSourcesChain
from api.utils import get_vector_store
from api.utils.make_chain import getConversationRetrievalChain, getRetrievalQAWithSourcesChain
import json
from django.views.decorators.csrf import csrf_exempt
from api.interfaces import StoreOptions
from web.models.chat_histories import ChatHistory
from django.shortcuts import get_object_or_404
from web.models.chatbot import Chatbot
from uuid import uuid4
import logging
import traceback
from web.services.chat_history_service import get_chat_history_for_retrieval_chain
import os
from dotenv import load_dotenv
load_dotenv()
logger = logging.getLogger(__name__)
@csrf_exempt
@require_POST
def chat(request):
try:
body = json.loads(request.body.decode('utf-8'))
question = body.get('question')
namespace = body.get('namespace')
mode = body.get('mode')
initial_prompt = body.get('initial_prompt')
token = body.get('token')
session_id = body.get('session_id')
bot = get_object_or_404(Chatbot, token=token)
if not question:
return JsonResponse({'error': 'No question in the request'}, status=400)
sanitized_question = question.strip().replace('\n', ' ')
vector_store = get_vector_store(StoreOptions(namespace=namespace))
response_text = get_completion_response(vector_store=vector_store, initial_prompt=initial_prompt, mode=mode, sanitized_question=sanitized_question, session_id=session_id)
ChatHistory.objects.bulk_create([
ChatHistory(
id=uuid4(),
chatbot_id=bot.id,
from_user=True,
message=sanitized_question,
session_id=session_id
),
ChatHistory(
id=uuid4(),
chatbot_id=bot.id,
from_user=False,
message=response_text,
session_id=session_id
)
])
return JsonResponse({'text': response_text})
except json.JSONDecodeError:
return JsonResponse({'error': 'Invalid JSON in request body'}, status=400)
except Chatbot.DoesNotExist:
return JsonResponse({'error': 'Chatbot not found'}, status=404)
except Exception as e:
logger.error(str(e))
logger.error(traceback.format_exc())
return JsonResponse({'error': 'An error occurred'}, status=500)
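# Illustrative JSON body for the chat view above (a sketch inferred from the
# fields it reads; values are hypothetical, not an official schema):
# {
#   "question": "What is your refund policy?",
#   "namespace": "my-docs",
#   "mode": "assistant",
#   "initial_prompt": "You are a helpful support bot.",
#   "token": "<chatbot token>",
#   "session_id": "3f2c..."
# }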
def get_completion_response(vector_store, mode, initial_prompt, sanitized_question, session_id):
chain_type = os.getenv("CHAIN_TYPE", "conversation_retrieval")
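# CHAIN_TYPE selects one of the two branches below: "retrieval_qa" for a plain
# QA-with-sources chain, or the default "conversation_retrieval", which also
# feeds recent chat history into the chain.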
chain: QAWithSourcesChain
if chain_type == 'retrieval_qa':
chain = getRetrievalQAWithSourcesChain(vector_store, mode, initial_prompt)
response = chain({"question": sanitized_question}, return_only_outputs=True)
response_text = response['answer']
elif chain_type == 'conversation_retrieval':
chain = getConversationRetrievalChain(vector_store, mode, initial_prompt)
chat_history = get_chat_history_for_retrieval_chain(session_id, limit=40)
response = chain({"question": sanitized_question, "chat_history": chat_history}, return_only_outputs=True)
response_text = response['answer']
return response_text
| [
"initial_prompt"
] |
2024-01-10 | sil-ai/sil-microsoft-hackathon-2023 | project_1_task~pre-Hackathon~internship_qa_generation.py | import os
import openai
import time
import json
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
openai.api_key = os.getenv('OPENAI_API_KEY')
dirname = os.path.dirname(__file__)
def get_completion(prompt, model="gpt-3.5-turbo"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0.5,
)
return response.choices[0].message["content"]
def format_data(ref, response, verse):
data = {}
data['book'] = str(ref.split(" ")[0])
data['chapter'] = int(ref.split(" ")[1].split(":")[0])
data['start_verse'] = int(ref.split(" ")[1].split(":")[1])
data['end_verse'] = int(ref.split(" ")[1].split(":")[1])
data['context'] = str(verse)
data['questions'] = json.loads(response)
return data
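# Illustrative shape of the dict built by format_data above (values are
# hypothetical; "questions" holds the JSON list returned by the model):
#   {
#     "book": "RUT", "chapter": 1, "start_verse": 1, "end_verse": 1,
#     "context": "<verse text>",
#     "questions": [{"question": "...", "answer": "..."}]
#   }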
def get_prompt(verse):
prompt = f'''
Your task is to generate question answer pairs given a Bible verse as context.
These question answer pairs are meant to be simple, imagine that they are for a 2nd grade comprehension quiz.
Here are some examples:
verse: "God saw that the light was good, so God separated the light from the darkness."
response:
[
{{
"question": "What did God separate the light from?",
"answer": "The darkness"
}},
{{
"question": "What did God separate from the darkness?",
"answer": "The light"
}}
]
verse: "God said, "Let the water under the sky be gathered to one place and let dry ground appear." It was so."
response:
[
{{
"question": "What did God want to be gathered to one place?",
"answer": "The water"
}},
{{
"question": "What appeared when the water was gathered to one place?",
"answer": "Dry ground"
}}
]
Do not create any duplicate questions.
You can create anywhere between 1 - 4 questions depending on the amount of content in the verse.
The answers should be nouns and as short as possible.
Make sure you use the exact format as seen above.
verse: "{verse}"
response:
'''
return prompt
def main():
filename = os.path.join(dirname, 'vref.txt')
refs_file = open(filename, encoding="utf-8")
refs = refs_file.readlines()
filename = os.path.join(dirname, 'en-NET.txt')
verses_file = open(filename, encoding="utf-8")
verses = verses_file.readlines()
# Book of Ruth
start_index = 7129
end_index = 7214
data = []
try:
for i, verse in enumerate(verses[start_index:end_index]):
prompt = get_prompt(verse.strip())
response = get_completion(prompt)
formatted = format_data(refs[start_index+i].strip(), response, verse)
data.append(formatted)
print(f"Line {start_index+i} Completed")
time.sleep(20)
except Exception as e:
print(e)
finally:
output_file = open(f'{dirname}/output.json', "w")
json.dump(data, output_file, ensure_ascii=False)
return
main() | [
"\n\t\tYour task is to generate question answer pairs given a Bible verse as context.\n\t\tThese question answer pairs are meant to be simple, imagine that they are for a 2nd grade comprehension quiz.\n\n\t\tHere are some examples:\n\n\t\tverse: \"God saw that the light was good, so God separated the light from the darkness.\"\n\t\tresponse:\n\t\t[\n\t\t\t{\n\t\t\t\t\"question\": \"What did God separate the light from?\",\n\t\t\t\t\"answer\": \"The darkness\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"question\": \"What did God separate from the darkness?\",\n\t\t\t\t\"answer\": \"The light\"\n\t\t\t}\n\t\t]\n\n\t\tverse: \"God said, \"Let the water under the sky be gathered to one place and let dry ground appear.\" It was so.\"\n\t\tresponse: \n\t\t[\n\t\t\t{\n\t\t\t\t\"question\": \"What did God want to be gathered to one place?\",\n\t\t\t\t\"answer\": \"The water\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"question\": \"What appeared when the water was gathered to one place?\",\n\t\t\t\t\"answer\": \"Dry ground\"\n\t\t\t}\n\t\t]\n\n\t\tDo not create any duplicate questions.\n\t\tYou can create anywhere between 1 - 4 questions depending on the amount of content in the verse. \n\t\tThe answers should be nouns and as short as possible.\n\t\tMake sure you use the exact format as seen above.\n\n\t\tverse: \"PLACEHOLDER\"\n\t\tresponse: \n\t"
] |
2024-01-10 | n3d1117/chatgpt-telegram-bot | bot~telegram_bot.py | from __future__ import annotations
import asyncio
import logging
import os
import io
from uuid import uuid4
from telegram import BotCommandScopeAllGroupChats, Update, constants
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, InlineQueryResultArticle
from telegram import InputTextMessageContent, BotCommand
from telegram.error import RetryAfter, TimedOut, BadRequest
from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, \
filters, InlineQueryHandler, CallbackQueryHandler, Application, ContextTypes, CallbackContext
from pydub import AudioSegment
from PIL import Image
from utils import is_group_chat, get_thread_id, message_text, wrap_with_indicator, split_into_chunks, \
edit_message_with_retry, get_stream_cutoff_values, is_allowed, get_remaining_budget, is_admin, is_within_budget, \
get_reply_to_message_id, add_chat_request_to_usage_tracker, error_handler, is_direct_result, handle_direct_result, \
cleanup_intermediate_files
from openai_helper import OpenAIHelper, localized_text
from usage_tracker import UsageTracker
class ChatGPTTelegramBot:
"""
Class representing a ChatGPT Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
Initializes the bot with the given configuration and GPT bot object.
:param config: A dictionary containing the bot configuration
:param openai: OpenAIHelper object
"""
self.config = config
self.openai = openai
bot_language = self.config['bot_language']
self.commands = [
BotCommand(command='help', description=localized_text('help_description', bot_language)),
BotCommand(command='reset', description=localized_text('reset_description', bot_language)),
BotCommand(command='stats', description=localized_text('stats_description', bot_language)),
BotCommand(command='resend', description=localized_text('resend_description', bot_language))
]
# If image generation is enabled, add the "image" command to the list
if self.config.get('enable_image_generation', False):
self.commands.append(BotCommand(command='image', description=localized_text('image_description', bot_language)))
if self.config.get('enable_tts_generation', False):
self.commands.append(BotCommand(command='tts', description=localized_text('tts_description', bot_language)))
self.group_commands = [BotCommand(
command='chat', description=localized_text('chat_description', bot_language)
)] + self.commands
self.disallowed_message = localized_text('disallowed', bot_language)
self.budget_limit_message = localized_text('budget_limit', bot_language)
self.usage = {}
self.last_message = {}
self.inline_queries_cache = {}
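# Construction sketch (hypothetical values; the real entry point builds the
# config dict and the OpenAIHelper instance elsewhere in this project):
#   bot = ChatGPTTelegramBot(config=config_dict, openai=openai_helper)
#   bot.run()  # registers the handlers defined below and starts polling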
async def help(self, update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""
Shows the help menu.
"""
commands = self.group_commands if is_group_chat(update) else self.commands
commands_description = [f'/{command.command} - {command.description}' for command in commands]
bot_language = self.config['bot_language']
help_text = (
localized_text('help_text', bot_language)[0] +
'\n\n' +
'\n'.join(commands_description) +
'\n\n' +
localized_text('help_text', bot_language)[1] +
'\n\n' +
localized_text('help_text', bot_language)[2]
)
await update.message.reply_text(help_text, disable_web_page_preview=True)
async def stats(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Returns token usage statistics for current day and month.
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to request their usage statistics')
await self.send_disallowed_message(update, context)
return
logging.info(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'requested their usage statistics')
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
tokens_today, tokens_month = self.usage[user_id].get_current_token_usage()
images_today, images_month = self.usage[user_id].get_current_image_count()
(transcribe_minutes_today, transcribe_seconds_today, transcribe_minutes_month,
transcribe_seconds_month) = self.usage[user_id].get_current_transcription_duration()
vision_today, vision_month = self.usage[user_id].get_current_vision_tokens()
characters_today, characters_month = self.usage[user_id].get_current_tts_usage()
current_cost = self.usage[user_id].get_current_cost()
chat_id = update.effective_chat.id
chat_messages, chat_token_length = self.openai.get_conversation_stats(chat_id)
remaining_budget = get_remaining_budget(self.config, self.usage, update)
bot_language = self.config['bot_language']
text_current_conversation = (
f"*{localized_text('stats_conversation', bot_language)[0]}*:\n"
f"{chat_messages} {localized_text('stats_conversation', bot_language)[1]}\n"
f"{chat_token_length} {localized_text('stats_conversation', bot_language)[2]}\n"
f"----------------------------\n"
)
# Check if image generation is enabled and, if so, generate the image statistics for today
text_today_images = ""
if self.config.get('enable_image_generation', False):
text_today_images = f"{images_today} {localized_text('stats_images', bot_language)}\n"
text_today_vision = ""
if self.config.get('enable_vision', False):
text_today_vision = f"{vision_today} {localized_text('stats_vision', bot_language)}\n"
text_today_tts = ""
if self.config.get('enable_tts_generation', False):
text_today_tts = f"{characters_today} {localized_text('stats_tts', bot_language)}\n"
text_today = (
f"*{localized_text('usage_today', bot_language)}:*\n"
f"{tokens_today} {localized_text('stats_tokens', bot_language)}\n"
f"{text_today_images}" # Include the image statistics for today if applicable
f"{text_today_vision}"
f"{text_today_tts}"
f"{transcribe_minutes_today} {localized_text('stats_transcribe', bot_language)[0]} "
f"{transcribe_seconds_today} {localized_text('stats_transcribe', bot_language)[1]}\n"
f"{localized_text('stats_total', bot_language)}{current_cost['cost_today']:.2f}\n"
f"----------------------------\n"
)
text_month_images = ""
if self.config.get('enable_image_generation', False):
text_month_images = f"{images_month} {localized_text('stats_images', bot_language)}\n"
text_month_vision = ""
if self.config.get('enable_vision', False):
text_month_vision = f"{vision_month} {localized_text('stats_vision', bot_language)}\n"
text_month_tts = ""
if self.config.get('enable_tts_generation', False):
text_month_tts = f"{characters_month} {localized_text('stats_tts', bot_language)}\n"
# Check if image generation is enabled and, if so, generate the image statistics for the month
text_month = (
f"*{localized_text('usage_month', bot_language)}:*\n"
f"{tokens_month} {localized_text('stats_tokens', bot_language)}\n"
f"{text_month_images}" # Include the image statistics for the month if applicable
f"{text_month_vision}"
f"{text_month_tts}"
f"{transcribe_minutes_month} {localized_text('stats_transcribe', bot_language)[0]} "
f"{transcribe_seconds_month} {localized_text('stats_transcribe', bot_language)[1]}\n"
f"{localized_text('stats_total', bot_language)}{current_cost['cost_month']:.2f}"
)
# text_budget filled with conditional content
text_budget = "\n\n"
budget_period = self.config['budget_period']
if remaining_budget < float('inf'):
text_budget += (
f"{localized_text('stats_budget', bot_language)}"
f"{localized_text(budget_period, bot_language)}: "
f"${remaining_budget:.2f}.\n"
)
# No longer works as of July 21st 2023, as OpenAI has removed the billing API
# add OpenAI account information for admin request
# if is_admin(self.config, user_id):
# text_budget += (
# f"{localized_text('stats_openai', bot_language)}"
# f"{self.openai.get_billing_current_month():.2f}"
# )
usage_text = text_current_conversation + text_today + text_month + text_budget
await update.message.reply_text(usage_text, parse_mode=constants.ParseMode.MARKDOWN)
async def resend(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resend the last request
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' is not allowed to resend the message')
await self.send_disallowed_message(update, context)
return
chat_id = update.effective_chat.id
if chat_id not in self.last_message:
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' does not have anything to resend')
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('resend_failed', self.config['bot_language'])
)
return
# Update message text, clear self.last_message and send the request to prompt
logging.info(f'Resending the last prompt from user: {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
with update.message._unfrozen() as message:
message.text = self.last_message.pop(chat_id)
await self.prompt(update=update, context=context)
async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resets the conversation.
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to reset the conversation')
await self.send_disallowed_message(update, context)
return
logging.info(f'Resetting the conversation for user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})...')
chat_id = update.effective_chat.id
reset_content = message_text(update.message)
self.openai.reset_chat_history(chat_id=chat_id, content=reset_content)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('reset_done', self.config['bot_language'])
)
async def image(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Generates an image for the given prompt using DALL·E APIs
"""
if not self.config['enable_image_generation'] \
or not await self.check_allowed_and_within_budget(update, context):
return
image_query = message_text(update.message)
if image_query == '':
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('image_no_prompt', self.config['bot_language'])
)
return
logging.info(f'New image generation request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
async def _generate():
try:
image_url, image_size = await self.openai.generate_image(prompt=image_query)
if self.config['image_receive_mode'] == 'photo':
await update.effective_message.reply_photo(
reply_to_message_id=get_reply_to_message_id(self.config, update),
photo=image_url
)
elif self.config['image_receive_mode'] == 'document':
await update.effective_message.reply_document(
reply_to_message_id=get_reply_to_message_id(self.config, update),
document=image_url
)
else:
raise Exception(f"env variable IMAGE_RECEIVE_MODE has invalid value {self.config['image_receive_mode']}")
# add image request to user's usage tracker
user_id = update.message.from_user.id
self.usage[user_id].add_image_request(image_size, self.config['image_prices'])
# add guest image request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_image_request(image_size, self.config['image_prices'])
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('image_fail', self.config['bot_language'])}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
await wrap_with_indicator(update, context, _generate, constants.ChatAction.UPLOAD_PHOTO)
async def tts(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Generates speech for the given input using TTS APIs
"""
if not self.config['enable_tts_generation'] \
or not await self.check_allowed_and_within_budget(update, context):
return
tts_query = message_text(update.message)
if tts_query == '':
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('tts_no_prompt', self.config['bot_language'])
)
return
logging.info(f'New speech generation request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
async def _generate():
try:
speech_file, text_length = await self.openai.generate_speech(text=tts_query)
await update.effective_message.reply_voice(
reply_to_message_id=get_reply_to_message_id(self.config, update),
voice=speech_file
)
speech_file.close()
# add tts request to user's usage tracker
user_id = update.message.from_user.id
self.usage[user_id].add_tts_request(text_length, self.config['tts_model'], self.config['tts_prices'])
# add guest tts request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_tts_request(text_length, self.config['tts_model'], self.config['tts_prices'])
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('tts_fail', self.config['bot_language'])}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
await wrap_with_indicator(update, context, _generate, constants.ChatAction.UPLOAD_VOICE)
async def transcribe(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Transcribe audio messages.
"""
if not self.config['enable_transcription'] or not await self.check_allowed_and_within_budget(update, context):
return
if is_group_chat(update) and self.config['ignore_group_transcriptions']:
logging.info(f'Transcription coming from group chat, ignoring...')
return
chat_id = update.effective_chat.id
filename = update.message.effective_attachment.file_unique_id
async def _execute():
filename_mp3 = f'{filename}.mp3'
bot_language = self.config['bot_language']
try:
media_file = await context.bot.get_file(update.message.effective_attachment.file_id)
await media_file.download_to_drive(filename)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=(
f"{localized_text('media_download_fail', bot_language)[0]}: "
f"{str(e)}. {localized_text('media_download_fail', bot_language)[1]}"
),
parse_mode=constants.ParseMode.MARKDOWN
)
return
try:
audio_track = AudioSegment.from_file(filename)
audio_track.export(filename_mp3, format="mp3")
logging.info(f'New transcribe request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=localized_text('media_type_fail', bot_language)
)
if os.path.exists(filename):
os.remove(filename)
return
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
try:
transcript = await self.openai.transcribe(filename_mp3)
transcription_price = self.config['transcription_price']
self.usage[user_id].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
# check if transcript starts with any of the prefixes
response_to_transcription = any(transcript.lower().startswith(prefix.lower()) if prefix else False
for prefix in self.config['voice_reply_prompts'])
if self.config['voice_reply_transcript'] and not response_to_transcription:
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f"_{localized_text('transcript', bot_language)}:_\n\"{transcript}\""
chunks = split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update) if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
else:
# Get the response of the transcript
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=transcript)
self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = (
f"_{localized_text('transcript', bot_language)}:_\n\"{transcript}\"\n\n"
f"_{localized_text('answer', bot_language)}:_\n{response}"
)
chunks = split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update) if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('transcribe_fail', bot_language)}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
finally:
if os.path.exists(filename_mp3):
os.remove(filename_mp3)
if os.path.exists(filename):
os.remove(filename)
await wrap_with_indicator(update, context, _execute, constants.ChatAction.TYPING)
async def vision(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Interpret image using vision model.
"""
if not self.config['enable_vision'] or not await self.check_allowed_and_within_budget(update, context):
return
chat_id = update.effective_chat.id
prompt = update.message.caption
if is_group_chat(update):
if self.config['ignore_group_vision']:
logging.info(f'Vision coming from group chat, ignoring...')
return
else:
trigger_keyword = self.config['group_trigger_keyword']
if (prompt is None and trigger_keyword != '') or \
(prompt is not None and not prompt.lower().startswith(trigger_keyword.lower())):
logging.info(f'Vision coming from group chat with wrong keyword, ignoring...')
return
image = update.message.effective_attachment[-1]
async def _execute():
bot_language = self.config['bot_language']
try:
media_file = await context.bot.get_file(image.file_id)
temp_file = io.BytesIO(await media_file.download_as_bytearray())
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=(
f"{localized_text('media_download_fail', bot_language)[0]}: "
f"{str(e)}. {localized_text('media_download_fail', bot_language)[1]}"
),
parse_mode=constants.ParseMode.MARKDOWN
)
return
# convert jpg from telegram to png as understood by openai
temp_file_png = io.BytesIO()
try:
original_image = Image.open(temp_file)
original_image.save(temp_file_png, format='PNG')
logging.info(f'New vision request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=localized_text('media_type_fail', bot_language)
)
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
if self.config['stream']:
stream_response = self.openai.interpret_image_stream(chat_id=chat_id, fileobj=temp_file_png, prompt=prompt)
i = 0
prev = ''
sent_message = None
backoff = 0
stream_chunk = 0
async for content, tokens in stream_response:
if is_direct_result(content):
return await handle_direct_result(self.config, update, content)
if len(content.strip()) == 0:
continue
stream_chunks = split_into_chunks(content)
if len(stream_chunks) > 1:
content = stream_chunks[-1]
if stream_chunk != len(stream_chunks) - 1:
stream_chunk += 1
try:
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
stream_chunks[-2])
except:
pass
try:
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=content if len(content) > 0 else "..."
)
except:
pass
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
if sent_message is not None:
await context.bot.delete_message(chat_id=sent_message.chat_id,
message_id=sent_message.message_id)
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=content,
)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
text=content, markdown=use_markdown)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
try:
interpretation, total_tokens = await self.openai.interpret_image(chat_id, temp_file_png, prompt=prompt)
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=interpretation,
parse_mode=constants.ParseMode.MARKDOWN
)
except BadRequest:
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=interpretation
)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('vision_fail', bot_language)}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('vision_fail', bot_language)}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
vision_token_price = self.config['vision_token_price']
self.usage[user_id].add_vision_tokens(total_tokens, vision_token_price)
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_vision_tokens(total_tokens, vision_token_price)
await wrap_with_indicator(update, context, _execute, constants.ChatAction.TYPING)
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
React to incoming messages and respond accordingly.
"""
if update.edited_message or not update.message or update.message.via_bot:
return
if not await self.check_allowed_and_within_budget(update, context):
return
logging.info(
f'New message received from user {update.message.from_user.name} (id: {update.message.from_user.id})')
chat_id = update.effective_chat.id
user_id = update.message.from_user.id
prompt = message_text(update.message)
self.last_message[chat_id] = prompt
if is_group_chat(update):
trigger_keyword = self.config['group_trigger_keyword']
if prompt.lower().startswith(trigger_keyword.lower()) or update.message.text.lower().startswith('/chat'):
if prompt.lower().startswith(trigger_keyword.lower()):
prompt = prompt[len(trigger_keyword):].strip()
if update.message.reply_to_message and \
update.message.reply_to_message.text and \
update.message.reply_to_message.from_user.id != context.bot.id:
prompt = f'"{update.message.reply_to_message.text}" {prompt}'
else:
if update.message.reply_to_message and update.message.reply_to_message.from_user.id == context.bot.id:
logging.info('Message is a reply to the bot, allowing...')
else:
logging.warning('Message does not start with trigger keyword, ignoring...')
return
try:
total_tokens = 0
if self.config['stream']:
await update.effective_message.reply_chat_action(
action=constants.ChatAction.TYPING,
message_thread_id=get_thread_id(update)
)
stream_response = self.openai.get_chat_response_stream(chat_id=chat_id, query=prompt)
i = 0
prev = ''
sent_message = None
backoff = 0
stream_chunk = 0
async for content, tokens in stream_response:
if is_direct_result(content):
return await handle_direct_result(self.config, update, content)
if len(content.strip()) == 0:
continue
stream_chunks = split_into_chunks(content)
if len(stream_chunks) > 1:
content = stream_chunks[-1]
if stream_chunk != len(stream_chunks) - 1:
stream_chunk += 1
try:
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
stream_chunks[-2])
except:
pass
try:
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=content if len(content) > 0 else "..."
)
except:
pass
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
if sent_message is not None:
await context.bot.delete_message(chat_id=sent_message.chat_id,
message_id=sent_message.message_id)
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=content,
)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
text=content, markdown=use_markdown)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
async def _reply():
nonlocal total_tokens
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=prompt)
if is_direct_result(response):
return await handle_direct_result(self.config, update, response)
# Split into chunks of 4096 characters (Telegram's message limit)
chunks = split_into_chunks(response)
for index, chunk in enumerate(chunks):
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config,
update) if index == 0 else None,
text=chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception:
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config,
update) if index == 0 else None,
text=chunk
)
except Exception as exception:
raise exception
await wrap_with_indicator(update, context, _reply, constants.ChatAction.TYPING)
add_chat_request_to_usage_tracker(self.usage, self.config, user_id, total_tokens)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('chat_fail', self.config['bot_language'])} {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
async def inline_query(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Handle the inline query. This is run when you type: @botusername <query>
"""
query = update.inline_query.query
if len(query) < 3:
return
if not await self.check_allowed_and_within_budget(update, context, is_inline=True):
return
callback_data_suffix = "gpt:"
result_id = str(uuid4())
self.inline_queries_cache[result_id] = query
callback_data = f'{callback_data_suffix}{result_id}'
await self.send_inline_query_result(update, result_id, message_content=query, callback_data=callback_data)
async def send_inline_query_result(self, update: Update, result_id, message_content, callback_data=""):
"""
Send inline query result
"""
try:
reply_markup = None
bot_language = self.config['bot_language']
if callback_data:
reply_markup = InlineKeyboardMarkup([[
InlineKeyboardButton(text=f'🤖 {localized_text("answer_with_chatgpt", bot_language)}',
callback_data=callback_data)
]])
inline_query_result = InlineQueryResultArticle(
id=result_id,
title=localized_text("ask_chatgpt", bot_language),
input_message_content=InputTextMessageContent(message_content),
description=message_content,
thumb_url='https://user-images.githubusercontent.com/11541888/223106202-7576ff11-2c8e-408d-94ea'
'-b02a7a32149a.png',
reply_markup=reply_markup
)
await update.inline_query.answer([inline_query_result], cache_time=0)
except Exception as e:
logging.error(f'An error occurred while generating the result card for inline query {e}')
async def handle_callback_inline_query(self, update: Update, context: CallbackContext):
"""
Handle the callback query from the inline query result
"""
callback_data = update.callback_query.data
user_id = update.callback_query.from_user.id
inline_message_id = update.callback_query.inline_message_id
name = update.callback_query.from_user.name
callback_data_suffix = "gpt:"
query = ""
bot_language = self.config['bot_language']
answer_tr = localized_text("answer", bot_language)
loading_tr = localized_text("loading", bot_language)
try:
if callback_data.startswith(callback_data_suffix):
unique_id = callback_data.split(':')[1]
total_tokens = 0
# Retrieve the prompt from the cache
query = self.inline_queries_cache.get(unique_id)
if query:
self.inline_queries_cache.pop(unique_id)
else:
error_message = (
f'{localized_text("error", bot_language)}. '
f'{localized_text("try_again", bot_language)}'
)
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{error_message}',
is_inline=True)
return
unavailable_message = localized_text("function_unavailable_in_inline_mode", bot_language)
if self.config['stream']:
stream_response = self.openai.get_chat_response_stream(chat_id=user_id, query=query)
i = 0
prev = ''
backoff = 0
async for content, tokens in stream_response:
if is_direct_result(content):
cleanup_intermediate_files(content)
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{unavailable_message}',
is_inline=True)
return
if len(content.strip()) == 0:
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n{answer_tr}:\n{content}',
is_inline=True)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
divider = '_' if use_markdown else ''
text = f'{query}\n\n{divider}{answer_tr}:{divider}\n{content}'
# We only want to send the first 4096 characters. No chunking allowed in inline mode.
text = text[:4096]
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=text, markdown=use_markdown, is_inline=True)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
async def _send_inline_query_response():
nonlocal total_tokens
# Edit the current message to indicate that the answer is being processed
await context.bot.edit_message_text(inline_message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{loading_tr}',
parse_mode=constants.ParseMode.MARKDOWN)
logging.info(f'Generating response for inline query by {name}')
response, total_tokens = await self.openai.get_chat_response(chat_id=user_id, query=query)
if is_direct_result(response):
cleanup_intermediate_files(response)
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{unavailable_message}',
is_inline=True)
return
text_content = f'{query}\n\n_{answer_tr}:_\n{response}'
# We only want to send the first 4096 characters. No chunking allowed in inline mode.
text_content = text_content[:4096]
# Edit the original message with the generated content
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=text_content, is_inline=True)
await wrap_with_indicator(update, context, _send_inline_query_response,
constants.ChatAction.TYPING, is_inline=True)
add_chat_request_to_usage_tracker(self.usage, self.config, user_id, total_tokens)
except Exception as e:
logging.error(f'Failed to respond to an inline query via button callback: {e}')
logging.exception(e)
localized_answer = localized_text('chat_fail', self.config['bot_language'])
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=f"{query}\n\n_{answer_tr}:_\n{localized_answer} {str(e)}",
is_inline=True)
async def check_allowed_and_within_budget(self, update: Update, context: ContextTypes.DEFAULT_TYPE,
is_inline=False) -> bool:
"""
Checks if the user is allowed to use the bot and if they are within their budget
:param update: Telegram update object
:param context: Telegram context object
:param is_inline: Boolean flag for inline queries
:return: Boolean indicating if the user is allowed to use the bot
"""
name = update.inline_query.from_user.name if is_inline else update.message.from_user.name
user_id = update.inline_query.from_user.id if is_inline else update.message.from_user.id
if not await is_allowed(self.config, update, context, is_inline=is_inline):
logging.warning(f'User {name} (id: {user_id}) is not allowed to use the bot')
await self.send_disallowed_message(update, context, is_inline)
return False
if not is_within_budget(self.config, self.usage, update, is_inline=is_inline):
logging.warning(f'User {name} (id: {user_id}) reached their usage limit')
await self.send_budget_reached_message(update, context, is_inline)
return False
return True
async def send_disallowed_message(self, update: Update, _: ContextTypes.DEFAULT_TYPE, is_inline=False):
"""
Sends the disallowed message to the user.
"""
if not is_inline:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=self.disallowed_message,
disable_web_page_preview=True
)
else:
result_id = str(uuid4())
await self.send_inline_query_result(update, result_id, message_content=self.disallowed_message)
async def send_budget_reached_message(self, update: Update, _: ContextTypes.DEFAULT_TYPE, is_inline=False):
"""
Sends the budget reached message to the user.
"""
if not is_inline:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=self.budget_limit_message
)
else:
result_id = str(uuid4())
await self.send_inline_query_result(update, result_id, message_content=self.budget_limit_message)
async def post_init(self, application: Application) -> None:
"""
Post initialization hook for the bot.
"""
await application.bot.set_my_commands(self.group_commands, scope=BotCommandScopeAllGroupChats())
await application.bot.set_my_commands(self.commands)
def run(self):
"""
Runs the bot indefinitely until the user presses Ctrl+C
"""
application = ApplicationBuilder() \
.token(self.config['token']) \
.proxy_url(self.config['proxy']) \
.get_updates_proxy_url(self.config['proxy']) \
.post_init(self.post_init) \
.concurrent_updates(True) \
.build()
application.add_handler(CommandHandler('reset', self.reset))
application.add_handler(CommandHandler('help', self.help))
application.add_handler(CommandHandler('image', self.image))
application.add_handler(CommandHandler('tts', self.tts))
application.add_handler(CommandHandler('start', self.help))
application.add_handler(CommandHandler('stats', self.stats))
application.add_handler(CommandHandler('resend', self.resend))
application.add_handler(CommandHandler(
'chat', self.prompt, filters=filters.ChatType.GROUP | filters.ChatType.SUPERGROUP)
)
application.add_handler(MessageHandler(
filters.PHOTO | filters.Document.IMAGE,
self.vision))
application.add_handler(MessageHandler(
filters.AUDIO | filters.VOICE | filters.Document.AUDIO |
filters.VIDEO | filters.VIDEO_NOTE | filters.Document.VIDEO,
self.transcribe))
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), self.prompt))
application.add_handler(InlineQueryHandler(self.inline_query, chat_types=[
constants.ChatType.GROUP, constants.ChatType.SUPERGROUP, constants.ChatType.PRIVATE
]))
application.add_handler(CallbackQueryHandler(self.handle_callback_inline_query))
application.add_error_handler(error_handler)
application.run_polling()
| [] |
2024-01-10 | christianmk04/urp-interface | chatpdf.py | # IMPORT FLASK APP DEPENDENCIES
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
from os import environ
app = Flask(__name__)
CORS(app)
##################################################################################################################################################################################################
##################################################################################################################################################################################################
##################################################################################################################################################################################################
'''
FUNCTIONS HERE ARE TO BE USED FOR RENDERING OF THE DIFFERENT HTML PAGES OF THE INTERFACE
'''
# RENDER KEYWORD AND TEXT INPUT PAGE
@app.route('/')
def main_text():
return render_template('interface.html')
# RENDER UPLOAD PDF PAGE
@app.route('/upload')
def main_upload():
return render_template('interface_upload.html')
# RENDER RETRIEVAL PAGE FOR SELECTION
@app.route('/retrieval')
def main_retrieval_page():
return render_template('interface_retrieval.html')
# RENDER PAGE TO EDIT CS AND RELATED QA
@app.route('/retrieval_csqa')
def main_retrieval_csqa_page():
return render_template('retrieval_csqa.html')
##################################################################################################################################################################################################
##################################################################################################################################################################################################
##################################################################################################################################################################################################
# IMPORT LANGCHAIN DEPENDENCIES
from PyPDF2 import PdfReader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import ElasticVectorSearch, Pinecone, Weaviate, FAISS
import tiktoken
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
import openai
import requests
import os
##################################################################################################################################################################################################
##################################################################################################################################################################################################
##################################################################################################################################################################################################
'''
FUNCTIONS HERE ARE TO BE USED FOR GENERATION OF RESOURCES USING PDF FILES
FUNCTIONS IN THIS SECTION INCLUDE:
REUSABLE FUNCTION - PDF_READ
- USED TO READ THE PDF FILE AND GENERATE THE KNOWLEDGE BASE AND CHAIN FOR GENERATION OF CASE STUDY
REUSABLE FUNCTION - UPLOAD_FILE_SKELETON
- USED TO UPLOAD THE PDF FILE TO THE DB
REUSABLE FUNCTION - UPLOAD_CS_SKELETON
- USED TO UPLOAD THE GENERATED CASE STUDY TO THE DB
FUNCTION - UPLOAD_CS
- GENERATES CASE STUDY AND UPLOADS IT TO THE DB
FUNCTION - UPLOAD_QA
- GENERATES INDEPENDENT QA AND UPLOADS IT TO THE DB
FUNCTION - UPLOAD_CSQA
- GENERATES CASE STUDY AND RELATED QA AND UPLOADS IT TO THE DB
'''
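# High-level flow sketch for the upload endpoints below (assumes the companion
# storage service of this project is reachable at localhost:5001, as used by the
# helper functions):
#   1. file_id = upload_file_skeleton(file, file.filename)   # store the raw PDF
#   2. chain, knowledge_base = pdf_read(file)                # FAISS index + QA chain
#   3. docs = knowledge_base.similarity_search(query); chain.run(...)  # generate text
#   4. upload_cs_skeleton(file_id, cs_output, topic_output)  # persist the case study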
# FUNCTION TO DO THE PDF READING - REUSABLE FUNCTION
def pdf_read(uploaded_file):
reader = PdfReader(uploaded_file)
raw_text = ""
for i, page in enumerate(reader.pages):
text = page.extract_text()
if text:
raw_text += text
text_splitter = CharacterTextSplitter(
separator = "\n",
chunk_size = 1000,
chunk_overlap = 200,
length_function = len,
)
texts = text_splitter.split_text(raw_text)
embeddings = OpenAIEmbeddings()
knowledge_base = FAISS.from_texts(texts, embeddings)
chain = load_qa_chain(ChatOpenAI(), chain_type="stuff")
return [chain, knowledge_base]
# FUNCTION TO UPLOAD PDF FILE TO DB - REUSABLE FUNCTION
def upload_file_skeleton(file_to_upload, file_name):
mongo_upload_endpoint = "http://localhost:5001/upload_pdf" + "/" + file_to_upload.filename
try:
response = requests.post(mongo_upload_endpoint, file_to_upload)
file_id = response.text
print("Successfully uploaded file to DB")
except Exception as e:
print("Error")
print(e)
return file_id
# FUNCTION TO UPLOAD GENERATED CASE STUDY TO DB
def upload_cs_skeleton(file_id, case_study_output, topics):
mongo_upload_endpoint = "http://localhost:5001/upload_cs_for_pdf" + "/" + file_id
topic_split = topics.split("\n")
main_topic = topic_split[0].split(":")[1].strip()
sub_topics = topic_split[1].split(":")[1].strip()
case_study_output_json = {
"main_topic": main_topic,
"sub_topic": sub_topics,
"case_study": case_study_output
}
try:
response = requests.post(mongo_upload_endpoint, json=case_study_output_json)
print("Successfully uploaded case study to DB")
cs_id = response.text
except Exception as e:
print("Error")
print(e)
return cs_id
# UPLOAD PDF FILE TO DB AND GENERATE + UPLOAD CASE STUDY TO DB
@app.route('/upload_file_cs', methods=['POST'])
def upload_cs():
print('-----------------Uploading file------------------------')
# ERROR HANDLING - TO MAKE SURE THAT A FILE HAS BEEN UPLOADED BY THE USER AND A VALID API KEY IS ENTERED
user_api_key = request.form['user_api_key']
if user_api_key == '':
return render_template('interface_upload_error.html', error_message="Unable to proceed. Please enter a valid API key!")
uploaded_file = request.files['file']
if uploaded_file.filename == '':
return render_template('interface_upload_error.html', error_message="Unable to proceed. Please upload a PDF file!")
# SET API KEY FOR GENERATION OF RESOURCE
os.environ["OPENAI_API_KEY"] = user_api_key
# UPLOAD FILE TO DB
file_id = upload_file_skeleton(uploaded_file, uploaded_file.filename)
# GENERATE CASE STUDY
chain, knowledge_base = pdf_read(uploaded_file)
cs_query = "Based on the contents in this file, can you create a fictional case study for me about a fictional company? The case study should revolve around Agile and DevOps, and should reference as much of the contents of in the file. The case study should follow this structure: 1. Introduction of Company and Background 2. Current Practices 2. Problems faced due to current practices 3. The need to implement new practices and what they are 4. Results 5. Conclusion. \n\n Make the case study in such a way where the individual sections are not numbered and that the whole case study flows seamlessly \n\n Skip the pleasantries of acknowledging the user and start generating the case study immediately (Meaning, do not start with 'Sure, here's a case study for...' or 'Here's a case study for...')."
cs_docs = knowledge_base.similarity_search(cs_query)
cs_output = chain.run(input_documents=cs_docs,question=cs_query)
topic_query = f'Based on the contents in this file, can you identify the main topic of the file contents? The main topic should be a single word, and should be strictly either Agile or DevOps. Identify also, only 5 subtopics that are related to the main topic. The subtopics should be single words as well. \n\n Skip the pleasantries of acknowledging the user and start generating the topic immediately (Meaning, do not start with "Sure, here\'s the topic for..." or "Here\'s the topic for..."). Some examples of sub-topics include automation, continuous integration, continuous delivery, etc. Generate your response as follows in the example delimited by the double apostrophes: \n\n """ \n Main Topic: DevOps \n Sub-Topics: Automation, Continuous Integration, Continuous Delivery, Continuous Deployment, Continuous Testing"""'
topic_docs = knowledge_base.similarity_search(topic_query)
topic_output = chain.run(input_documents=topic_docs,question=topic_query)
# UPLOAD CASE STUDY TO DB
upload_cs_skeleton(file_id, cs_output, topic_output)
return render_template('interface_post_upload_cs.html', cs_output=cs_output)
# UPLOAD PDF FILE TO DB AND GENERATE + UPLOAD QUESTIONS & ANSWERS TO DB
@app.route('/upload_file_qa', methods=['POST'])
def upload_qa():
print('-----------------Uploading file------------------------')
user_api_key = request.form['user_api_key']
if user_api_key == '':
return render_template('interface_upload_error.html', error_message="Unable to proceed. Please enter a valid API key!")
uploaded_file = request.files['file']
if uploaded_file.filename == '':
return render_template('interface_upload_error.html', error_message="Unable to proceed. Please upload a PDF file!")
# SET API KEY FOR GENERATION OF RESOURCE
os.environ["OPENAI_API_KEY"] = user_api_key
# UPLOAD FILE TO DB
file_id = upload_file_skeleton(uploaded_file, uploaded_file.filename)
# GENERATE QUESTIONS AND ANSWERS
chain, knowledge_base = pdf_read(uploaded_file)
ques_ind_query = "Based on the contents of the file, can you write me 10 questions that relate to DevOps and Agile? Have the questions reference as much of the content inside the file as possible, whilst adhering to the theme of DevOps and Agile. \n\n Write the questions in the following format: \n1. Question 1\n2. Question 2\n3. Question 3 \n\n and so on. \n\n Skip the pleasantries of acknowledging the user and start generating the questions immediately (Meaning, do not start with 'Sure, here's a questions for...')."
q_docs = knowledge_base.similarity_search(ques_ind_query)
q_output = chain.run(input_documents=q_docs,question=ques_ind_query)
ans_ind_query = f'Please provide the answers to the following questions. \n\n {q_output} \n\n Skip the pleasantries of acknowledging the user and start generating the answers immediately (Meaning, do not start with "Sure, here\'s the answers for...").'
a_docs = knowledge_base.similarity_search(ans_ind_query)
a_output = chain.run(input_documents=a_docs,question=ans_ind_query)
topic_query = f'Based on the contents of the file, can you identify the main topic of the questions and answers? The main topic should be a single word, and should be strictly either Agile or DevOps. Identify also, only 5 subtopics that are related to the main topic. The subtopics should be single words as well. \n\n Skip the pleasantries of acknowledging the user and start generating the topic immediately (Meaning, do not start with "Sure, here\'s the topic for..." or "Here\'s the topic for..."). Some examples of sub-topics include automation, continuous integration, continuous delivery, etc. Generate your response as follows in the example delimited by the double apostrophes: \n\n """ \n Main Topic: DevOps \n Sub-Topics: Automation, Continuous Integration, Continuous Delivery, Continuous Deployment, Continuous Testing"""'
topic_docs = knowledge_base.similarity_search(topic_query)
topic_output = chain.run(input_documents=topic_docs,question=topic_query)
topic_split = topic_output.split("\n")
main_topic = topic_split[0].split(":")[1].strip()
sub_topics = topic_split[1].split(":")[1].strip()
# UPLOAD QUESTIONS AND ANSWERS TO DB
mongo_upload_endpoint = "http://localhost:5001/upload_qa_for_pdf" + "/" + file_id
qa_json = {
"questions": q_output,
"answers": a_output,
"main_topic": main_topic,
"sub_topic": sub_topics
}
try:
response = requests.post(mongo_upload_endpoint, json=qa_json)
print(response.text)
except Exception as e:
print("Error")
print(e)
return render_template('interface_post_upload_qa.html', q_output=q_output, a_output=a_output)
# UPLOAD PDF FILE TO DB AND GENERATE + UPLOAD CASE STUDY AND QUESTIONS & ANSWERS TO DB
@app.route('/upload_file_csqa', methods=['POST'])
def upload_csqa():
print('-----------------Uploading file------------------------')
user_api_key = request.form['user_api_key']
if user_api_key == '':
return render_template('interface_upload_error.html', error_message="Unable to proceed. Please enter a valid API key!")
uploaded_file = request.files['file']
if uploaded_file.filename == '':
return render_template('interface_upload_error.html', error_message="Unable to proceed. Please upload a PDF file!")
# SET API KEY FOR GENERATION OF RESOURCE
os.environ["OPENAI_API_KEY"] = user_api_key
# UPLOAD FILE TO DB
file_id = upload_file_skeleton(uploaded_file, uploaded_file.filename)
# GENERATE CASE STUDY
chain, knowledge_base = pdf_read(uploaded_file)
cs_query = "Based on the contents in this file, can you create a fictional case study for me about a fictional company? The case study should revolve around Agile and DevOps, and should reference as much of the contents of in the file. The case study should follow this structure: 1. Introduction of Company and Background 2. Current Practices 2. Problems faced due to current practices 3. The need to implement new practices and what they are 4. Results 5. Conclusion. \n\n Make the case study in such a way where the individual sections are not numbered and that the whole case study flows seamlessly \n\n Skip the pleasantries of acknowledging the user and start generating the case study immediately (Meaning, do not start with 'Sure, here's a case study for...' or 'Here's a case study for...')."
cs_docs = knowledge_base.similarity_search(cs_query)
cs_output = chain.run(input_documents=cs_docs,question=cs_query)
topic_query = f'Based on the contents of the file, can you identify the main topic of the questions and answers? The main topic should be a single word, and should be strictly either Agile or DevOps. Identify also, only 5 subtopics that are related to the main topic. The subtopics should be single words as well. \n\n Skip the pleasantries of acknowledging the user and start generating the topic immediately (Meaning, do not start with "Sure, here\'s the topic for..." or "Here\'s the topic for..."). Some examples of sub-topics include automation, continuous integration, continuous delivery, etc. Generate your response as follows in the example delimited by the double apostrophes: \n\n """ \n Main Topic: DevOps \n Sub-Topics: Automation, Continuous Integration, Continuous Delivery, Continuous Deployment, Continuous Testing"""'
topic_docs = knowledge_base.similarity_search(topic_query)
topic_output = chain.run(input_documents=topic_docs,question=topic_query)
# UPLOAD CASE STUDY TO DB
cs_id = upload_cs_skeleton(file_id, cs_output, topic_output)
# GENERATE QUESTIONS AND ANSWERS
ques_cs_query = f'Based on the case study below, can you create 10 questions about the case study? Phrase them in a way where it will require more critical thinking. \n\n Case Study: {cs_output} \n\n Skip the pleasantries of acknowledging the user and start generating the questions immediately (Meaning, do not start with \'Sure, here\'s a questions for...\')'
q_docs = knowledge_base.similarity_search(ques_cs_query)
q_output = chain.run(input_documents=q_docs,question=ques_cs_query)
a_query = f'Based on the case study and the questions below, could you provide the answers to the questions? \n\n Case Study: {cs_output} \n\n Questions: {q_output} \n\n Skip the pleasantries of acknowledging the user and start generating the answers immediately. (Meaning, do not start with "Sure, here\'s the answers for...").'
a_docs = knowledge_base.similarity_search(a_query)
a_output = chain.run(input_documents=a_docs,question=a_query)
topic_split = topic_output.split("\n")
main_topic = topic_split[0].split(":")[1].strip()
sub_topics = topic_split[1].split(":")[1].strip()
# UPLOAD RELATED QUESTIONS AND ANSWERS TO DB
mongo_upload_endpoint = "http://localhost:5001/upload_csqa_for_pdf" + "/" + cs_id
qa_json = {
"questions": q_output,
"answers": a_output,
"main_topic": main_topic,
"sub_topic": sub_topics
}
try:
response = requests.post(mongo_upload_endpoint, json=qa_json)
print(response.text)
except Exception as e:
print("Error")
print(e)
return render_template('interface_post_upload_csqa.html', cs_output=cs_output, q_output=q_output, a_output=a_output)
##################################################################################################################################################################################################
##################################################################################################################################################################################################
##################################################################################################################################################################################################
'''
FUNCTIONS HERE ARE TO BE USED FOR API CALLS FROM OTHER SOURCES SUCH AS POSTMAN OR OTHER INTERFACES
FUNCTIONS IN THIS SECTION INCLUDE:
- CASE STUDY
- GENERATE CASE STUDY (api_get_cs)
- INDEPENDENT QUESTIONS AND ANSWERS
- GENERATE QUESTIONS AND ANSWERS (api_get_qa)
- CASE STUDY QUESTIONS AND ANSWERS
- GENERATE CASE STUDY + RELATED QUESTIONS AND ANSWERS (api_get_csqa)
'''
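# A minimal sketch of how a separate client might exercise these routes (not part of
# the original app). The base URL matches the app.run(port=5000) call at the bottom of
# this file; the API key and the topic values are placeholders/assumptions.
#
#   import requests
#
#   BASE = "http://localhost:5000"
#   KEY = "sk-..."  # hypothetical OpenAI API key
#
#   cs = requests.get(f"{BASE}/api_get_cs/{KEY}/DevOps/Automation").json()
#   qa = requests.get(f"{BASE}/api_get_qa/{KEY}/Automation").json()
#   csqa = requests.get(f"{BASE}/api_get_csqa/{KEY}/Agile/Automation").json()
#   print(cs["message"], qa["message"], csqa["message"])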
# API ROUTES FOR OTHER APPLICATIONS TO CALL AND USE
# API ROUTE TO GENERATE CASE STUDY
@app.route('/api_get_cs/<string:api_key>/<string:main_topic>/<string:sub_topic>', methods=['GET'])
def api_get_cs(api_key, main_topic, sub_topic):
# SET UP MONGO RETRIEVAL FROM MONGO MICROSERVICE
mongo_retrieve_endpoint = "http://localhost:5001/get_case_study/manual/" + main_topic + "/" + sub_topic
try:
response = requests.get(mongo_retrieve_endpoint)
    except Exception as e:
        print("Error")
        print(e)
        return jsonify({"error": "Unable to retrieve a reference case study from the database."})
# GET DATA FROM MONGO MICROSERVICE RESPONSE
json_data = response.json()
data = json_data["data"][0]
ref_case_study = data["content"]
# SET API KEY - CHECK IF API KEY IS VALID OR ENTERED
    if api_key == '':
        return jsonify({"error": "Unable to proceed. Please enter a valid API key!"})
    openai.api_key = api_key
# GENERATE CHAT COMPLETION
try:
completion = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = [
{"role": "system", "content": "You are an instructor teaching an Agile and DevOps course, your job is to provide questions and answers for students for the purpose of assessing students purposes. You are currently chatting with a Professor of the course, who is asking you for questions and answers about Agile and DevOps."},
{"role": "user", "content": f"Can you provide me with a sample case study about {main_topic} that focuses on {sub_topic}? Skip the pleasantries of acknowledging the user and start generating the case study immediately. (Meaning, do not start with 'Sure, here's a case study for...')."},
{"role": "assistant", "content": f"{ref_case_study}"},
{"role": "user", "content": f"Please provide me with another case study about {main_topic} that focuses on {sub_topic} following the same format as what you have just generated. Skip the pleasantries of acknowledging the user and start generating the case study immediately as before. (Meaning, do not start with 'Sure, here's a case study for...')"},
],
temperature = 1.1,
max_tokens = 2048,
)
except Exception as e:
return jsonify({"error": e})
generated_case_study = completion.choices[0].message.content
# UPLOAD CASE STUDY TO DB
mongo_upload_endpoint = "http://localhost:5001/upload_cs"
cs_json = {
"content": generated_case_study,
"main_topic": main_topic,
"sub_topic": sub_topic,
"mode": "api_call"
}
try:
response = requests.post(mongo_upload_endpoint, json=cs_json)
print(response.text)
except Exception as e:
print("Error")
print(e)
return jsonify(
{
"case study": generated_case_study,
"message" : f"Case study generated for {main_topic} focusing on {sub_topic}. Case study uploaded to database."
}
)
# API TO GENERATE QUESTIONS AND ANSWERS
@app.route('/api_get_qa/<string:api_key>/<string:sub_topic>', methods=['GET'])
def api_get_qa(api_key, sub_topic):
sub_topics = ["Automation", "Software Design", "Version Control", "Software Lifecycle", "Agile Methodologies", "Software Security"]
# SET UP MONGO RETRIEVAL FROM MONGO MICROSERVICE
mongo_retrieve_endpoint = "http://localhost:5001/get_ind_questions" + "/manual" + "/" + sub_topic
try:
response = requests.get(mongo_retrieve_endpoint)
    except Exception as e:
        print("Error")
        print(e)
        return jsonify({"error": "Unable to retrieve reference questions and answers from the database."})
# FORMAT QUESTIONS AND ANSWERS INTO STRING TO BE PUT INTO THE CHAT COMPLETION MESSAGE
questions_string = ""
answers_string = ""
# GET DATA FROM MONGO MICROSERVICE RESPONSE
# DATA RETRIEVED IS THE REFERENCE QUESTIONS AND ANSWERS
json_data = response.json()
data = json_data["data"]
# FORMAT QUESTIONS AND ANSWERS
for i in range(len(data)):
questions_string += f'{i+1}. ' + data[i]["question"] + "\n"
answers_string += f'{i+1}. ' + data[i]["answer"] + "\n"
print(questions_string)
print(answers_string)
# SET API KEY - CHECK IF API KEY IS VALID OR ENTERED
    if api_key == '':
        return jsonify({"error": "Unable to proceed. Please enter a valid API key!"})
    openai.api_key = api_key
# GENERATE CHAT COMPLETION
try:
completion = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = [
{"role": "system", "content": "You are an instructor teaching an Agile and DevOps course, your job is to provide questions and answers for students for the purpose of assessing students purposes. You are currently chatting with a Professor of the course, who is asking you for questions and answers about Agile and DevOps. "},
{"role": "user", "content": f"Can you provide me with sample questions and answers about {sub_topic} under Agile/DevOps? Provide the questions and answers in a way where it will require more critical thinking. Format your response in this way:\n\n 'Questions: \n1.\n2.\n3. \n\n Answers: \n1.\n2.\n3.' \n\n Skip the pleasantries of acknowledging the user and start generating the questions and answers immediately. (Meaning, do not start with 'Sure, here's a questions and answers for...')"},
{"role": "assistant", "content": f"Questions:\n{questions_string}\nAnswers:\n{answers_string}"},
{"role": "user", "content": "Please provide me with 10 more questions and answers following the same format as what you have just generated. Skip the pleasantries of acknowledging the user and start generating the questions and answers immediately. (Meaning, do not start with 'Sure, here's a questions and answers for...')"},
],
temperature = 1.1,
max_tokens = 2048,
)
except Exception as e:
return jsonify({"error": e})
answers_unformatted = completion.choices[0].message.content.split("Answers:")[1]
questions_unformatted = completion.choices[0].message.content.split("Answers:")[0].split("Questions:")[1]
mongo_upload_endpoint = "http://localhost:5001/upload_ind_qa"
qa_json = {
"mode": "api_call",
"sub_topic": sub_topic,
"questions": questions_unformatted,
"answers": answers_unformatted
}
try:
response = requests.post(mongo_upload_endpoint, json=qa_json)
print(response)
except Exception as e:
print("Error")
print(e)
questions_formatted_arr = []
answers_formatted_arr = []
questions_split_arr = questions_unformatted.split("\n")
answers_split_arr = answers_unformatted.split("\n")
for i in range(len(questions_split_arr)):
if questions_split_arr[i] != '':
questions_formatted_arr.append(questions_split_arr[i])
for i in range(len(answers_split_arr)):
if answers_split_arr[i] != '':
answers_formatted_arr.append(answers_split_arr[i])
return jsonify(
{
"questions" : questions_formatted_arr,
"answers" : answers_formatted_arr,
"message" : f"Questions and answers generated for {sub_topic}. Uploaded generated questions and answers to the database."
}
)
# API ENDPOINT TO GENERATE CASE STUDY, QUESTIONS AND ANSWERS
@app.route('/api_get_csqa/<string:api_key>/<string:main_topic>/<string:sub_topic>', methods=['GET'])
def api_get_csqa(api_key, main_topic, sub_topic):
# CHECK IF SUB_TOPIC IS IN THE LIST OF SUB_TOPICS
sub_topics = ["Automation", "Software Design", "Version Control", "Software Lifecycle", "Agile Methodologies", "Software Security"]
if sub_topic not in sub_topics:
# SET UP MONGO RETRIEVAL FROM MONGO MICROSERVICE
mongo_retrieve_endpoint = "http://localhost:5001/get_csqa/manual/" + main_topic + "/" + sub_topic
else:
# SET UP MONGO RETRIEVAL FROM MONGO MICROSERVICE
mongo_retrieve_endpoint = "http://localhost:5001/get_csqa/automatic/" + main_topic + "/" + sub_topic
try:
response = requests.get(mongo_retrieve_endpoint)
data = response.json()
    except Exception as e:
        print("Error")
        print(e)
        return jsonify({"error": "Unable to retrieve a reference case study and its questions and answers from the database."})
case_study = data["case_study"]
questions = data["questions"]
answers = data["answers"]
# FORMAT QUESTIONS AND ANSWERS INTO STRINGS
questions_string = ""
answers_string = ""
for i in range(len(questions)):
questions_string += f'{i+1}. ' + questions[i] + "\n"
answers_string += f'{i+1}. ' + answers[i] + "\n"
# SET API KEY - CHECK IF API KEY IS VALID OR ENTERED
    if api_key == '':
        return jsonify({"error": "Unable to proceed. Please enter a valid API key!"})
    openai.api_key = api_key
# GENERATE CHAT COMPLETION
try:
completion = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = [
{"role": "system", "content": "You are an instructor teaching an Agile and DevOps course, your job is to provide questions and answers for students for the purpose of assessing students purposes. You are currently chatting with a Professor of the course, who is asking you for questions and answers about Agile and DevOps. "},
# REFERENCE PROMPT ENGINEERING FOR CASE STUDY
{"role": "user", "content": f"Can you provide me with a sample case study about {main_topic} that focuses on {sub_topic}? Skip the pleasantries of acknowledging the user and start generating the case study immediately. (Meaning, do not start with 'Sure, here's a case study for...')."},
{"role": "assistant", "content": f"{case_study}"},
# REFERENCE PROMPT ENGINEERING FOR QUESTIONS AND ANSWERS
{"role": "user", "content": f"Can you provide me with sample questions and answers about the case study above? Where the questions are about {main_topic}, focusing on {sub_topic}? Provide the questions and answers in a way where it will require more critical thinking. Format your response in this way:\n\n 'Questions: \n1.\n2.\n3. \n\n Answers: \n1.\n2.\n3.' \n\n Skip the pleasantries of acknowledging the user and start generating the questions and answers immediately. (Meaning, do not start with 'Sure, here's a case study/questions and answers for...')"},
{"role": "assistant", "content": f"Questions:\n{questions_string}\nAnswers:\n{answers_string}"},
{"role": "user", "content": f"Please provide me with another case study, and 10 sample questions and sample answers for the case study above. Have the case study, questions and answers be about {main_topic} which focuses on {sub_topic}. Follow the same format as what you have just generated, such as denoted in the triple apostrophe delimiters: \n\n ''' Case Study:\n (Generated Case Study)\n\nQuestions: \n1.\n2.\n3.\n\n Answers:\n1.\n2.\n3.\n\n ''' \n\n Skip the pleasantries of acknowledging the user and start generating the questions and answers immediately. (Meaning, do not start with 'Sure, here's a case study/questions and answers for...')"},
],
temperature = 1.1,
max_tokens = 2048,
)
except Exception as e:
return jsonify({"error": e})
# FORMAT CASE STUDY, QUESTIONS AND ANSWERS INTO STRINGS
content = completion.choices[0].message.content
# GET QUESTIONS AND ANSWERS FIRST
questions_unformatted = content.split("Answers:")[0].split("Questions:")[1]
answers_unformatted = content.split("Answers:")[1]
questions_formatted_arr = []
answers_formatted_arr = []
questions_split_arr = questions_unformatted.split("\n")
answers_split_arr = answers_unformatted.split("\n")
for i in range(len(questions_split_arr)):
if questions_split_arr[i] != '' and questions_split_arr[i] != ' ':
questions_formatted_arr.append(questions_split_arr[i])
for i in range(len(answers_split_arr)):
if answers_split_arr[i] != '' and answers_split_arr[i] != ' ':
answers_formatted_arr.append(answers_split_arr[i])
# GET CASE STUDY
generated_case_study = content.split("Answers:")[0].split("Questions:")[0].split("Case Study:")[1]
# SET UP MONGO UPLOAD CS TO MONGO MICROSERVICE
mongo_upload_cs_endpoint = "http://localhost:5001/upload_cs"
new_cs = {
"main_topic" : main_topic,
"sub_topic" : sub_topic,
"content" : generated_case_study,
"mode": "api_call"
}
try:
response = requests.post(mongo_upload_cs_endpoint, json=new_cs)
print(response)
except Exception as e:
print("Error")
print(e)
# SET UP MONGO UPLOAD RELATED QA TO MONGO MICROSERVICE
mongo_upload_qa_endpoint = "http://localhost:5001/upload_qa_for_cs"
new_qa_data = {
"main_topic" : main_topic,
"sub_topic" : sub_topic,
"mode": "api_call",
"content": generated_case_study,
"questions": questions_unformatted,
"answers": answers_unformatted,
}
try:
response = requests.post(mongo_upload_qa_endpoint, json=new_qa_data)
print(response)
except Exception as e:
print("Error")
print(e)
return jsonify(
{
"case_study" : generated_case_study,
"questions" : questions_formatted_arr,
"answers" : answers_formatted_arr,
"message" : f"Case study, questions and answers generated for {main_topic} focusing on {sub_topic}. Uploaded all to the database.",
}
)
# FLASK APP ROUTE
if __name__ == '__main__':
app.run(port=5000, debug=True) | [
"Please provide me with 10 more questions and answers following the same format as what you have just generated. Skip the pleasantries of acknowledging the user and start generating the questions and answers immediately. (Meaning, do not start with 'Sure, here's a questions and answers for...')",
"Can you provide me with sample questions and answers about PLACEHOLDER under Agile/DevOps? Provide the questions and answers in a way where it will require more critical thinking. Format your response in this way:\n\n 'Questions: \n1.\n2.\n3. \n\n Answers: \n1.\n2.\n3.' \n\n Skip the pleasantries of acknowledging the user and start generating the questions and answers immediately. (Meaning, do not start with 'Sure, here's a questions and answers for...')",
"PLACEHOLDER",
"Please provide me with another case study about PLACEHOLDER that focuses on PLACEHOLDER following the same format as what you have just generated. Skip the pleasantries of acknowledging the user and start generating the case study immediately as before. (Meaning, do not start with 'Sure, here's a case study for...')",
"You are an instructor teaching an Agile and DevOps course, your job is to provide questions and answers for students for the purpose of assessing students purposes. You are currently chatting with a Professor of the course, who is asking you for questions and answers about Agile and DevOps.",
"You are an instructor teaching an Agile and DevOps course, your job is to provide questions and answers for students for the purpose of assessing students purposes. You are currently chatting with a Professor of the course, who is asking you for questions and answers about Agile and DevOps. ",
"Can you provide me with a sample case study about PLACEHOLDER that focuses on PLACEHOLDER? Skip the pleasantries of acknowledging the user and start generating the case study immediately. (Meaning, do not start with 'Sure, here's a case study for...').",
"Please provide me with another case study, and 10 sample questions and sample answers for the case study above. Have the case study, questions and answers be about PLACEHOLDER which focuses on PLACEHOLDER. Follow the same format as what you have just generated, such as denoted in the triple apostrophe delimiters: \n\n ''' Case Study:\n (Generated Case Study)\n\nQuestions: \n1.\n2.\n3.\n\n Answers:\n1.\n2.\n3.\n\n ''' \n\n Skip the pleasantries of acknowledging the user and start generating the questions and answers immediately. (Meaning, do not start with 'Sure, here's a case study/questions and answers for...')",
"Can you provide me with sample questions and answers about the case study above? Where the questions are about PLACEHOLDER, focusing on PLACEHOLDER? Provide the questions and answers in a way where it will require more critical thinking. Format your response in this way:\n\n 'Questions: \n1.\n2.\n3. \n\n Answers: \n1.\n2.\n3.' \n\n Skip the pleasantries of acknowledging the user and start generating the questions and answers immediately. (Meaning, do not start with 'Sure, here's a case study/questions and answers for...')",
"Questions:\nPLACEHOLDER\nAnswers:\nPLACEHOLDER"
] |
2024-01-10 | mindspore-lab/mindcv | examples~open_clip~src~open_clip~factory.py | import json
import logging
import os
import re
from copy import deepcopy
from functools import partial
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from mindspore import load_checkpoint, load_param_into_net
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .loss import ClipLoss, DistillClipLoss
from .model import (
CLIP,
CustomTextCLIP,
convert_to_custom_text_param_dict,
convert_weights_to_lp,
resize_pos_embed,
resize_text_pos_embed,
)
from .openai import load_openai_model
from .pretrained import download_pretrained, get_pretrained_cfg, list_pretrained_tags_by_model
from .tokenizer import block_mask_tokenize, random_mask_tokenize, syntax_mask_tokenize, tokenize
from .transform import AugmentationCfg, image_transform
_MODEL_CONFIG_PATHS = [Path(__file__).parent / "model_configs/"]
_MODEL_CONFIGS = {}  # dictionary (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("embed_dim", "vision_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def list_models():
"""enumerate available model architectures based on config files"""
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
"""add model config path or file and update registry"""
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
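# A small usage sketch of the registry helpers above (illustrative only; the model
# name below is an assumption and must correspond to a JSON file under model_configs/):
#
#   print(list_models())                          # architectures discovered in model_configs/
#   add_model_config("/path/to/extra_configs")    # hypothetical extra config directory
#   cfg = get_model_config("ViT-B-32")            # deep copy of the config dict, or None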
def get_tokenizer(model_name):
config = get_model_config(model_name)
if "text_mask" in config["text_cfg"] and config["text_cfg"]["text_mask"] == "syntax":
tokenizer = syntax_mask_tokenize
elif "text_mask" in config["text_cfg"] and config["text_cfg"]["text_mask"] == "random":
tokenizer = random_mask_tokenize
elif "text_mask" in config["text_cfg"] and config["text_cfg"]["text_mask"] == "block":
tokenizer = block_mask_tokenize
else:
tokenizer = tokenize
if "context_length" in config["text_cfg"].keys():
context_length = config["text_cfg"]["context_length"]
tokenizer = partial(tokenizer, context_length=context_length)
return tokenizer
def load_ckpt(model, checkpoint_path, strict=False):
param_dict = load_checkpoint(checkpoint_path)
# detect old format and make compatible with new format
if "positional_embedding" in param_dict and not hasattr(model, "positional_embedding"):
param_dict = convert_to_custom_text_param_dict(param_dict)
position_id_key = "text.transformer.embeddings.position_ids"
if position_id_key in param_dict and not hasattr(model, position_id_key):
del param_dict[position_id_key]
resize_pos_embed(param_dict, model)
resize_text_pos_embed(param_dict, model)
incompatible_keys = load_param_into_net(model, param_dict, strict_load=strict)
return incompatible_keys
def create_model(
model_name: str,
pretrained: Optional[str] = None,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
cache_dir: Optional[str] = None,
require_pretrained: bool = False,
**model_kwargs,
):
model_name = model_name.replace("/", "-") # for callers using old naming with / in ViT names
pretrained_cfg = {}
model_cfg = None
if pretrained and pretrained.lower() == "openai":
logging.info(f"Loading pretrained {model_name} from OpenAI.")
model = load_openai_model(
model_name,
cache_dir=cache_dir,
)
else:
model_cfg = model_cfg or get_model_config(model_name)
if model_cfg is not None:
logging.info(f"Loaded {model_name} model config.")
else:
logging.error(f"Model config for {model_name} not found; available models {list_models()}.")
raise RuntimeError(f"Model config for {model_name} not found.")
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout
if force_image_size is not None:
# override model config's image size
model_cfg["vision_cfg"]["image_size"] = force_image_size
custom_text = model_cfg.pop("custom_text", False) or force_custom_text
if custom_text:
if "coca" in model_name:
raise ImportError("COCA model have not been supported yet.")
else:
model = CustomTextCLIP(**model_cfg, **model_kwargs)
else:
model = CLIP(**model_cfg, **model_kwargs)
convert_weights_to_lp(model)
pretrained_loaded = False
if pretrained:
checkpoint_path = ""
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f"Loading pretrained {model_name} weights ({pretrained}).")
load_ckpt(model, checkpoint_path)
else:
                error_str = (
                    f"Pretrained weights ({pretrained}) not found for model {model_name}. "
                    f"Available pretrained tags: {list_pretrained_tags_by_model(model_name)}."
                )
logging.warning(error_str)
raise RuntimeError(error_str)
pretrained_loaded = True
if require_pretrained and not pretrained_loaded:
# callers of create_model_from_pretrained always expect pretrained weights
raise RuntimeError(
f"Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded."
)
# set image / mean metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get("mean", None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get("std", None) or OPENAI_DATASET_STD
return model
def create_loss(args):
if args.distill:
return DistillClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
)
return ClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
)
def create_model_and_transforms(
model_name: str,
pretrained: Optional[str] = None,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
cache_dir: Optional[str] = None,
**model_kwargs,
):
model = create_model(
model_name,
pretrained,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_patch_dropout=force_patch_dropout,
force_image_size=force_image_size,
cache_dir=cache_dir,
**model_kwargs,
)
image_mean = image_mean or getattr(model.visual, "image_mean", None)
image_std = image_std or getattr(model.visual, "image_std", None)
preprocess_train = image_transform(
model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg,
)
preprocess_val = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess_train, preprocess_val
| [] |
2024-01-10 | AnshKetchum/riptide | leadgen~db~UsersDatabase.py | import os
import uuid
import json
from langchain.vectorstores.faiss import FAISS
from langchain.tools import Tool
from langchain.document_loaders.json_loader import JSONLoader
from langchain.chains import ConversationalRetrievalChain
from pydantic.v1 import BaseModel
from pydantic.v1 import Field
from typing import Union, Tuple, Dict, List
from typing import Optional, Type
from leadgen.llms.base import BaseLLM
from .adapters.MockAdapter import MockAdapter
class UsersDatabase:
EMBED_SAVE_DIR = "embeddings_user"
EMBED_SAVE_INDEX = "user_embeddings"
USERS_SAVE_DIR = "user_documents"
def __init__(self, provider: BaseLLM, persist_directory = os.path.join("data", "users"), ) -> None:
#Store our provider
self.provider = provider
#Load our vectorstore
self.persist_dir = str(persist_directory)
self.embeddings_dir = os.path.join(persist_directory, self.EMBED_SAVE_DIR)
self.users_dir = os.path.join(persist_directory, self.USERS_SAVE_DIR)
#Create the directory if it doesn't exist
os.makedirs(self.embeddings_dir, exist_ok=True)
os.makedirs(self.users_dir, exist_ok=True)
_, self.vectorstore = self.load_vectorstore()
def load_vectorstore(self):
if os.path.exists(os.path.join(self.embeddings_dir, f'{self.EMBED_SAVE_INDEX}.faiss')):
return True, FAISS.load_local(self.embeddings_dir, index_name=self.EMBED_SAVE_INDEX, embeddings=self.provider.get_embeddings())
return False, FAISS.from_texts(["Dummy text."], self.provider.get_embeddings())
def save_vectorstore(self):
self.vectorstore.save_local(self.embeddings_dir, index_name=self.EMBED_SAVE_INDEX)
def get_retriever(self, k = 5):
return self.vectorstore.as_retriever(k = k)
def get_qa_chain(self):
llm = self.provider.get_llm()
        qa = ConversationalRetrievalChain.from_llm(llm=llm, retriever=self.get_retriever(), return_source_documents=True)
return qa
def get_llm(self):
return self.provider.get_llm() | [] |
2024-01-10 | AnshKetchum/riptide | leadgen~agents~templates~output.py | import asyncio
import json
from json import JSONDecodeError
from typing import List, Union
from langchain.agents.agent import AgentOutputParser
from langchain.schema import (
AgentAction,
AgentFinish,
OutputParserException,
)
from langchain.schema.agent import AgentActionMessageLog
from langchain.schema.messages import (
AIMessage,
BaseMessage,
)
from langchain.schema.output import ChatGeneration, Generation
class GeneralFunctionsAgentOutputParser(AgentOutputParser):
"""Parses a message into agent action/finish.
Is meant to be used with OpenAI models, as it relies on the specific
function_call parameter from OpenAI to convey what tools to use.
If a function_call parameter is passed, then that is used to get
the tool and tool input.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "llm-functions-agent"
@staticmethod
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_name = function_call["name"]
try:
_tool_input = json.loads(function_call["arguments"])
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
return AgentActionMessageLog(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
)
return AgentFinish(
return_values={"output": message.content}, log=message.content
)
def parse_result(
self, result: List[Generation], *, partial: bool = False
) -> Union[AgentAction, AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return self._parse_ai_message(message)
async def aparse_result(
self, result: List[Generation], *, partial: bool = False
) -> Union[AgentAction, AgentFinish]:
return await asyncio.get_running_loop().run_in_executor(
None, self.parse_result, result
)
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
raise ValueError("Can only parse messages")
| [] |
2024-01-10 | AnshKetchum/riptide | leadgen~db~JobsDatabase.py | import os
import uuid
import json
from langchain.vectorstores.faiss import FAISS
from langchain.tools import Tool
from langchain.document_loaders.json_loader import JSONLoader
from pydantic.v1 import BaseModel
from pydantic.v1 import Field
from typing import Union, Tuple, Dict, List
from typing import Optional, Type
from leadgen.llms.base import BaseLLM
from .adapters.MockAdapter import MockAdapter
class JobsDatabase:
EMBED_SAVE_DIR = "embeddings_job"
EMBED_SAVE_INDEX = "job_embeddings"
JOBS_SAVE_DIR = "job_documents"
def __init__(self, provider: BaseLLM, persist_directory = os.path.join("data", "jobs"), ) -> None:
#Store our provider
self.provider = provider
#Load our vectorstore
self.persist_dir = str(persist_directory)
self.embeddings_dir = os.path.join(persist_directory, self.EMBED_SAVE_DIR)
self.jobs_dir = os.path.join(persist_directory, self.JOBS_SAVE_DIR)
print(self.embeddings_dir, self.jobs_dir)
#Create the directory if it doesn't exist
os.makedirs(self.embeddings_dir, exist_ok=True)
os.makedirs(self.jobs_dir, exist_ok=True)
_, self.vectorstore = self.load_vectorstore()
#Setup our adapters
self.adapters = {'MockAdapter' : MockAdapter()} #name --> adapter, i.e adapters["linkedin"] contains a linkedin adapter
def load_vectorstore(self):
if os.path.exists(os.path.join(self.embeddings_dir, f'{self.EMBED_SAVE_INDEX}.faiss')):
return True, FAISS.load_local(self.embeddings_dir, index_name=self.EMBED_SAVE_INDEX, embeddings=self.provider.get_embeddings())
return False, FAISS.from_texts(["Dummy text."], self.provider.get_embeddings())
def save_vectorstore(self):
self.vectorstore.save_local(self.embeddings_dir, index_name=self.EMBED_SAVE_INDEX)
def store_job(self, job):
#Store the job in the local directory
print("STORING JOB")
job_name = uuid.uuid4()
fp = os.path.join(self.jobs_dir, f'{job_name}.json')
with open(fp, 'w') as f:
json.dump(job, f)
#Adding the documents
doc_loader = JSONLoader(file_path=fp, jq_schema='.', text_content=False)
docs = doc_loader.load()
self.vectorstore.add_documents(docs)
print("STORED JOB", job_name)
return str(job_name)
def store_jobs(self, jobs):
uuids = []
for job in jobs:
uuids.append(self.store_job(job))
self.save_vectorstore()
return uuids
def get_retriever(self, k = 5):
return self.vectorstore.as_retriever(k = k)
def get_job(self, providedUUID):
print('getting job', providedUUID)
fp = os.path.join(self.jobs_dir, f'{providedUUID}.json')
if not os.path.exists(fp):
return f"Use the tool to return a job first! Re-run the procedure, and call obtain_job_data"
with open(os.path.join(self.jobs_dir, f'{providedUUID}.json'), 'r') as f:
partial_job = json.load(f)
print('sending', providedUUID)
return partial_job
def create_tool_get_job(self):
class ObtainDataByUUID(BaseModel):
uuid: str = Field()
def obtain_data_by_uuid(uuid):
print("getting job by uuid")
return self.get_job(uuid)
description = """Retrieve some jobs using the obtain_job_data tool, and then ask about the job using it's uuid.
Make sure to run this tool only after calling the obtain_job_data tool
"""
tool = Tool.from_function(
func=obtain_data_by_uuid,
name="job_retrieval_search",
description=description,
args_schema=ObtainDataByUUID
)
return tool
def retrieve_jobs(self, topic, k = 1):
'''
Returns jobs based on a simple keyword search
'''
jobs = []
for key in self.adapters:
jobs.extend(self.adapters[key].retrieve_jobs(topic))
if len(jobs) > k:
jobs = jobs[:k]
break
uuids = self.store_jobs(jobs)
        return f'Successfully retrieved {len(jobs)} jobs on {topic}! Use the get_jobs tool to obtain data on each job using the uuids listed here: {",".join(uuids)} by passing individual ones into the get_jobs tool as input.'
def create_tool_retrieve_jobs(self):
class ObtainJobDataSchema(BaseModel):
get_data_topic: str = Field()
def get_job_data(topic):
print("retrieving jobs", topic)
return self.retrieve_jobs(topic)
description = """Gets and obtains job data. Use this tool BEFORE any data analysis on job postings companies have made, and the requirements they are looking for within a job. Without running this tool first, you won't
Example input: data analyst
This would get you job postings from companies looking for data analysts. You can ALSO use this tool in sucession if you want data on multiple topics. For example, you might realize that
after getting data on data analytics, some machine learning jobs might also be relevant. Then re-run this tool, and it'll add machine learning jobs as well
Additionally, you will have to specify the number of jobs you'll need. If no clear wording is given,
default to 5.
"""
tool = Tool.from_function(
func=get_job_data,
name="obtain_job_data",
description=description,
args_schema=ObtainJobDataSchema
)
return tool
def get_complete_application(self, uuid):
print('get complete application', uuid)
with open(os.path.join(self.jobs_dir, f'{uuid}.json'), 'r') as f:
partial_job = json.load(f)
print('get complete application done', partial_job)
return f'Here are the application questions you\'ll need to answer for application {uuid}. Answer them, and then submit the job application with the resume and cover letter. Questions: {self.adapters[partial_job["metadata"]["src"]].get_complete_application(partial_job)}'
def create_tool_get_application(self):
class GetApplicationQuestionsSchema(BaseModel):
uuid: str = Field()
def get_app_qs(uuid):
return self.get_complete_application(uuid)
print(isinstance(GetApplicationQuestionsSchema, BaseModel))
tool = Tool.from_function(
func=get_app_qs,
name="get_job_application_questions",
description="Retrieve all the questions you need to fill out for the job application",
args_schema=GetApplicationQuestionsSchema
)
return tool
def apply_to_application(self, uuid, answers, resume_fp, content_letter_fp):
print('apply to applications', uuid)
with open(os.path.join(self.jobs_dir, f'{uuid}.json'), 'r') as f:
partial_job = json.load(f)
print('apply to applications done', uuid)
return self.adapters[partial_job["metadata"]["src"]].apply_to_application(partial_job, answers, resume_fp, content_letter_fp)
def create_tool_apply_job(self):
class JobAnswer(BaseModel):
content: str
class JobAnswers(BaseModel):
answers: List[JobAnswer]
class ApplyToJobSchema(BaseModel):
uuid: str = Field()
answers: JobAnswers = Field()
resume_file_filepath: str = Field()
content_letter_file_filepath: str = Field()
def apply_to_app(uuid, answers, resume_fp, content_letter_fp):
            return self.apply_to_application(uuid, json.loads(answers), resume_fp, content_letter_fp)
tool = Tool.from_function(
func=apply_to_app,
name="apply_to_application",
description="Use this tool to apply to a job",
args_schema=ApplyToJobSchema
)
return tool
def poll_job_application_status(self, uuid):
with open(os.path.join(self.jobs_dir, f'{uuid}.json'), 'r') as f:
partial_job = json.load(f)
return self.adapters[partial_job["metadata"]["src"]].poll_job_application_status(partial_job)
def create_tool_application_status(self):
class ApplicationStatusSchema(BaseModel):
uuid: str = Field()
def poll_job_app_status(uuid):
return self.poll_job_application_status(uuid)
tool = Tool.from_function(
func= poll_job_app_status,
name="poll_job_application_status",
description="Use this tool to check in on application status if ever requested by the user.",
args_schema=ApplicationStatusSchema
)
return tool
def get_toolkit(self):
return [
self.create_tool_get_application(),
self.create_tool_apply_job(),
self.create_tool_application_status(),
self.create_tool_retrieve_jobs(),
self.create_tool_get_job(),
]
| [] |
2024-01-10 | AnshKetchum/riptide | complex_dash.py | import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import UnstructuredFileLoader
from leadgen.agents.base import get_chain
from leadgen.utils.doc_utils import extract_text_from_upload
from leadgen.utils.latex_utils import generate_latex, template_commands, render_latex
from leadgen.prompts.resume import generate_json_resume
from leadgen.llms.current import provider
import json
import os
# Chat UI title
st.header("JobsGPT")
st.subheader('File type supported: PDF/DOCX/TXT :city_sunrise:')
openai_api_key = os.getenv("OPENAI_API_KEY")
agent = None
embeddings = provider.get_embeddings()
#if os.path.exists("data/user_docs.faiss"):
# vectorstore = FAISS.load_local(os.path.join("data", "user_docs."), embeddings)
#else:
vectorstore = FAISS.from_texts(["This is some starting text"], embeddings)
# File uploader in the sidebar on the left
with st.sidebar:
if not openai_api_key:
openai_api_key = st.text_input("OpenAI API Key", type="password")
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
# Set OPENAI_API_KEY as an environment variable
os.environ["OPENAI_API_KEY"] = openai_api_key
with st.sidebar:
uploaded_files = st.file_uploader("Please upload your files", accept_multiple_files=True, type=None)
st.info("Please refresh the browser if you decided to upload more files to reset the session", icon="🚨")
# Check if files are uploaded
if uploaded_files:
# Print the number of files to console
print(f"Number of files uploaded: {len(uploaded_files)}")
# Load the data and perform preprocessing only if it hasn't been loaded before
if "processed_data" not in st.session_state:
# Load the data from uploaded PDF files
documents = []
for uploaded_file in uploaded_files:
# Get the full file path of the uploaded file
file_path = os.path.join(os.getcwd(), uploaded_file.name)
# Save the uploaded file to disk
            with open(file_path, "wb") as f:
f.write(uploaded_file.getvalue())
loader = UnstructuredFileLoader(file_path)
loaded_documents = loader.load()
print(f"Number of files loaded: {len(loaded_documents)}")
# Extend the main documents list with the loaded documents
documents.extend(loaded_documents)
# Chunk the data, create embeddings, and save in vectorstore
text_splitter = CharacterTextSplitter(chunk_size=2000, chunk_overlap=200)
document_chunks = text_splitter.split_documents(documents)
vectorstore.add_documents(document_chunks)
print('Saving locally')
vectorstore.save_local("data", index_name="user_docs")
# Store the processed data in session state for reuse
st.session_state.processed_data = {
"document_chunks": document_chunks,
"vectorstore": vectorstore,
}
# Print the number of total chunks to console
print(f"Number of total chunks: {len(document_chunks)}")
else:
# If the processed data is already available, retrieve it from session state
document_chunks = st.session_state.processed_data["document_chunks"]
vectorstore = st.session_state.processed_data["vectorstore"]
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("Ask your questions?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
# Query the assistant using the latest chat history
print("MESSAGES")
messages = [(message["role"], message["content"]) for message in st.session_state.messages]
print(messages)
if not agent:
agent = get_chain(vectorstore, st)
result = agent({"input": prompt, "chat_history": messages})
# Display assistant response in chat message container
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
full_response = result["output"]
message_placeholder.markdown(full_response + "|")
message_placeholder.markdown(full_response)
print(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
| [] |
2024-01-10 | AnshKetchum/riptide | starter_dash.py | import streamlit as st
from langchain.agents.agent_toolkits.conversational_retrieval.tool import create_retriever_tool
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationalRetrievalChain
from leadgen.agents.base import agent
st.title('🦜🔗 Quickstart App')
def generate_response(pt):
print('received', pt)
res = agent({'input': pt})
print('out', res)
st.info(res["output"])
with st.form('my_form'):
text = st.text_area('Enter text:', 'What are the three key pieces of advice for learning how to code?')
submitted = st.form_submit_button('Submit')
if submitted:
generate_response(text) | [] |
2024-01-10 | AnshKetchum/riptide | leadgen~llms~current.py | from .openai import OpenAILLM
from .llama import LLaMALLM
import json
class Provider:
"""
A wrapper selector class to pick our desired LLM
"""
def __init__(self) -> None:
with open('prefs.json', 'r') as f:
dct = json.load(f)
if dct['llm'] == OpenAILLM.SELECTOR:
self.provider = OpenAILLM()
elif dct['llm'] == LLaMALLM.SELECTOR:
self.provider = LLaMALLM()
else:
raise ValueError("The llm you want hasn't been added yet")
def get_llm_and_embeddings(self):
return self.provider
provider = Provider().get_llm_and_embeddings()
| [] |
2024-01-10 | AnshKetchum/riptide | leadgen~tools~resume~jsonres.py |
from langchain.tools import Tool, BaseTool
from pydantic import BaseModel, Field
from typing import Union, Tuple, Dict
from typing import Optional, Type
from langchain.agents.agent_toolkits.conversational_retrieval.tool import create_retriever_tool
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.retrievers.multi_query import MultiQueryRetriever
from leadgen.utils.latex_utils import generate_latex, template_commands, render_latex
from leadgen.llms.current import provider
from .prompts import BASICS_PROMPT, EDUCATION_PROMPT, AWARDS_PROMPT, PROJECTS_PROMPT, WORK_PROMPT, SKILLS_PROMPT, SYSTEM_TAILORING
import json
import random
from stqdm import stqdm
'''
Iteration 1 - Just takes experiences and creates a resume out of them
Iteration 2 - Actively searches for company information
'''
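# Rough usage sketch (assumes the user-document index under data/ already exists and that
# a LaTeX toolchain is available for render_latex; the company and description are made up):
#
#   tool = CreateResumeTool()
#   print(tool.run({"company_name": "Acme Corp",
#                   "job_description": "Backend engineer, Python and REST APIs"}))
#   # -> "Resume saved as out.pdf!"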
class CreateResumeToolInput(BaseModel):
company_name: str = Field()
job_description: str = Field()
class CreateResumeTool(BaseTool):
name = "create_resume_from_experiences"
description = """Use this tool to create a personalized resume.
Provide the company name and job description, and then this tool will
1. Retrieve the k most similar experiences and create a JSON resume
2. Construct a LaTeX resume from that JSON resume, and save it as an output pdf under the filename 'output.pdf'.
"""
args_schema: Type[BaseModel] = CreateResumeToolInput
def _run(
self, company_name: str, job_description: str, run_manager = None
) -> str:
"""Use the tool."""
print('Generating Resume.')
vectordb = FAISS.load_local('data', index_name="user_docs", embeddings= provider.get_embeddings())
llm = provider.get_llm()
qa = ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectordb.as_retriever())
#Generate a summary to be used as for CV
sections = []
for p in stqdm(
[ BASICS_PROMPT,
EDUCATION_PROMPT,
AWARDS_PROMPT,
PROJECTS_PROMPT,
WORK_PROMPT,
SKILLS_PROMPT,
],
desc="This might take a while..."
):
prompt = p.replace("<job_description>", job_description)
answer = qa(
{"question": prompt,
"chat_history" : []},
return_only_outputs=True
)["answer"]
answer = json.loads(answer)
if prompt == BASICS_PROMPT and "basics" not in answer:
answer = {"basics": answer} # common mistake GPT makes
sections.append(answer)
json_resume = {}
for section in sections:
json_resume.update(section)
print("JSON RESUME")
print(json_resume)
with open('json_resume.json', 'w') as f:
json.dump(json_resume, f)
rand_choice = list(template_commands.keys())[random.randint(1, 100) % len(template_commands)]
latex_resume = generate_latex(rand_choice, json_resume, ["education", "work", "skills", "projects", "awards"])
resume_bytes = render_latex(template_commands[rand_choice], latex_resume)
print('writing bytes')
with open('out.pdf', 'wb') as f:
f.write(resume_bytes)
print('written bytes')
print('done')
return f'Resume saved as out.pdf!'
async def _arun(
self, company_name: str, job_description: str, run_manager = None
) -> str:
"""Use the tool."""
print('Generating Resume.')
vectordb = FAISS.load_local('data', index_name="user_docs", embeddings= provider.get_embeddings())
llm = provider.get_llm()
qa = ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectordb.as_retriever())
#Generate a summary to be used as for CV
sections = []
for p in stqdm(
[ BASICS_PROMPT,
EDUCATION_PROMPT,
AWARDS_PROMPT,
PROJECTS_PROMPT,
WORK_PROMPT,
SKILLS_PROMPT,
],
desc="This might take a while..."
):
prompt = p.replace("<job_description>", job_description)
answer = qa(
{"question": prompt,
"chat_history" : []},
return_only_outputs=True
)["answer"]
answer = json.loads(answer)
if prompt == BASICS_PROMPT and "basics" not in answer:
answer = {"basics": answer} # common mistake GPT makes
sections.append(answer)
json_resume = {}
for section in sections:
json_resume.update(section)
print("JSON RESUME")
print(json_resume)
with open('json_resume.json', 'w') as f:
json.dump(json_resume, f)
rand_choice = list(template_commands.keys())[random.randint(1, 100) % len(template_commands)]
latex_resume = generate_latex(rand_choice, json_resume, ["education", "work", "skills", "projects", "awards"])
resume_bytes = render_latex(template_commands[rand_choice], latex_resume)
with open('out.pdf', 'wb') as f:
f.write(resume_bytes)
return f'Resume saved as out.pdf!'
| [
"<job_description>",
"Use this tool to create a personalized resume. \n\n Provide the company name and job description, and then this tool will\n\n 1. Retrieve the k most similar experiences and create a JSON resume\n\n 2. Construct a LaTeX resume from that JSON resume, and save it as an output pdf under the filename 'output.pdf'.\n\n "
] |
2024-01-10 | AnshKetchum/riptide | leadgen~tools~general~misc.py | from langchain.agents.agent_toolkits import JsonToolkit
from langchain.tools.yahoo_finance_news import YahooFinanceNewsTool
from langchain.agents.agent_toolkits import FileManagementToolkit
from langchain.tools.shell.tool import ShellTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.arxiv.tool import ArxivQueryRun
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain_experimental.tools.python.tool import PythonAstREPLTool
import platform
#from langchain.chains import create_extraction_chain - soon, ingest data from webpages
general_tools = [
ShellTool(),
*FileManagementToolkit().get_tools(),
DuckDuckGoSearchRun(),
YahooFinanceNewsTool(),
]
| [] |
2024-01-10 | AnshKetchum/riptide | leadgen~utils~handlers.py | from langchain.callbacks.base import BaseCallbackHandler
import os
import streamlit as st
class StreamHandler(BaseCallbackHandler):
def __init__(self, container: st.delta_generator.DeltaGenerator, initial_text: str = ""):
self.container = container
self.text = initial_text
self.run_id_ignore_token = None
def on_llm_start(self, serialized: dict, prompts: list, **kwargs):
# Workaround to prevent showing the rephrased question as output
if prompts[0].startswith("Human"):
self.run_id_ignore_token = kwargs.get("run_id")
def on_llm_new_token(self, token: str, **kwargs) -> None:
if self.run_id_ignore_token == kwargs.get("run_id", False):
return
self.text += token
self.container.markdown(self.text)
class PrintRetrievalHandler(BaseCallbackHandler):
def __init__(self, container):
self.status = container.status("**Context Retrieval**")
def on_retriever_start(self, serialized: dict, query: str, **kwargs):
self.status.write(f"**Question:** {query}")
self.status.update(label=f"**Context Retrieval:** {query}")
def on_retriever_end(self, documents, **kwargs):
for idx, doc in enumerate(documents):
source = os.path.basename(doc.metadata["source"])
self.status.write(f"**Document {idx} from {source}**")
self.status.markdown(doc.page_content)
self.status.update(state="complete") | [] |
2024-01-10 | AnshKetchum/riptide | leadgen~tools~outreach~mail~gmail.py | from langchain.tools.gmail.utils import build_resource_service, get_gmail_credentials
from langchain.agents.agent_toolkits import GmailToolkit
'''
Activate in final pipeline.
Use this to automatically send out cold
outreach.
'''
# Can review scopes here https://developers.google.com/gmail/api/auth/scopes
# For instance, readonly scope is 'https://www.googleapis.com/auth/gmail.readonly'
credentials = get_gmail_credentials(
token_file="token.json",
scopes=["https://mail.google.com/"],
client_secrets_file="credentials.json",
)
api_resource = build_resource_service(credentials=credentials)
toolkit = GmailToolkit(api_resource=api_resource)
tools = toolkit.get_tools() | [] |
2024-01-10 | AnshKetchum/riptide | leadgen~prompts~resume.py | from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.adapters import openai as lc_openai
import json
from stqdm import stqdm
SYSTEM_PROMPT = "You are a smart assistant to career advisors at the Harvard Extension School. You will reply with JSON only."
CV_TEXT_PLACEHOLDER = "<CV_TEXT>"
SYSTEM_TAILORING = """
You are a smart assistant to career advisors at the Harvard Extension School. Your task is to rewrite
resumes to be more brief and convincing according to the Resumes and Cover Letters guide.
"""
TAILORING_PROMPT = """
Consider the following CV:
<CV_TEXT>
Your task is to rewrite the given CV. Follow these guidelines:
- Be truthful and objective to the experience listed in the CV
- Be specific rather than general
- Rewrite job highlight items using STAR methodology (but do not mention STAR explicitly)
- Fix spelling and grammar errors
- Write to express not impress
- Articulate and don't be flowery
- Prefer active voice over passive voice
- Do not include a summary about the candidate
Improved CV:
"""
BASICS_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface Basics {
name: string;
email: string;
phone: string;
website: string;
address: string;
}
Write the basics section according to the Basic schema. On the response, include only the JSON.
"""
EDUCATION_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface EducationItem {
institution: string;
area: string;
additionalAreas: string[];
studyType: string;
startDate: string;
endDate: string;
score: string;
location: string;
}
interface Education {
education: EducationItem[];
}
Write the education section according to the Education schema. On the response, include only the JSON.
"""
AWARDS_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface AwardItem {
title: string;
date: string;
awarder: string;
summary: string;
}
interface Awards {
awards: AwardItem[];
}
Write the awards section according to the Awards schema. Include only the awards section. On the response, include only the JSON.
"""
PROJECTS_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface ProjectItem {
name: string;
description: string;
keywords: string[];
url: string;
}
interface Projects {
projects: ProjectItem[];
}
Write the projects section according to the Projects schema. Include all projects, but only the ones present in the CV. On the response, include only the JSON.
"""
SKILLS_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
type HardSkills = "Programming Languages" | "Tools" | "Frameworks" | "Computer Proficiency";
type SoftSkills = "Team Work" | "Communication" | "Leadership" | "Problem Solving" | "Creativity";
type OtherSkills = string;
Now consider the following TypeScript Interface for the JSON schema:
interface SkillItem {
name: HardSkills | SoftSkills | OtherSkills;
keywords: string[];
}
interface Skills {
skills: SkillItem[];
}
Write the skills section according to the Skills schema. Include only up to the top 4 skill names that are present in the CV and related with the education and work experience. On the response, include only the JSON.
"""
WORK_PROMPT = """
You are going to write a JSON resume section for an applicant applying for job posts.
Consider the following CV:
<CV_TEXT>
Now consider the following TypeScript Interface for the JSON schema:
interface WorkItem {
company: string;
position: string;
startDate: string;
endDate: string;
location: string;
highlights: string[];
}
interface Work {
work: WorkItem[];
}
Write a work section for the candidate according to the Work schema. Include only the work experience and not the project experience. For each work experience, provide a company name, position name, start and end date, and bullet point for the highlights. Follow the Harvard Extension School Resume guidelines and phrase the highlights with the STAR methodology
"""
def generate_json_resume(cv_text, api_key, model="gpt-3.5-turbo"):
"""Generate a JSON resume from a CV text"""
sections = []
for prompt in stqdm(
[
BASICS_PROMPT,
EDUCATION_PROMPT,
AWARDS_PROMPT,
PROJECTS_PROMPT,
SKILLS_PROMPT,
WORK_PROMPT,
],
desc="This may take a while...",
):
filled_prompt = prompt.replace(CV_TEXT_PLACEHOLDER, cv_text)
response = lc_openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": filled_prompt},
],
)
try:
answer = response["choices"][0]["message"]["content"]
answer = json.loads(answer)
print(answer)
if prompt == BASICS_PROMPT and "basics" not in answer:
answer = {"basics": answer} # common mistake GPT makes
sections.append(answer)
except Exception as e:
print(e)
final_json = {}
for section in sections:
final_json.update(section)
return final_json
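# Usage sketch (added for illustration, not part of the original module): shows how the
# helpers in this file might be chained together. The CV path, API key, and model name
# below are placeholder assumptions you would supply yourself.
def _example_build_resume(cv_path="cv.txt", api_key="sk-...", model="gpt-3.5-turbo"):
    with open(cv_path, encoding="utf-8") as f:
        cv_text = f.read()
    tailored = tailor_resume(cv_text, api_key, model=model)  # optional rewrite pass
    resume = generate_json_resume(tailored, api_key, model=model)
    print(json.dumps(resume, indent=2))
    return resume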
def tailor_resume(cv_text, api_key, model="gpt-3.5-turbo"):
filled_prompt = TAILORING_PROMPT.replace("<CV_TEXT>", cv_text)
try:
response = lc_openai.ChatCompletion.create(
            model=model,
messages=[
{"role": "system", "content": SYSTEM_TAILORING},
{"role": "user", "content": filled_prompt},
],
)
answer = response["choices"][0]["message"]["content"]
return answer
except Exception as e:
print(e)
print("Failed to tailor resume.")
return cv_text | [
"\nYou are a smart assistant to career advisors at the Harvard Extension School. Your take is to rewrite\nresumes to be more brief and convincing according to the Resumes and Cover Letters guide.\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface EducationItem {\n institution: string;\n area: string;\n additionalAreas: string[];\n studyType: string;\n startDate: string;\n endDate: string;\n score: string;\n location: string;\n}\n\ninterface Education {\n education: EducationItem[];\n}\n\n\nWrite the education section according to the Education schema. On the response, include only the JSON.\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface WorkItem {\n company: string;\n position: string;\n startDate: string;\n endDate: string;\n location: string;\n highlights: string[];\n}\n\ninterface Work {\n work: WorkItem[];\n}\n\nWrite a work section for the candidate according to the Work schema. Include only the work experience and not the project experience. For each work experience, provide a company name, position name, start and end date, and bullet point for the highlights. Follow the Harvard Extension School Resume guidelines and phrase the highlights with the STAR methodology\n",
"You are a smart assistant to career advisors at the Harvard Extension School. You will reply with JSON only.",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface Basics {\n name: string;\n email: string;\n phone: string;\n website: string;\n address: string;\n}\n\nWrite the basics section according to the Basic schema. On the response, include only the JSON.\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface AwardItem {\n title: string;\n date: string;\n awarder: string;\n summary: string;\n}\n\ninterface Awards {\n awards: AwardItem[];\n}\n\nWrite the awards section according to the Awards schema. Include only the awards section. On the response, include only the JSON.\n",
"\nConsider the following CV:\n<CV_TEXT>\n\nYour task is to rewrite the given CV. Follow these guidelines:\n- Be truthful and objective to the experience listed in the CV\n- Be specific rather than general\n- Rewrite job highlight items using STAR methodology (but do not mention STAR explicitly)\n- Fix spelling and grammar errors\n- Writte to express not impress\n- Articulate and don't be flowery\n- Prefer active voice over passive voice\n- Do not include a summary about the candidate\n\nImproved CV:\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface ProjectItem {\n name: string;\n description: string;\n keywords: string[];\n url: string;\n}\n\ninterface Projects {\n projects: ProjectItem[];\n}\n\nWrite the projects section according to the Projects schema. Include all projects, but only the ones present in the CV. On the response, include only the JSON.\n",
"\nYou are going to write a JSON resume section for an applicant applying for job posts.\n\nConsider the following CV:\n<CV_TEXT>\n\ntype HardSkills = \"Programming Languages\" | \"Tools\" | \"Frameworks\" | \"Computer Proficiency\";\ntype SoftSkills = \"Team Work\" | \"Communication\" | \"Leadership\" | \"Problem Solving\" | \"Creativity\";\ntype OtherSkills = string;\n\nNow consider the following TypeScript Interface for the JSON schema:\n\ninterface SkillItem {\n name: HardSkills | SoftSkills | OtherSkills;\n keywords: string[];\n}\n\ninterface Skills {\n skills: SkillItem[];\n}\n\nWrite the skills section according to the Skills schema. Include only up to the top 4 skill names that are present in the CV and related with the education and work experience. On the response, include only the JSON.\n"
] |
2024-01-10 | ververica/pyflink-nlp | lda_trainer.py | import pandas as pd
import spacy
from gensim.models.phrases import Phrases, Phraser
from gensim.models.ldamodel import LdaModel
from gensim.models.coherencemodel import CoherenceModel
from gensim.utils import simple_preprocess as spp
from gensim.parsing.preprocessing import STOPWORDS
from gensim.corpora import Dictionary
from gensim.test.utils import datapath
"""
Latent Dirichlet Allocation (LDA) is a bag-of-words algorithm that automatically
discovers the topics contained within a given set of documents.
It is more flexible than K-Means in the sense that a document can belong to
multiple clusters.
You can find sample data to play with and the original training set at:
https://drive.google.com/file/d/1ugnWEQV19g0qTDz0LJkjr8VgujkAb2ez/view?usp=sharing
"""
def lemmatize(tl_bg, allowed_postags=['NOUN','VERB','ADJ','ADV']):
tl_out = []
for tkn in tl_bg:
doc = nlp(" ".join(tkn))
tl_out.append([t.lemma_ for t in doc if t.pos_ in allowed_postags])
return tl_out
def clean(tl):
tl_stopwords = [[word for word in spp(str(tkn)) if word not in STOPWORDS] for tkn in tl]
bigram = Phrases(tl_stopwords, min_count=5, threshold=100)
bigram_mod = Phraser(bigram)
tl_bigram_model = [bigram_mod[tkn] for tkn in tl_stopwords]
out = lemmatize(tl_bigram_model)
return out
def tokenize(msg):
for m in msg:
yield (spp(str(m), deacc=True))
def format_topics_sentences(ldamodel, corpus, texts):
sent_topics_df = pd.DataFrame()
# Get main topic in each document
for i, row in enumerate(ldamodel[corpus]):
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the dominant topic, percentage of contribution and keywords
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # Dominant topic
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']
contents = pd.Series(texts)
sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
return(sent_topics_df)
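# Illustrative sketch (not part of the original script): a toy run of the core
# Dictionary -> doc2bow -> LdaModel flow that the pipeline below applies to the real
# mailing-list data. The token lists here are made up for demonstration only.
def _toy_lda_example():
    docs = [
        ["flink", "checkpoint", "state", "backend"],
        ["flink", "sql", "table", "query"],
        ["stream", "window", "watermark", "state"],
    ]
    toy_dictionary = Dictionary(docs)
    toy_corpus = [toy_dictionary.doc2bow(doc) for doc in docs]
    toy_model = LdaModel(corpus=toy_corpus, id2word=toy_dictionary, num_topics=2, random_state=1)
    return toy_model.print_topics(num_words=3)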
if __name__ == '__main__':
df = pd.read_csv("<path-to-dir>/<training-set>.csv")
token_list = list(tokenize(df["message_subject"]))
nlp = spacy.load('en', disable=['parser', 'ner'])
texts = clean(token_list)
# Build a index to word Dictionary
id2word = Dictionary(texts)
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
lda_model = LdaModel(corpus=corpus,
id2word=id2word,
num_topics=22,
random_state=100,
update_every=1,
chunksize=1000,
passes=10,
alpha='auto')
# Compute Coherence Score
coherence_model_lda = CoherenceModel(model=lda_model, texts=texts, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
df_topic_sents_keywords = format_topics_sentences(lda_model, corpus, texts)
# Format
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
print(df_dominant_topic.groupby(['Dominant_Topic']).size())
# Save the model
temp_file = datapath("<path-to-dir>/lda_model/lda_model_user_ml")
lda_model.save(temp_file)
# print(lda_model.print_topics(num_topics=24,num_words=5))
# print('\nPerplexity: ', lda_model.log_perplexity(corpus))
# print('\nCoherence Score: ', coherence_lda)
# print('Number of unique tokens: %d' % len(id2word))
# print('Number of documents: %d' % len(corpus))
| [] |
2024-01-10 | ververica/pyflink-nlp | tokenizer.py | import spacy
from gensim.models.phrases import Phrases, Phraser
from gensim.models.ldamodel import LdaModel
from gensim.models.coherencemodel import CoherenceModel
from gensim.utils import simple_preprocess as spp
from gensim.parsing.preprocessing import STOPWORDS
def tokenize(msg):
nlp = spacy.load('en', disable=['parser', 'ner'])
msg = " ".join([token.lemma_ for token in nlp(msg)])
tkn_list = (spp(str(msg), deacc=True))
return tkn_list
def clean(tl):
tl_stopwords = list(filter(lambda x: x not in STOPWORDS,tl))
return tl_stopwords
def find_topic(m,model,dic):
token_list = list(tokenize(m))
texts = clean(token_list)
vc = dic.doc2bow(texts)
vector = model[vc]
topics = sorted(vector, key=lambda x: x[1], reverse=True)
return str(topics[0][0])
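# Usage sketch (illustrative, not part of the original module): load a previously
# trained LDA model (saved by lda_trainer.py) and classify a single subject line.
# The path is a placeholder, mirroring the placeholders used in the trainer script;
# the dictionary is recovered from the model's own id2word attribute.
def _example_find_topic():
    lda = LdaModel.load("<path-to-dir>/lda_model/lda_model_user_ml")
    return find_topic("Question about checkpointing in Flink", lda, lda.id2word)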
| [] |
2024-01-10 | joefiorini/ml-playground | Libary%20Testing~langchain_extras~llms~exllama.py | import torch
from langchain.llms.base import LLM
from langchain.chains import ConversationChain
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.callbacks import StdOutCallbackHandler
from typing import Any, Dict, Generator, List, Optional
from pydantic import Field, root_validator
from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
from langchain.memory import ConversationTokenBufferMemory
from langchain.prompts import PromptTemplate
from exllama.tokenizer import ExLlamaTokenizer
from exllama.generator import ExLlamaGenerator
from exllama.lora import ExLlamaLora
import os, glob, time, json, sys, logging
class Exllama(LLM):
client: Any #: :meta private:
model_path: str
"""The path to the GPTQ model folder."""
exllama_cache: ExLlamaCache = None#: :meta private:
config: ExLlamaConfig = None#: :meta private:
generator: ExLlamaGenerator = None#: :meta private:
tokenizer: ExLlamaTokenizer = None#: :meta private:
##Langchain parameters
logfunc = print
stop_sequences: Optional[List[str]] = Field("", description="Sequences that immediately will stop the generator.")
streaming: Optional[bool] = Field(True, description="Whether to stream the results, token by token.")
##Generator parameters
disallowed_tokens: Optional[List[int]] = Field(None, description="List of tokens to disallow during generation.")
temperature: Optional[float] = Field(0.95, description="Temperature for sampling diversity.")
top_k: Optional[int] = Field(40, description="Consider the most probable top_k samples, 0 to disable top_k sampling.")
    top_p: Optional[float] = Field(0.65, description="Consider tokens up to a cumulative probability of top_p, 0.0 to disable top_p sampling.")
min_p: Optional[float] = Field(0.0, description="Do not consider tokens with probability less than this.")
typical: Optional[float] = Field(0.0, description="Locally typical sampling threshold, 0.0 to disable typical sampling.")
token_repetition_penalty_max: Optional[float] = Field(1.15, description="Repetition penalty for most recent tokens.")
    token_repetition_penalty_sustain: Optional[int] = Field(256, description="Number of most recent tokens to apply the repetition penalty to, -1 to apply to the whole context.")
token_repetition_penalty_decay: Optional[int] = Field(128, description="Gradually decrease penalty over this many tokens.")
beams: Optional[int] = Field(0, description="Number of beams for beam search.")
beam_length: Optional[int] = Field(1, description="Length of beams for beam search.")
##Config overrides
    max_seq_len: Optional[int] = Field(2048, description="Reduce to save memory. Can also be increased, ideally while also using compress_pos_emb and a compatible model/LoRA")
compress_pos_emb: Optional[float] = Field(1.0, description="Amount of compression to apply to the positional embedding.")
set_auto_map: Optional[str] = Field(None, description ="Comma-separated list of VRAM (in GB) to use per GPU device for model layers, e.g. 20,7,7")
gpu_peer_fix: Optional[bool] = Field(None, description="Prevent direct copies of data between GPUs")
alpha_value: Optional[float] = Field(1.0, description="Rope context extension alpha")
##Tuning
matmul_recons_thd: Optional[int] = Field(None)
fused_mlp_thd: Optional[int] = Field(None)
sdp_thd: Optional[int] = Field(None)
fused_attn: Optional[bool] = Field(None)
matmul_fused_remap: Optional[bool] = Field(None)
rmsnorm_no_half2: Optional[bool] = Field(None)
rope_no_half2: Optional[bool] = Field(None)
matmul_no_half2: Optional[bool] = Field(None)
silu_no_half2: Optional[bool] = Field(None)
concurrent_streams: Optional[bool] = Field(None)
##Lora Parameters
lora_path: Optional[str] = Field(None, description="Path to your lora.")
@staticmethod
def get_model_path_at(path):
patterns = ["*.safetensors", "*.bin", "*.pt"]
model_paths = []
for pattern in patterns:
full_pattern = os.path.join(path, pattern)
model_paths = glob.glob(full_pattern)
if model_paths: # If there are any files matching the current pattern
break # Exit the loop as soon as we find a matching file
if model_paths: # If there are any files matching any of the patterns
return model_paths[0]
else:
return None # Return None if no matching files were found
@staticmethod
def configure_object(params, values, logfunc):
obj_params = {k: values.get(k) for k in params}
def apply_to(obj):
for key, value in obj_params.items():
if value:
if hasattr(obj, key):
setattr(obj, key, value)
logfunc(f"{key} {value}")
else:
raise AttributeError(f"{key} does not exist in {obj}")
return apply_to
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
model_path = values["model_path"]
lora_path = values["lora_path"]
tokenizer_path = os.path.join(model_path, "tokenizer.model")
model_config_path = os.path.join(model_path, "config.json")
model_path = Exllama.get_model_path_at(model_path)
config = ExLlamaConfig(model_config_path)
tokenizer = ExLlamaTokenizer(tokenizer_path)
config.model_path = model_path
##Set logging function if verbose or set to empty lambda
verbose = values['verbose']
if not verbose:
values['logfunc'] = lambda *args, **kwargs: None
logfunc = values['logfunc']
model_param_names = [
"temperature",
"top_k",
"top_p",
"min_p",
"typical",
"token_repetition_penalty_max",
"token_repetition_penalty_sustain",
"token_repetition_penalty_decay",
"beams",
"beam_length",
]
config_param_names = [
"max_seq_len",
"compress_pos_emb",
"gpu_peer_fix",
"alpha_value"
]
tuning_parameters = [
"matmul_recons_thd",
"fused_mlp_thd",
"sdp_thd",
"matmul_fused_remap",
"rmsnorm_no_half2",
"rope_no_half2",
"matmul_no_half2",
"silu_no_half2",
"concurrent_streams",
"fused_attn",
]
configure_config = Exllama.configure_object(config_param_names, values, logfunc)
configure_config(config)
configure_tuning = Exllama.configure_object(tuning_parameters, values, logfunc)
configure_tuning(config)
configure_model = Exllama.configure_object(model_param_names, values, logfunc)
##Special parameter, set auto map, it's a function
if values['set_auto_map']:
config.set_auto_map(values['set_auto_map'])
logfunc(f"set_auto_map {values['set_auto_map']}")
model = ExLlama(config)
exllama_cache = ExLlamaCache(model)
generator = ExLlamaGenerator(model, tokenizer, exllama_cache)
configure_model(generator.settings)
##Load and apply lora to generator
if lora_path is not None:
lora_config_path = os.path.join(lora_path, "adapter_config.json")
lora_path = Exllama.get_model_path_at(lora_path)
lora = ExLlamaLora(model, lora_config_path, lora_path)
generator.lora = lora
logfunc(f"Loaded LORA @ {lora_path}")
##Set special attribute on generator, this is a new addition and doesn't normally exist on generator.
values["stop_sequences"] = [x.strip().lower() for x in values["stop_sequences"]]
setattr(generator.settings, "stop_sequences", values["stop_sequences"])
logfunc(f"stop_sequences {values['stop_sequences']}")
disallowed = values.get("disallowed_tokens")
if disallowed:
generator.disallow_tokens(disallowed)
print(f"Disallowed Tokens: {generator.disallowed_tokens}")
values["client"] = model
values["generator"] = generator
values["config"] = config
values["tokenizer"] = tokenizer
values["exllama_cache"] = exllama_cache
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "Exllama"
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text."""
return self.generator.tokenizer.num_tokens(text)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
combined_text_output = ""
for token in self.stream(prompt=prompt, stop=stop, run_manager=run_manager):
combined_text_output += token
return combined_text_output
from enum import Enum
class MatchStatus(Enum):
EXACT_MATCH = 1
PARTIAL_MATCH = 0
NO_MATCH = 2
def match_status(self, sequence: str, banned_sequences: List[str]):
sequence = sequence.strip().lower()
for banned_seq in banned_sequences:
if banned_seq == sequence:
return self.MatchStatus.EXACT_MATCH
elif banned_seq.startswith(sequence):
return self.MatchStatus.PARTIAL_MATCH
return self.MatchStatus.NO_MATCH
def stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
config = self.config
generator = self.generator
beam_search = self.beams >= 1 and self.beam_length >= 1
ids = generator.tokenizer.encode(prompt)
generator.gen_begin_reuse(ids)
if beam_search:
generator.begin_beam_search()
token_getter = generator.beam_search
else:
generator.end_beam_search()
token_getter = generator.gen_single_token
last_newline_pos = 0
match_buffer = ""
seq_length = len(generator.tokenizer.decode(generator.sequence_actual[0]))
response_start = seq_length
cursor_head = response_start
token_count = 0
        while token_count < (self.max_seq_len - 4):  # Slight extra padding space as we seem to occasionally get a few more than 1-2 tokens
#Fetch a token
token = token_getter()
#If it's the ending token replace it and end the generation.
if token.item() == generator.tokenizer.eos_token_id:
generator.replace_last_token(generator.tokenizer.newline_token_id)
if beam_search:
generator.end_beam_search()
return
#Tokenize the string from the last new line, we can't just decode the last token due to how sentencepiece decodes.
stuff = generator.tokenizer.decode(generator.sequence_actual[0][last_newline_pos:])
cursor_tail = len(stuff)
chunk = stuff[cursor_head:cursor_tail]
cursor_head = cursor_tail
#Append the generated chunk to our stream buffer
match_buffer = match_buffer + chunk
if token.item() == generator.tokenizer.newline_token_id:
last_newline_pos = len(generator.sequence_actual[0])
cursor_head = 0
cursor_tail = 0
#Check if the stream buffer is one of the stop sequences
status = self.match_status(match_buffer, self.stop_sequences)
if status == self.MatchStatus.EXACT_MATCH:
#Encountered a stop, rewind our generator to before we hit the match and end generation.
rewind_length = generator.tokenizer.encode(match_buffer).shape[-1]
generator.gen_rewind(rewind_length)
gen = generator.tokenizer.decode(generator.sequence_actual[0][response_start:])
if beam_search:
generator.end_beam_search()
return
elif status == self.MatchStatus.PARTIAL_MATCH:
#Partially matched a stop, continue buffering but don't yield.
continue
elif status == self.MatchStatus.NO_MATCH:
if run_manager:
run_manager.on_llm_new_token(
token=match_buffer, verbose=self.verbose,
)
token_count += generator.tokenizer.num_tokens(match_buffer)
yield match_buffer # Not a stop, yield the match buffer.
match_buffer = ""
return
from langchain.callbacks.base import BaseCallbackHandler
import time
class BasicStreamingHandler(BaseCallbackHandler):
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> Any:
"""Run when LLM starts running."""
self.logfunc(prompts[0])
self.logfunc(f"\nLength: {len(prompts[0])}")
# self.logfunc(f"Buffer: {self.chain.llm.get_num_tokens_from_messages(self.chain.memory.buffer)}\n")
self.start_time = time.time()
def on_llm_new_token(self, token: str, **kwargs) -> None:
print(token, end="", flush=True)
self.token_count += self.chain.llm.generator.tokenizer.num_tokens(token)
def on_llm_end(self, response, **kwargs) -> None:
end_time = time.time()
elapsed_time = end_time - self.start_time
tokens_per_second = self.token_count / elapsed_time
self.logfunc(f"\nToken count: {self.token_count}")
self.logfunc(f"Tokens per second: {tokens_per_second}")
self.token_count = 0
def set_chain(self, chain):
self.chain = chain
self.token_count = 0
self.logfunc = self.chain.llm.logfunc
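# Usage sketch (illustrative, not part of the original module): wires the Exllama LLM
# into a LangChain ConversationChain with the streaming handler above. The model path,
# stop sequence, and generation parameters are placeholder assumptions.
def _example_exllama_chat(model_dir="/models/llama-7b-gptq-4bit"):
    handler = BasicStreamingHandler()
    llm = Exllama(
        model_path=model_dir,          # folder containing config.json, tokenizer.model and the GPTQ weights
        temperature=0.7,
        max_seq_len=2048,
        stop_sequences=["### Human:"],
        callbacks=[handler],
        verbose=True,
    )
    chain = ConversationChain(llm=llm)
    handler.set_chain(chain)           # lets the handler count tokens via the chain's LLM
    return chain.run("Briefly introduce yourself.")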
| [] |
2024-01-10 | chengchang/tensorpack | examples~DeepQNetwork~atari_wrapper.py | # -*- coding: utf-8 -*-
# File: atari_wrapper.py
import numpy as np
from collections import deque
import gym
from gym import spaces
_v0, _v1 = gym.__version__.split('.')[:2]
assert int(_v0) > 0 or int(_v1) >= 10, gym.__version__
"""
The following wrappers are copied or modified from openai/baselines:
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
"""
class MapState(gym.ObservationWrapper):
def __init__(self, env, map_func):
gym.ObservationWrapper.__init__(self, env)
self._func = map_func
def observation(self, obs):
return self._func(obs)
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Buffer observations and stack across channels (last axis)."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
chan = 1 if len(shp) == 2 else shp[2]
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1], chan * k), dtype=np.uint8)
def reset(self):
"""Clear buffer and re-fill by duplicating the first observation."""
ob = self.env.reset()
for _ in range(self.k - 1):
self.frames.append(np.zeros_like(ob))
self.frames.append(ob)
return self.observation()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self.observation(), reward, done, info
def observation(self):
assert len(self.frames) == self.k
if self.frames[-1].ndim == 2:
return np.stack(self.frames, axis=-1)
else:
return np.concatenate(self.frames, axis=2)
class _FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
def step(self, action):
return self.env.step(action)
def FireResetEnv(env):
if isinstance(env, gym.Wrapper):
baseenv = env.unwrapped
else:
baseenv = env
if 'FIRE' in baseenv.get_action_meanings():
return _FireResetEnv(env)
return env
class LimitLength(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self.k = k
def reset(self):
# This assumes that reset() will really reset the env.
# If the underlying env tries to be smart about reset
# (e.g. end-of-life), the assumption doesn't hold.
ob = self.env.reset()
self.cnt = 0
return ob
def step(self, action):
ob, r, done, info = self.env.step(action)
self.cnt += 1
if self.cnt == self.k:
done = True
return ob, r, done, info
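# Composition sketch (illustrative, not part of the original module): shows how these
# wrappers are typically stacked around an Atari environment. The env id, resize shape,
# stack depth, and episode cap below are assumptions.
def _make_wrapped_env(name='BreakoutNoFrameskip-v4'):
    import cv2  # only needed for this example
    def to_gray_84(im):
        gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
        return cv2.resize(gray, (84, 84))
    env = gym.make(name)
    env = FireResetEnv(env)        # press FIRE on reset when the game requires it
    env = MapState(env, to_gray_84)
    env = FrameStack(env, 4)       # stack the last 4 frames along the channel axis
    env = LimitLength(env, 60000)  # cap episode length
    return env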
| [] |
2024-01-10 | ShayVD/IntroPythonChatGPT | wikibot.py | import openai
import wikipedia
from os import getenv
from dotenv import load_dotenv
load_dotenv()
# pass the api key
openai.api_key = getenv("OPENAI_API_KEY")
# get user input
title = input("Title of the page: ")
# get the wikipedia content
page = wikipedia.page(title=title, auto_suggest=False)
# define the prompt
prompt = "Write a summary of the following article: " + page.content[:10000]
messages = []
messages.append({"role": "user", "content": prompt})
try:
# make an api call
response = openai.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0,
n=1
)
# print the response
print(response.choices[0].message.content)
# authentication issue
except openai.AuthenticationError as e:
print("No Valid Token/Authentication Error: %s" % e.message)
# invalid request issue
except openai.BadRequestError as e:
print("Bad Request Error: %s" % e.message)
| [
"Write a summary of the following article: ",
"Write a summary of the following article: \" + page.content[:10000]"
] |
2024-01-10 | ryanisho/sakura | request.py | import openai
from revChatGPT.V3 import Chatbot
gpt_key = "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"
# openai.api_key = "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"
def search(input):
response = openai.Completion.create(
model="text-davinci-003",
        prompt=f"Given the following prompt, translate it into Python code\n\n{input}\n\n###",
temperature=0,
max_tokens=150,
top_p=1.0,
frequency_penalty=1.0,
presence_penalty=0.0,
stop=['"""'],
)
return response.choices[0].text
def searchCustom(input):
chatbot = Chatbot(api_key=gpt_key)
response = chatbot.ask(
f"Given the following prompt, translate it into Python code\n\n{input}",
)
return response
def ntrCustom(input):
chatbot = Chatbot(api_key=gpt_key)
response = chatbot.ask(
f"Given the following Python code, translate it into English\n\n{input}",
)
return response
def debugCustom(input):
chatbot = Chatbot(api_key=gpt_key)
response = chatbot.ask(
f"Given the following prompt, fix bugs in the below function. cite stackoverflow links and code to do this\n\n{input}",
)
return response
def debug(input):
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"##### Fix bugs in the below function\n\n### Buggy Python\n {input} \n\n###",
temperature=0.1,
max_tokens=200,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=["###"],
)
return response.choices[0].text
def explain(input):
response = openai.Completion.create(
model="text-davinci-003",
prompt=input
+ '"""\nHere\'s what the above class is doing, explained in a detailed way:\n1.',
temperature=0,
max_tokens=150,
top_p=1.0,
frequency_penalty=1.0,
presence_penalty=0.0,
stop=['"""'],
)
return response.choices[0].text
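# Usage sketch (illustrative): exercises the helpers above from the command line.
# Assumes the API key configured above is valid for the V3 Chatbot / openai client.
if __name__ == "__main__":
    print(searchCustom("read a CSV file and print the first five rows"))
    print(ntrCustom("print(sum(range(10)))"))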
| [
"##### Fix bugs in the below function\n\n### Buggy Python\n INPUT \n\n###",
"INPUT\"\"\"\nHere's what the above class is doing, explained in a detailed way:\n1.",
"INPUTGiven the following prompt, translate it into Python code\n\n {input} \n\n###"
] |
2024-01-10 | ryanisho/sakura | model~src~revChatGPT~V1.py | """
Standard ChatGPT
"""
from __future__ import annotations
import base64
import binascii
import contextlib
import datetime
import json
import logging
import time
import uuid
from functools import wraps
from os import environ
from os import getenv
from pathlib import Path
from typing import AsyncGenerator
from typing import Generator
from typing import NoReturn
import httpx
import requests
from httpx import AsyncClient
from OpenAIAuth import Authenticator
from OpenAIAuth import Error as AuthError
from . import __version__
from . import typings as t
from .recipient import PythonRecipient
from .recipient import Recipient
from .recipient import RecipientManager
from .utils import create_completer
from .utils import create_session
from .utils import get_input
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s",
)
log = logging.getLogger(__name__)
def logger(is_timed: bool):
"""Logger decorator
Args:
is_timed (bool): Whether to include function running time in exit log
Returns:
_type_: decorated function
"""
def decorator(func):
wraps(func)
def wrapper(*args, **kwargs):
log.debug(
"Entering %s with args %s and kwargs %s",
func.__name__,
args,
kwargs,
)
start = time.time()
out = func(*args, **kwargs)
end = time.time()
if is_timed:
log.debug(
"Exiting %s with return value %s. Took %s seconds.",
func.__name__,
out,
end - start,
)
else:
log.debug("Exiting %s with return value %s", func.__name__, out)
return out
return wrapper
return decorator
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://ai.fakeopen.com/api/"
bcolors = t.Colors()
class Chatbot:
"""
Chatbot class for ChatGPT
"""
recipients: RecipientManager
@logger(is_timed=True)
def __init__(
self,
config: dict[str, str],
conversation_id: str | None = None,
parent_id: str | None = None,
session_client=None,
lazy_loading: bool = True,
base_url: str | None = None,
) -> None:
"""Initialize a chatbot
Args:
config (dict[str, str]): Login and proxy info. Example:
{
"email": "OpenAI account email",
"password": "OpenAI account password",
"access_token": "<access_token>"
"proxy": "<proxy_url_string>",
"paid": True/False, # whether this is a plus account
}
More details on these are available at https://github.com/acheong08/ChatGPT#configuration
conversation_id (str | None, optional): Id of the conversation to continue on. Defaults to None.
parent_id (str | None, optional): Id of the previous response message to continue on. Defaults to None.
session_client (_type_, optional): _description_. Defaults to None.
Raises:
Exception: _description_
"""
user_home = getenv("HOME")
if user_home is None:
user_home = Path().cwd()
self.cache_path = Path(Path().cwd(), ".chatgpt_cache.json")
else:
# mkdir ~/.config/revChatGPT
if not Path(user_home, ".config").exists():
Path(user_home, ".config").mkdir()
if not Path(user_home, ".config", "revChatGPT").exists():
Path(user_home, ".config", "revChatGPT").mkdir()
self.cache_path = Path(user_home, ".config", "revChatGPT", "cache.json")
self.config = config
self.session = session_client() if session_client else requests.Session()
if "email" in config and "password" in config:
try:
cached_access_token = self.__get_cached_access_token(
self.config.get("email", None),
)
except t.Error as error:
if error.code == 5:
raise
cached_access_token = None
if cached_access_token is not None:
self.config["access_token"] = cached_access_token
if "proxy" in config:
if not isinstance(config["proxy"], str):
error = TypeError("Proxy must be a string!")
raise error
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
if isinstance(self.session, AsyncClient):
proxies = {
"http://": config["proxy"],
"https://": config["proxy"],
}
self.session = AsyncClient(proxies=proxies) # type: ignore
else:
self.session.proxies.update(proxies)
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
self.lazy_loading = lazy_loading
self.base_url = base_url or BASE_URL
self.recipients = RecipientManager()
self.__check_credentials()
@logger(is_timed=True)
def __check_credentials(self) -> None:
"""Check login info and perform login
Any one of the following is sufficient for login. Multiple login info can be provided at the same time and they will be used in the order listed below.
- access_token
- email + password
Raises:
Exception: _description_
AuthError: _description_
"""
if "access_token" in self.config:
self.set_access_token(self.config["access_token"])
elif "email" not in self.config or "password" not in self.config:
error = t.AuthenticationError("Insufficient login details provided!")
raise error
if "access_token" not in self.config:
try:
self.login()
except AuthError as error:
print(error.details)
print(error.status_code)
raise error
@logger(is_timed=False)
def set_access_token(self, access_token: str) -> None:
"""Set access token in request header and self.config, then cache it to file.
Args:
access_token (str): access_token
"""
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
self.session.cookies.update(
{
"library": "revChatGPT",
},
)
self.config["access_token"] = access_token
email = self.config.get("email", None)
if email is not None:
self.__cache_access_token(email, access_token)
@logger(is_timed=False)
def __get_cached_access_token(self, email: str | None) -> str | None:
"""Read access token from cache
Args:
email (str | None): email of the account to get access token
Raises:
Error: _description_
Error: _description_
Error: _description_
Returns:
str | None: access token string or None if not found
"""
email = email or "default"
cache = self.__read_cache()
access_token = cache.get("access_tokens", {}).get(email, None)
# Parse access_token as JWT
if access_token is not None:
try:
# Split access_token into 3 parts
s_access_token = access_token.split(".")
# Add padding to the middle part
s_access_token[1] += "=" * ((4 - len(s_access_token[1]) % 4) % 4)
d_access_token = base64.b64decode(s_access_token[1])
d_access_token = json.loads(d_access_token)
except binascii.Error:
error = t.Error(
source="__get_cached_access_token",
message="Invalid access token",
code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR,
)
raise error from None
except json.JSONDecodeError:
error = t.Error(
source="__get_cached_access_token",
message="Invalid access token",
code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR,
)
raise error from None
exp = d_access_token.get("exp", None)
if exp is not None and exp < time.time():
error = t.Error(
source="__get_cached_access_token",
message="Access token expired",
code=t.ErrorType.EXPIRED_ACCESS_TOKEN_ERROR,
)
raise error
return access_token
@logger(is_timed=False)
def __cache_access_token(self, email: str, access_token: str) -> None:
"""Write an access token to cache
Args:
email (str): account email
access_token (str): account access token
"""
email = email or "default"
cache = self.__read_cache()
if "access_tokens" not in cache:
cache["access_tokens"] = {}
cache["access_tokens"][email] = access_token
self.__write_cache(cache)
@logger(is_timed=False)
def __write_cache(self, info: dict) -> None:
"""Write cache info to file
Args:
info (dict): cache info, current format
{
"access_tokens":{"[email protected]": 'this account's access token', }
}
"""
dirname = self.cache_path.home() or Path(".")
dirname.mkdir(parents=True, exist_ok=True)
json.dump(info, open(self.cache_path, "w", encoding="utf-8"), indent=4)
@logger(is_timed=False)
def __read_cache(self):
try:
cached = json.load(open(self.cache_path, encoding="utf-8"))
except (FileNotFoundError, json.decoder.JSONDecodeError):
cached = {}
return cached
@logger(is_timed=True)
def login(self) -> None:
"""Login to OpenAI by email and password"""
        if not (self.config.get("email") and self.config.get("password")):
log.error("Insufficient login details provided!")
error = t.AuthenticationError("Insufficient login details provided!")
raise error
auth = Authenticator(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
log.debug("Using authenticator to get access token")
auth.begin()
auth.get_access_token()
self.set_access_token(auth.access_token)
@logger(is_timed=True)
def __send_request(
self,
data: dict,
auto_continue: bool = False,
timeout: float = 360,
**kwargs,
) -> Generator[dict, None, None]:
log.debug("Sending the payload")
cid, pid = data["conversation_id"], data["parent_message_id"]
model, message = None, ""
self.conversation_id_prev_queue.append(cid)
self.parent_id_prev_queue.append(pid)
response = self.session.post(
url=f"{self.base_url}conversation",
data=json.dumps(data),
timeout=timeout,
stream=True,
)
self.__check_response(response)
finish_details = None
for line in response.iter_lines():
# remove b' and ' at the beginning and end and ignore case
line = str(line)[2:-1]
if line.lower() == "internal server error":
log.error(f"Internal Server Error: {line}")
error = t.Error(
source="ask",
message="Internal Server Error",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if not line or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
break
line = line.replace('\\"', '"')
line = line.replace("\\'", "'")
line = line.replace("\\\\", "\\")
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise ValueError(f"Field missing. Details: {str(line)}")
if line.get("message").get("author").get("role") != "assistant":
continue
message: str = line["message"]["content"]["parts"][0]
cid = line["conversation_id"]
pid = line["message"]["id"]
metadata = line["message"].get("metadata", {})
model = metadata.get("model_slug", None)
finish_details = metadata.get("finish_details", {"type": None})["type"]
yield {
"message": message,
"conversation_id": cid,
"parent_id": pid,
"model": model,
"finish_details": finish_details,
"end_turn": line["message"].get("end_turn", True),
"recipient": line["message"].get("recipient", "all"),
}
self.conversation_mapping[cid] = pid
if pid is not None:
self.parent_id = pid
if cid is not None:
self.conversation_id = cid
if not (auto_continue and finish_details == "max_tokens"):
return
message = message.strip("\n")
for i in self.continue_write(
conversation_id=cid,
timeout=timeout,
auto_continue=False,
):
i["message"] = message + i["message"]
yield i
@logger(is_timed=True)
def post_messages(
self,
messages: list[dict],
conversation_id: str | None = None,
parent_id: str | None = None,
model: str | None = None,
auto_continue: bool = False,
timeout: float = 360,
**kwargs,
) -> Generator[dict, None, None]:
"""Ask a question to the chatbot
Args:
messages (list[dict]): The messages to send
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str | None, optional): UUID for the message to continue on. Defaults to None.
model (str | None, optional): The model to use. Defaults to None.
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields: Generator[dict, None, None] - The response from the chatbot
dict: {
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str, # "max_tokens" or "stop"
"end_turn": bool,
"recipient": str,
}
"""
if parent_id and not conversation_id:
raise t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.USER_ERROR,
)
if conversation_id and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id or ""
if not conversation_id and not parent_id:
parent_id = str(uuid.uuid4())
if conversation_id and not parent_id:
if conversation_id not in self.conversation_mapping:
if self.lazy_loading:
log.debug(
"Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID",
conversation_id,
)
with contextlib.suppress(Exception):
history = self.get_msg_history(conversation_id)
self.conversation_mapping[conversation_id] = history[
"current_node"
]
else:
self.__map_conversations()
if conversation_id in self.conversation_mapping:
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "next",
"messages": messages,
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model
or self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
yield from self.__send_request(
data,
timeout=timeout,
auto_continue=auto_continue,
)
@logger(is_timed=True)
def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str = "",
model: str = "",
auto_continue: bool = False,
timeout: float = 360,
**kwargs,
) -> Generator[dict, None, None]:
"""Ask a question to the chatbot
Args:
prompt (str): The question
conversation_id (str, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str, optional): UUID for the message to continue on. Defaults to "".
model (str, optional): The model to use. Defaults to "".
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields: The response from the chatbot
dict: {
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str, # "max_tokens" or "stop"
"end_turn": bool,
"recipient": str,
}
"""
messages = [
{
"id": str(uuid.uuid4()),
"role": "user",
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
},
]
yield from self.post_messages(
messages,
conversation_id=conversation_id,
parent_id=parent_id,
model=model,
auto_continue=auto_continue,
timeout=timeout,
)
@logger(is_timed=True)
def continue_write(
self,
conversation_id: str | None = None,
parent_id: str = "",
model: str = "",
auto_continue: bool = False,
timeout: float = 360,
) -> Generator[dict, None, None]:
"""let the chatbot continue to write.
Args:
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str, optional): UUID for the message to continue on. Defaults to None.
model (str, optional): The model to use. Defaults to None.
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields:
dict: {
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str, # "max_tokens" or "stop"
"end_turn": bool,
"recipient": str,
}
"""
if parent_id and not conversation_id:
raise t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.USER_ERROR,
)
if conversation_id and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id or ""
if not conversation_id and not parent_id:
parent_id = str(uuid.uuid4())
if conversation_id and not parent_id:
if conversation_id not in self.conversation_mapping:
if self.lazy_loading:
log.debug(
"Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID",
conversation_id,
)
with contextlib.suppress(Exception):
history = self.get_msg_history(conversation_id)
self.conversation_mapping[conversation_id] = history[
"current_node"
]
else:
log.debug(
f"Conversation ID {conversation_id} not found in conversation mapping, mapping conversations",
)
self.__map_conversations()
if conversation_id in self.conversation_mapping:
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "continue",
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model
or self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
yield from self.__send_request(
data,
timeout=timeout,
auto_continue=auto_continue,
)
@logger(is_timed=False)
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
@logger(is_timed=False)
def __check_response(self, response: requests.Response) -> None:
"""Make sure response is success
Args:
response (_type_): _description_
Raises:
Error: _description_
"""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as ex:
error = t.Error(
source="OpenAI",
message=response.text,
code=response.status_code,
)
raise error from ex
@logger(is_timed=True)
def get_conversations(
self,
offset: int = 0,
limit: int = 20,
encoding: str | None = None,
) -> list:
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{self.base_url}conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
data = json.loads(response.text)
return data["items"]
@logger(is_timed=True)
def get_msg_history(self, convo_id: str, encoding: str | None = None) -> list:
"""
Get message history
:param id: UUID of conversation
:param encoding: String
"""
url = f"{self.base_url}conversation/{convo_id}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
return json.loads(response.text)
@logger(is_timed=True)
def gen_title(self, convo_id: str, message_id: str) -> str:
"""
Generate title for conversation
"""
response = self.session.post(
f"{self.base_url}conversation/gen_title/{convo_id}",
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
return response.json().get("title", "Error generating title")
@logger(is_timed=True)
def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = f"{self.base_url}conversation/{convo_id}"
response = self.session.patch(url, data=json.dumps({"title": title}))
self.__check_response(response)
@logger(is_timed=True)
def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param id: UUID of conversation
"""
url = f"{self.base_url}conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=True)
def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{self.base_url}conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=False)
def __map_conversations(self) -> None:
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
@logger(is_timed=False)
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
@logger(is_timed=False)
def rollback_conversation(self, num: int = 1) -> None:
"""
Rollback the conversation.
:param num: Integer. The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
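# Usage sketch (illustrative, not part of the original module): drive the synchronous
# Chatbot with a placeholder access token. Each yielded dict carries the message text
# accumulated so far, so the last chunk holds the complete reply.
def _example_sync_usage():
    bot = Chatbot(config={"access_token": "<access_token>"})
    reply = ""
    for data in bot.ask("Hello, world"):
        reply = data["message"]
    return reply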
class AsyncChatbot(Chatbot):
"""Async Chatbot class for ChatGPT"""
def __init__(
self,
config: dict,
conversation_id: str | None = None,
parent_id: str = "",
base_url: str = "",
) -> None:
"""
Same as Chatbot class, but with async methods.
"""
super().__init__(
config=config,
conversation_id=conversation_id,
parent_id=parent_id,
session_client=AsyncClient,
base_url=base_url,
)
async def __send_request(
self,
data: dict,
auto_continue: bool = False,
timeout: float = 360,
) -> AsyncGenerator[dict, None]:
cid, pid = data["conversation_id"], data["parent_message_id"]
self.conversation_id_prev_queue.append(cid)
self.parent_id_prev_queue.append(pid)
message = ""
finish_details = None
response = None
async with self.session.stream(
method="POST",
url=f"{self.base_url}conversation",
data=json.dumps(data),
timeout=timeout,
) as response:
await self.__check_response(response)
async for line in response.aiter_lines():
if not line or line is None:
continue
if "data: " in line:
line = line[6:]
if "[DONE]" in line:
break
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise ValueError(f"Field missing. Details: {str(line)}")
message: str = line["message"]["content"]["parts"][0]
cid = line["conversation_id"]
pid = line["message"]["id"]
metadata = line["message"].get("metadata", {})
model = metadata.get("model_slug", None)
finish_details = metadata.get("finish_details", {"type": None})["type"]
yield {
"message": message,
"conversation_id": cid,
"parent_id": pid,
"model": model,
"finish_details": finish_details,
"end_turn": line["message"].get("end_turn", True),
"recipient": line["message"].get("recipient", "all"),
}
self.conversation_mapping[cid] = pid
if pid:
self.parent_id = pid
if cid:
self.conversation_id = cid
if not (auto_continue and finish_details == "max_tokens"):
return
async for msg in self.continue_write(
conversation_id=cid,
auto_continue=False,
timeout=timeout,
):
msg["message"] = message + msg["message"]
yield msg
async def post_messages(
self,
messages: list[dict],
conversation_id: str | None = None,
parent_id: str = "",
model: str = "",
auto_continue: bool = False,
timeout: int = 360,
) -> AsyncGenerator[dict, None]:
"""Post messages to the chatbot
Args:
messages (list[dict]): the messages to post
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str, optional): UUID for the message to continue on. Defaults to "".
model (str, optional): The model to use. Defaults to "".
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields:
AsyncGenerator[dict, None]: The response from the chatbot
{
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str,
"end_turn": bool,
"recipient": str,
}
"""
if parent_id and not conversation_id:
error = t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if conversation_id and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id or ""
if not conversation_id and not parent_id:
parent_id = str(uuid.uuid4())
if conversation_id and not parent_id:
if conversation_id not in self.conversation_mapping:
await self.__map_conversations()
if conversation_id in self.conversation_mapping:
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "next",
"messages": messages,
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model
or self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
async for msg in self.__send_request(
data=data,
auto_continue=auto_continue,
timeout=timeout,
):
yield msg
async def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str = "",
model: str = "",
auto_continue: bool = False,
timeout: int = 360,
) -> AsyncGenerator[dict, None]:
"""Ask a question to the chatbot
Args:
prompt (str): The question to ask
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str, optional): UUID for the message to continue on. Defaults to "".
model (str, optional): The model to use. Defaults to "".
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields:
AsyncGenerator[dict, None]: The response from the chatbot
{
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str,
"end_turn": bool,
"recipient": str,
}
"""
messages = [
{
"id": str(uuid.uuid4()),
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
},
]
async for msg in self.post_messages(
messages=messages,
conversation_id=conversation_id,
parent_id=parent_id,
model=model,
auto_continue=auto_continue,
timeout=timeout,
):
yield msg
async def continue_write(
self,
conversation_id: str | None = None,
parent_id: str = "",
model: str = "",
auto_continue: bool = False,
timeout: float = 360,
) -> AsyncGenerator[dict, None]:
"""let the chatbot continue to write
Args:
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str, optional): UUID for the message to continue on. Defaults to None.
model (str, optional): Model to use. Defaults to None.
auto_continue (bool, optional): Whether to continue writing automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields:
AsyncGenerator[dict, None]: The response from the chatbot
{
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str,
"end_turn": bool,
"recipient": str,
}
"""
if parent_id and not conversation_id:
error = t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if conversation_id and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id or ""
if not conversation_id and not parent_id:
parent_id = str(uuid.uuid4())
if conversation_id and not parent_id:
if conversation_id not in self.conversation_mapping:
await self.__map_conversations()
if conversation_id in self.conversation_mapping:
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "continue",
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model
or self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
async for msg in self.__send_request(
data=data,
auto_continue=auto_continue,
timeout=timeout,
):
yield msg
async def get_conversations(self, offset: int = 0, limit: int = 20) -> list:
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{self.base_url}conversations?offset={offset}&limit={limit}"
response = await self.session.get(url)
await self.__check_response(response)
data = json.loads(response.text)
return data["items"]
async def get_msg_history(
self,
convo_id: str,
encoding: str | None = "utf-8",
) -> dict:
"""
Get message history
:param id: UUID of conversation
"""
url = f"{self.base_url}conversation/{convo_id}"
response = await self.session.get(url)
if encoding is not None:
response.encoding = encoding
await self.__check_response(response)
return json.loads(response.text)
async def gen_title(self, convo_id: str, message_id: str) -> None:
"""
Generate title for conversation
"""
url = f"{self.base_url}conversation/gen_title/{convo_id}"
response = await self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
await self.__check_response(response)
async def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param convo_id: UUID of conversation
:param title: String
"""
url = f"{self.base_url}conversation/{convo_id}"
response = await self.session.patch(url, data=f'{{"title": "{title}"}}')
await self.__check_response(response)
async def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param convo_id: UUID of conversation
"""
url = f"{self.base_url}conversation/{convo_id}"
response = await self.session.patch(url, data='{"is_visible": false}')
await self.__check_response(response)
async def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{self.base_url}conversations"
response = await self.session.patch(url, data='{"is_visible": false}')
await self.__check_response(response)
async def __map_conversations(self) -> None:
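        # Map each conversation id to its current leaf node so parent ids can be resolved without extra requests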
conversations = await self.get_conversations()
histories = [await self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
async def __check_response(self, response: httpx.Response) -> None:
        # Use the built-in HTTP status check and re-raise failures as a typed error
try:
response.raise_for_status()
except httpx.HTTPStatusError as ex:
await response.aread()
error = t.Error(
source="OpenAI",
message=response.text,
code=response.status_code,
)
raise error from ex
get_input = logger(is_timed=False)(get_input)
@logger(is_timed=False)
def configure() -> dict:
"""
Looks for a config file in the following locations:
"""
config_files: list[Path] = [Path("config.json")]
if xdg_config_home := getenv("XDG_CONFIG_HOME"):
config_files.append(Path(xdg_config_home, "revChatGPT/config.json"))
if user_home := getenv("HOME"):
config_files.append(Path(user_home, ".config/revChatGPT/config.json"))
if windows_home := getenv("HOMEPATH"):
config_files.append(Path(f"{windows_home}/.config/revChatGPT/config.json"))
if config_file := next((f for f in config_files if f.exists()), None):
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise FileNotFoundError("No config file found.")
return config
@logger(is_timed=False)
def main(config: dict) -> NoReturn:
"""
Main function for the chatGPT program.
"""
chatbot = Chatbot(
config,
conversation_id=config.get("conversation_id"),
parent_id=config.get("parent_id"),
)
plugins: dict[str, Recipient] = {}
chatbot.recipients["python"] = PythonRecipient
def handle_commands(command: str) -> bool:
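        # Dispatch "!" commands typed at the prompt; returns False when the input is not a known command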
if command == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!plugins - Show the current plugins
        !switch x - Switch to plugin x. Need to reset the conversation to activate the plugin.
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!setconversation - Changes the conversation
!exit - Exit this program
""",
)
elif command == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
elif command == "!config":
print(json.dumps(chatbot.config, indent=4))
elif command.startswith("!rollback"):
try:
rollback = int(command.split(" ")[1])
except IndexError:
logging.exception(
"No number specified, rolling back 1 message",
stack_info=True,
)
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
elif command.startswith("!setconversation"):
try:
chatbot.conversation_id = chatbot.config[
"conversation_id"
] = command.split(" ")[1]
print("Conversation has been changed")
except IndexError:
log.exception(
"Please include conversation UUID in command",
stack_info=True,
)
print("Please include conversation UUID in command")
elif command.startswith("!continue"):
print()
print(f"{bcolors.OKGREEN + bcolors.BOLD}Chatbot: {bcolors.ENDC}")
prev_text = ""
for data in chatbot.continue_write():
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print(bcolors.ENDC)
print()
elif command == "!plugins":
print("Plugins:")
for plugin, docs in chatbot.recipients.available_recipients.items():
print(" [x] " if plugin in plugins else " [ ] ", plugin, ": ", docs)
print()
elif command.startswith("!switch"):
try:
plugin = command.split(" ")[1]
if plugin in plugins:
del plugins[plugin]
else:
plugins[plugin] = chatbot.recipients[plugin]()
print(
f"Plugin {plugin} has been "
+ ("enabled" if plugin in plugins else "disabled"),
)
print()
except IndexError:
log.exception("Please include plugin name in command")
print("Please include plugin name in command")
elif command == "!exit":
if isinstance(chatbot.session, httpx.AsyncClient):
chatbot.session.aclose()
exit()
else:
return False
return True
session = create_session()
completer = create_completer(
[
"!help",
"!reset",
"!config",
"!rollback",
"!exit",
"!setconversation",
"!continue",
"!plugins",
"!switch",
],
)
print()
try:
msg = {}
result = {}
times = 0
while True:
if not msg:
times = 0
print(f"{bcolors.OKBLUE + bcolors.BOLD}You: {bcolors.ENDC}")
prompt = get_input(session=session, completer=completer)
if prompt.startswith("!") and handle_commands(prompt):
continue
if not chatbot.conversation_id and plugins:
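                # First message of a new conversation: prepend the docs of every enabled plugin to the prompt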
prompt = (
(
f"""You are ChatGPT.
Knowledge cutoff: 2021-09
Current date: {datetime.datetime.now().strftime("%Y-%m-%d")}
###Available Tools:
"""
+ ";".join(plugins)
+ "\n\n"
+ "\n\n".join([i.API_DOCS for i in plugins.values()])
)
+ "\n\n\n\n"
+ prompt
)
msg = {
"id": str(uuid.uuid4()),
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
}
else:
print(
f"{bcolors.OKCYAN + bcolors.BOLD}{result['recipient'] if result['recipient'] != 'user' else 'You'}: {bcolors.ENDC}",
)
print(msg["content"]["parts"][0])
print()
print(f"{bcolors.OKGREEN + bcolors.BOLD}Chatbot: {bcolors.ENDC}")
prev_text = ""
for data in chatbot.post_messages([msg], auto_continue=True):
result = data
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print(bcolors.ENDC)
print()
msg = {}
if not result.get("end_turn", True):
times += 1
if times >= 5:
continue
api = plugins.get(result["recipient"], None)
if not api:
msg = {
"id": str(uuid.uuid4()),
"author": {"role": "user"},
"content": {
"content_type": "text",
"parts": [f"Error: No plugin {result['recipient']} found"],
},
}
continue
msg = api.process(result)
except (KeyboardInterrupt, EOFError):
exit()
except Exception as exc:
error = t.CLIError("command line program unknown error")
raise error from exc
if __name__ == "__main__":
print(
f"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
Version: {__version__}
""",
)
print("Type '!help' to show a full list of commands")
print(
f"{bcolors.BOLD}{bcolors.WARNING}Press Esc followed by Enter or Alt+Enter to send a message.{bcolors.ENDC}",
)
main(configure())
| [
"\n\n",
"{'content_type': 'text', 'parts': ['Error: No plugin PLACEHOLDER found']}",
"text",
"\n\n\n\n",
"content_type"
] |
2024-01-10 | eliotthergat/qcm | functions~writer.py | import os
import openai
import streamlit as st
from dotenv import load_dotenv
import time
load_dotenv()
prompt = "Tu es un rédacteur de QCM pour la première année de médecine expert. Tu rédiges des QCM depuis de nombreuses années et tu sais parfaitement reformuler des annales de QCM pour formuler de nouveaux QCM. Pour rédiger de nouveaux QCM il existe plusieurs méthodes pour créer de fausses propositions, dont notamment :\n - Les fausses négations\n - Les inversions de terme comme mitose/meiose, altérer/modifier\n - Les paronymes\n - Les mauvaises données chiffrées\n - Les propositions incohérentes\n - Les propositions fantaisistes\n - Les illogismes\n - Les anachronismes\n Ta tâche est maintenant de rédiger de 5 nouveaux QCMs à partir des annales données. Ne fais pas de hors sujets. N’invente pas de notion, n’utilise pas de notions non données dans les annales. Sois précis. Utilise le ton de rédaction utilisé dans les annales données. Ne te répète pas entre les différentes propositions. Donne une correction pour chaque QCM à chaque item faux. Chaque QCM doit avoir entre 1 et 5 réponses justes. Structure ta réponse au format markdown. Ne donne pas de numérotation de QCM (type (Q6, 21-22) ou QCM X)."
def writer(annales):
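    # Call the chat completion with retries, accumulating token usage in the Streamlit session state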
for attempt in range(st.session_state["max_retries"]):
try:
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=st.session_state.get("TEMPERATURE"),
max_tokens=st.session_state.get("MAX_TOKENS"),
top_p=1,
frequency_penalty=st.session_state.get("FREQUENCY_PENALTY"),
presence_penalty=st.session_state.get("PRESENCE_PENALTY"),
messages=[{"role": "system", "content": prompt},
{"role": "user", "content": "[Annales :]\n" + annales }]
)
st.session_state["total_tokens"] = st.session_state["total_tokens"] + response["usage"]["total_tokens"]
st.session_state["completion_tokens"] = st.session_state["completion_tokens"] + response["usage"]['completion_tokens']
st.session_state["prompt_tokens"] = st.session_state["prompt_tokens"] + response["usage"]['prompt_tokens']
            return response["choices"][0]["message"]["content"]
except openai.error.Timeout as e:
            if attempt < st.session_state["max_retries"] - 1:  # don't wait after the last attempt
st.write(f"OpenAI API request timed out: {e}, retrying...")
time.sleep(st.session_state["wait_time"])
else:
st.write("Max retries reached. Aborting.")
st.session_state["error"] = 1
except openai.error.APIError as e:
            if attempt < st.session_state["max_retries"] - 1:  # don't wait after the last attempt
st.write(f"OpenAI API returned an API Error: {e}, retrying...")
time.sleep(st.session_state["wait_time"])
else:
st.write("Max retries reached. Aborting.")
st.session_state["error"] = 1
except openai.error.APIConnectionError as e:
            if attempt < st.session_state["max_retries"] - 1:  # don't wait after the last attempt
st.write(f"OpenAI API request failed to connect: {e}, retrying...")
time.sleep(st.session_state["wait_time"])
else:
st.write("Max retries reached. Aborting.")
st.session_state["error"] = 1
except openai.error.InvalidRequestError as e:
            if attempt < st.session_state["max_retries"] - 1:  # don't wait after the last attempt
st.write(f"OpenAI API request was invalid: {e}, retrying...")
time.sleep(st.session_state["wait_time"])
else:
st.write("Max retries reached. Aborting.")
st.session_state["error"] = 1
except openai.error.AuthenticationError as e:
st.write(f"OpenAI API request was not authorized: {e}, retrying...")
st.write("Please change your OpenAI key.")
st.session_state["error"] = 1
pass
except openai.error.PermissionError as e:
            if attempt < st.session_state["max_retries"] - 1:  # don't wait after the last attempt
st.write(f"OpenAI API request was not permitted: {e}, retrying...")
time.sleep(st.session_state["wait_time"])
else:
st.write("Max retries reached. Aborting.")
st.session_state["error"] = 1
except openai.error.RateLimitError as e:
            if attempt < st.session_state["max_retries"] - 1:  # don't wait after the last attempt
st.write(f"OpenAI API request exceeded rate limit: {e}, retrying...")
time.sleep(st.session_state["wait_time"])
else:
st.write("Max retries reached. Aborting.")
st.session_state["error"] = 1 | [
"Tu es un rédacteur de QCM pour la première année de médecine expert. Tu rédiges des QCM depuis de nombreuses années et tu sais parfaitement reformuler des annales de QCM pour formuler de nouveaux QCM. Pour rédiger de nouveaux QCM il existe plusieurs méthodes pour créer de fausses propositions, dont notamment :\n - Les fausses négations\n - Les inversions de terme comme mitose/meiose, altérer/modifier\n - Les paronymes\n - Les mauvaises données chiffrées\n - Les propositions incohérentes\n - Les propositions fantaisistes\n - Les illogismes\n - Les anachronismes\n Ta tâche est maintenant de rédiger de 5 nouveaux QCMs à partir des annales données. Ne fais pas de hors sujets. N’invente pas de notion, n’utilise pas de notions non données dans les annales. Sois précis. Utilise le ton de rédaction utilisé dans les annales données. Ne te répète pas entre les différentes propositions. Donne une correction pour chaque QCM à chaque item faux. Chaque QCM doit avoir entre 1 et 5 réponses justes. Structure ta réponse au format markdown. Ne donne pas de numérotation de QCM (type (Q6, 21-22) ou QCM X).",
"[Annales :]\nPLACEHOLDER"
] |
2024-01-10 | eliotthergat/qcm | components~sidebar.py | import os
import openai
import streamlit as st
from dotenv import load_dotenv
load_dotenv()
def sidebar():
with st.sidebar:
st.markdown(
"## Comment fonctionne Khontenu ?\n"
"1. 🔑 Entrez une clé OpenAI \n"
"2. 🏴☠️ Choisissez les annales sourcces \n"
"3. 🖊️ Lancez la rédaction \n"
)
api_key_input = st.text_input(
"OpenAI API Key",
type="password",
placeholder="Collez votre clé OpenAI ici",
help="Nécessaire pour utiliser l'API",
value=os.environ.get("OPENAI_API_KEY", None)
or st.session_state.get("OPENAI_API_KEY", ""),
)
st.session_state["OPENAI_API_KEY"] = api_key_input
st.markdown("---")
st.markdown("# Paramètres")
max_tokens = st.slider("Longueur maximale (`max_tokens`):", min_value=1, max_value=8000, value=st.session_state.get("MAX_TOKENS", 4000), step=25, help="Nombre maximum de tokens à utiliser")
st.session_state["MAX_TOKENS"] = max_tokens
st.markdown("## (Ne pas toucher)")
temperature = st.slider("Température (`randomness`):", min_value=0.0, max_value=2.0, value=st.session_state.get("TEMPERATURE", 1.0), step=0.1, help="###")
st.session_state["TEMPERATURE"] = temperature
presence_penalty = st.slider("Pénalité de présence (`presence_penalty`):", min_value=0.0, max_value=2.0, value=st.session_state.get("PRESENCE_PENALTY", 0.0), step=0.01, help="###")
st.session_state["PRESENCE_PENALTY"] = presence_penalty
frequency_penalty = st.slider("Pénalité de fréquence (`frequency_penalty`):", min_value=0.0, max_value=2.0, value=st.session_state.get("FREQUENCY_PENALTY", 0.0), step=0.01, help="###")
st.session_state["FREQUENCY_PENALTY"] = frequency_penalty
max_retries = st.slider("Nombre d'essais (`max_retries`):", min_value=1, max_value=5, value=st.session_state.get("max_retries", 3), step=1, help="Nombre de tentatives en cas d'erreur de l'API")
st.session_state["max_retries"] = max_retries
wait_time = st.slider("Temps d'attente (`wait_time`):", min_value=1, max_value=20, value=st.session_state.get("wait_time", 5), step=1, help="Attente en secondes avant un nouvel appel API")
st.session_state["wait_time"] = wait_time
st.markdown("---")
st.markdown("# À propos")
url = "https://khlinic.fr"
st.markdown(
"📖 Tous les crédits appartiennent à [Khlinic](%s)." % url
)
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
| [] |
2024-01-10 | eliotthergat/qcm | home.py | import os
import openai
import streamlit as st
import requests
from time import perf_counter
from streamlit_pills import pills
from components.sidebar import sidebar
from functions.writer import writer
from PIL import Image
image = Image.open("assets/favicon.png")
st.set_page_config(
page_title="Khontenu",
page_icon=image,
layout="wide",
menu_items={
'Get help': 'mailto:[email protected]'
}
)
st.header("✅ Khontenu pour les QCMs")
st.markdown("---")
if "shared" not in st.session_state:
st.session_state["shared"] = True
sidebar()
openai.api_key = st.session_state.get("OPENAI_API_KEY")
st.markdown("#### v0.1 du prompt")
st.markdown("Modifications attendues : moins de notions non données dans le cours, meilleures corrections")
with st.expander("Contenu des annales", expanded=False):
annale = st.text_area("Annales", placeholder="Une série de 6 à 10 QCMs d'annales")
col1, col2, col3 = st.columns(3)
submit = col3.button("Rédiger ✍🏻", use_container_width=1)
if submit:
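    # Reset the token counters so the displayed cost only reflects this run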
st.session_state["total_tokens"] = 0
st.session_state["completion_tokens"] = 0
st.session_state["prompt_tokens"] = 0
st.session_state["error"] = 0
with st.spinner("Requête en cours..."):
ts_start = perf_counter()
if st.session_state["error"] == 0:
final_text = writer(annale)
st.write(final_text)
ts_end = perf_counter()
st.info(f" {round(((ts_end - ts_start)/60), 3)} minutes d'exécution")
cost = st.session_state["prompt_tokens"] * 0.00003 + st.session_state["completion_tokens"] * 0.00006
st.write("Coût de l'article : " + str(cost) + " $")
col1, col2, col3 = st.columns([2, 2,1])
rewrite = col3.button("Réécrire ✍🏻", use_container_width=1)
col1, col2, col3 = st.columns([2, 2,1])
col3.download_button(
label="Télécharger 💾",
data=final_text,
file_name='qcm.txt',
mime='text/plain',
)
| [] |
2024-01-10 | ystemsrx/QR-Code-Generator | QR%20Code%20Generator%20with%20DALLE3.py | import tkinter as tk
from tkinter import messagebox, colorchooser, filedialog
import qrcode
from PIL import Image, ImageTk
from openai import OpenAI
import requests
client = OpenAI()
def add_logo_to_qr(qr_code, logo_path, logo_size=(50, 50)):
# Load logo and resize
logo = Image.open(logo_path)
logo = logo.resize(logo_size, Image.Resampling.LANCZOS)
# Calculate position to place logo
qr_size = qr_code.size
logo_position = ((qr_size[0] - logo_size[0]) // 2, (qr_size[1] - logo_size[1]) // 2)
# Embed logo into QR code
qr_with_logo = qr_code.copy()
qr_with_logo.paste(logo, logo_position, logo.convert('RGBA'))
return qr_with_logo
# Main window class
class QRCodeGenerator(tk.Tk):
def __init__(self):
super().__init__()
self.file_name = None
self.english_text = {
"language_label": "Language:",
"url_label": "URL:",
"bg_color_label": "Background Color",
"qr_color_label": "QR Code Color",
"preview_label": "Preview",
"save_button": "Save",
"edit_color_button": "Edit Color",
"cancel_button": "Cancel",
"chinese_rb": "Chinese (simplified)",
"english_rb": "English",
"quantity_label": "Quantity",
"logo_path": "Logo Path",
"select_logo": "Select Logo(Preview)",
"background_label": "AI Background (Preview)",
"generate_button": "Generate",
"remove_button": "Remove"
}
self.chinese_text = {
"language_label": "语言:",
"url_label": "网址:",
"bg_color_label": "背景颜色",
"qr_color_label": "二维码颜色",
"preview_label": "预览",
"save_button": "保存",
"edit_color_button": "编辑颜色",
"cancel_button": "取消",
"chinese_rb": "简体中文",
"english_rb": "英文",
"quantity_label": "数量",
"logo_path": "Logo路径",
"select_logo": "选择Logo(预览)",
"background_label": "AI背景 (预览)",
"generate_button": "生成",
"remove_button": "移除"
}
self.title('QR Code Generator')
# self.geometry('550x550')
# Set initial language
self.current_language = self.english_text
# Language Selection
self.language_var = tk.StringVar(value="English")
self.language_label = tk.Label(self, text=self.current_language["language_label"])
self.language_label.grid(row=0, column=0, sticky="w")
self.chinese_rb = tk.Radiobutton(self, text=self.current_language["chinese_rb"],
variable=self.language_var, value="Chinese",
command=self.switch_language)
self.chinese_rb.grid(row=0, column=1)
self.english_rb = tk.Radiobutton(self, text=self.current_language["english_rb"],
variable=self.language_var, value="English",
command=self.switch_language)
self.english_rb.grid(row=0, column=2)
# URL Entry
self.url_label = tk.Label(self, text=self.current_language["url_label"])
self.url_label.grid(row=1, column=0, sticky="w")
self.url_entry = tk.Entry(self)
self.url_entry.grid(row=1, column=1, columnspan=2, sticky="we")
self.url_entry.bind("<KeyRelease>", self.generate_preview)
# Color Selection
self.bg_color_var = tk.StringVar(value="white")
self.qr_color_var = tk.StringVar(value="black")
# Background Color Button
self.bg_color_label = tk.Label(self, text=self.current_language["bg_color_label"])
self.bg_color_label.grid(row=2, column=0, sticky="w")
self.bg_color_button = tk.Button(self, bg=self.bg_color_var.get(), width=2,
command=lambda: self.choose_color(self.bg_color_var))
self.bg_color_button.grid(row=2, column=1, sticky="we")
# QR Code Color Button
self.qr_color_label = tk.Label(self, text=self.current_language["qr_color_label"])
self.qr_color_label.grid(row=3, column=0, sticky="w")
self.qr_color_button = tk.Button(self, bg=self.qr_color_var.get(), width=2,
command=lambda: self.choose_color(self.qr_color_var))
self.qr_color_button.grid(row=3, column=1, sticky="we")
# Preview Label (placeholder for the QR code image)
self.preview_label = tk.Label(self, text=self.current_language["preview_label"])
self.preview_label.grid(row=4, column=0, columnspan=3)
# Save Button
self.save_button = tk.Button(self, text=self.current_language["save_button"], command=self.save_qr_code)
self.save_button.grid(row=5, column=1, sticky="we")
# Quantity Entry
self.quantity_label = tk.Label(self, text=self.current_language["quantity_label"])
self.quantity_label.grid(row=7, column=0, sticky="w")
self.quantity_entry = tk.Entry(self)
self.quantity_entry.grid(row=7, column=1, columnspan=2, sticky="we")
self.quantity_entry.insert(0, "1") # Default quantity
# Logo File Selection
self.logo_label = tk.Label(self, text=self.current_language["logo_path"])
self.logo_label.grid(row=6, column=0, sticky="w") # Adjust the row and column accordingly
self.logo_entry = tk.Entry(self)
self.logo_entry.grid(row=6, column=1)
self.logo_button = tk.Button(self, text=self.current_language["select_logo"], command=self.choose_logo)
self.logo_button.grid(row=6, column=2)
self.background_label = tk.Label(self, text=self.current_language["background_label"])
self.background_label.grid(row=8, column=0, sticky="w") # Adjust the row and column accordingly
self.background_entry = tk.Entry(self)
self.background_entry.grid(row=8, column=1)
self.generate_button = tk.Button(self, text=self.current_language["generate_button"],command=self.generate_button_clicked)
self.generate_button.grid(row=8, column=2)
self.remove_button = tk.Button(self, text=self.current_language["remove_button"],command=self.remove_button_clicked)
self.remove_button.grid(row=8, column=3)
def remove_button_clicked(self):
        # Clear the AI-generated background setting
        self.file_name = None  # drop the background file name
        self.generate_preview()  # regenerate the preview without a background
def generate_button_clicked(self):
self.background_content = self.background_entry.get()
self.generate_button.config(state='disabled', text='Generating')
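        # Ask DALL-E 3 for a low-saturation background image based on the user's text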
response = client.images.generate(
model="dall-e-3",
prompt=f"Generate a background image with low color concentration, the content is: {self.background_content}",
size="1024x1024",
quality="standard",
n=1,
)
image_url = response.data[0].url
self.file_name = f"{self.background_content}.png"
response = requests.get(image_url)
if response.status_code == 200:
with open(self.file_name, 'wb') as file:
file.write(response.content)
self.generate_button.config(state='normal', text='Generate')
            self.generate_preview()  # refresh the preview with the new background
            messagebox.showinfo("Success", "Successfully created the background.")
else:
print("Error: Unable to download the image.")
messagebox.showinfo("Error", "Unable to download the image.")
def switch_language(self):
# Switch the language text and update labels/buttons
language = self.language_var.get()
self.current_language = self.chinese_text if language == "Chinese" else self.english_text
self.language_label.config(text=self.current_language["language_label"])
self.url_label.config(text=self.current_language["url_label"])
self.bg_color_label.config(text=self.current_language["bg_color_label"])
self.qr_color_label.config(text=self.current_language["qr_color_label"])
self.save_button.config(text=self.current_language["save_button"])
self.preview_label.config(text=self.current_language["preview_label"])
self.chinese_rb.config(text=self.current_language["chinese_rb"])
self.english_rb.config(text=self.current_language["english_rb"])
self.quantity_label.config(text=self.current_language["quantity_label"])
self.logo_label.config(text=self.current_language["logo_path"])
self.logo_button.config(text=self.current_language["select_logo"])
self.background_label.config(text=self.current_language["background_label"])
self.generate_button.config(text=self.current_language["generate_button"])
self.remove_button.config(text=self.current_language["remove_button"])
self.generate_preview()
def generate_preview(self, event=None):
data = self.url_entry.get()
if data:
try:
                # Build the QR code from the entered URL
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=10,
border=4,
)
qr.add_data(data)
qr.make(fit=True)
if self.file_name:
                    # A background image is set: composite a transparent QR code onto it
bg_img = Image.open(self.file_name).convert("RGBA")
bg_img = bg_img.resize((300, 300), Image.Resampling.LANCZOS)
qr_code_img = qr.make_image(fill_color=self.qr_color_var.get(), back_color="transparent").convert("RGBA")
position = ((bg_img.width - qr_code_img.width) // 2, (bg_img.height - qr_code_img.height) // 2)
bg_img.paste(qr_code_img, position, qr_code_img)
final_img = bg_img
else:
                    # No background image: render the QR code on a solid background color
qr_code_img = qr.make_image(fill_color=self.qr_color_var.get(), back_color=self.bg_color_var.get())
final_img = qr_code_img
                # Show the result in the GUI
self.qr_img = ImageTk.PhotoImage(final_img)
self.preview_label.config(image=self.qr_img, text="")
except IOError as e:
print("Error:", e)
self.preview_label.config(image='', text="Error in generating QR code.")
else:
self.preview_label.config(image='', text="No data for QR code.")
def choose_color(self, color_var):
# Open a color dialog and set the selected color to the button and variable
color = colorchooser.askcolor()[1]
if color:
color_var.set(color)
if color_var == self.bg_color_var:
self.bg_color_button.config(bg=color)
elif color_var == self.qr_color_var:
self.qr_color_button.config(bg=color)
# Generate a new preview with the updated colors
self.generate_preview()
def save_qr_code(self):
base_data = self.url_entry.get()
logo_path = self.logo_entry.get() # Get the logo path from the entry widget
if base_data:
try:
quantity = int(self.quantity_entry.get())
except ValueError:
messagebox.showerror("Error", "Invalid quantity.")
return
base_file_path = filedialog.asksaveasfilename(defaultextension=".png", filetypes=[("PNG files", "*.png")])
if not base_file_path:
# User canceled the save operation
return
for i in range(quantity):
unique_data = base_data + " " * i
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=10,
border=4,
)
qr.add_data(unique_data)
qr.make(fit=True)
qr_code_img = qr.make_image(fill_color=self.qr_color_var.get(), back_color="transparent").convert("RGBA")
if self.file_name:
                    # A background image exists: paste the QR code onto it
try:
bg_img = Image.open(self.file_name).convert("RGBA")
bg_img = bg_img.resize((300, 300), Image.Resampling.LANCZOS)
position = ((bg_img.width - qr_code_img.width) // 2, (bg_img.height - qr_code_img.height) // 2)
bg_img.paste(qr_code_img, position, qr_code_img)
final_img = bg_img
except IOError:
messagebox.showerror("Error", "Unable to load the background image.")
return
else:
                    # No background image: use the plain QR code
final_img = qr_code_img
if logo_path:
                    # Embed the logo before saving
final_img_with_logo = add_logo_to_qr(final_img, logo_path)
file_path = f"{base_file_path}_{i + 1}.png"
final_img_with_logo.save(file_path)
else:
                    # Save the QR code image as-is
file_path = f"{base_file_path}_{i + 1}.png"
final_img.save(file_path)
messagebox.showinfo("Success", f"{quantity} QR Codes saved successfully.")
else:
messagebox.showerror("Error", "No base data to encode.")
def choose_logo(self):
logo_path = filedialog.askopenfilename(filetypes=[("Image files", "*.png;*.jpg;*.jpeg")])
if logo_path:
self.logo_entry.delete(0, tk.END)
self.logo_entry.insert(0, logo_path)
# Run the application
if __name__ == "__main__":
app = QRCodeGenerator()
app.mainloop()
| [] |
2024-01-10 | Warlour/AIBot | src~command.py | import discord, os, requests, asyncio
from discord import app_commands
import openai
discord_token = os.environ["DISCORD_TOKEN_BOT1"]
models_path = os.environ["AIMODELSPATH"]
guildObject = discord.Object(id = 1084203391519051786)
class aclient(discord.Client):
def __init__(self):
intents = discord.Intents.default()
super().__init__(intents=intents)
self.synced = False
async def on_ready(self):
await self.wait_until_ready()
if not self.synced:
await tree.sync(guild = guildObject)
self.synced = True
print(f"We have logged in as {self.user}")
client = aclient()
tree = app_commands.CommandTree(client)
# Default values
pygmalionState = False
pygmalionCharacter = None
@tree.command(name = "test", description = "Used for testing purposes", guild = guildObject)
async def self(interaction: discord.Interaction):
await interaction.response.defer()
await interaction.followup.send(content="Testing", ephemeral=True, silent=True)
@tree.command(name = "deletelastmessages", description = "Delete the x last messages", guild = guildObject)
async def self(interaction: discord.Interaction, count:int = 1):
await interaction.response.defer()
    async for message in interaction.channel.history(limit=count+1):
        if message.author.id != client.user.id:
            await message.delete()
    await interaction.followup.send(content=f"Deleted last {count} messages.", ephemeral=True, silent=True)
@tree.command(name = "clear", description = "Clear the current channel", guild = guildObject)
async def self(interaction: discord.Interaction):
await interaction.response.defer()
if interaction.user.id == 152917625700089856:
await interaction.channel.purge()
else:
interaction.followup.send(content="You do not have permissions to do this!", delete_after=5, ephemeral=True, silent=True)
'''Custom'''
from logic import *
'''AI general'''
device = 'cuda'
import torch
'''Stable-diffusion | Image generation'''
from diffusers import StableDiffusionPipeline
from diffusers import logging as difflog
from transformers import logging as translog
translog.set_verbosity_error()
difflog.set_verbosity_error()
'''OpenJourney'''
@tree.command(name = "openjhelp", description="Get help with OpenJourney", guild = guildObject)
async def self(interaction: discord.Interaction):
await interaction.response.defer()
message = "**Prompt:** the text that the model with generate an image with\n"
message += "**Negative prompt:** the text that the model with avoid generating an image with\n"
message += "**Count:** amount of images to generate\n"
message += "**Seed:** the seed to use when generating the image\n"
message += "**Guidance scale:** how similar the image is to the prompt\n"
message += "**Steps:** More steps = more quality and time to generate\n"
message += "**Width and height:** image dimensions\n"
await interaction.followup.send(message, ephemeral=True, silent=True)
@tree.command(name = "openjourney", description="Generate text to image using OpenJourney", guild = guildObject)
async def self(interaction: discord.Interaction, prompt:str, negative_prompt:str = None, guidance_scale:float = 7.5, count:int = 1, seed:int = None, steps:int = 50, width:int = 512, height:int = 512):
await interaction.response.defer()
    if width % 8 != 0 or height % 8 != 0:
await interaction.followup.send(content=f"Height and Width have to be divisible by 8 but are {height} and {width}\nFor height subtract: {height % 8} (change to {height-(height % 8)})\nFor width subtract: {width % 8} (change to {width-(width % 8)})", silent=True, ephemeral=True)
return
if count < 1 or count > 10:
await interaction.followup.send(content="I cannot generate less than 1 or more than 10 pictures!", ephemeral=True, silent=True)
return
    if not prompt:
        await interaction.followup.send(content="No prompt given", ephemeral=True, silent=True)
        return
files = []
for i in range(count):
print(f"Generating image {i+1} of {count}")
try:
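            # Load the local OpenJourney weights and seed the generator so each image is reproducible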
model_id = models_path+"/openjourney"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(device)
generator = torch.Generator(device)
generator = generator.manual_seed(seed+i) if seed else generator.manual_seed(generator.seed()+i)
outputtext = f"**Text prompt:** {prompt}\n"
outputtext += f"**Negative text prompt:** {negative_prompt}\n"
outputtext += f"**Count:** {count}\n"
outputtext += f"**Seed:** {generator.initial_seed()}\n"
outputtext += f"**Guidance scale:** {guidance_scale}\n"
outputtext += f"**Steps:** {steps}\n"
outputtext += f"**Size:** {width}x{height}\n"
filename = f"{generator.initial_seed()}_{guidance_scale}-{steps}.png"
result = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
guidance_scale=guidance_scale,
num_inference_steps=steps,
width=width,
height=height,
generator=generator
)
for im, image in enumerate(result.images):
# If NSFW Detected
if result.nsfw_content_detected[im] == True:
outputtext += f"NSFW detected on image {im + 1} of {count}\n"
name = f"{im+1}_{filename}"
image.save(name, 'PNG')
files.append(discord.File(fp=name, description=f"Prompt: {prompt}\nNegative prompt: {negative_prompt}"))
except RuntimeError as e:
            if 'CUDA out of memory' in str(e):
outputtext += f"Out of memory: try another prompt"
await interaction.followup.send(content=outputtext, files=files, silent=True)
for file in files:
os.remove(file.filename)
@tree.command(name = "openjourneywithincrease", description="Generate text to image using OpenJourney", guild = guildObject)
async def self(interaction: discord.Interaction, prompt:str, negative_prompt:str = None, increase_guidance_by:float = 2.0, guidance_start:float = 0.0, count:int = 1, seed:int = None, steps:int = 50, width:int = 512, height:int = 512, creategif:bool = False):
await interaction.response.defer()
    if width % 8 != 0 or height % 8 != 0:
await interaction.followup.send(content=f"Height and Width have to be divisible by 8 but are {height} and {width}\nFor height subtract: {height % 8} (change to {height-(height % 8)})\nFor width subtract: {width % 8} (change to {width-(width % 8)})", silent=True, ephemeral=True)
return
if increase_guidance_by <= 0.0 or increase_guidance_by > 10.0:
await interaction.followup.send(content="Guidance should not be lower or equal to zero. And it should not be higher than 10", ephemeral=True, silent=True)
return
if count > 10:
await interaction.followup.send(content="Discord can only send the first 10 images!", ephemeral=True, silent=True)
if count < 1:
await interaction.followup.send(content="I cannot generate less than 1 image!", ephemeral=True, silent=True)
return
if not prompt:
await interaction.followup.send(content="No prompt given", ephemeral=True, silent=True)
return
files = []
files2 = []
guidance_scale_list = []
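    # Pre-compute the guidance scale for each image: the start value plus a fixed increment per step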
for generation in range(count):
guidance_scale_list.append(increase_guidance_by*generation+guidance_start)
generator = torch.Generator(device)
if not seed:
seed = generator.seed()
outputtext = f"**Text prompt:** {prompt}\n"
outputtext += f"**Negative text prompt:** {negative_prompt}\n"
outputtext += f"**Seed:** {seed}\n"
outputtext += f"**Guidance scale start:** {guidance_start}\n"
outputtext += f"**Guidance scale increase:** {increase_guidance_by}\n"
outputtext += f"**Count:** {count}\n"
outputtext += f"**Steps:** {steps}\n"
outputtext += f"**Size:** {width}x{height}\n"
for i, guidance_scale in enumerate(guidance_scale_list):
try:
generator = generator.manual_seed(seed)
model_id = models_path+"/openjourney"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(device)
filename = f"{generator.initial_seed()}_{guidance_scale}-{steps}.png"
iter_filename = f"imagestoGIF/{i+1}_{filename}"
print(f"Generating: {filename}")
result = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
guidance_scale=guidance_scale,
num_inference_steps=steps,
width=width,
height=height,
generator=generator
)
for im, image in enumerate(result.images):
# If NSFW Detected
if result.nsfw_content_detected[im] == True:
outputtext += f"NSFW detected on image {i + 1} of {count}\n"
image.save(iter_filename, 'PNG')
if not (len(files) >= 10):
files.append(discord.File(fp=iter_filename, description=f"Prompt: {prompt}\nNegative prompt: {negative_prompt}"))
else:
files2.append(iter_filename)
except RuntimeError as e:
            if 'CUDA out of memory' in str(e):
outputtext += f"Out of memory: try another prompt"
break
except discord.app_commands.errors.CommandInvokeError as e:
print(e)
break
if files:
await interaction.followup.send(content=outputtext, files=files, silent=True)
# If error occured
else:
await interaction.followup.send(content=outputtext, silent=True, ephemeral=True)
if creategif and files:
output_gif = create_gif(find_image_paths(r'imagestoGIF'), r'imagestoGIF/output.gif', 1)
gif_file = discord.File(fp=output_gif, description=f"GIF of output images")
files.append(gif_file)
await interaction.followup.send(content='Created GIF from images', file=gif_file, silent=True)
for file in files:
print(f"Deleting: imagestoGIF/{file.filename}")
os.remove(f"imagestoGIF/{file.filename}")
for filename in files2:
print(f"Deleting: {filename}")
os.remove(f"{filename}")
from diffusers import StableDiffusionImg2ImgPipeline
@tree.command(name = "openjourneyimg", description="Generate image to image using OpenJourney", guild = guildObject)
async def self(interaction: discord.Interaction, file: discord.Attachment, prompt:str = "", negative_prompt:str = None, seed:int = None, guidance_scale:float = 7.5, steps:int = 50):
await interaction.response.defer()
if not file or not file.filename.endswith(('.png', '.jpg', '.webp', 'jpeg')):
await interaction.followup.send(content="Invalid file extension", ephemeral=True, silent=True)
return
r = requests.get(file.url)
with open(file.filename, 'wb') as f:
f.write(r.content)
filename = file.filename
files = []
files.append(discord.File(fp=filename, description="Prompt file"))
try:
model_id = models_path+"/openjourney"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(device)
generator = torch.Generator(device)
if not seed:
generator.seed()
outputtext = f"**Text prompt:** {prompt}\n"
outputtext += f"**Negative text prompt:** {negative_prompt}\n"
outputtext += f"**Seed:** {generator.initial_seed()}\n"
outputtext += f"**Guidance scale:** {guidance_scale}\n"
outputtext += f"**Steps:** {steps}\n"
if seed:
generator = generator.manual_seed(seed)
with Image.open(filename) as im:
result = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
guidance_scale=guidance_scale,
num_inference_steps=steps,
image=im,
generator=generator
)
for i, image in enumerate(result.images):
# If NSFW Detected
if result.nsfw_content_detected[i] == True:
outputtext += f"NSFW Detected on image\n"
name = f"{i+1}_{filename}"
image.save(name, 'PNG')
files.append(discord.File(fp=name, description=f"Image of {prompt}"))
except RuntimeError as e:
        if 'CUDA out of memory' in str(e):
outputtext += f"Out of memory: try another prompt"
await interaction.followup.send(content=outputtext, files=files, silent=True)
for file in files:
os.remove(file.filename)
'''Whisper | Transcription of audio and video'''
import whisper
import validators
from pytube import YouTube
# Attach audio file and output text
@tree.command(name = "whisper", description="Generate transcriptions and detect language using OpenAI's Whisper model", guild = guildObject)
async def self(interaction: discord.Interaction, file:discord.Attachment = None, url:str = None, transcribe:bool = True, prompt:str = "", detect:bool = False):
await interaction.response.defer()
if not transcribe and not detect:
await interaction.followup.send(content="No operation given; use transcribe and/or detect!", ephemeral=True, silent=True)
return
    if not file and not url:
        await interaction.followup.send(content="No file or url attached", ephemeral=True, silent=True)
        return
    if file and url:
        await interaction.followup.send(content="You can only add a file __or__ an url!", ephemeral=True, silent=True)
        return
    output = ""
    if file:
if not file.filename.endswith(('.mp3', '.mp4', '.mpeg', '.mpga', '.m4a', '.wav', '.webm')):
await interaction.followup.send(content="Invalid file extension", ephemeral=True, silent=True)
return
filename = file.filename
print(f"Downloading {filename}")
r = requests.get(file.url)
with open(filename, 'wb') as f:
f.write(r.content)
elif url:
if (validators.url(url)):
output = f"<{url}>\n"
filename = "yt_download.mp3"
YouTube(url).streams.filter(only_audio=True).first().download(filename=filename)
if not filename:
await interaction.followup.send(content="Could not find file, contact admin", ephemeral=True, silent=True)
return
print(f"Downloaded {filename}")
else:
await interaction.followup.send(content="Invalid url!", ephemeral=True, silent=True)
return
model_name = "medium"
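    # Load the Whisper model onto the GPU; language detection only inspects the first 30 seconds of audio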
model = whisper.load_model(model_name, device=device)
if detect:
audio = whisper.load_audio(filename)
audio = whisper.pad_or_trim(audio)
mel = whisper.log_mel_spectrogram(audio).to(model.device)
_, probs = model.detect_language(mel)
output += f"Detected language: {max(probs, key=probs.get)}"
if transcribe:
result = model.transcribe(filename, initial_prompt=prompt) if prompt else model.transcribe(filename)
if detect:
output += " | "
output += f"Transcribed {filename}"
        if result['text']:
            inputPrompt = discord.File(fp=filename)
            with open(f"transcription_{filename}.txt", "w") as f:
                f.write(result['text'].strip())
            outputfile = discord.File(fp=f"transcription_{filename}.txt")
            files = [inputPrompt, outputfile]
            await interaction.followup.send(content=output, files=files, silent=True)
            for file in files:
                os.remove(file.filename)
        else:
            await interaction.followup.send(content="Could not create a transcription", ephemeral=True, silent=True)
    elif detect:
        await interaction.followup.send(content=output, silent=True)
        os.remove(filename)
'''Clip | Guessing'''
import clip
import numpy as np
# Attach image and output text
@tree.command(name = "clip", description="Attach an image and possible guesses to make AI guess what is in image", guild = guildObject)
async def self(interaction: discord.Interaction, file:discord.Attachment, prompt:str):
await interaction.response.defer()
if not file:
await interaction.followup.send(content="No file attached!", ephemeral=True, silent=True)
return
if not file.filename.endswith(('.png', '.jpg', '.jpeg', '.webp')):
await interaction.followup.send(content="Invalid file extension", ephemeral=True, silent=True)
return
if not prompt:
await interaction.followup.send(content="No prompt given!", ephemeral=True, silent=True)
return
print(f"Downloading {file.filename}")
r = requests.get(file.url)
with open(file.filename, 'wb') as f:
f.write(r.content)
filename = file.filename
model_name = "ViT-B/32"
# Load model
model, preprocess = clip.load(model_name, device=device)
image = preprocess(Image.open(filename)).unsqueeze(0).to(device)
possibilities = prompt.split(", ")
textprob = clip.tokenize(possibilities).to(device)
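    # Score the image against every comma-separated guess and convert the logits to probabilities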
with torch.no_grad():
image_features = model.encode_image(image)
text_features = model.encode_text(textprob)
logits_per_image, logits_per_text = model(image, textprob)
probs = logits_per_image.softmax(dim=-1).cpu().numpy()
# Create list of percentages
probas = []
for item in probs[0]:
probas.append(float(np.format_float_positional(item, precision=4)))
# Create and sort list with possibilites and percentages combined
list_sorted = sorted(zip(possibilities, probas), key=lambda x: x[1], reverse=True)
print(list_sorted)
# Format list
text_list = []
for item in list_sorted:
text_list.append(f"{item[0]}: {item[1] * 100:.2f}%")
output = "\n".join(text_list)
print(text_list)
# Send output to discord
if output:
imagePrompt = discord.File(fp=filename)
with open(f"guess_{filename}.txt", "w") as f:
f.write(output)
outputfile = discord.File(fp=f"guess_{filename}.txt")
files = [imagePrompt, outputfile]
await interaction.followup.send(content="", files=files, silent=True)
# Remove files
for file in files:
os.remove(file.filename)
@tree.command(name = "chatgpt", description="Chat Completion with gpt-3.5-turbo", guild = guildObject)
async def self(interaction: discord.Interaction, prompt:str):
if prompt == 'Test':
openai.api_key = os.environ["OPENAI_API_KEY"]
responses = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
{"role": "user", "content": "Where was it played?"}
]
)
print(responses['choices'][0]['message']['content'])
from transformers import AutoTokenizer, AutoModelForCausalLM
@tree.command(name = "pygmalion", description="Conversational bot set state with character", guild = guildObject)
async def self(interaction: discord.Interaction, state:bool, character_name:str = 'Discord user', character_description:str = 'The average discord user, likes to be edgy, cringe and sometimes very offensive.'):
await interaction.response.defer()
if interaction.channel.id != 1091464570943574076:
channel = client.get_channel(1091464570943574076)
interaction.followup.send(content=f"Due to it being a conversational bot, this bot is only available in the {channel} channel.", ephemeral=True, silent=True)
return
global pygmalionState, pygmalionCharacter, pygmalionCharacterName
pygmalionState = state
pygmalionCharacter = character_description
pygmalionCharacterName = character_name
if not pygmalionState:
status = "Pygmalion is now disabled"
else:
status = f"{pygmalionCharacterName} is now **active** with the persona: {pygmalionCharacter}"
print(status)
await interaction.followup.send(status, silent=True)
# await asyncio.sleep(5)
if pygmalionState and pygmalionCharacterName and pygmalionCharacter:
global pygmaTokenizer, pygmaModel
model_id = models_path+"/pygmalion-350m"
pygmaTokenizer = AutoTokenizer.from_pretrained(model_id)
print('Set tokenizer')
pygmaModel = AutoModelForCausalLM.from_pretrained(model_id).to(torch.device("cuda:0"))
print('Set model (ready)')
@client.event
async def on_message(ctx: discord.Message):
if ctx.author.id == client.user.id:
return
if ctx.channel.id != 1091464570943574076:
return
if not pygmalionState:
await ctx.channel.send(content="Pygmalion is not active!", delete_after=3, silent=True)
return
if not pygmalionCharacter:
await ctx.channel.send(content="No character to play!", delete_after=3, silent=True)
return
try:
messages_list = []
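        # Rebuild the chat transcript from the channel history (returned newest first) to use as the model prompt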
async for message in ctx.channel.history(limit=200):
messages_list.append(f"{message.author.name}: {message.content}\n")
        input_message = messages_list.pop(0)  # newest entry is the user's latest message
messages = "".join(reversed(messages_list))
input_text = f"{pygmalionCharacterName}'s Persona: {pygmalionCharacter}\n<START>\n"
input_text += messages
input_text += f"{input_message}\n"
input_text += f"{pygmalionCharacterName}: "
print(input_text)
input_ids = pygmaTokenizer.encode(input_text, return_tensors='pt').to('cuda')
print("test")
output = pygmaModel.generate(input_ids, max_length=1000, do_sample=True, temperature=0.7).to(torch.device("cuda:0"))
output_text = pygmaTokenizer.decode(output[0], skip_special_tokens=True)
await ctx.channel.send(output_text, silent=True)
print(output_text)
except:
pass
client.run(discord_token) | [
"Where was it played?",
"You are a helpful assistant.",
"The Los Angeles Dodgers won the World Series in 2020.",
"Who won the world series in 2020?"
] |
2024-01-10 | LorisPolenz/semantIQ | backend~semantiq~gen_random_words.py | # script to generate a list of random words
import json
import os
import random
from copy import deepcopy
from typing import List
import re
import openai
from semantiq.embeddings import load_default_embeddings, cosine_similarity, embedding
WORDS = '''
cat, dog, house, car, book, tree, sun, moon, star, lamp, chair, table, pen, paper, pencil, computer,
phone, flower, butterfly, bird, fish, rock, river, ocean, mountain, road, cloud, grass, shoe, hat,
shirt, dress, pants, sock, glove, wallet, key, clock, spoon, fork, knife, plate, cup, bowl, towel,
soap, toothbrush, toothpaste, mirror, window, door, bed, pillow, blanket, picture, music, movie,
game, puzzle, paint, brush, camera, microphone, guitar, piano, drum, soap, milk, bread, egg, cheese,
apple, orange, banana, strawberry, lemon, lime, grape, cherry, cucumber, tomato, potato, carrot, lettuce,
onion, garlic, rice, pasta, pizza, burger, fries, ice cream, chocolate, candy, cookie, cake, sugar, salt, pepper,
innovation, discovery, adventure, laughter, kindness, creativity, harmony, tranquility, wisdom, ambition, freedom,
courage, integrity, passion, loyalty, honesty, resilience, compassion, generosity, optimism, empathy, tranquility,
discipline, dedication, sincerity, spontaneity, serenity, perseverance, patience, curiosity, gratitude, humility,
ambition, endurance, inspiration, perspective, integrity, determination, enthusiasm, courage, diversity, justice,
peace, vibrancy, faith, devotion, dignity, elegance, empathy, forgiveness, gratitude, hope, humility, joy, love,
motivation, optimism, pride, respect, trust, unity, wisdom, zest, balance, beauty, focus, growth, satisfaction,
success, change, discipline, gratitude, curiosity, determination, peace, tranquility, passion, victory, versatility,
vibrant, wonder, accomplishment, adaptability, assertiveness, authenticity, bravery, calmness, charity,
cheerfulness, commitment, compassion, confidence, cooperation, creativity, credibility, dynamism, efficiency,
empathy, flexibility, happiness, harmony, hope, independence, inspiration, integrity, joy, kindness, love,
loyalty, motivation, patience, persistence, positivity, resilience, respect, responsibility, simplicity, sincerity,
spontaneity, strength, trust, understanding, unity, wisdom, zeal, balance, innovation, respect, motivation, optimism,
gratitude, determination, humility, discipline, satisfaction, confidence, patience, courage, success, passion,
perseverance, focus, laughter, generosity, curiosity, ambition, tranquility, joy, creativity, harmony, wisdom,
freedom, integrity, loyalty, resilience, kindness, empathy, spontaneity, serenity, dignity, elegance, pride,
trust, accomplishment, adaptability, assertiveness, authenticity, bravery, calmness, charity, cheerfulness,
commitment, cooperation, dynamism, efficiency, flexibility, happiness, hope, independence, inspiration, love,
persistence, positivity, responsibility, simplicity, strength, understanding, unity, zeal,
table, chair, computer, book, pen, camera, phone, clock, lamp, window, door, carpet, bed, blanket, pillow, mirror,
television, radio, fridge, stove, oven, toaster, kettle, mug, plate, spoon, fork, knife, bowl, glass, bottle,
bag, backpack, purse, wallet, key, watch, bracelet, necklace, ring, shoe, sock, hat, coat, shirt, pants, dress,
skirt, sweater, glove, scarf, belt, umbrella, bicycle, car, truck, bus, boat, airplane, train, dog, cat, bird,
fish, horse, cow, sheep, pig, chicken, rabbit, lion, tiger, bear, elephant, monkey, giraffe, zebra, kangaroo,
dolphin, rose, tulip, daisy, sunflower, tree, bush, grass, leaf, fruit, vegetable, apple, banana, orange, strawberry,
blueberry, pineapple, grape, cherry, tomato, cucumber, carrot, potato, onion, bread, cheese, butter, egg, milk, juice,
water, tea, coffee, sugar, salt, pepper, rice, pasta, chocolate, cake, cookie, ice cream, pizza, sandwich, hamburger,
soup, salad, music, art, dance, sports, soccer, basketball, baseball, tennis, golf, swimming, movie, game, puzzle,
book, newspaper, magazine, photograph, painting, drawing, sculpture, map, ticket, money, coin, dollar, heart, star,
moon, sun, rainbow, cloud, rain, snow, wind, fire, river, lake, ocean, mountain, forest, desert, island, city, town,
village, street, park, garden, school, hospital, store, restaurant, hotel, library, museum, zoo, farm, beach, bridge,
tower, castle, book, dog, cat, flower, phone, house, tree, chair, bike, mountain, river, cloud, bed, pen, computer,
moon, sun, star, dress, camera, necklace, song, movie, game, coffee, tea, ocean, painting, hat, bookshelf, car, bus,
school, elephant, pizza, sandwich, guitar, violin, key, door, window, jewelry, wristwatch, butterfly, rainbow, cupcake,
umbrella, fire, snow, suitcase, heart, rose, diamond, kitchen, garden, forest, candle, blanket, pineapple, strawberry,
laptop, bread, mirror, soap, rain, spoon, beach, kite, museum, zoo, park, apple, baby, castle, bread, watch, photo,
refrigerator, sculpture, pillow, map, ring, candy, perfume, airplane, volcano, hammock, lipstick, turtle, socks,
shark, bracelet, dragon, spider, robot, dinosaur, ghost, cowboy, pirate, vampire, detective, astronaut, mermaid,
superhero, princess, alien, wizard, zombie, knight, fairy, soldier, ninja, scar, angel, cowboy, tiger, lion, eagle,
owl, parrot, dolphin, whale, kangaroo, bear, wolf, squirrel, rabbit, fox, giraffe, chameleon, panda, cheetah, zebra,
monkey, raccoon, leopard, peacock, flamingo, swan, pigeon, dove, sparrow, duck, turkey, chicken, goldfish, penguin,
octopus, lobster, jellyfish, bee, snail, ladybug, ant, cricket, grasshopper, earthworm, turtle, snake, frog, alligator,
crocodile, mosquito, bat, hedgehog, beaver, horse, donkey, deer, buffalo, hamster, guinea pig, mouse, rat, bat, skunk,
otter, seal, walrus, platypus, raccoon, porcupine, koala, armadillo, hummingbird, cardinal, bluebird, toucan,
woodpecker, ostrich, canary, budgie, falcon, hawk, dodo, rooster, pheasant,
adventure, mystery, creativity, passion, universe, dream, courage, harmony, wisdom, paradise, freedom, masterpiece,
destiny, fascination, magic, miracle, curiosity, sensation, rebellion, illusion, romance, treasure, eternity,
nostalgia, pleasure, fantasy, infinity, serendipity, euphoria, legend,
sunset, galaxy, melody, velvet, laughter, echo, whisper, illusion, silhouette, labyrinth, oasis, kaleidoscope,
euphoria, infinity, paradox, serendipity, hurricane, labyrinth, nostalgia, mirage, twilight, chimera, solstice,
autumn, symphony, horizon, passion, daydream, silhouette, wildfire, mountain, sunrise, river, laughter, galaxy,
forest, rainbow, melody, dream, adventure, mystery, passion, freedom, harmony, butterfly, wildfire, echo, infinity,
journey, twilight, whisper, horizon, solitude, miracle, poetry, masterpiece, eclipse, heartbeat, comet, silhouette,
moon, bicycle, rainbow, dragon, ocean, city, garden, volcano, mirror, elephant, castle, forest, robot, violin, pirate,
ghost, universe, tornado, diamond, eagle, pizza, warrior, jellyfish, skyscraper, galaxy, river, book, cactus,
butterfly, spaceship, waterfall, dinosaur, snowflake, wizard, zebra, ballet, chocolate, sphinx, treasure, festival,
compass, mermaid, sunflower, labyrinth, island, dream, mammoth, kangaroo, carnival, sunrise, honey, statue, gypsy,
desert, fairy, astronaut, labyrinth, compass, phoenix, avalanche, meadow, comet, ninja, hurricane, glacier, waterfall,
rainbow, lighthouse, crystal, dolphin, rhinoceros, cyborg, chocolate, skyscraper, diamond, rose, snowflake, daisy,
raccoon, parrot, sunflower, tarantula, tornado, cactus, unicorn, mammoth, warrior, dragon, garden, forest, castle,
ocean, universe, ghost, pirate, violin, robot, city, bicycle, moon, dolphin, zebra, avalanche, comet
'''
def get_all_words() -> List[str]:
# words deduplicated and split using a regexp
return list(set(re.split(r'\s*,\s*', WORDS.strip())))
SIZE_PER_GROUP = 3
def random_puzzle():
    # shuffle and take 2 * SIZE_PER_GROUP distinct words, split into the two groups
words = get_all_words()
random.shuffle(words)
return {
'groupPos': sorted(words[:SIZE_PER_GROUP], key=lambda x: len(x)),
'groupNeg': sorted(words[SIZE_PER_GROUP:SIZE_PER_GROUP*2], key=lambda x: len(x)),
}
def find_closest_words(puzzle):
# print which of the word from group one is closest to which of the words of group2
group_neg = puzzle['groupNeg']
group_pos = puzzle['groupPos']
most_similar = None
max_similarity = 0
for w1 in group_neg:
for w2 in group_pos:
sim = cosine_similarity(embedding(w1), embedding(w2))
# print(f'{w1} {w2} {sim}')
if sim > max_similarity:
max_similarity = sim
most_similar = (w1, w2)
return most_similar, max_similarity
def main():
random.seed(0)
i = 0
while i < 10000:
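        # Rejection sampling: only keep puzzles whose two groups contain no overly similar word pair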
puzzle = random_puzzle()
puzzle['id'] = i
most_similar, max_similarity = find_closest_words(puzzle)
if max_similarity > 0.82:
print(f'Skipping due to similarity {max_similarity} ({most_similar})')
else:
print(f'Writing puzzle #{i} {puzzle}')
with open(os.path.join(os.path.dirname(__file__), f'../puzzles/{i}.json'), 'w') as f:
json.dump(puzzle, f)
i += 1
if __name__ == '__main__':
main()
| [] |
2024-01-10 | LeoKwo/GuoGenius | upsert_doc~testQuery.py | import os
from dotenv import load_dotenv
import pinecone
import openai
from llama_index.vector_stores import PineconeVectorStore
from llama_index import GPTVectorStoreIndex, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
##################################################
# #
# This file tests pinecone connection and query. #
# #
##################################################
load_dotenv()
openai.api_key = os.getenv('api_key')
os.environ['PINECONE_API_KEY'] = os.getenv('pinecone_api_key')
os.environ['PINECONE_ENVIRONMENT'] = os.getenv('pinecone_env')
index_name = "ruikang-guo-knowledge-base"
pinecone.init(
api_key=os.environ['PINECONE_API_KEY'],
environment=os.environ['PINECONE_ENVIRONMENT']
)
pinecone_index = pinecone.Index(index_name)
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index,
)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
index = GPTVectorStoreIndex.from_vector_store(
vector_store=vector_store,
service_context=service_context
)
query_engine = index.as_query_engine()
res = query_engine.query("What can you tell me about ruikang guo's work at day and nite?")
print(res) | [] |
2024-01-10 | LeoKwo/GuoGenius | upsert_doc~upsert.py | from pathlib import Path
from llama_hub.file.unstructured import UnstructuredReader
from pathlib import Path
from llama_index import download_loader
from llama_index import SimpleDirectoryReader, VectorStoreIndex
from dotenv import load_dotenv
import os
from llama_index.node_parser import SimpleNodeParser
import pinecone
from llama_index.vector_stores import PineconeVectorStore
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
import openai
####################################################
# #
# This file upserts documents in data to pinecone. #
# #
####################################################
load_dotenv()
openai.api_key = os.getenv('api_key')
# find API key in console at app.pinecone.io
os.environ['PINECONE_API_KEY'] = os.getenv('pinecone_api_key')
# environment is found next to API key in the console
os.environ['PINECONE_ENVIRONMENT'] = os.getenv('pinecone_env')
# loader = UnstructuredReader()
# initialize connection to pinecone
pinecone.init(
api_key=os.environ['PINECONE_API_KEY'],
environment=os.environ['PINECONE_ENVIRONMENT']
)
# setup the index/query process, ie the embedding model (and completion if used)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
# Readers
PDFReader = download_loader("PDFReader")
MarkdownReader = download_loader("MarkdownReader")
# Load docs
def upsert_docs(input_dir: str, index_name: str):
print(f"Building from {input_dir} under index {index_name}...\n")
documents = SimpleDirectoryReader(input_dir=input_dir).load_data()
# create the index if it does not exist already
if index_name not in pinecone.list_indexes():
pinecone.create_index(
name=index_name,
dimension=1536,
metric='cosine'
)
# connect to the index
pineconeIndex = pinecone.Index(index_name)
vectorStore = PineconeVectorStore(
pinecone_index=pineconeIndex
)
# setup our storage (vector db)
storageContext = StorageContext.from_defaults(
vector_store=vectorStore
)
index = GPTVectorStoreIndex.from_documents(
documents=documents,
storage_context=storageContext,
service_context=service_context
)
print(f"Done building !\n")
upsert_docs(input_dir="upsert_doc/docs", index_name="ruikang-guo-knowledge-base")
| [] |
2024-01-10 | hakeemadli/vacation-itinerary-AI-planner | local~app_local.py | import openai
import os
from dotenv import load_dotenv
load_dotenv()
auth_key = os.getenv('auth_api_key')
openai.api_key= auth_key
with open('static/prompt.txt', 'r') as f:
prompt = f.read()
# input prompt
print('Enter number of days: ')
days = input()
print('Enter vacation destination: ')
destination = input()
input_prompt = ('List ' + days + ' days itinerary at ' + destination)
# Set the model and prompt
model_engine = 'text-davinci-003'
compiled_prompt = prompt + input_prompt
def text_completion():
# Generate a response
completion = openai.Completion.create(
engine=model_engine,
prompt=compiled_prompt,
max_tokens=500,
temperature=0.5,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop = ['\#']
)
result = completion.choices[0].text
return result
def textfile_separator():
    output = text_completion()
    output_formatted = output.replace('Day', '++Day')
    split_output = output_formatted.split('++')
    for day_block in split_output:
        split_days = day_block.split('\n\n')
        for paragraph in split_days:
            print(paragraph)
textfile_separator()
| [
"List PLACEHOLDERdays itinerary atPLACEHOLDER",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | JCaraballo113/pdf-chat | app~chat~memories~sql_memory.py | from langchain.memory import ConversationBufferMemory
from app.chat.models import ChatArgs
from app.chat.memories.histories.sql_history import SqlMessageHistory
def build_memory(chat_args: ChatArgs) -> ConversationBufferMemory:
return ConversationBufferMemory(
chat_memory=SqlMessageHistory(
conversation_id=chat_args.conversation_id),
return_messages=True,
memory_key="chat_history",
output_key="answer"
)
| [] |
2024-01-10 | JCaraballo113/pdf-chat | app~chat~memories~histories~sql_history.py | from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
from langchain.schema import BaseChatMessageHistory
from pydantic import BaseModel
from app.web.api import (
get_messages_by_conversation_id,
add_message_to_conversation,
)
class SqlMessageHistory(BaseChatMessageHistory, BaseModel):
conversation_id: str
@property
def messages(self):
return get_messages_by_conversation_id(self.conversation_id)
def add_message(self, message: HumanMessage | AIMessage | SystemMessage):
return add_message_to_conversation(conversation_id=self.conversation_id, role=message.type, content=message.content)
def clear(self) -> None:
pass
| [] |
2024-01-10 | JCaraballo113/pdf-chat | app~chat~memories~window_memory.py | from langchain.memory import ConversationBufferWindowMemory
from app.chat.memories.histories.sql_history import SqlMessageHistory
from app.chat.models import ChatArgs
def window_buffer_memory_builder(chat_args: ChatArgs) -> ConversationBufferWindowMemory:
return ConversationBufferWindowMemory(
chat_memory=SqlMessageHistory(
conversation_id=chat_args.conversation_id),
return_messages=True,
memory_key="chat_history",
output_key="answer",
k=2
)
| [] |
2024-01-10 | JCaraballo113/pdf-chat | app~chat~llms~chatopenai.py | from langchain.chat_models import ChatOpenAI
from app.chat.models import ChatArgs
from app.chat.vector_stores.pinecone import build_retriever
def build_llm(chat_args: ChatArgs, model_name: str) -> ChatOpenAI:
"""
:param chat_args: ChatArgs object containing
conversation_id, pdf_id, metadata, and streaming flag.
"""
return ChatOpenAI(streaming=chat_args.streaming, model=model_name)
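# Hedged usage sketch (not part of the original file): assuming ChatArgs
# exposes the fields named in the docstring above and that build_retriever
# accepts the same ChatArgs, wiring the pieces together might look like this.
#
#   chat_args = ChatArgs(conversation_id="abc", pdf_id="pdf-1",
#                        metadata={}, streaming=True)
#   llm = build_llm(chat_args, model_name="gpt-3.5-turbo")
#   retriever = build_retriever(chat_args)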
| [] |
2024-01-10 | nburn42/chopper | chopper.py | # Attempt to play Chopper Command using OpenAI library
# There are 2 versions of the game
#
# 1. RAM as input (ChopperCommand-ram-v0)
# RAM of Atari 2600 consists of 128 bytes
# AI nets score higher using this as input
# 2. Screen images as input (ChopperCommand-v0)
# RGB image, array of shape (210, 160, 3)
#
# Each action is repeatedly performed for k frames,
# with k being uniformly sampled from {2,3,4}
#
# It seems that the highest scores were made using DQN,
# but not many used policy gradient methods. I will
# attempt to use policy gradient.
# Import OpenAI gym and other needed libraries
import gym
import tensorflow as tf
import numpy as np
import random
# import math
import time
# def policy_gradient():
# with tf.variable_scope("policy"):
# def value_gradient():
# with tf.variable_scope("value"):
def cnn_model(x, bn_is_training):
# Batch Norm HyperParameters
# bn_is_training = True
bn_scale = True
# We will create the model for our CNN here
# Input layer takes in 104x80x3 = 25200
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, 104, 80, 3])
# print(x_image)
# Conv 3x3 box across 3 color channels into 32 features
with tf.name_scope('conv1'):
W_conv1 = weight_variable([3,3,3,32])
b_conv1 = bias_variable([32])
pre_bn_conv1 = conv2d(x_image, W_conv1) + b_conv1
post_bn_conv1 = tf.contrib.layers.batch_norm(pre_bn_conv1, center = True, scale = bn_scale, is_training = bn_is_training, scope = 'bn1')
h_conv1 = tf.nn.relu(post_bn_conv1)
# print(tf.shape(h_conv1))
# print(h_conv1) shows shape = (1,104,80,32)
# Not sure if doing this right, but adding a 2nd 3x3 filter...?
# W_conv1_2 = weight_variable([3,3,3,32])
# b_conv1_2 = bias_variable([32])
# pre_bn_conv1_2 = conv2d(x_image, W_conv1_2) + b_conv1_2
# post_bn_conv1_2 = tf.contrib.layers.batch_norm(pre_bn_conv1_2, center = True, scale = bn_scale, is_training = bn_is_training, scope = 'bn1_2')
# h_conv1_2 = tf.nn.relu(post_bn_conv1_2)
# Now, combine these two tensors? Should they be combined?
# Before or after maxpool?
# h_conv1_combined = tf.concat([h_conv1, h_conv1_2], axis_to_combine = 3)
# Max pool to half size (52x40)
with tf.name_scope('pool1'):
h_pool1 = max_pool_2x2(h_conv1)
# 2nd conv, 3x3 box from 32 to 64 features
with tf.name_scope('conv2'):
W_conv2 = weight_variable([3,3,32,64])
b_conv2 = bias_variable([64])
pre_bn_conv2 = conv2d(h_pool1, W_conv2) + b_conv2
post_bn_conv2 = tf.contrib.layers.batch_norm(pre_bn_conv2, center = True, scale = bn_scale, is_training = bn_is_training, scope = 'bn2')
h_conv2 = tf.nn.relu(post_bn_conv2)
# 2nd max pool, half size again (26x20)
with tf.name_scope('pool2'):
h_pool2 = max_pool_2x2(h_conv2)
# 3rd conv, 3x3 box from 64 to 128 features
with tf.name_scope('conv3'):
W_conv3 = weight_variable([3,3,64,128])
b_conv3 = bias_variable([128])
pre_bn_conv3 = conv2d(h_pool2, W_conv3) + b_conv3
post_bn_conv3 = tf.contrib.layers.batch_norm(pre_bn_conv3, center = True, scale = bn_scale, is_training = bn_is_training, scope = 'bn3')
h_conv3 = tf.nn.relu(post_bn_conv3)
# 3rd max pool, half size last time (13x10)
with tf.name_scope('pool3'):
h_pool3 = max_pool_2x2(h_conv3)
print(h_pool3)
# First fully connected layer, 13*10*128 = 16640 to 512 fully connected
with tf.name_scope('fc1'):
W_fc1 = weight_variable([13*10*128, 512])
b_fc1 = bias_variable([512])
# Flatten max pool to enter fully connected layer
h_pool3_flat = tf.reshape(h_pool3, [-1, 13*10*128])
h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Fully connected from 512 to 6 (1 for each action possible)
with tf.name_scope('fc2'):
W_fc2 = weight_variable([512, 6])
b_fc2 = bias_variable([6])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return y_conv, keep_prob
def conv2d(x, W):
# Return full stride 2d conv
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
def max_pool_2x2(x):
# 2x2 max pool
return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
def weight_variable(shape):
# initial = tf.truncated_normal(shape, stddev=0.1)
# USE XAVIER INITIALIZATION
initial = tf.contrib.layers.xavier_initializer()
return tf.Variable(initial(shape))
def bias_variable(shape):
# initial = tf.constant(0.1, shape=shape)
# USE XAVIER INITIALIZATION
initial = tf.contrib.layers.xavier_initializer()
return tf.Variable(initial(shape))
def choose_action(moveprobs):
    # Feed in the softmax probability tensor and return an action index
    # Actions: up, down, left, right, shoot, nothing
    #           2     5     4     3      1       0
    # Stochastic approach from the original pseudocode: evaluate the (1, 6)
    # softmax tensor in the interactive session and sample an action index
    # from that distribution with inverse-CDF sampling.
    # Note: the graph was built directly from the initial observation, so
    # this always evaluates that fixed input until placeholders are added.
    probs = moveprobs.eval()[0]
    sample_uniform = np.random.uniform()
    cumulated_sum = 0.0
    for i in range(5):
        cumulated_sum = cumulated_sum + probs[i]
        if sample_uniform < cumulated_sum:
            return i
    return 5
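# Illustrative sketch (not part of the original script): the policy-gradient
# update mentioned in the header comments and in the TODO at the end of
# main() needs discounted episode returns. A common way to compute them is
# shown below; the discount factor gamma = 0.99 is an assumed value, not
# taken from the original code.
def discount_rewards(episode_rewards, gamma=0.99):
    # Walk backwards through the episode, accumulating the discounted sum
    # of future rewards for every time step.
    discounted = np.zeros(len(episode_rewards), dtype=np.float32)
    running_sum = 0.0
    for t in reversed(range(len(episode_rewards))):
        running_sum = running_sum * gamma + episode_rewards[t]
        discounted[t] = running_sum
    return discounted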
def main():
# Prepare Chopper Command as env variable
# and start access to images
env = gym.make('ChopperCommand-v0')
observation = env.reset()
# observation now holds unsigned 8 bit int array
# with shape (210, 160, 3). Let's half this for
# our neural network to allow easier processing
# by taking every other pixel
reduced_observation = observation[::2, ::2, :]
    # Drop the first row so the height is even (105 -> 104 rows)
reduced_observation = reduced_observation[1:, :, :]
# reduced_observation is now shape (104,80,3)
# Confirm reduced observation shape
print("Reduced observation shape: ", reduced_observation.shape)
float_input = reduced_observation.astype(np.float32)
# reduced_observation.view('<f4')
sess = tf.InteractiveSession()
y_conv, keep_prob = cnn_model(float_input, True)
moveprobs = tf.nn.softmax(y_conv)
sess.run(tf.global_variables_initializer())
print("Keep_prob: ", keep_prob)
print("Y_Conv: ", y_conv)
# Choosing to keep colors since enemies have different
# colors. We can now feed this into our CNN.
    # Blue planes and white helicopters = enemies
# Black trucks = friendly
# Reshape our array into 4-D tensor to allow input to NN
# input_layer = tf.reshape(reduced_observation, [-1,105,80,3])
# print("Input Layer shape: ", input_layer.shape)
#env.render allows us to render the graphics
while True:
observation, reward, done, info = env.step(choose_action(moveprobs))
# print(observation)
print(reward)
# info is an object from the dict class, holding an attribute
# called ale.lives, returns number of lives we have left
# print(info['ale.lives'])
time.sleep(0.05)
env.render()
# env.step()
if done:
# Compute weight changes and backpropagate here
# Then reset the environment for another run.
env.reset()
if __name__ == "__main__":
main()
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2010.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check if the vectorized has an attribute get_features_names. if not vectorized contains terms hasattr('abc', 'lower')
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrix W and H
W = nmf_model.fit_transform(doc_term_matrix)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# thresold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<thresold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2010
# Find topics that maximise the coherence for each windows
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# upload the result that are necessary for the coherence
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the topics that maximize the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |