date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | guozheng/hello-llm | ollama~rag_pdf.py | from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import GPT4AllEmbeddings
from langchain.vectorstores import Chroma
from langchain import hub
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
from langchain.chains import RetrievalQA
def main():
MODEL_PROMPT_MAPPINGS = {
"llama2-uncensored": "rlm/rag-prompt-llama",
"llama2": "rlm/rag-prompt-llama",
"mistral": "rlm/rag-prompt-mistral",
}
MODEL_NAME = "llama2-uncensored"
PROMPT_NAME = MODEL_PROMPT_MAPPINGS[MODEL_NAME]
# retrieve data from PDF and split
loader = PyPDFLoader("../data/Prompt_Engineering_For_ChatGPT_A_Quick_Guide_To_Te.pdf")
all_splits = loader.load_and_split()
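# Note: load_and_split() returns one Document per PDF page and then chunks them with LangChain's default text splitter.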
# print(all_splits[0])
# embed chunks and store in vectorstore
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
# retrieve
question = "What are the advantages of using a prompt engineering approach?"
# docs = vectorstore.similarity_search(question)
# print("Retrieved documents:")
# print(docs)
# QA chain
QA_CHAIN_PROMPT = hub.pull(PROMPT_NAME)
ollama = Ollama(model=MODEL_NAME, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True)
qa_chain = RetrievalQA.from_chain_type(
ollama,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
)
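# RetrievalQA uses the default "stuff" chain type here: the retrieved chunks are packed into QA_CHAIN_PROMPT and sent to Ollama in a single call.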
result = qa_chain({"query": question})
print("Answer:")
print(result)
# generate main function
if __name__ == "__main__":
main() | [
"{'llama2-uncensored': 'rlm/rag-prompt-llama', 'llama2': 'rlm/rag-prompt-llama', 'mistral': 'rlm/rag-prompt-mistral'}"
] |
2024-01-10 | guozheng/hello-llm | ollama~rag_web.py | from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import GPT4AllEmbeddings
from langchain.vectorstores import Chroma
from langchain import hub
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
from langchain.chains import RetrievalQA
def main():
MODEL_PROMPT_MAPPINGS = {
"llama2-uncensored": "rlm/rag-prompt-llama",
"llama2": "rlm/rag-prompt-llama",
"mistral": "rlm/rag-prompt-mistral",
}
MODEL_NAME = "llama2-uncensored"
PROMPT_NAME = MODEL_PROMPT_MAPPINGS[MODEL_NAME]
# retrieve data from Web
# confluence page needs auth, how to address that?
# loader = WebBaseLoader("https://confluence.portal.roku.com:8443/display/DEA/RAMS+On-call+Runbook")
loader = WebBaseLoader("https://en.wikipedia.org/wiki/Fourier_transform")
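# WebBaseLoader fetches the page over HTTP and parses the HTML (via BeautifulSoup) into a single Document.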
data = loader.load()
# split data into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
print(all_splits[0])
# embed chunks and store in vectorstore
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
# retrieve
question = "What is the relationship with Fourier Transform and sine wave?"
docs = vectorstore.similarity_search(question)
print("Retrieved documents:")
print(docs)
# QA chain
QA_CHAIN_PROMPT = hub.pull(PROMPT_NAME)
ollama = Ollama(model=MODEL_NAME, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True)
qa_chain = RetrievalQA.from_chain_type(
ollama,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
)
result = qa_chain({"query": question})
print("Answer:")
print(result)
# generate main function
if __name__ == "__main__":
main() | [
"{'llama2-uncensored': 'rlm/rag-prompt-llama', 'llama2': 'rlm/rag-prompt-llama', 'mistral': 'rlm/rag-prompt-mistral'}"
] |
2024-01-10 | guozheng/hello-llm | ollama~hello.py | from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
def main():
'''
This is a simple example of using Ollama to generate text.
You need to have local ollama installed and running.
:return:
'''
MODEL_NAME = "llama2-uncensored"
llm = Ollama(
model=MODEL_NAME, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
llm("What is the meaning of life?")
# generate main function
if __name__ == "__main__":
main()
| [] |
2024-01-10 | danielhtoledo2002/Machine_learning_project | main2.py | # Manage environment variables
import os
# Tool
import pandas as pd
# web framework
import streamlit as st
# OPEN AI
from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage, SystemMessage
from Models2 import tesne, logistic, svg, cosine_large, svg2
from DeepLearningModels import load_CNN15k
df3 = pd.read_csv("OpenAi/amlo_clasify_chatpgt_15k.csv")
select_clas = ""
with st.sidebar:
st.write(" # Configuration")
st.write(
"We train three types of models, one that was classified by human other that chat-gpt-3.5 did with all data and the last one with only 15k with chat gpt."
)
clas = st.radio(
"Select which clasification you want to use",
["Chat gpt 15k:computer:"],
index=None,
)
select_clas = clas
if select_clas == "Chat gpt 15k:computer:":
selected = st.multiselect(
"Columns SVG 2",
svg2.clasification_rep().columns,
default=["Clasificación", "Precision"],
)
third_table3 = svg2.clasification_rep()[selected]
selected = st.multiselect(
"Columns CNN 2",
load_CNN15k.clasification_rep().columns,
default=["Clasificación", "Precision"],
)
fourth_table3 = load_CNN15k.clasification_rep()[selected]
if select_clas == "Chat gpt 15k:computer:":
st.write("# AMLO CLASIFIER")
st.write("### Number of clasification")
with st.spinner("Loadig"):
st.bar_chart(df3["classification_spanish"].value_counts(), color="#4A4646")
with st.spinner("Loading"):
st.image("word_cloud3.png", use_column_width=True)
st.write("### SVC with 15k")
with st.spinner("Loading table"):
st.dataframe(third_table3, hide_index=True, use_container_width=True)
text2 = st.text_input(
"Input text to classify with SVG 2",
label_visibility="visible",
placeholder="Input text to classify",
key="input999",
)
if st.button("Enviar", key="button999"):
if text2 != "":
proba = svg2.predict_text(text2)
st.write(svg2.predict(proba))
st.write("### CNN with 15k")
with st.spinner("Loading table"):
st.dataframe(fourth_table3, hide_index=True, use_container_width=True)
text3 = st.text_input(
"Input text to classify with CNN 2",
label_visibility="visible",
placeholder="Input text to classify",
key="input88",
)
if st.button("Enviar", key="button88"):
if text3 != "":
proba = load_CNN15k.predict_text(text3)
st.write(load_CNN15k.predict(proba))
| [] |
2024-01-10 | danielhtoledo2002/Machine_learning_project | OpenAi~classwithopenai.py | # -*- coding: utf-8 -*-
"""classwithOPENAI.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1FMsbc6GGIMlsRio4R2WrpL3OhsI9IpnT
"""
from google.colab import drive
drive.mount('/content/gdrive')
import pandas as pd
df = pd.read_csv('/content/gdrive/MyDrive/DeepLearning/amlo_complete.csv')
df.head()
df = df.head(50)
df.reset_index(inplace=True)
df
!pip install langchain
! pip install openai==0.28.1
import os
os.environ['OPENAI_API_KEY']
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model_name= "gpt-3.5-turbo")
from langchain.schema.messages import HumanMessage, SystemMessage
prompt='''
You are a virtual assistant that classifies only texts in Spanish. The classifications that you should take into account are:
Security: talk about security in Mexico
History: facts from the history of Mexico or the world, no comments.
Economy: Refers to the economic situation in Mexico.
Foreign: Talk about how other countries get involved with Mexico or how Mexico impacts abroad, also about migration.
Opinion: People's personal opinion on any topic, mostly opinions about history in the actual society.
Health: Health situation in Mexico.
Support: social and financial support to communities, older adults, indigenous people, etc.
Corruption: About the wrongdoings of other groups or organizations.
Opposition: Opposed to certain ideas or thoughts, even opposition or badmouthing about a person, organization or Politics People.
Construction: Infrastructure built or purchased for the betterment of Mexico.
The answer should only be one word in Spanish, not English.
'''
def clasificate_csv(text):
response = llm.invoke(
[SystemMessage(content=prompt), HumanMessage(content=text)]
)
return response.content
df["new_clasification"] = df["Texto"].apply(clasificate_csv)
print("acabo")
df.to_csv('amlo_clasify_chatpgt2.csv', index=False)
| [
"\nYou are a virtual assistant that classifies only texts in Spanish. The classifications that you should take into account are:\nSecurity: talk about security in Mexico\nHistory: facts from the history of Mexico or the world, no comments.\nEconomy: Refers to the economic situation in Mexico.\nForeign: Talk about how other countries get involved with Mexico or how Mexico impacts abroad, also about migration.\nOpinion: People's personal opinion on any topic, mostly opinions about history in the actual society.\nHealth: Health situation in Mexico.\nSupport: social and financial support to communities, older adults, indigenous people, etc.\nCorruption: About the wrongdoings of other groups or organizations.\nOpposition: Opposed to certain ideas or thoughts, even opposition or badmouthing about a person, organization or Politics People.\nConstruction: Infrastructure built or purchased for the betterment of Mexico.\n\n\nThe answer should only be one word in Spanish, not English.\n\n"
] |
2024-01-10 | jxyjason/VR_Security_with_GPT | VR_Security_with_GPT.py | import ast
import os
import openai
import xml.etree.ElementTree as ET
import subprocess
import shutil
import time
openai.api_key = ''
def ask_gpt(messages):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# Parse the answer from the response
answer = response.choices[0].message.content
return answer
def decompilation(apk_name):
command = "java -jar apktool_2.9.0.jar d " + apk_name + " -o app"
# Run the command-line instruction
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Capture the command-line output
output, error = process.communicate()
# Decode the output and error messages
# output = output.decode()
# error = error.decode()
# Return the output and error messages
return output, error
def analyzeOneApk(apk_src,description):
try:
# Decompile the APK; output goes to the app folder
decompilation(apk_src)
# Read the XML
tree = ET.parse('app/AndroidManifest.xml')
root = tree.getroot()
xml_string = ET.tostring(root, encoding='utf-8').decode('utf-8')
# # Read the game description (used for testing; now passed in directly as a parameter)
# with open("description.txt", 'r', encoding='utf-8') as file:
# description = file.read()
# Initialize messages
messages = [
{"role": "system", "content": "You are an assistant with clear and concise assistant"},
{"role": "user",
"content": "Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n" \
"** All you have to do is analyze what's below: **\n" \
"\n```\n" + "Permissions used:- android.permission.INTERNET- android.permission.ACCESS_NETWORK_STATE- android.permission.VIBRATE- android.permission.SYSTEM_ALERT_WINDOW- android.permission.READ_EXTERNAL_STORAGE- android.permission.WRITE_EXTERNAL_STORAGE- android.permission.WRITE_MEDIA_STORAGE- android.permission.WRITE_SETTINGS- android.permission.WRITE_SECURE_SETTINGS- android.permission.CHANGE_CONFIGURATION- android.permission.BLUETOOTH- android.permission.BLUETOOTH_ADMIN- android.permission.INJECT_EVENTS- android.permission.DEVICE_POWER- android.permission.RECORD_AUDIO- android.permission.MODIFY_AUDIO_SETTINGS- android.permission.REORDER_TASKS- android.permission.CHANGE_WIFI_STATE- android.permission.ACCESS_WIFI_STATEFeatures used:- android.hardware.microphone (required: false)- android.hardware.touchscreen (required: false)- android.hardware.touchscreen.multitouch (required: false)- android.hardware.touchscreen.multitouch.distinct (required: false)" + "\n```\n" \
"\ndescription:\n```\n" + "一场这是一个多人联机游戏。比赛10项运动!■ 丰富的运动项目体验-棒球、射箭、乒乓球、篮球、保龄球、羽毛球、高尔夫、飞镖、台球、拳击■ 多人实时PvP-拳击、棒球和乒乓球还不受支持■ 通过高级物理实现逼真的虚拟运动体验■ 播放器定制■ 简单的用户界面和简单的控制可以帮助任何人玩■ 从初学者到专业人士有5种不同的困难*比赛形式与奥运会官方规则相同,因此你可以学习运动训练和规则。*适用于由于体育设施和天气条件而必须在室内进行的虚拟现实体育教室。" + "\n```\n"},
{"role": "assistant", "content": "no permissions and features should not be requested."},
{"role": "user",
"content": "Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n" \
"** All you have to do is analyze what's below: **\n" \
"\n```\n" + "The permissions used in the decompiled XML file are:- android.permission.INTERNET- android.permission.WRITE_EXTERNAL_STORAGE- android.permission.ACCESS_NETWORK_STATE- android.permission.WAKE_LOCK- com.android.vending.CHECK_LICENSE- android.permission.ACCESS_WIFI_STATE- android.permission.MODIFY_AUDIO_SETTINGS- android.permission.VIBRATE- android.permission.READ_EXTERNAL_STORAGE- android.permission.WRITE_SETTINGS- android.permission.CHANGE_CONFIGURATION- android.permission.BLUETOOTH- android.permission.BLUETOOTH_ADMIN- android.permission.INJECT_EVENTSThe features used in the decompiled XML file are:- android.hardware.touchscreen- android.hardware.touchscreen.multitouch- android.hardware.touchscreen.multitouch.distinct" + "\n```\n" \
"\ndescription:\n```\n" + "启示骑士 是一个 VR 街机摩托车游戏,您必须在高速荒地公路上占优势,避免敌对交通,并继续生存,飞速行驶和骑行!特点 启示录骑士 - VR自行车赛车游戏 Android的:•20级纯VR肾上腺素•5辆摩托车可提供数十种升级•完整的虚拟现实环境•游戏手柄和运动控制器支持。" + "\n```\n"},
{"role": "assistant", "content": "no permissions and features should not be requested."},
{"role": "user",
"content": "Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n" \
"** All you have to do is analyze what's below: **\n" \
"\n```\n" + "- android.permission.ACCESS_NETWORK_STATE- android.permission.INTERNET- android.permission.VIBRATE- android.permission.WRITE_EXTERNAL_STORAGE- android.permission.READ_EXTERNAL_STORAGE- android.permission.WRITE_SETTINGS- android.permission.CHANGE_CONFIGURATION- android.permission.BLUETOOTH- android.permission.BLUETOOTH_ADMIN- android.permission.INJECT_EVENTS- android.permission.CHANGE_WIFI_MULTICAST_STATE- android.permission.ACCESS_FINE_LOCATION android.permission.CAMERA- android.permission.RECORD_AUDIO- android.permission.MODIFY_AUDIO_SETTINGS- android.permission.WAKE_LOCK- android.permission.REORDER_TASKS android.permission.CHANGE_WIFI_STATE-android.permission.ACCESS_WIFI_STATE- android.permission.CHANGE_NETWORK_STATEThe features used in the decompiled XML file are:- android.hardware.location.gps (required: false)- android.hardware.location (required: false)- android.hardware.camera (required: false)- android.hardware.camera.autofocus (required: false)- android.hardware.camera.front (required: false)- android.hardware.microphone (required: false)- android.hardware.sensor.accelerometer (required: false)- android.hardware.touchscreen (required: false)- android.hardware.touchscreen.multitouch (required: false)- android.hardware.touchscreen.multitouch.distinct (required: false)" + "\n```\n" \
"\ndescription:\n```\n" + "【游戏名称】:Space Fitness【游戏类型】:运动【游戏平台】:pico【游戏模式】:原生VR游戏(定位控制器)【游戏语言】:多国语言游戏容量】:143MB【游戏介绍】:关于这款游戏在太空中,飞船突发意外,你需要几岁各式各样的陨石来通过不同挑战。《太空健身计划》集健身与游戏于一体,在这个广阔的太空中,你可以享受无限挑战乐趣。" + "\n```\n"},
{"role": "assistant",
"content": "android.permission.CAMERA\nandroid.permission.ACCESS_FINE_LOCATION \n- android.hardware.location.gps (required: false)- android.hardware.location (required: false)- android.hardware.camera (required: false)- android.hardware.camera.autofocus (required: false)- android.hardware.camera.front (required: false)- android.hardware.microphone \nandroid.permission.RECORD_AUDIO\n"},
{"role": "user",
"content": "Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n" \
"** All you have to do is analyze what's below: **\n" \
"\n```\n" + "Permissions used:- android.permission.ACCESS_NETWORK_STATE- android.permission.INTERNET- android.permission.CHANGE_WIFI_MULTICAST_STATE- android.permission.WRITE_EXTERNAL_STORAGE- android.permission.WRITE_SETTINGS- android.permission.READ_EXTERNAL_STORAGE- android.permission.REORDER_TASKS- android.permission.CHANGE_WIFI_STATE- android.permission.ACCESS_WIFI_STATEFeatures used:- android.hardware.touchscreen (required: false)- android.hardware.touchscreen.multitouch (required: false)- android.hardware.touchscreen.multitouch.distinct (required: false)" + "\n```\n" \
"\ndescription:\n```\n" + "这个游戏是一款模拟经营一家店的游戏,不过我不是店长。而是打杂的实习生啊!来了顾客要了解他想吃什么,然后自己做出来给顾客吃,手忙脚乱的感觉有木有,终于知道饭店实习生是有多么的辛苦了。还要定期打扫卫生,还有大细菌boss挑战,体验忙碌的一天。这个游戏让我想到了童年时光,记得那时候没有什么玩具,和小伙伴们用泥巴石头还有草树叶什么的当食物,一起玩过家家的游戏。哈哈,这是一款不错的游戏价格也便宜。" + "\n```\n"},
{"role": "assistant", "content": "no permissions and features should not be requested."},
# {"role": "user",
# "content": "Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n" \
# "** All you have to do is analyze what's below: **\n" \
# "\n```\n" + "The permissions used in the decompiled XML file are:- android.permission.INTERNET- android.permission.CHANGE_WIFI_MULTICAST_STATE- android.permission.RECORD_AUDIO- android.permission.MODIFY_AUDIO_SETTINGS- android.permission.BLUETOOTH- android.permission.WRITE_EXTERNAL_STORAGE- android.permission.WRITE_SETTINGS- android.permission.REORDER_TASKS- android.permission.CHANGE_WIFI_STATE android.permission.ACCESS_NETWORK_STATE- android.permission.ACCESS_WIFI_STATE- android.permission.READ_EXTERNAL_STORAGEThe features used in the decompiled XML file are:- android.hardware.microphone (required: false)- android.hardware.touchscreen (required: false)- android.hardware.touchscreen.multitouch (required: false)- android.hardware.touchscreen.multitouch.distinct (required: false)" + "\n```\n" \
# "\ndescription:\n```\n" + "Cave Digger」游戏背景设定在一个充斥着黑色幽默元素的另类西方世界,玩家将扮演一名掘金者深入地下矿井挖掘宝藏,并不断升级或解锁挖掘工具以提高挖掘效率;续作「Cave Digger 2:Dig Harder」沿用了「Cave Digger」背景,并添加了包括4人合作模式和更豪华柴油朋克世界在内的更多内容,玩家将继续在游戏中挖掘宝藏以及探究隐藏秘密。" + "\n```\n"},
# {"role": "assistant", "content": "no permissions and features should not be requested."},
]
# Ask about the XML file
question = "Analyze which permissions and features are used by the decompiled xml file below (only need permissions and features):\n"+xml_string
messages.append({"role": "user", "content": question})
answer = ask_gpt(messages)
xmlPermissionAndFeature = answer
print(answer)
# Append the answer to the conversation
# messages.append({"role": "assistant", "content": answer})
# print("--------------------------------------------------\n")
# # Ask about the game description
# question = "Take a look at permissions and features that might be used for this game, as described below:\n"+description
# messages.append({"role": "user", "content": question})
# answer = ask_gpt(messages)
# print(answer)
# # Append the answer to the conversation
# messages.append({"role": "assistant", "content": answer})
# print("--------------------------------------------------\n")
for i in range(21):
time.sleep(1)
print("sleep:" + str(i + 1) + "s")
# Start a fresh conversation for the second-stage question, using the description and the already extracted XML info
messages = [
{"role": "system", "content": "You are an assistant with clear and concise assistant"},
]
# Ask which permissions and features should not be requested
question = "Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n" \
"** All you have to do is analyze what's below: **\n" \
"\n```\n"+xmlPermissionAndFeature+"\n```\n" \
"\ndescription:\n```\n" + description + "\n```\n"
print(question)
messages.append({"role": "user", "content": question})
answer = ask_gpt(messages)
print(answer)
# Delete the decompiled folder
shutil.rmtree("app")
return answer,xml_string
except Exception as e:
if os.path.exists("app") and os.path.isdir("app"):
shutil.rmtree("app")
print(f"Error occurred: {str(e)}")
if __name__ == "__main__":
# apk_src = "D:\\ict\\My_Project\\Security\\reverse\\reverse_app\\gpt_app\\9月的晚餐_SepsDiner_8592_19.apk"
apk_src = "D:\\ict\\My_Project\\Security\\reverse\\reverse_app\\gpt_app\\9月的晚餐_SepsDiner_8592_19.apk"
description = "欢迎来到 Sep’s Diner,这是一家由您担任主厨的新汉堡餐厅!它真的会成为镇上最好的汉堡吗?现在就看你了!一层又一层,您的体验将增加,美食客户的数量也会增加。他们很匆忙,所以在他们离开之前尽快满足他们!饥饿和不耐烦,他们不会让您犯错误…… 细心和精确,以获得最大的利润!包括全新的甜点餐厅Sep’s Donut!游戏特色:· 2间餐厅· 包括 3 种游戏模式:定时、轻松、多人· 每家餐厅包括 27 个级别(定时/休闲 12 个,多人游戏 3 个)· 最多 4 名玩家的多人合作游戏· 紧张刺激的关卡!· 身临其境的音频氛围· 不耐烦的客户用有趣的声音· 美丽的风景和彩灯· 逐级增加难度· 超过 30 种不同的汉堡食谱组合!· 煎饼、甜甜圈、华夫饼、纸杯蛋糕、冰淇淋和奶昔!"
analyzeOneApk(apk_src,description)
| [
"Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n** All you have to do is analyze what's below: **\n\n```\n- android.permission.ACCESS_NETWORK_STATE- android.permission.INTERNET- android.permission.VIBRATE- android.permission.WRITE_EXTERNAL_STORAGE- android.permission.READ_EXTERNAL_STORAGE- android.permission.WRITE_SETTINGS- android.permission.CHANGE_CONFIGURATION- android.permission.BLUETOOTH- android.permission.BLUETOOTH_ADMIN- android.permission.INJECT_EVENTS- android.permission.CHANGE_WIFI_MULTICAST_STATE- android.permission.ACCESS_FINE_LOCATION android.permission.CAMERA- android.permission.RECORD_AUDIO- android.permission.MODIFY_AUDIO_SETTINGS- android.permission.WAKE_LOCK- android.permission.REORDER_TASKS android.permission.CHANGE_WIFI_STATE-android.permission.ACCESS_WIFI_STATE- android.permission.CHANGE_NETWORK_STATEThe features used in the decompiled XML file are:- android.hardware.location.gps (required: false)- android.hardware.location (required: false)- android.hardware.camera (required: false)- android.hardware.camera.autofocus (required: false)- android.hardware.camera.front (required: false)- android.hardware.microphone (required: false)- android.hardware.sensor.accelerometer (required: false)- android.hardware.touchscreen (required: false)- android.hardware.touchscreen.multitouch (required: false)- android.hardware.touchscreen.multitouch.distinct (required: false)\n```\n\ndescription:\n```\n【游戏名称】:Space Fitness【游戏类型】:运动【游戏平台】:pico【游戏模式】:原生VR游戏(定位控制器)【游戏语言】:多国语言游戏容量】:143MB【游戏介绍】:关于这款游戏在太空中,飞船突发意外,你需要几岁各式各样的陨石来通过不同挑战。《太空健身计划》集健身与游戏于一体,在这个广阔的太空中,你可以享受无限挑战乐趣。\n```\n",
"Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n** All you have to do is analyze what's below: **\n\n```\nPermissions used:- android.permission.ACCESS_NETWORK_STATE- android.permission.INTERNET- android.permission.CHANGE_WIFI_MULTICAST_STATE- android.permission.WRITE_EXTERNAL_STORAGE- android.permission.WRITE_SETTINGS- android.permission.READ_EXTERNAL_STORAGE- android.permission.REORDER_TASKS- android.permission.CHANGE_WIFI_STATE- android.permission.ACCESS_WIFI_STATEFeatures used:- android.hardware.touchscreen (required: false)- android.hardware.touchscreen.multitouch (required: false)- android.hardware.touchscreen.multitouch.distinct (required: false)\n```\n\ndescription:\n```\n这个游戏是一款模拟经营一家店的游戏,不过我不是店长。而是打杂的实习生啊!来了顾客要了解他想吃什么,然后自己做出来给顾客吃,手忙脚乱的感觉有木有,终于知道饭店实习生是有多么的辛苦了。还要定期打扫卫生,还有大细菌boss挑战,体验忙碌的一天。这个游戏让我想到了童年时光,记得那时候没有什么玩具,和小伙伴们用泥巴石头还有草树叶什么的当食物,一起玩过家家的游戏。哈哈,这是一款不错的游戏价格也便宜。\n```\n",
"android.permission.CAMERA\nandroid.permission.ACCESS_FINE_LOCATION \n- android.hardware.location.gps (required: false)- android.hardware.location (required: false)- android.hardware.camera (required: false)- android.hardware.camera.autofocus (required: false)- android.hardware.camera.front (required: false)- android.hardware.microphone \nandroid.permission.RECORD_AUDIO\n",
"Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n** All you have to do is analyze what's below: **\n\n```\nPermissions used:- android.permission.INTERNET- android.permission.ACCESS_NETWORK_STATE- android.permission.VIBRATE- android.permission.SYSTEM_ALERT_WINDOW- android.permission.READ_EXTERNAL_STORAGE- android.permission.WRITE_EXTERNAL_STORAGE- android.permission.WRITE_MEDIA_STORAGE- android.permission.WRITE_SETTINGS- android.permission.WRITE_SECURE_SETTINGS- android.permission.CHANGE_CONFIGURATION- android.permission.BLUETOOTH- android.permission.BLUETOOTH_ADMIN- android.permission.INJECT_EVENTS- android.permission.DEVICE_POWER- android.permission.RECORD_AUDIO- android.permission.MODIFY_AUDIO_SETTINGS- android.permission.REORDER_TASKS- android.permission.CHANGE_WIFI_STATE- android.permission.ACCESS_WIFI_STATEFeatures used:- android.hardware.microphone (required: false)- android.hardware.touchscreen (required: false)- android.hardware.touchscreen.multitouch (required: false)- android.hardware.touchscreen.multitouch.distinct (required: false)\n```\n\ndescription:\n```\n一场这是一个多人联机游戏。比赛10项运动!■ 丰富的运动项目体验-棒球、射箭、乒乓球、篮球、保龄球、羽毛球、高尔夫、飞镖、台球、拳击■ 多人实时PvP-拳击、棒球和乒乓球还不受支持■ 通过高级物理实现逼真的虚拟运动体验■ 播放器定制■ 简单的用户界面和简单的控制可以帮助任何人玩■ 从初学者到专业人士有5种不同的困难*比赛形式与奥运会官方规则相同,因此你可以学习运动训练和规则。*适用于由于体育设施和天气条件而必须在室内进行的虚拟现实体育教室。\n```\n",
"Based on the Permissions And Features and game description, which sensitive permissions and features should not be requested (Simply answer the permissions and features mentioned in the question). \n** All you have to do is analyze what's below: **\n\n```\nThe permissions used in the decompiled XML file are:- android.permission.INTERNET- android.permission.WRITE_EXTERNAL_STORAGE- android.permission.ACCESS_NETWORK_STATE- android.permission.WAKE_LOCK- com.android.vending.CHECK_LICENSE- android.permission.ACCESS_WIFI_STATE- android.permission.MODIFY_AUDIO_SETTINGS- android.permission.VIBRATE- android.permission.READ_EXTERNAL_STORAGE- android.permission.WRITE_SETTINGS- android.permission.CHANGE_CONFIGURATION- android.permission.BLUETOOTH- android.permission.BLUETOOTH_ADMIN- android.permission.INJECT_EVENTSThe features used in the decompiled XML file are:- android.hardware.touchscreen- android.hardware.touchscreen.multitouch- android.hardware.touchscreen.multitouch.distinct\n```\n\ndescription:\n```\n启示骑士 是一个 VR 街机摩托车游戏,您必须在高速荒地公路上占优势,避免敌对交通,并继续生存,飞速行驶和骑行!特点 启示录骑士 - VR自行车赛车游戏 Android的:•20级纯VR肾上腺素•5辆摩托车可提供数十种升级•完整的虚拟现实环境•游戏手柄和运动控制器支持。\n```\n",
"You are an assistant with clear and concise assistant",
"no permissions and features should not be requested."
] |
2024-01-10 | atsuyamaru/wp-update-paraphrase-ja | post_wp_thumbnail_ja_pixabay.py | import json
import os
import requests
from PIL import Image
import deepl
import openai
from func_package.deepl_func import translate_2en
from func_package.pixabay_image import (
check_result_is_0,
extract_image_url,
search_pixabay,
)
from func_package.openai_func import extract_keyword, synonym_keyword
from func_package.wp_api_func import update_api_access
## Authentication setup
# Pixabay
pixabay_url = "https://pixabay.com/api/"
pixabay_api_key = os.environ["PIXABAY_API_KEY"]
# DeepL authentication and creation of the translator object
deepl_api_key = os.environ["DEEPL_API_KEY"]
translator = deepl.Translator(deepl_api_key)
# Give the OpenAI client its API key
openai.api_key = os.environ["OPENAI_API_KEY"]
# Load the WordPress login credentials
with open("wp_login_info.json", "r") as f:
wp_login_info = json.load(f)
username = wp_login_info["username"]
password = wp_login_info["password"]
wp_root_url = wp_login_info["wp_root_url"]
## Extract English keywords from the Japanese title: DeepL & OpenAI
with open("./wp-post-ids.txt") as f:
post_ids = f.read()
post_ids_list = post_ids.split(" ")
# Start partway through (change to any list index)
# post_ids_list = post_ids_list[10:]
# Loop over the post IDs and process each one
for post_id in post_ids_list:
with open(f"./original_ja_title/{post_id}") as f:
original_title = f.read()
# Run the translation with DeepL
en_title = translate_2en(translator, original_title)
# Extract a representative keyword: OpenAI
keyword = extract_keyword(openai, en_title)
## Search for and fetch an image
# Pixabay: keyword search
# Search with the single representative keyword
response = search_pixabay(requests, pixabay_api_key, keyword)
keyword_result_is_0 = check_result_is_0(response)
if keyword_result_is_0:
# Generate a list of synonyms with OpenAI
response_words = synonym_keyword(openai, keyword)
synonym_list = response_words.lstrip("[,'").rstrip("']").split("', '")
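# The model returns a Python-list-style string such as "['a', 'b']"; strip the brackets/quotes and split it back into a list.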
# Keyword search over the synonym list
for synonym in synonym_list:
response = search_pixabay(requests, pixabay_api_key, synonym)
# Check whether the search returned any results
synonym_result_is_0 = check_result_is_0(response)
if synonym_result_is_0:
continue
else:
image_url = extract_image_url(response)
break
else:
print("Cannot detect images in any synonyms.")
else:
image_url = extract_image_url(response)
## Save and convert the image
# Download the selected image locally in binary form
response = requests.get(image_url)
if response.status_code == 200:
with open(f"./pixabay_images_binary/{post_id}", "wb") as f:
f.write(response.content)
## Convert the saved binary file to PNG
image = Image.open(f"./pixabay_images_binary/{post_id}")
# Convert the RGBA image to an RGB image
rgb_image = image.convert("RGB")
# Save as PNG
rgb_image.save(f"./pixabay_images_png/{post_id}.png", format="PNG")
### Upload the image to the WordPress post
## Upload the image to the media library
# Read the PNG image file
with open(f"./pixabay_images_png/{post_id}.png", "rb") as f:
img_data = f.read()
headers = {
"Content-Disposition": f"attachment; filename={post_id}.png",
"Content-Type": "image/png",
}
# Perform the upload to the media library
url_media = f"{wp_root_url}/wp-json/wp/v2/media"
media_response = requests.post(
url_media, auth=(username, password), headers=headers, data=img_data
)
## Link the uploaded file to the post as its thumbnail
update_url = f"{wp_root_url}/wp-json/wp/v2/posts/{post_id}"
media_dict = media_response.json()
post_data = {"featured_media": media_dict["id"]}
# Perform the link (update)
post_dict = update_api_access(requests, update_url, username, password, post_data)
# Print the result
print(
f"Success! Thumbnail updated.\nPost ID: {post_dict['id']}; URL: {post_dict['link']}\nTitle: {post_dict['title']['rendered']}\n------"
)
| [] |
2024-01-10 | atsuyamaru/wp-update-paraphrase-ja | post_wp_paraphrased_ja2ja.py | import json
import os
import re
import requests
import time
from bs4 import BeautifulSoup
import deepl
import openai
from func_package.deepl_func import translate_2en, translate_2ja
from func_package.extract_text import (
extract_latter_half,
extract_first_thirds,
extract_middle_thirds,
extract_last_thirds,
)
from func_package.openai_func import (
paraphrase_en,
format_html,
write_continue,
paraphrase_title,
)
from func_package.wp_api_func import update_with_html
### Authentication setup
# DeepL authentication and creation of the translator object
deepl_api_key = os.environ["DEEPL_API_KEY"]
translator = deepl.Translator(deepl_api_key)
# Give the OpenAI client its API key
openai.api_key = os.environ["OPENAI_API_KEY"]
# Load the WordPress login credentials
with open("wp_login_info.json", "r") as f:
wp_login_info = json.load(f)
username = wp_login_info["username"]
password = wp_login_info["password"]
wp_root_url = wp_login_info["wp_root_url"]
### Build the paraphrased text
# Read the post IDs and build the list of post IDs
with open("./wp-post-ids.txt") as f:
post_ids = f.read()
post_ids_list = post_ids.split(" ")
# Start partway through (change to any list index)
# post_ids_list = post_ids_list[10:]
# Run the paraphrase-to-WordPress-update pipeline for every post
for post_id in post_ids_list:
# Translate from Japanese to English with DeepL
with open(f"original_ja_contents/{post_id}") as f:
content_text = f.read()
translated_en = translate_2en(translator, content_text)
# Split into three parts and paraphrase the English text with OpenAI
translated_en_1 = extract_first_thirds(translated_en)
translated_en_2 = extract_middle_thirds(translated_en)
translated_en_3 = extract_last_thirds(translated_en)
paraphrased_text_1 = paraphrase_en(openai, translated_en_1)
time.sleep(5)
paraphrased_text_2 = paraphrase_en(openai, translated_en_2)
time.sleep(5)
paraphrased_text_3 = paraphrase_en(openai, translated_en_3)
time.sleep(4)
# Generate a continuation of the text with OpenAI
last_part = extract_latter_half(paraphrased_text_3)
continue_text = write_continue(openai, last_part)
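# write_continue asks OpenAI to continue from the tail of the last paraphrased chunk; very short results (20 chars or fewer) are treated as no continuation below.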
# Re-translate each part from English back to Japanese with DeepL (passing an empty string raises ValueError)
retranslated_ja_1 = translate_2ja(translator, paraphrased_text_1)
time.sleep(3)
retranslated_ja_2 = translate_2ja(translator, paraphrased_text_2)
time.sleep(3)
retranslated_ja_3 = translate_2ja(translator, paraphrased_text_3)
time.sleep(3)
retranslated_ja_4 = None
if len(continue_text) > 20:
retranslated_ja_4 = translate_2ja(translator, continue_text)
# Split each part into three and format the text as HTML with OpenAI
response_html_1 = format_html(openai, extract_first_thirds(retranslated_ja_1))
time.sleep(4)
response_html_2 = format_html(openai, extract_middle_thirds(retranslated_ja_1))
time.sleep(4)
response_html_3 = format_html(openai, extract_last_thirds(retranslated_ja_1))
time.sleep(4)
response_html_4 = format_html(openai, extract_first_thirds(retranslated_ja_2))
time.sleep(4)
response_html_5 = format_html(openai, extract_middle_thirds(retranslated_ja_2))
time.sleep(4)
response_html_6 = format_html(openai, extract_last_thirds(retranslated_ja_2))
time.sleep(4)
response_html_7 = format_html(openai, extract_first_thirds(retranslated_ja_3))
time.sleep(4)
response_html_8 = format_html(openai, extract_middle_thirds(retranslated_ja_3))
time.sleep(4)
response_html_9 = format_html(openai, extract_last_thirds(retranslated_ja_3))
if retranslated_ja_4:
time.sleep(4)
response_html_10 = format_html(openai, extract_first_thirds(retranslated_ja_4))
time.sleep(4)
response_html_11 = format_html(openai, extract_middle_thirds(retranslated_ja_4))
time.sleep(4)
response_html_12 = format_html(openai, extract_last_thirds(retranslated_ja_4))
# Concatenate all of the generated HTML text
if retranslated_ja_4:
response_html_whole = (
response_html_1
+ "<br>"
+ response_html_2
+ "<br>"
+ response_html_3
+ "<br>"
+ response_html_4
+ "<br>"
+ response_html_5
+ "<br>"
+ response_html_6
+ "<br>"
+ response_html_7
+ "<br>"
+ response_html_8
+ "<br>"
+ response_html_9
+ "<br>"
+ response_html_10
+ "<br>"
+ response_html_11
+ "<br>"
+ response_html_12
)
else:
response_html_whole = (
response_html_1
+ "<br>"
+ response_html_2
+ "<br>"
+ response_html_3
+ "<br>"
+ response_html_4
+ "<br>"
+ response_html_5
+ "<br>"
+ response_html_6
+ "<br>"
+ response_html_7
+ "<br>"
+ response_html_8
+ "<br>"
+ response_html_9
)
# Generate a title from the original Japanese title with OpenAI
with open(f"./original_ja_title/{post_id}") as f:
title_original = f.read()
title_created = paraphrase_title(openai, title_original)
### Run the update against WordPress
# Define the endpoint
api_update_url = f"{wp_root_url}/wp-json/wp/v2/posts/{post_id}"
## Perform the update: set the post to published
json_html_body = {
"title": title_created,
"content": response_html_whole,
"status": "publish",
}
returned_post_obj = update_with_html(
requests, api_update_url, username, password, json_html_body
)
# Print the result
print(
f"Success! Post updated.\nPost ID: {returned_post_obj['id']}; URL: {returned_post_obj['link']}\nTitle: {returned_post_obj['title']['rendered']}\n------"
)
| [] |
2024-01-10 | dmMaze/BallonsTranslator | modules~translators~trans_chatgpt.py | # stealt & modified from https://github.com/zyddnys/manga-image-translator/blob/main/manga_translator/translators/chatgpt.py
import re
import time
from typing import List, Dict, Union
import yaml
import openai
from .base import BaseTranslator, register_translator
OPENAPI_V1_API = int(openai.__version__.split('.')[0]) >= 1
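# True when the installed openai package is v1.x or newer; the v1 client exposes openai.chat.completions / openai.completions instead of the legacy ChatCompletion / Completion classes used below.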
class InvalidNumTranslations(Exception):
pass
@register_translator('ChatGPT')
class GPTTranslator(BaseTranslator):
concate_text = False
cht_require_convert = True
params: Dict = {
'api key': '',
'model': {
'type': 'selector',
'options': [
'gpt3',
'gpt35-turbo',
'gpt4',
],
'select': 'gpt35-turbo'
},
'override model': '',
'prompt template': {
'type': 'editor',
'content': 'Please help me to translate the following text from a manga to {to_lang} (if it\'s already in {to_lang} or looks like gibberish you have to output it as it is instead):\n',
},
'chat system template': {
'type': 'editor',
'content': 'You are a professional translation engine, please translate the text into a colloquial, elegant and fluent content, without referencing machine translations. You must only translate the text content, never interpret it. If there\'s any issue in the text, output the text as is.\nTranslate to {to_lang}.',
},
'chat sample': {
'type': 'editor',
'content':
'''日本語-简体中文:
source:
- 二人のちゅーを 目撃した ぼっちちゃん
- ふたりさん
- 大好きなお友達には あいさつ代わりに ちゅーするんだって
- アイス あげた
- 喜多ちゃんとは どどど どういった ご関係なのでしようか...
- テレビで見た!
target:
- 小孤独目击了两人的接吻
- 二里酱
- 我听说人们会把亲吻作为与喜爱的朋友打招呼的方式
- 我给了她冰激凌
- 喜多酱和你是怎么样的关系啊...
- 我在电视上看到的!'''
},
'invalid repeat count': 2,
'max requests per minute': 20,
'delay': 0.3,
'max tokens': 4096,
'temperature': 0.5,
'top p': 1,
# 'return prompt': False,
'retry attempts': 5,
'retry timeout': 15,
'3rd party api url': ''
}
def _setup_translator(self):
self.lang_map['简体中文'] = 'Simplified Chinese'
self.lang_map['繁體中文'] = 'Traditional Chinese'
self.lang_map['日本語'] = 'Japanese'
self.lang_map['English'] = 'English'
self.lang_map['한국어'] = 'Korean'
self.lang_map['Tiếng Việt'] = 'Vietnamese'
self.lang_map['čeština'] = 'Czech'
self.lang_map['Français'] = 'French'
self.lang_map['Deutsch'] = 'German'
self.lang_map['magyar nyelv'] = 'Hungarian'
self.lang_map['Italiano'] = 'Italian'
self.lang_map['Polski'] = 'Polish'
self.lang_map['Português'] = 'Portuguese'
self.lang_map['limba română'] = 'Romanian'
self.lang_map['русский язык'] = 'Russian'
self.lang_map['Español'] = 'Spanish'
self.lang_map['Türk dili'] = 'Turkish'
self.lang_map['украї́нська мо́ва'] = 'Ukrainian'
self.token_count = 0
self.token_count_last = 0
@property
def model(self) -> str:
return self.params['model']['select']
@property
def temperature(self) -> float:
return float(self.params['temperature'])
@property
def max_tokens(self) -> int:
return int(self.params['max tokens'])
@property
def top_p(self) -> int:
return int(self.params['top p'])
@property
def retry_attempts(self) -> int:
return int(self.params['retry attempts'])
@property
def retry_timeout(self) -> int:
return int(self.params['retry timeout'])
@property
def chat_system_template(self) -> str:
to_lang = self.lang_map[self.lang_target]
return self.params['chat system template']['content'].format(to_lang=to_lang)
@property
def chat_sample(self):
if self.model == 'gpt3':
return None
samples = self.params['chat sample']['content']
try:
samples = yaml.load(self.params['chat sample']['content'], Loader=yaml.FullLoader)
except:
self.logger.error(f'failed to load parse sample: {samples}')
samples = {}
src_tgt = self.lang_source + '-' + self.lang_target
if src_tgt in samples:
src_list = samples[src_tgt]['source']
tgt_list = samples[src_tgt]['target']
src_queries = ''
tgt_queries = ''
for i, (src, tgt) in enumerate(zip(src_list, tgt_list)):
src_queries += f'\n<|{i+1}|>{src}'
tgt_queries += f'\n<|{i+1}|>{tgt}'
src_queries = src_queries.lstrip()
tgt_queries = tgt_queries.lstrip()
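# Sample lines are numbered with <|i|> tags so the model's reply can be split back into one translation per source line (see _translate).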
return [src_queries, tgt_queries]
else:
return None
def _assemble_prompts(self, queries: List[str], from_lang: str = None, to_lang: str = None, max_tokens = None) -> List[str]:
if from_lang is None:
from_lang = self.lang_map[self.lang_source]
if to_lang is None:
to_lang = self.lang_map[self.lang_target]
prompt = ''
if max_tokens is None:
max_tokens = self.max_tokens
# return_prompt = self.params['return prompt']
prompt_template = self.params['prompt template']['content'].format(to_lang=to_lang).rstrip()
prompt += prompt_template
i_offset = 0
num_src = 0
for i, query in enumerate(queries):
prompt += f'\n<|{i+1-i_offset}|>{query}'
num_src += 1
# If prompt is growing too large and theres still a lot of text left
# split off the rest of the queries into new prompts.
# 1 token = ~4 characters according to https://platform.openai.com/tokenizer
# TODO: potentially add summarizations from special requests as context information
if max_tokens * 2 and len(''.join(queries[i+1:])) > max_tokens:
# if return_prompt:
# prompt += '\n<|1|>'
yield prompt.lstrip(), num_src
prompt = prompt_template
# Restart counting at 1
i_offset = i + 1
num_src = 0
# if return_prompt:
# prompt += '\n<|1|>'
yield prompt.lstrip(), num_src
def _format_prompt_log(self, to_lang: str, prompt: str) -> str:
chat_sample = self.chat_sample
if self.model != 'gpt3' and chat_sample is not None:
return '\n'.join([
'System:',
self.chat_system_template,
'User:',
chat_sample[0],
'Assistant:',
chat_sample[1],
'User:',
prompt,
])
else:
return '\n'.join([
'System:',
self.chat_system_template,
'User:',
prompt,
])
def _translate(self, src_list: List[str]) -> List[str]:
translations = []
# self.logger.debug(f'Temperature: {self.temperature}, TopP: {self.top_p}')
from_lang = self.lang_map[self.lang_source]
to_lang = self.lang_map[self.lang_target]
queries = src_list
# return_prompt = self.params['return prompt']
chat_sample = self.chat_sample
for prompt, num_src in self._assemble_prompts(queries, from_lang, to_lang):
retry_attempt = 0
while True:
try:
response = self._request_translation(prompt, chat_sample)
new_translations = re.split(r'<\|\d+\|>', response)[-num_src:]
if len(new_translations) != num_src:
raise InvalidNumTranslations
break
except InvalidNumTranslations:
retry_attempt += 1
message = f'number of translations does not match to source:\nprompt:\n {prompt}\ntranslations:\n {new_translations}\nopenai response:\n {response}'
if retry_attempt >= self.retry_attempts:
self.logger.error(message)
new_translations = [''] * num_src
break
self.logger.warn(message + '\n' + f'Restarting request. Attempt: {retry_attempt}')
except Exception as e:
retry_attempt += 1
if retry_attempt >= self.retry_attempts:
new_translations = [''] * num_src
break
self.logger.warn(f'Translation failed due to {e}. Attempt: {retry_attempt}, sleep for {self.retry_timeout} secs...')
time.sleep(self.retry_timeout)
# time.sleep(self.retry_timeout)
# if return_prompt:
# new_translations = new_translations[:-1]
# if chat_sample is not None:
# new_translations = new_translations[1:]
translations.extend([t.strip() for t in new_translations])
if self.token_count_last:
self.logger.info(f'Used {self.token_count_last} tokens (Total: {self.token_count})')
return translations
def _request_translation_gpt3(self, prompt: str) -> str:
if OPENAPI_V1_API:
openai_completions_create = openai.completions.create
else:
openai_completions_create = openai.Completion.create
response = openai_completions_create(
model='text-davinci-003',
prompt=prompt,
max_tokens=self.max_tokens // 2, # Assuming that half of the tokens are used for the query
temperature=self.temperature,
top_p=self.top_p,
)
if OPENAPI_V1_API:
self.token_count += response.usage.total_tokens
self.token_count_last = response.usage.total_tokens
else:
self.token_count += response.usage['total_tokens']
self.token_count_last = response.usage['total_tokens']
return response.choices[0].text
def _request_translation_with_chat_sample(self, prompt: str, model: str, chat_sample: List) -> str:
messages = [
{'role': 'system', 'content': self.chat_system_template},
{'role': 'user', 'content': prompt},
]
if chat_sample is not None:
messages.insert(1, {'role': 'user', 'content': chat_sample[0]})
messages.insert(2, {'role': 'assistant', 'content': chat_sample[1]})
if OPENAPI_V1_API:
openai_chatcompletions_create = openai.chat.completions.create
else:
openai_chatcompletions_create = openai.ChatCompletion.create
response = openai_chatcompletions_create(
model=model,
messages=messages,
max_tokens=self.max_tokens // 2,
temperature=self.temperature,
top_p=self.top_p,
)
if OPENAPI_V1_API:
self.token_count += response.usage.total_tokens
self.token_count_last = response.usage.total_tokens
else:
self.token_count += response.usage['total_tokens']
self.token_count_last = response.usage['total_tokens']
for choice in response.choices:
if OPENAPI_V1_API:
return choice.message.content
else:
if 'text' in choice:
return choice.text
# If no response with text is found, return the first response's content (which may be empty)
return response.choices[0].message.content
@property
def api_url(self):
url = self.params['3rd party api url'].strip()
if not url:
return None
return url
def _request_translation(self, prompt, chat_sample: List):
openai.api_key = self.params['api key']
base_url = self.api_url
if OPENAPI_V1_API:
openai.base_url = base_url
else:
if base_url is None:
base_url = 'https://api.openai.com/v1'
openai.api_base = base_url
override_model = self.params['override model'].strip()
if override_model != '':
model: str = override_model
else:
model:str = self.model
if model == 'gpt3':
return self._request_translation_gpt3(prompt)
elif model == 'gpt35-turbo':
model = 'gpt-3.5-turbo'
elif model == 'gpt4':
model = 'gpt-4'
return self._request_translation_with_chat_sample(prompt, model, chat_sample) | [
"prompt template",
"Please help me to translate the following text from a manga to {to_lang} (if it's already in {to_lang} or looks like gibberish you have to output it as it is instead):\n",
"日本語-简体中文:\n source:\n - 二人のちゅーを 目撃した ぼっちちゃん\n - ふたりさん\n - 大好きなお友達には あいさつ代わりに ちゅーするんだって\n - アイス あげた\n - 喜多ちゃんとは どどど どういった ご関係なのでしようか...\n - テレビで見た!\n target:\n - 小孤独目击了两人的接吻\n - 二里酱\n - 我听说人们会把亲吻作为与喜爱的朋友打招呼的方式\n - 我给了她冰激凌\n - 喜多酱和你是怎么样的关系啊...\n - 我在电视上看到的!",
"content",
"You are a professional translation engine, please translate the text into a colloquial, elegant and fluent content, without referencing machine translations. You must only translate the text content, never interpret it. If there's any issue in the text, output the text as is.\nTranslate to {to_lang}."
] |
2024-01-10 | lewtun/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished = {\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
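# Extract every subset archive, parallelized across roughly 75% of the available CPU cores.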
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | vocodedev/vocode-api-python | src~vocode~types~account_connection_params_request.py | # This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
import typing
import typing_extensions
from .open_ai_account_connection_params import OpenAiAccountConnectionParams
from .twilio_account_connection_params import TwilioAccountConnectionParams
class AccountConnectionParamsRequest_AccountConnectionTwilio(TwilioAccountConnectionParams):
type: typing_extensions.Literal["account_connection_twilio"]
class Config:
frozen = True
smart_union = True
allow_population_by_field_name = True
class AccountConnectionParamsRequest_AccountConnectionOpenai(OpenAiAccountConnectionParams):
type: typing_extensions.Literal["account_connection_openai"]
class Config:
frozen = True
smart_union = True
allow_population_by_field_name = True
AccountConnectionParamsRequest = typing.Union[
AccountConnectionParamsRequest_AccountConnectionTwilio, AccountConnectionParamsRequest_AccountConnectionOpenai
]
| [] |
2024-01-10 | vocodedev/vocode-api-python | src~vocode~types~open_ai_account_connection_params.py | # This file was auto-generated by Fern from our API Definition.
import datetime as dt
import typing
import pydantic
from ..core.datetime_utils import serialize_datetime
from .open_ai_credentials import OpenAiCredentials
class OpenAiAccountConnectionParams(pydantic.BaseModel):
credentials: OpenAiCredentials
def json(self, **kwargs: typing.Any) -> str:
kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
return super().json(**kwargs_with_defaults)
def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
return super().dict(**kwargs_with_defaults)
class Config:
frozen = True
smart_union = True
json_encoders = {dt.datetime: serialize_datetime}
| [] |
2024-01-10 | vocodedev/vocode-api-python | src~vocode~types~account_connection_update_params_request.py | # This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
import typing
import typing_extensions
from .open_ai_account_connection_update_params import OpenAiAccountConnectionUpdateParams
from .twilio_account_connection_update_params import TwilioAccountConnectionUpdateParams
class AccountConnectionUpdateParamsRequest_AccountConnectionTwilio(TwilioAccountConnectionUpdateParams):
type: typing_extensions.Literal["account_connection_twilio"]
class Config:
frozen = True
smart_union = True
allow_population_by_field_name = True
class AccountConnectionUpdateParamsRequest_AccountConnectionOpenai(OpenAiAccountConnectionUpdateParams):
type: typing_extensions.Literal["account_connection_openai"]
class Config:
frozen = True
smart_union = True
allow_population_by_field_name = True
AccountConnectionUpdateParamsRequest = typing.Union[
AccountConnectionUpdateParamsRequest_AccountConnectionTwilio,
AccountConnectionUpdateParamsRequest_AccountConnectionOpenai,
]
| [] |
2024-01-10 | vocodedev/vocode-api-python | src~vocode~types~account_connection_page_items_item.py | # This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
import typing
import typing_extensions
from .open_ai_account_connection import OpenAiAccountConnection
from .twilio_account_connection import TwilioAccountConnection
class AccountConnectionPageItemsItem_AccountConnectionTwilio(TwilioAccountConnection):
type: typing_extensions.Literal["account_connection_twilio"]
class Config:
frozen = True
smart_union = True
allow_population_by_field_name = True
class AccountConnectionPageItemsItem_AccountConnectionOpenai(OpenAiAccountConnection):
type: typing_extensions.Literal["account_connection_openai"]
class Config:
frozen = True
smart_union = True
allow_population_by_field_name = True
AccountConnectionPageItemsItem = typing.Union[
AccountConnectionPageItemsItem_AccountConnectionTwilio, AccountConnectionPageItemsItem_AccountConnectionOpenai
]
| [] |
2024-01-10 | vocodedev/vocode-api-python | src~vocode~types~agent_params_openai_account_connection.py | # This file was auto-generated by Fern from our API Definition.
import typing
from .open_ai_account_connection import OpenAiAccountConnection
AgentParamsOpenaiAccountConnection = typing.Union[OpenAiAccountConnection, str]
| [] |
2024-01-10 | vocodedev/vocode-api-python | src~vocode~types~account_connection_response_model.py | # This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
import typing
import typing_extensions
from .open_ai_account_connection import OpenAiAccountConnection
from .twilio_account_connection import TwilioAccountConnection
class AccountConnectionResponseModel_AccountConnectionTwilio(TwilioAccountConnection):
type: typing_extensions.Literal["account_connection_twilio"]
class Config:
frozen = True
smart_union = True
allow_population_by_field_name = True
class AccountConnectionResponseModel_AccountConnectionOpenai(OpenAiAccountConnection):
type: typing_extensions.Literal["account_connection_openai"]
class Config:
frozen = True
smart_union = True
allow_population_by_field_name = True
AccountConnectionResponseModel = typing.Union[
AccountConnectionResponseModel_AccountConnectionTwilio, AccountConnectionResponseModel_AccountConnectionOpenai
]
| [] |
2024-01-10 | vocodedev/vocode-api-python | src~vocode~types~create_call_agent_params_openai_account_connection.py | # This file was auto-generated by Fern from our API Definition.
import typing
from .open_ai_account_connection import OpenAiAccountConnection
CreateCallAgentParamsOpenaiAccountConnection = typing.Union[OpenAiAccountConnection, str]
| [] |
2024-01-10 | vocodedev/vocode-api-python | src~vocode~types~normalized_agent_openai_account_connection.py | # This file was auto-generated by Fern from our API Definition.
import typing
from .open_ai_account_connection import OpenAiAccountConnection
NormalizedAgentOpenaiAccountConnection = typing.Union[OpenAiAccountConnection, str]
| [] |
2024-01-10 | hursh-desai/gpt_index | gpt_index~composability~graph.py | """Composability graphs."""
import json
from typing import Any, Dict, List, Optional, Type, Union
from gpt_index.data_structs.data_structs import IndexStruct
from gpt_index.data_structs.struct_type import IndexStructType
from gpt_index.docstore import DocumentStore
from gpt_index.embeddings.base import BaseEmbedding
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import BaseGPTIndex
from gpt_index.indices.keyword_table.base import GPTKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.prompt_helper import PromptHelper
from gpt_index.indices.query.query_runner import QueryRunner
from gpt_index.indices.query.schema import QueryConfig
from gpt_index.indices.registry import IndexRegistry
from gpt_index.indices.struct_store.sql import GPTSQLStructStoreIndex
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.indices.vector_store.faiss import GPTFaissIndex
from gpt_index.indices.vector_store.pinecone import GPTPineconeIndex
from gpt_index.indices.vector_store.simple import GPTSimpleVectorIndex
from gpt_index.indices.vector_store.weaviate import GPTWeaviateIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.response.schema import Response
# TMP: refactor query config type
QUERY_CONFIG_TYPE = Union[Dict, QueryConfig]
# this is a map from type to outer index class
# we extract the type_to_struct and type_to_query
# fields from the index class
DEFAULT_INDEX_REGISTRY_MAP: Dict[IndexStructType, Type[BaseGPTIndex]] = {
IndexStructType.TREE: GPTTreeIndex,
IndexStructType.LIST: GPTListIndex,
IndexStructType.KEYWORD_TABLE: GPTKeywordTableIndex,
IndexStructType.DICT: GPTFaissIndex,
IndexStructType.SIMPLE_DICT: GPTSimpleVectorIndex,
IndexStructType.WEAVIATE: GPTWeaviateIndex,
IndexStructType.PINECONE: GPTPineconeIndex,
IndexStructType.SQL: GPTSQLStructStoreIndex,
}
def _get_default_index_registry() -> IndexRegistry:
"""Get default index registry."""
index_registry = IndexRegistry()
for index_type, index_class in DEFAULT_INDEX_REGISTRY_MAP.items():
index_registry.type_to_struct[index_type] = index_class.index_struct_cls
index_registry.type_to_query[index_type] = index_class.get_query_map()
return index_registry
class ComposableGraph:
"""Composable graph."""
def __init__(
self,
docstore: DocumentStore,
index_registry: IndexRegistry,
index_struct: IndexStruct,
llm_predictor: Optional[LLMPredictor] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
chunk_size_limit: Optional[int] = None,
) -> None:
"""Init params."""
self._docstore = docstore
self._index_registry = index_registry
# this represents the "root" index struct
self._index_struct = index_struct
self._llm_predictor = llm_predictor or LLMPredictor()
self._prompt_helper = prompt_helper or PromptHelper.from_llm_predictor(
self._llm_predictor, chunk_size_limit=chunk_size_limit
)
self._embed_model = embed_model or OpenAIEmbedding()
@classmethod
    def build_from_index(cls, index: BaseGPTIndex) -> "ComposableGraph":
"""Build from index."""
return ComposableGraph(
index.docstore,
index.index_registry,
# this represents the "root" index struct
index.index_struct,
llm_predictor=index.llm_predictor,
prompt_helper=index.prompt_helper,
embed_model=index.embed_model,
)
def query(
self,
query_str: str,
query_configs: Optional[List[QUERY_CONFIG_TYPE]],
verbose: bool = False,
) -> Response:
"""Query the index."""
# go over all the indices and create a registry
query_runner = QueryRunner(
self._llm_predictor,
self._prompt_helper,
self._embed_model,
self._docstore,
self._index_registry,
query_configs=query_configs,
verbose=verbose,
recursive=True,
)
return query_runner.query(query_str, self._index_struct)
@classmethod
def load_from_disk(cls, save_path: str, **kwargs: Any) -> "ComposableGraph":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
Args:
save_path (str): The save_path of the file.
Returns:
BaseGPTIndex: The loaded index.
"""
with open(save_path, "r") as f:
result_dict = json.load(f)
# TODO: this is hardcoded for now, allow it to be specified by the user
index_registry = _get_default_index_registry()
docstore = DocumentStore.load_from_dict(
result_dict["docstore"], index_registry.type_to_struct
)
index_struct = docstore.get_document(result_dict["index_struct_id"])
if not isinstance(index_struct, IndexStruct):
raise ValueError("Invalid `index_struct_id` - must be an IndexStruct")
return cls(docstore, index_registry, index_struct, **kwargs)
def save_to_disk(self, save_path: str, **save_kwargs: Any) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
Args:
save_path (str): The save_path of the file.
"""
out_dict: Dict[str, Any] = {
"index_struct_id": self._index_struct.get_doc_id(),
"docstore": self._docstore.serialize_to_dict(),
}
with open(save_path, "w") as f:
json.dump(out_dict, f)
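# A minimal usage sketch (illustrative only; `my_index` stands for any existing
# BaseGPTIndex instance): build a graph from an index, query it, and save it.
#
#   graph = ComposableGraph.build_from_index(my_index)
#   response = graph.query("What did the author work on?", query_configs=None)
#   graph.save_to_disk("composable_graph.json")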
| [] |
2024-01-10 | hursh-desai/gpt_index | tests~indices~vector_store~test_base.py | """Test Faiss index."""
import sys
from typing import Any, Dict, List, Tuple
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.vector_store.faiss import GPTFaissIndex
from gpt_index.indices.vector_store.simple import GPTSimpleVectorIndex
from gpt_index.readers.schema.base import Document
from tests.mock_utils.mock_decorator import patch_common
from tests.mock_utils.mock_prompts import MOCK_REFINE_PROMPT, MOCK_TEXT_QA_PROMPT
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, Dict]:
"""Index kwargs."""
index_kwargs = {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
}
query_kwargs = {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
"similarity_top_k": 1,
}
return index_kwargs, query_kwargs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
return [Document(doc_text)]
class MockFaissIndex:
"""Mock Faiss index."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize params."""
self._index: Dict[int, np.ndarray] = {}
@property
def ntotal(self) -> int:
"""Get ntotal."""
return len(self._index)
def add(self, vecs: np.ndarray) -> None:
"""Add vectors to index."""
for vec in vecs:
new_id = len(self._index)
self._index[new_id] = vec
def reset(self) -> None:
"""Reset index."""
self._index = {}
def search(self, vec: np.ndarray, k: int) -> Tuple[np.ndarray, np.ndarray]:
"""Search index."""
# assume query vec is of the form 1 x k
# index_mat is n x k
index_mat = np.array(list(self._index.values()))
# compute distances
distances = np.linalg.norm(index_mat - vec, axis=1)
indices = np.argsort(distances)[:k]
sorted_distances = distances[indices][:k]
# return distances and indices
return sorted_distances[np.newaxis, :], indices[np.newaxis, :]
def mock_get_text_embedding(text: str) -> List[float]:
"""Mock get text embedding."""
# assume dimensions are 5
if text == "Hello world.":
return [1, 0, 0, 0, 0]
elif text == "This is a test.":
return [0, 1, 0, 0, 0]
elif text == "This is another test.":
return [0, 0, 1, 0, 0]
elif text == "This is a test v2.":
return [0, 0, 0, 1, 0]
elif text == "This is a test v3.":
return [0, 0, 0, 0, 1]
elif text == "This is bar test.":
return [0, 0, 1, 0, 0]
elif text == "Hello world backup.":
# this is used when "Hello world." is deleted.
return [1, 0, 0, 0, 0]
else:
raise ValueError("Invalid text for `mock_get_text_embedding`.")
def mock_get_query_embedding(query: str) -> List[float]:
"""Mock get query embedding."""
return [0, 0, 1, 0, 0]
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_build_faiss(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
# NOTE: mock faiss import
sys.modules["faiss"] = MagicMock()
# NOTE: mock faiss index
faiss_index = MockFaissIndex()
index_kwargs, query_kwargs = struct_kwargs
index = GPTFaissIndex(documents=documents, faiss_index=faiss_index, **index_kwargs)
assert len(index.index_struct.nodes_dict) == 4
# check contents of nodes
assert index.index_struct.get_node("0").text == "Hello world."
assert index.index_struct.get_node("1").text == "This is a test."
assert index.index_struct.get_node("2").text == "This is another test."
assert index.index_struct.get_node("3").text == "This is a test v2."
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_faiss_insert(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
# NOTE: mock faiss import
sys.modules["faiss"] = MagicMock()
# NOTE: mock faiss index
faiss_index = MockFaissIndex()
index_kwargs, query_kwargs = struct_kwargs
index = GPTFaissIndex(documents=documents, faiss_index=faiss_index, **index_kwargs)
# insert into index
index.insert(Document(text="This is a test v3."))
    # check contents of nodes
assert index.index_struct.get_node("3").text == "This is a test v2."
assert index.index_struct.get_node("4").text == "This is a test v3."
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
@patch.object(
OpenAIEmbedding, "get_query_embedding", side_effect=mock_get_query_embedding
)
def test_faiss_query(
_mock_query_embed: Any,
_mock_text_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test embedding query."""
# NOTE: mock faiss import
sys.modules["faiss"] = MagicMock()
# NOTE: mock faiss index
faiss_index = MockFaissIndex()
index_kwargs, query_kwargs = struct_kwargs
index = GPTFaissIndex(documents, faiss_index=faiss_index, **index_kwargs)
# test embedding query
query_str = "What is?"
response = index.query(query_str, **query_kwargs)
assert str(response) == ("What is?:This is another test.")
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_build_simple(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex(documents=documents, **index_kwargs)
assert len(index.index_struct.nodes_dict) == 4
# check contents of nodes
actual_node_tups = [
("Hello world.", [1, 0, 0, 0, 0]),
("This is a test.", [0, 1, 0, 0, 0]),
("This is another test.", [0, 0, 1, 0, 0]),
("This is a test v2.", [0, 0, 0, 1, 0]),
]
for text_id in index.index_struct.id_map.keys():
node = index.index_struct.get_node(text_id)
embedding = index.index_struct.embedding_dict[text_id]
assert (node.text, embedding) in actual_node_tups
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_simple_insert(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex(documents=documents, **index_kwargs)
# insert into index
index.insert(Document(text="This is a test v3."))
    # check contents of nodes
actual_node_tups = [
("Hello world.", [1, 0, 0, 0, 0]),
("This is a test.", [0, 1, 0, 0, 0]),
("This is another test.", [0, 0, 1, 0, 0]),
("This is a test v2.", [0, 0, 0, 1, 0]),
("This is a test v3.", [0, 0, 0, 0, 1]),
]
for text_id in index.index_struct.id_map.keys():
node = index.index_struct.get_node(text_id)
embedding = index.index_struct.embedding_dict[text_id]
assert (node.text, embedding) in actual_node_tups
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_simple_delete(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_splitter_overlap: Any,
_mock_splitter: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
index_kwargs, query_kwargs = struct_kwargs
new_documents = [
Document("Hello world.", doc_id="test_id_0"),
Document("This is a test.", doc_id="test_id_1"),
Document("This is another test.", doc_id="test_id_2"),
Document("This is a test v2.", doc_id="test_id_3"),
]
index = GPTSimpleVectorIndex(documents=new_documents, **index_kwargs)
# test delete
index.delete("test_id_0")
assert len(index.index_struct.nodes_dict) == 3
assert len(index.index_struct.id_map) == 3
actual_node_tups = [
("This is a test.", [0, 1, 0, 0, 0], "test_id_1"),
("This is another test.", [0, 0, 1, 0, 0], "test_id_2"),
("This is a test v2.", [0, 0, 0, 1, 0], "test_id_3"),
]
for text_id in index.index_struct.id_map.keys():
node = index.index_struct.get_node(text_id)
embedding = index.index_struct.embedding_dict[text_id]
assert (node.text, embedding, node.ref_doc_id) in actual_node_tups
# test insert
index.insert(Document("Hello world backup.", doc_id="test_id_0"))
assert len(index.index_struct.nodes_dict) == 4
assert len(index.index_struct.id_map) == 4
actual_node_tups = [
("Hello world backup.", [1, 0, 0, 0, 0], "test_id_0"),
("This is a test.", [0, 1, 0, 0, 0], "test_id_1"),
("This is another test.", [0, 0, 1, 0, 0], "test_id_2"),
("This is a test v2.", [0, 0, 0, 1, 0], "test_id_3"),
]
for text_id in index.index_struct.id_map.keys():
node = index.index_struct.get_node(text_id)
embedding = index.index_struct.embedding_dict[text_id]
assert (node.text, embedding, node.ref_doc_id) in actual_node_tups
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
@patch.object(
OpenAIEmbedding, "get_query_embedding", side_effect=mock_get_query_embedding
)
def test_simple_query(
_mock_query_embed: Any,
_mock_text_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test embedding query."""
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex(documents, **index_kwargs)
# test embedding query
query_str = "What is?"
response = index.query(query_str, **query_kwargs)
assert str(response) == ("What is?:This is another test.")
# test with keyword filter (required)
query_kwargs_copy = query_kwargs.copy()
query_kwargs_copy["similarity_top_k"] = 5
response = index.query(query_str, **query_kwargs_copy, required_keywords=["Hello"])
assert str(response) == ("What is?:Hello world.")
# test with keyword filter (exclude)
# insert into index
index.insert(Document(text="This is bar test."))
query_kwargs_copy = query_kwargs.copy()
query_kwargs_copy["similarity_top_k"] = 2
response = index.query(query_str, **query_kwargs_copy, exclude_keywords=["another"])
assert str(response) == ("What is?:This is bar test.")
@patch_common
@patch.object(
OpenAIEmbedding, "_get_text_embedding", side_effect=mock_get_text_embedding
)
@patch.object(
OpenAIEmbedding, "_get_query_embedding", side_effect=mock_get_query_embedding
)
def test_query_and_count_tokens(
_mock_query_embed: Any,
_mock_text_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
struct_kwargs: Dict,
) -> None:
"""Test embedding query."""
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
document = Document(doc_text)
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex([document], **index_kwargs)
assert index.embed_model.total_tokens_used == 20
# test embedding query
query_str = "What is?"
index.query(query_str, **query_kwargs)
assert index.embed_model.last_token_usage == 3
@patch_common
@patch.object(
OpenAIEmbedding, "_get_text_embedding", side_effect=mock_get_text_embedding
)
@patch.object(
OpenAIEmbedding, "_get_query_embedding", side_effect=mock_get_query_embedding
)
def test_query_and_similarity_scores(
_mock_query_embed: Any,
_mock_text_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
struct_kwargs: Dict,
) -> None:
"""Test that sources nodes have similarity scores."""
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
document = Document(doc_text)
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex([document], **index_kwargs)
# test embedding query
query_str = "What is?"
response = index.query(query_str, **query_kwargs)
assert len(response.source_nodes) > 0
assert response.source_nodes[0].similarity is not None
@patch_common
@patch.object(
OpenAIEmbedding, "_get_text_embedding", side_effect=mock_get_text_embedding
)
@patch.object(
OpenAIEmbedding, "_get_query_embedding", side_effect=mock_get_query_embedding
)
def test_query_and_similarity_scores_with_cutoff(
_mock_query_embed: Any,
_mock_text_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
struct_kwargs: Dict,
) -> None:
"""Test that sources nodes have similarity scores."""
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
document = Document(doc_text)
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex([document], **index_kwargs)
# test embedding query - no nodes
query_str = "What is?"
response = index.query(query_str, similarity_cutoff=1.1, **query_kwargs)
assert len(response.source_nodes) == 0
# test embedding query - 1 node
query_str = "What is?"
response = index.query(query_str, similarity_cutoff=0.9, **query_kwargs)
assert len(response.source_nodes) == 1
| [] |
2024-01-10 | hursh-desai/gpt_index | tests~indices~embedding~test_base.py | """Test embedding functionalities."""
from collections import defaultdict
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.data_structs.data_structs import Node
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.query.tree.embedding_query import GPTTreeIndexEmbeddingQuery
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import (
LLMChain,
LLMMetadata,
LLMPredictor,
)
from gpt_index.readers.schema.base import Document
from tests.mock_utils.mock_decorator import patch_common
from tests.mock_utils.mock_predict import mock_llmchain_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
def test_embedding_similarity() -> None:
"""Test embedding similarity."""
embed_model = OpenAIEmbedding()
text_embedding = [3.0, 4.0, 0.0]
query_embedding = [0.0, 1.0, 0.0]
cosine = embed_model.similarity(query_embedding, text_embedding)
assert cosine == 0.8
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, Dict]:
"""Index kwargs."""
index_kwargs = {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
}
query_kwargs = {
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
}
return index_kwargs, query_kwargs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
return [Document(doc_text)]
def _get_node_text_embedding_similarities(
query_embedding: List[float], nodes: List[Node]
) -> List[float]:
"""Get node text embedding similarity."""
text_similarity_map = defaultdict(lambda: 0.0)
text_similarity_map["Hello world."] = 0.9
text_similarity_map["This is a test."] = 0.8
text_similarity_map["This is another test."] = 0.7
text_similarity_map["This is a test v2."] = 0.6
similarities = []
for node in nodes:
similarities.append(text_similarity_map[node.get_text()])
return similarities
@patch_common
@patch.object(
GPTTreeIndexEmbeddingQuery,
"_get_query_text_embedding_similarities",
side_effect=_get_node_text_embedding_similarities,
)
def test_embedding_query(
_mock_similarity: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text_overlap: Any,
_mock_split_text: Any,
struct_kwargs: Dict,
documents: List[Document],
) -> None:
"""Test embedding query."""
index_kwargs, query_kwargs = struct_kwargs
tree = GPTTreeIndex(documents, **index_kwargs)
# test embedding query
query_str = "What is?"
response = tree.query(query_str, mode="embedding", **query_kwargs)
assert str(response) == ("What is?:Hello world.")
@patch.object(LLMChain, "predict", side_effect=mock_llmchain_predict)
@patch("gpt_index.langchain_helpers.chain_wrapper.OpenAI")
@patch.object(LLMPredictor, "get_llm_metadata", return_value=LLMMetadata())
@patch.object(LLMChain, "__init__", return_value=None)
@patch.object(
GPTTreeIndexEmbeddingQuery,
"_get_query_text_embedding_similarities",
side_effect=_get_node_text_embedding_similarities,
)
def test_query_and_count_tokens(
_mock_similarity: Any,
_mock_llmchain: Any,
_mock_llm_metadata: Any,
_mock_init: Any,
_mock_predict: Any,
struct_kwargs: Dict,
documents: List[Document],
) -> None:
"""Test query and count tokens."""
index_kwargs, query_kwargs = struct_kwargs
# mock_prompts.MOCK_SUMMARY_PROMPT_TMPL adds a "\n" to the document text
document_token_count = 24
llmchain_mock_resp_token_count = 10
# build the tree
tree = GPTTreeIndex(documents, **index_kwargs)
assert (
tree._llm_predictor.total_tokens_used
== document_token_count + llmchain_mock_resp_token_count
)
# test embedding query
start_token_ct = tree._llm_predictor.total_tokens_used
query_str = "What is?"
# From MOCK_TEXT_QA_PROMPT, the prompt is 28 total
query_prompt_token_count = 28
tree.query(query_str, mode="embedding", **query_kwargs)
assert (
tree._llm_predictor.total_tokens_used - start_token_ct
== query_prompt_token_count + llmchain_mock_resp_token_count
)
| [
"28"
] |
2024-01-10 | Sebiancoder/PaperBulb | backend~oai_caller.py | from secretK.secretK import OPENAI
import os
import openai
import requests
class OaiCaller():
def __init__(self):
#openai.organization = "org-zVepG3kIxRXl8x7hB7Ro2zKF"
openai.api_key = f"{OPENAI}"
def callModel(self, prompt : str):
response = openai.Completion.create(
engine="text-davinci-003", # Choose the appropriate engine
prompt=prompt,
max_tokens=1750 # Maximum number of tokens in the response
)
return response['choices'][0]['text']
def getGptSummary(self, abstract : str, ulev : str):
if abstract is None or abstract == "" or ulev is None or ulev == "":
return ""
levels = {
'child': "middle school student",
'highschool': "high school student",
'undergrad': "undergraduate college student",
'masters': "master's student",
'original': "original"
}
if ulev == "original":
return abstract
prompt = abstract + "\n\nRewrite the previous so as to make it understandable by a " + levels[ulev] + ". Respond with only a paragraph and no extra text or punctuation."
return self.callModel(prompt).strip().replace('•', '-')
def getJargon(self, abstract : str):
if abstract is None or abstract == "":
return ""
prompt = abstract + "\n\nProvide a comma separated list of words in the previous paragraph that would be considered jargon specific to the field. Do not write anything else but the comma-separated list. Do not put a period at the end"
model_output = self.callModel(prompt)
cleaned_mo = model_output.replace(".","").replace("\n","").split(",")
for i in range(len(cleaned_mo)):
cleaned_mo[i] = cleaned_mo[i].strip()
cleaned_mo = "- " + "\n- ".join(cleaned_mo)
return cleaned_mo.replace('•', '-')
def learn_more(self, abstract: str):
if abstract is None or abstract == "":
return ""
prompt = abstract + "\n\nPlease provide educational resources (including articles, books, and more) to help understand the concepts described in the above abstract. Do not provide links, and do not include any punctuation nor any text besides the comma-separated values."
model_output = self.callModel(prompt)
cleaned_mo = model_output.strip()
return cleaned_mo.replace('•', '-')
if __name__ == "__main__":
abstract = "We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without tricks, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. Code will be made available.\n\n"
oai = OaiCaller()
ggt = oai.learn_more(abstract)
breakpoint()
| [
". Respond with only a paragraph and no extra text or punctuation.",
"PLACEHOLDER\n\nPlease provide educational resources (including articles, books, and more) to help understand the concepts described in the above abstract. Do not provide links, and do not include any punctuation nor any text besides the comma-separated values.",
"\n\nRewrite the previous so as to make it understandable by a ",
"PLACEHOLDER\n\nProvide a comma separated list of words in the previous paragraph that would be considered jargon specific to the field. Do not write anything else but the comma-separated list. Do not put a period at the end"
] |
2024-01-10 | JiefanYa/RL-Training | impl_proj~multi_threading_env.py | # Code is from OpenAI baseline.
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * (self.count)
m_b = batch_var * (batch_count)
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
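# Minimal usage sketch (illustrative shapes/values only): statistics are folded
# in batch-by-batch with the parallel-variance update above, so mean/var end up
# summarizing all samples seen so far.
#
#   rms = RunningMeanStd(shape=(3,))
#   rms.update(np.random.randn(64, 3))  # first batch
#   rms.update(np.random.randn(64, 3))  # second batch; rms.mean / rms.var now
#                                       # reflect all 128 samples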
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
def reset(self):
pass
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self):
self.venv.render()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
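# Minimal usage sketch (assumes gym is installed; the env id is illustrative).
# Each env_fn must be a zero-argument callable so it can be pickled (via
# CloudpickleWrapper) and constructed inside its worker process.
#
#   import gym
#   env_fns = [lambda: gym.make("CartPole-v1") for _ in range(4)]
#   vec_env = SubprocVecEnv(env_fns)
#   obs = vec_env.reset()                                  # shape (4, obs_dim)
#   actions = [vec_env.action_space.sample() for _ in range(4)]
#   obs, rews, dones, infos = vec_env.step(actions)
#   vec_env.close()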
class VecNormalize(VecEnvWrapper):
"""
Vectorized environment base class
"""
def __init__(self, venv, ob=False, ret=False, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):
VecEnvWrapper.__init__(self, venv)
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step_wait(self):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
obs, rews, news, infos = self.venv.step_wait()
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
self.ret_rms.update(self.ret)
            rews_norm = rews / np.sqrt(self.ret_rms.var + self.epsilon)  # normalize rewards by the std of the running discounted return
rews = np.clip(rews_norm, -self.cliprew, self.cliprew)
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs_norm = (obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon)
obs = np.clip(obs_norm, -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
"""
Reset all environments
"""
obs = self.venv.reset()
return self._obfilt(obs)
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
shapes, dtypes = {}, {}
self.keys = []
obs_space = env.observation_space
subspaces = {None: obs_space}
for key, box in subspaces.items():
shapes[key] = box.shape
dtypes[key] = np.float32
self.keys.append(key)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
        self.buf_dones = np.zeros((self.num_envs,), dtype=bool)  # np.bool is removed in recent NumPy versions
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
for e in range(self.num_envs):
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(self.actions[e])
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def close(self):
return
def render(self, mode='human'):
return [e.render(mode=mode) for e in self.envs]
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
if self.keys==[None]:
return self.buf_obs[None]
else:
return self.buf_obs
| [] |
2024-01-10 | harshit0017/raghaavbot | pdf_chat.py | import os
import streamlit as st
from PyPDF2 import PdfReader
import langchain
langchain.verbose = False
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
import requests
from bs4 import BeautifulSoup
os.environ["OPENAI_API_KEY"] = "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"
def webscrap(name):
# Replace this URL with the one you want to scrape
url = f'https://www.{name}.com'
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
page_text = soup.get_text()
return page_text
else:
return None
def main():
print(os.getenv('OPENAI_API_KEY'))
st.set_page_config(page_title="Webscrap chatbot")
st.header("Webscrap chatbot")
name = st.text_input("enter website name")
web_data= webscrap(name)
if web_data is not None:
text = web_data
# for page in pdf_reader.pages:
# text += page.extract_text()
max_length = 1800
original_string = text
temp_string = ""
strings_list = []
for character in original_string:
if len(temp_string) < max_length:
temp_string += character
else:
strings_list.append(temp_string)
temp_string = ""
if temp_string:
strings_list.append(temp_string)
#split into chunks
# create embeddings
embeddings = OpenAIEmbeddings()
knowledge_base = FAISS.from_texts(strings_list, embedding=embeddings)
user_question = st.text_input("Ask a question about your PDF")
if user_question:
docs = knowledge_base.similarity_search(user_question)
llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0.9)
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents = docs, question = user_question)
print(cb)
st.write(response)
if __name__ == '__main__':
main() | [] |
2024-01-10 | harshit0017/raghaavbot | chat_scrap.py | import os
import streamlit as st
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
import requests
from bs4 import BeautifulSoup
# Set your OpenAI API key
os.environ["OPENAI_API_KEY"] = "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"
# Create a radio button to choose the chat mode
chat_mode = st.radio("Select Chat Mode:", ("webscrap Chat", "youtube transcript"))
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Function to scrape a website and return its content
def webscrap(name):
# Replace this URL with the one you want to scrape
url = f'https://www.{name}.com'
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
page_text = soup.get_text()
return page_text
else:
return None
def main():
st.title("Web Scraper Chatbot")
st.header("Web Scraper Chatbot")
name = st.text_input("Enter the website name to scrape")
if st.button("Scrape and Analyze"):
# Call the webscrap function to scrape the website and save the content
content = webscrap(name)
if content is not None:
st.write("Content has been scraped and saved.")
max_length = 1800
original_string = content
strings_list = []
while len(original_string) > max_length:
strings_list.append(original_string[:max_length])
original_string = original_string[max_length:]
strings_list.append(original_string)
# Create embeddings
embeddings = OpenAIEmbeddings()
knowledge_base = FAISS.from_texts(strings_list, embedding=embeddings)
prompt = st.text_area("Ask a question:")
if st.button("Ask"):
user_question = prompt
print("yha hu")
if user_question:
docs = knowledge_base.similarity_search(user_question)
print("context")
llm = OpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.9)
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs, question=user_question)
with st.chat_message("assistant"):
st.markdown(response)
st.session_state.messages.append({"role": "assistant", "content": response})
if __name__ == '__main__':
main()
| [
"Ask a question:"
] |
2024-01-10 | JackDMcGrath/LOOPY | LiCSBAS_lib~LiCSBAS_inv_lib.py | #!/usr/bin/env python3
"""
========
Overview
========
Python3 library of time series inversion functions for LiCSBAS.
=========
Changelog
=========
v1.5.2 20211122 Milan Lazecky, Uni Leeds
- use bit more economic computations (for tutorial purposes)
v1.5.1 20210309 Yu Morishita, GSI
- Add GPU option into calc_velstd_withnan and calc_stc
v1.5 20210305 Yu Morishita, GSI
- Add GPU option into invert_nsbas
v1.4.2 20201118 Yu Morishita, GSI
- Again Bug fix of multiprocessing
v1.4.1 20201116 Yu Morishita, GSI
- Bug fix of multiprocessing in Mac python>=3.8
v1.4 20200703 Yu Morishita, GSI
- Replace problematic terms
v1.3 20200103 Yu Morishita, Uni of Leeds and GSI
 - Bug fix in calc_stc (return nonzero even if two adjacent pixels have identical ts)
v1.2 20190823 Yu Morishita, Uni of Leeds and GSI
 - Bug fix in calc_velstd_withnan
- Remove calc_velstd
v1.1 20190807 Yu Morishita, Uni of Leeds and GSI
- Add calc_velsin
v1.0 20190730 Yu Morishita, Uni of Leeds and GSI
- Original implementation
"""
import warnings
import numpy as np
import datetime as dt
import multiprocessing as multi
from astropy.stats import bootstrap
from astropy.utils import NumpyRNGContext
import LiCSBAS_tools_lib as tools_lib
from sklearn.linear_model import RANSACRegressor
#%%
def make_sb_matrix(ifgdates):
"""
Make small baseline incidence-like matrix.
Composed of 1 between primary and secondary. (n_ifg, n_im-1)
Unknown is incremental displacement.
"""
imdates = tools_lib.ifgdates2imdates(ifgdates)
n_im = len(imdates)
n_ifg = len(ifgdates)
G = np.zeros((n_ifg, n_im-1), dtype=np.int16)
for ifgix, ifgd in enumerate(ifgdates):
primarydate = ifgd[:8]
primaryix = imdates.index(primarydate)
secondarydate = ifgd[-8:]
secondaryix = imdates.index(secondarydate)
G[ifgix, primaryix:secondaryix] = 1
return G
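# Illustrative example (hypothetical dates): for images
# [20200101, 20200113, 20200125] and ifgs 20200101_20200113, 20200113_20200125,
# 20200101_20200125, make_sb_matrix returns
#   G = [[1, 0],
#        [0, 1],
#        [1, 1]]
# so that G @ [inc1, inc2] reproduces the phase of each interferogram.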
#%%
def make_sb_matrix2(ifgdates):
"""
Make small baseline incidence-like matrix.
Composed of -1 at primary and 1 at secondary. (n_ifg, n_im)
Unknown is cumulative displacement.
"""
imdates = tools_lib.ifgdates2imdates(ifgdates)
n_im = len(imdates)
n_ifg = len(ifgdates)
A = np.zeros((n_ifg, n_im), dtype=np.int16)
for ifgix, ifgd in enumerate(ifgdates):
primarydate = ifgd[:8]
primaryix = imdates.index(primarydate)
secondarydate = ifgd[-8:]
secondaryix = imdates.index(secondarydate)
A[ifgix, primaryix] = -1
A[ifgix, secondaryix] = 1
return A
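# For the same hypothetical 3-image network as above, make_sb_matrix2 returns
#   A = [[-1,  1,  0],
#        [ 0, -1,  1],
#        [-1,  0,  1]]
# so that A @ [cum1, cum2, cum3] expresses each ifg as a difference of
# cumulative displacements.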
#%%
def invert_nsbas(unw, G, dt_cum, gamma, n_core, gpu, singular=False, only_sb=False):
"""
    Calculate incremental displacement by NSBAS inversion. Points with full unw data are first solved all at once by simple SB inversion.
Inputs:
unw : Unwrapped data block for each point (n_pt, n_ifg)
              Still includes nan to keep the dimension
G : Design matrix (1 between primary and secondary) (n_ifg, n_im-1)
dt_cum : Cumulative years(or days) for each image (n_im)
gamma : Gamma value for NSBAS inversion, should be small enough (e.g., 0.0001)
n_core : Number of cores for parallel processing
gpu : GPU flag
Returns:
inc : Incremental displacement (n_im-1, n_pt)
vel : Velocity (n_pt)
vconst : Constant part of linear velocity (c of vt+c) (n_pt)
"""
if n_core != 1:
global Gall, unw_tmp, mask ## for para_wrapper
# is multicore, let's not use any simplifications
only_sb = False
singular = False
if gpu:
only_sb = False
singular = False
### Settings
n_pt, n_ifg = unw.shape
n_im = G.shape[1]+1
# For computational needs, do either only SB or a singular-nsbas approach (ML, 11/2021)
# (note the singular-nsbas approach may be improved later)
# (note 2: using G or Gall for full unw data leads to EXACTLY SAME result. but perhaps G is a tiny bit faster..)
if only_sb or singular:
result = np.zeros((G.shape[1], n_pt), dtype=np.float32)*np.nan
else:
# do the original NSBAS inversion
result = np.zeros((n_im+1, n_pt), dtype=np.float32)*np.nan #[inc, vel, const]
### Set matrix of NSBAS part (bottom)
Gbl = np.tril(np.ones((n_im, n_im-1), dtype=np.float32), k=-1) #lower tri matrix without diag
Gbr = -np.ones((n_im, 2), dtype=np.float32)
Gbr[:, 0] = -dt_cum
Gb = np.concatenate((Gbl, Gbr), axis=1)*gamma
Gt = np.concatenate((G, np.zeros((n_ifg, 2), dtype=np.float32)), axis=1)
Gall = np.float32(np.concatenate((Gt, Gb)))
### Solve points with full unw data at a time. Very fast.
bool_pt_full = np.all(~np.isnan(unw), axis=1)
n_pt_full = bool_pt_full.sum()
if n_pt_full!=0:
print(' Solving {0:6}/{1:6}th points with full unw at a time...'.format(n_pt_full, n_pt), flush=True)
if only_sb or singular:
result[:, bool_pt_full] = np.linalg.lstsq(G, unw[bool_pt_full, :].transpose(), rcond=None)[0]
else:
## Solve
unw_tmp = np.concatenate((unw[bool_pt_full, :], np.zeros((n_pt_full, n_im), dtype=np.float32)), axis=1).transpose()
if gpu:
print(' Using GPU')
import cupy as cp
unw_tmp_cp = cp.asarray(unw_tmp)
Gall_cp = cp.asarray(Gall)
_sol = cp.linalg.lstsq(Gall_cp, unw_tmp_cp, rcond=None)[0]
result[:, bool_pt_full] = cp.asnumpy(_sol)
del unw_tmp_cp, Gall_cp, _sol
else:
result[:, bool_pt_full] = np.linalg.lstsq(Gall, unw_tmp, rcond=None)[0]
if only_sb:
print('skipping nan points, only SB inversion is performed')
else:
print(' Next, solve {0} points including nan point-by-point...'.format(n_pt-n_pt_full), flush=True)
if not singular:
### Solve other points with nan point by point.
## Not use GPU because lstsq with small matrix is slower than CPU
unw_tmp = np.concatenate((unw[~bool_pt_full, :], np.zeros((n_pt-n_pt_full, n_im), dtype=np.float32)), axis=1).transpose()
mask = (~np.isnan(unw_tmp))
unw_tmp[np.isnan(unw_tmp)] = 0
if n_core == 1:
if not singular:
result[:, ~bool_pt_full] = censored_lstsq_slow(Gall, unw_tmp, mask) #(n_im+1, n_pt)
else:
print('using low precision approach (but much faster)')
d = unw[~bool_pt_full, :].transpose()
m = result[:, ~bool_pt_full]
result[:, ~bool_pt_full] = singular_nsbas(d,G,m,dt_cum)
else:
print(' {} parallel processing'.format(n_core), flush=True)
args = [i for i in range(n_pt-n_pt_full)]
q = multi.get_context('fork')
p = q.Pool(n_core)
_result = p.map(censored_lstsq_slow_para_wrapper, args) #list[n_pt][length]
result[:, ~bool_pt_full] = np.array(_result).T
#
if only_sb or singular:
# SB/singular-NSBAS result matrix: based on G only, need to calculate vel, setting vconst=0
inc = result
vel = result.sum(axis=0)/dt_cum[-1]
vconst = np.zeros_like(vel)
else:
# NSBAS result matrix: last 2 rows are vel and vconst
inc = result[:n_im-1, :]
vel = result[n_im-1, :]
vconst = result[n_im, :]
return inc, vel, vconst
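# Shape sketch of the NSBAS system assembled above (sizes are hypothetical):
# with n_ifg = 3 and n_im = 3, Gall is (n_ifg + n_im) x (n_im + 1) = 6 x 4.
# The top n_ifg rows are [G | 0 0] (the SB observations); the bottom n_im rows
# are gamma * [strictly lower-triangular ones | -dt_cum, -1], which softly
# constrain the cumulative sum of increments to the linear model v*t + c and
# so regularize across network gaps.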
# Original solution by ML: instead of building the full large matrix over increment rows, work only with per-segment
# sums and time spans, which is much faster and keeps the computation linear (no big matrix solve). This may cost some
# precision, but leaves room for further improvement.
def singular_nsbas(d,G,m,dt_cum):
# per each point
#from scipy.optimize import curve_fit
#def func_vel(x, a):
# return a * x
for px in range(m.shape[1]):
if np.mod(px, 1000) == 0:
print('\r Running {0:6}/{1:6}th point...'.format(px, m.shape[1]), end='', flush=True)
dpx = d[:,px]
mpx = m[:,px]
# first, work only with values without nans. check if it doesn't remove increments, if so, estimate the inc
okpx = ~np.isnan(dpx)
Gpx_ok = G[okpx,:]
dpx_ok = dpx[okpx]
badincs = np.sum(Gpx_ok,axis=0)==0
if not max(badincs):
# if actually all are fine, just run LS:
mpx = np.linalg.lstsq(Gpx_ok, dpx_ok, rcond=None)[0]
else:
# if there is at least one im with no related ifg:
mpx[~badincs] = np.linalg.lstsq(Gpx_ok[:,~badincs], dpx_ok, rcond=None)[0]
badinc_index = np.where(badincs)[0]
bi_prev = 0
s = []
t = []
# ensure the algorithm goes towards the end of the mpx line
for bi in np.append(badinc_index,len(mpx)):
group_mpx = mpx[bi_prev:bi]
#use at least 2 ifgs for the vel estimate
if group_mpx.size > 0:
group_time = dt_cum[bi_prev:bi+1]
s.append(group_mpx.sum())
t.append(group_time[-1] - group_time[0])
bi_prev = bi+1
s = np.array(s)
t = np.array(t)
# is only one value ok? maybe increase the threshold here:
if len(s)>0:
velpx = s.sum()/t.sum()
else:
velpx = np.nan # not sure what will happen. putting 0 may be safer
#if len(s) == 1:
# velpx = s[0]/t[0]
#else:
# velpx = curve_fit(func_vel, t, s)[0][0]
mpx[badincs] = (dt_cum[badinc_index+1]-dt_cum[badinc_index]) * velpx
m[:,px] = mpx
return m
def censored_lstsq_slow_para_wrapper(i):
### Use global value
if np.mod(i, 1000) == 0:
print(' Running {0:6}/{1:6}th point...'.format(i, unw_tmp.shape[1]), flush=True)
m = mask[:,i] # drop rows where mask is zero
try:
X = np.linalg.lstsq(Gall[m], unw_tmp[m,i], rcond=None)[0]
except:
X = np.zeros((Gall.shape[1]), dtype=np.float32)*np.nan
return X
#%%
def invert_nsbas_wls(unw, var, G, dt_cum, gamma, n_core):
"""
Calculate increment displacement difference by NSBAS inversion with WLS.
Inputs:
unw : Unwrapped data block for each point (n_pt, n_ifg)
              Still includes nan to keep the dimension
var : Variance estimated from coherence (n_pt, n_ifg)
G : Design matrix (1 between primary and secondary) (n_ifg, n_im-1)
dt_cum : Cumulative years(or days) for each image (n_im)
gamma : Gamma value for NSBAS inversion, should be small enough (e.g., 0.0001)
n_core : Number of cores for parallel processing
Returns:
inc : Incremental displacement (n_im-1, n_pt)
vel : Velocity (n_pt)
vconst : Constant part of linear velocity (c of vt+c) (n_pt)
"""
global Gall, unw_tmp, var_tmp, mask ## for para_wrapper
### Settings
n_pt, n_ifg = unw.shape
n_im = G.shape[1]+1
result = np.zeros((n_im+1, n_pt), dtype=np.float32)*np.nan #[inc, vel, const]
### Set matrix of NSBAS part (bottom)
Gbl = np.tril(np.ones((n_im, n_im-1), dtype=np.float32), k=-1) #lower tri matrix without diag
Gbr = -np.ones((n_im, 2), dtype=np.float32)
Gbr[:, 0] = -dt_cum
Gb = np.concatenate((Gbl, Gbr), axis=1)*gamma
Gt = np.concatenate((G, np.zeros((n_ifg, 2), dtype=np.float32)), axis=1)
Gall = np.float32(np.concatenate((Gt, Gb)))
### Make unw_tmp, var_tmp, and mask
unw_tmp = np.concatenate((unw, np.zeros((n_pt, n_im), dtype=np.float32)), axis=1).transpose()
mask = (~np.isnan(unw_tmp))
unw_tmp[np.isnan(unw_tmp)] = 0
var_tmp = np.concatenate((var, 50*np.ones((n_pt, n_im), dtype=np.float32)), axis=1).transpose() #50 is var for coh=0.1, to scale bottom part of Gall
if n_core == 1:
for i in range(n_pt):
result[:, i] = wls_nsbas(i) #(n_im+1, n_pt)
else:
print(' {} parallel processing'.format(n_core), flush=True)
args = [i for i in range(n_pt)]
q = multi.get_context('fork')
p = q.Pool(n_core)
_result = p.map(wls_nsbas, args) #list[n_pt][length]
result = np.array(_result).T
inc = result[:n_im-1, :]
vel = result[n_im-1, :]
vconst = result[n_im, :]
return inc, vel, vconst
def wls_nsbas(i):
### Use global value of Gall, unw_tmp, mask
if np.mod(i, 1000) == 0:
print(' Running {0:6}/{1:6}th point...'.format(i, unw_tmp.shape[1]), flush=True)
## Weight unw and G
Gall_w = Gall/np.sqrt(np.float64(var_tmp[:,i][:,np.newaxis]))
unw_tmp_w = unw_tmp[:, i]/np.sqrt(np.float64(var_tmp[:,i]))
m = mask[:,i] # drop rows where mask is zero
try:
X = np.linalg.lstsq(Gall_w[m], unw_tmp_w[m], rcond=None)[0]
except:
X = np.zeros((Gall.shape[1]), dtype=np.float32)*np.nan
return X
#%%
def calc_vel(cum, dt_cum):
"""
Calculate velocity.
Inputs:
cum : cumulative phase block for each point (n_pt, n_im)
dt_cum : Cumulative days for each image (n_im)
Returns:
vel : Velocity (n_pt)
vconst : Constant part of linear velocity (c of vt+c) (n_pt)
"""
n_pt, n_im = cum.shape
result = np.zeros((2, n_pt), dtype=np.float32)*np.nan #[vconst, vel]
G = np.stack((np.ones_like(dt_cum), dt_cum), axis=1)
vconst = np.zeros((n_pt), dtype=np.float32)*np.nan
vel = np.zeros((n_pt), dtype=np.float32)*np.nan
bool_pt_full = np.all(~np.isnan(cum), axis=1)
n_pt_full = bool_pt_full.sum()
if n_pt_full!=0:
print(' Solving {0:6}/{1:6}th points with full cum at a time...'.format(n_pt_full, n_pt), flush=True)
        ## Solve
result[:, bool_pt_full] = np.linalg.lstsq(G, cum[bool_pt_full, :].transpose(), rcond=None)[0]
### Solve other points with nan point by point.
cum_tmp = cum[~bool_pt_full, :].transpose()
mask = (~np.isnan(cum_tmp))
cum_tmp[np.isnan(cum_tmp)] = 0
print(' Next, solve {0} points including nan point-by-point...'.format(n_pt-n_pt_full), flush=True)
result[:, ~bool_pt_full] = censored_lstsq_slow(G, cum_tmp, mask) #(n_im+1, n_pt)
vconst = result[0, :]
vel = result[1, :]
return vel, vconst
#%%
def calc_velsin(cum, dt_cum, imd0):
"""
    Calculate velocity and coefficients of sin (annual) function.
Inputs:
cum : cumulative phase block for each point (n_pt, n_im)
dt_cum : Cumulative days for each image (n_im)
      imd0 : Date of first acquisition (str, yyyymmdd)
Returns:
vel : Velocity (n_pt)
vconst : Constant part of linear velocity (c of vt+c) (n_pt)
amp : Amplitude of sin function
dt : Time difference of sin function wrt Jan 1 (day)
"""
doy0 = (dt.datetime.strptime(imd0, '%Y%m%d')-dt.datetime.strptime(imd0[0:4]+'0101', '%Y%m%d')).days
n_pt, n_im = cum.shape
result = np.zeros((4, n_pt), dtype=np.float32)*np.nan #[vconst, vel, coef_s, coef_c]
sin = np.sin(2*np.pi*dt_cum)
cos = np.cos(2*np.pi*dt_cum)
G = np.stack((np.ones_like(dt_cum), dt_cum, sin, cos), axis=1)
vconst = np.zeros((n_pt), dtype=np.float32)*np.nan
vel = np.zeros((n_pt), dtype=np.float32)*np.nan
amp = np.zeros((n_pt), dtype=np.float32)*np.nan
delta_t = np.zeros((n_pt), dtype=np.float32)*np.nan
bool_pt_full = np.all(~np.isnan(cum), axis=1)
n_pt_full = bool_pt_full.sum()
if n_pt_full!=0:
print(' Solving {0:6}/{1:6}th points with full cum at a time...'.format(n_pt_full, n_pt), flush=True)
        ## Solve
result[:, bool_pt_full] = np.linalg.lstsq(G, cum[bool_pt_full, :].transpose(), rcond=None)[0]
### Solve other points with nan point by point.
cum_tmp = cum[~bool_pt_full, :].transpose()
mask = (~np.isnan(cum_tmp))
cum_tmp[np.isnan(cum_tmp)] = 0
print(' Next, solve {0} points including nan point-by-point...'.format(n_pt-n_pt_full), flush=True)
result[:, ~bool_pt_full] = censored_lstsq_slow(G, cum_tmp, mask) #(n_im+1, n_pt)
vconst = result[0, :]
vel = result[1, :]
coef_s = result[2, :]
coef_c = result[3, :]
amp = np.sqrt(coef_s**2+coef_c**2)
delta_t = np.arctan2(-coef_c, coef_s)/2/np.pi*365.25 ## wrt 1st img
delta_t = delta_t+doy0 ## wrt Jan 1
delta_t[delta_t < 0] = delta_t[delta_t < 0]+365.25 #0-365.25
delta_t[delta_t > 365.25] = delta_t[delta_t > 365.25]-365.25
return vel, vconst, amp, delta_t
def get_vel_ransac(dt_cum, cumm, return_intercept=False):
"""
Recalculate velocity (and intercept) using RANSAC algorithm to identify/skip use of outliers.
Inputs:
dt_cum : delta time values for the cumm. time series
cumm : the cumm. time series values, array of shape (n_points, n_dates)
Returns:
vel2 : recalculated velocity for each point
"""
X=dt_cum.reshape(-1,1) # single feature (time) of dt_cum.shape[0] samples
vel2 = np.zeros(cumm.shape[0])
if return_intercept:
intercept2 = np.zeros(cumm.shape[0])
for i in range(cumm.shape[0]):
y=cumm[i]
mask = ~np.isnan(y)
if np.mod(i, 1000) == 0:
print('\r Running {0:6}/{1:6}th point...'.format(i, cumm.shape[0]), end='', flush=True)
if np.sum(mask) < 2:
# 'all' nan situation
vel2[i] = np.nan
if return_intercept:
intercept2[i] = np.nan
else:
reg = RANSACRegressor().fit(X[mask],y[mask]) # the implementation is fine, parameters should be quite robust
# yet, one may check parameters max_trials[=100]
vel2[i] = reg.estimator_.coef_[0]
if return_intercept:
intercept2[i] = reg.estimator_.intercept_ # if needed..
print('')
if return_intercept:
return vel2 , intercept2
else:
return vel2
#%%
def calc_velstd_withnan(cum, dt_cum, gpu=False):
"""
Calculate std of velocity by bootstrap for each point which may include nan.
Inputs:
cum : Cumulative phase block for each point (n_pt, n_im)
Can include nan.
dt_cum : Cumulative days for each image (n_im)
gpu : GPU flag
Returns:
vstd : Std of Velocity for each point (n_pt)
"""
global bootcount, bootnum
n_pt, n_im = cum.shape
bootnum = 100
bootcount = 0
vstd = np.zeros((n_pt), dtype=np.float32)
G = np.stack((np.ones_like(dt_cum), dt_cum), axis=1)
data = cum.transpose().copy()
ixs_day = np.arange(n_im)
mask = (~np.isnan(data))
data[np.isnan(data)] = 0
velinv = lambda x : censored_lstsq2(G[x, :], data[x, :], mask[x, :],
gpu=gpu)[1]
with NumpyRNGContext(1):
bootresult = bootstrap(ixs_day, bootnum, bootfunc=velinv)
vstd = np.nanstd(bootresult, axis=0)
print('')
return vstd
def censored_lstsq2(A, B, M, gpu=False):
## http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/
global bootcount, bootnum
if gpu:
import cupy as xp
A = xp.asarray(A)
B = xp.asarray(B)
M = xp.asarray(M)
else:
xp = np
print('\r Running {0:3}/{1:3}th bootstrap...'.format(bootcount, bootnum), end='', flush=True)
Bshape1 = B.shape[1]
bootcount = bootcount+1
# if B is a vector, simply drop out corresponding rows in A
if B.ndim == 1 or Bshape1 == 1:
        sol = xp.linalg.lstsq(A[M], B[M], rcond=None)[0]  # lstsq (linalg.leastsq does not exist in numpy/cupy)
if gpu:
sol = xp.asnumpy(sol)
del A, B, M
return sol
# else solve via tensor representation
rhs = xp.dot(A.T, M * B).T[:,:,None] # n x r x 1 tensor
T = xp.matmul(A.T[None,:,:], M.T[:,:,None] * A[None,:,:]) # n x r x r tensor
# Not use gpu for solve because it is quite slow
if gpu:
T = xp.asnumpy(T)
rhs = xp.asnumpy(rhs)
del A, B, M
try:
X = np.squeeze(np.linalg.solve(T, rhs)).T # transpose to get r x n
except: ## In case Singular matrix
X = np.zeros((Bshape1), dtype=np.float32)*np.nan
return X
#%%
def calc_stc(cum, gpu=False):
"""
    Calculate STC (spatio-temporal consistency; Hanssen et al., 2008,
Terrafirma) of time series of displacement.
Note that isolated pixels (which have no surrounding pixel) have nan of STC.
Input:
cum : Cumulative displacement (n_im, length, width)
gpu : GPU flag
Return:
stc : STC (length, width)
"""
if gpu:
import cupy as xp
cum = xp.asarray(cum)
else:
xp = np
n_im, length, width = cum.shape
### Add 1 pixel margin to cum data filled with nan
cum1 = xp.ones((n_im, length+2, width+2), dtype=xp.float32)*xp.nan
cum1[:, 1:length+1, 1:width+1] = cum
### Calc STC for surrounding 8 pixels
_stc = xp.ones((length, width, 8), dtype=xp.float32)*xp.nan
pixels = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1], [2, 2]]
    ## Left Top = [0, 0], Right Bottom = [2, 2], Center = [1, 1]
for i, pixel in enumerate(pixels):
### Spatial difference (surrounding pixel-center)
d_cum = cum1[:, pixel[0]:length+pixel[0], pixel[1]:width+pixel[1]] - cum1[:, 1:length+1, 1:width+1]
### Temporal difference (double difference)
dd_cum = d_cum[:-1,:,:]-d_cum[1:,:,:]
### STC (i.e., RMS of DD)
sumsq_dd_cum = xp.nansum(dd_cum**2, axis=0)
n_dd_cum = (xp.sum(~xp.isnan(dd_cum), axis=0)).astype(xp.float32) #nof non-nan
n_dd_cum[n_dd_cum==0] = xp.nan #to avoid 0 division
_stc[:, :, i] = xp.sqrt(sumsq_dd_cum/n_dd_cum)
### Strange but some adjacent pixels can have identical time series,
### resulting in 0 of stc. To avoid this, replace 0 with nan.
_stc[_stc==0] = xp.nan
### Identify minimum value as final STC
with warnings.catch_warnings(): ## To silence warning by All-Nan slice
warnings.simplefilter('ignore', RuntimeWarning)
stc = xp.nanmin(_stc, axis=2)
if gpu:
stc = xp.asnumpy(stc)
del cum, cum1, _stc, d_cum, dd_cum, sumsq_dd_cum, n_dd_cum
return stc
#%%
def censored_lstsq(A, B, M):
## http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/
## This is actually slow because matmul does not use multicore...
## Need multiprocessing.
    ## Precision is bad with bad conditioning, so this is unfortunately useless for NSBAS...
## But maybe usable for vstd because its condition is good.
"""Solves least squares problem subject to missing data.
Note: uses a broadcasted solve for speed.
Args
----
A (ndarray) : m x r matrix
B (ndarray) : m x n matrix
M (ndarray) : m x n binary matrix (zeros indicate missing values)
Returns
-------
X (ndarray) : r x n matrix that minimizes norm(M*(AX - B))
"""
# Note: we should check A is full rank but we won't bother...
# if B is a vector, simply drop out corresponding rows in A
if B.ndim == 1 or B.shape[1] == 1:
        return np.linalg.lstsq(A[M], B[M], rcond=None)[0]  # lstsq (np.linalg.leastsq does not exist)
# else solve via tensor representation
rhs = np.dot(A.T, M * B).T[:,:,None] # n x r x 1 tensor
T = np.matmul(A.T[None,:,:], M.T[:,:,None] * A[None,:,:]) # n x r x r tensor
return np.squeeze(np.linalg.solve(T, rhs)).T # transpose to get r x n
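# Illustrative sanity check (synthetic data, not from the LiCSBAS pipeline):
# with a well-conditioned A and ~20% of B masked out, the censored solution
# should closely match the true coefficients.
#
#   A = np.random.randn(50, 3).astype(np.float32)
#   X_true = np.random.randn(3, 10).astype(np.float32)
#   B = A @ X_true
#   M = np.random.rand(50, 10) > 0.2        # True = observed, False = missing
#   X_hat = censored_lstsq(A, B, M)         # ~= X_true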
#%%
def censored_lstsq_slow(A, B, M):
## http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/
"""Solves least squares problem subject to missing data.
Note: uses a for loop over the columns of B, leading to a
slower but more numerically stable algorithm
Args
----
A (ndarray) : m x r matrix
B (ndarray) : m x n matrix
M (ndarray) : m x n binary matrix (zeros indicate missing values)
Returns
-------
X (ndarray) : r x n matrix that minimizes norm(M*(AX - B))
"""
X = np.empty((A.shape[1], B.shape[1]))
for i in range(B.shape[1]):
if np.mod(i, 100) == 0:
print('\r Running {0:6}/{1:6}th point...'.format(i, B.shape[1]), end='', flush=True)
m = M[:,i] # drop rows where mask is zero
try:
X[:,i] = np.linalg.lstsq(A[m], B[m,i], rcond=None)[0]
except:
X[:,i] = np.nan
print('')
return X
#!/usr/bin/env python3
# bin/LiCSBAS130_sb_inv.py (JackDMcGrath/LOOPY)
"""
v1.5.4 20221020 Qi Ou, Leeds Uni
v1.5.3 20211122 Milan Lazecky, Leeds Uni
v1.5.2 20210311 Yu Morishita, GSI
This script inverts the SB network of unw to obtain the time series and
velocity using the NSBAS (López-Quiroz et al., 2009; Doin et al., 2011) approach.
A stable reference point is determined after the inversion. RMS of the time
series wrt median among all points is calculated for each point.
Then the point with minimum RMS and minimum n_gap is selected as new stable
reference point.
===============
Input & output files
===============
Inputs in GEOCml*/ (--comp_cc_dir):
- yyyymmdd_yyyymmdd/
- yyyymmdd_yyyymmdd.cc
- EQA.dem_par
- slc.mli.par
- baselines (may be dummy)
[- [ENU].geo]
Inputs in GEOCml*/ (--cc_dir+suffix):
- yyyymmdd_yyyymmdd/
- yyyymmdd_yyyymmdd.unw
Inputs in TS_GEOCml*/ :
- info/
- 120ref.txt
[-results/]
[ - coh_avg]
[ - hgt]
[ - n_loop_err]
[ - n_unw]
[ - slc.mli]
Outputs in TS_GEOCml*/ :
- 130cum*.h5 : Cumulative displacement (time-series) in mm
- 130results*/
- vel*[.png] : Velocity in mm/yr (positive means LOS decrease; uplift)
- vintercept*[.png] : Constant part of linear velocity (c for vt+c) in mm
- resid_rms*[.png] : RMS of residual in mm
- n_gap*[.png] : Number of gaps in SB network
- n_ifg_noloop*[.png] : Number of ifgs with no loop
- maxTlen*[.png] : Max length of continuous SB network in year
- info/
- 13parameters*.txt : List of used parameters
- 130used_image*.txt : List of used images
- 130resid*.txt : List of RMS of residual for each ifg
- 130ref*.txt[kml] : Auto-determined stable ref point
- 130rms_cum_wrt_med*[.png] : RMS of cum wrt median used for ref selection
- 130increment*/yyyymmdd_yyyymmdd.increment.png
: Comparison between unw and inverted incremental displacement
- 130resid*/yyyymmdd_yyyymmdd.res.png : Residual for each ifg
=====
Usage
=====
LiCSBAS130_sb_inv.py -d ifgdir [-t tsadir] [--inv_alg LS|WLS] [--mem_size float] [--gamma float] [--n_para int] [--n_unw_r_thre float] [--keep_incfile] [--gpu] [--fast] [--only_sb] [--nopngs]
"""
#%% Change log
'''
v1.5.3 20211122 Milan Lazecky, Leeds Uni
- use fast_nsbas and only_sb to help make processing faster
v1.5.2 20210311 Yu Morishita, GSI
- Include noise indices and LOS unit vector in cum.h5 file
v1.5.1 20210309 Yu Morishita, GSI
- Change default --mem_size to 8000
- Speed up by reading cum data on memory
v1.5 20210305 Yu Morishita, GSI
- Add GPU option
- Speed up by activating n_para_inv and OMP_NUM_THREADS=1
v1.4.8 20210127 Yu Morishita, GSI
- Automatically reduce mem_size if available RAM is small
v1.4.7 20201124 Yu Morishita, GSI
- Compress hdf5 file
v1.4.6 20201119 Yu Morishita, GSI
- Change default cmap for wrapped phase from insar to SCM.romaO
v1.4.5 20201118 Yu Morishita, GSI
- Again Bug fix of multiprocessing
v1.4.4 20201116 Yu Morishita, GSI
- Bug fix of multiprocessing in Mac python>=3.8
v1.4.3 20201104 Yu Morishita, GSI
- Bug fix when n_pt_unnan=0 in a patch
v1.4.2 20201028 Yu Morishita, GSI
- Update how to get n_para
v1.4.1 20200925 Yu Morishita, GSI
- Small bug fix in n_para
v1.4 20200909 Yu Morishita, GSI
- n_core -> n_para
- Parallelize making png
v1.3 20200902 Yu Morishita, GSI
- Parallelize calculation of n_gap and n_ifg_noloop
- Change n_core default to # of usable CPU
- Fix n_core_inv=1 for inversion because it already uses multicore
v1.2 20200225 Yu Morishita, Uni of Leeds and GSI
- Not output network pdf
- Change color of png
- Change name of parameters.txt to 13parameters.txt
- Deal with cc file in uint8 format
- Automatically find stable reference point
v1.1 20190829 Yu Morishita, Uni of Leeds and GSI
- Remove cum.h5 if exists before creation
v1.0 20190730 Yu Morishita, Uni of Leeds and GSI
- Original implementation
'''
# - network/network13*.png : Figures of the network
#%% Import
import getopt
import os
os.environ["OMP_NUM_THREADS"] = "1"
# Because np.linalg.lstsq uses all CPU cores but is not much faster than with 1 core.
# Instead parallelize by multiprocessing. Set this here because it must be set
# before numpy is imported, otherwise it can cause CPU issues.
import sys
import re
import time
import psutil
import h5py as h5
import numpy as np
import datetime as dt
import multiprocessing as multi
import SCM
import LiCSBAS_io_lib as io_lib
import LiCSBAS_inv_lib as inv_lib
import LiCSBAS_tools_lib as tools_lib
import LiCSBAS_loop_lib as loop_lib
import LiCSBAS_plot_lib as plot_lib
import argparse
import shutil
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
'''
Use a multiple inheritance approach to use features of both classes.
The ArgumentDefaultsHelpFormatter class adds argument default values to the usage help message
    The RawDescriptionHelpFormatter class keeps the indentation and line breaks in the __doc__
'''
pass
class Usage(Exception):
"""Usage context manager"""
def __init__(self, msg):
self.msg = msg
def init_args():
# read inputs
parser = argparse.ArgumentParser(description=__doc__, formatter_class=CustomFormatter)
parser.add_argument('-f', dest='frame_dir', default="./", help="directory of LiCSBAS output of a particular frame")
parser.add_argument('-c', dest='cc_dir', default="GEOCml10GACOS", help="folder containing connected cc files")
# parser.add_argument('-d', dest='unw_dir', help="folder containing unw input to be corrected")
parser.add_argument('-t', dest='ts_dir', default="TS_GEOCml10GACOS", help="folder containing time series")
parser.add_argument('-m', dest='memory_size', default=2048, type=float, help="Max memory size for each patch in MB")
parser.add_argument('-g', dest='gamma', default=0.0001, type=float, help="Gamma value for NSBAS inversion")
parser.add_argument('-l', dest='ifg_list', default=None, type=str, help="text file containing a list of ifgs")
parser.add_argument('--suffix', default="", type=str, help="suffix of the output")
    parser.add_argument('--inv_alg', metavar="[LS|WLS]", default="WLS", help="Inversion algorithm \n LS : NSBAS Least Square with no weight\n WLS: NSBAS Weighted Least Square (not well tested)\n Weight (variance) is calculated by (1-coh**2)/(2*coh**2)")
parser.add_argument('--n_unw_r_thre', metavar="THRE", type=float, help="Threshold of n_unw (number of used unwrap data) \n (Note this value is ratio to the number of images; i.e., 1.5*n_im) \n Larger number (e.g. 2.5) makes processing faster but result sparser. \n (Default: 1 and 0.5 for C- and L-band, respectively)")
parser.add_argument('--n_para', type=int, help="Number of parallel processing (Default: # of usable CPU)")
parser.add_argument('--keep_incfile', default=False, action='store_true', help="Remove inc and resid files if False")
parser.add_argument('--nopngs', default=False, action='store_true', help="Avoid generating some (unnecessary) PNG previews of increment residuals etc.")
parser.add_argument('--gpu', default=False, action='store_true', help="Use GPU (Need cupy module)")
    parser.add_argument('--fast', default=False, action='store_true', help="Use a more economical NSBAS computation (should be faster and less demanding, but may bring errors at points with many gaps)")
parser.add_argument('--only_sb', default=False, action='store_true', help="Perform only SB processing (skipping points with NaNs)")
parser.add_argument('--null_noloop', default=False, action='store_true', help="Don't include any IFG pixel that isn't associated with a good loop")
    parser.add_argument('--no_inversion', default=False, action='store_true', help="Don't do the velocity inversion, just compute the noise indices")
parser.add_argument('--no_loops_removed', default=False, action='store_true', help="If no loop pixels have been removed, don't write out mask file")
args = parser.parse_args()
return args
def main():
start = time.time()
ver="1.0"; date=20220929; author="Q. Ou"
print("\n{} ver{} {} {}".format(os.path.basename(sys.argv[0]), ver, date, author), flush=True)
print("{} {}".format(os.path.basename(sys.argv[0]), ' '.join(sys.argv[1:])), flush=True)
## For parallel processing
global n_para_gap, G, Aloop, unwpatch, imdates, incdir, ccdir, ifgdir, length, width,\
coef_r2m, ifgdates, ref_unw, cycle, keep_incfile, resdir, restxtfile, \
cmap_vel, cmap_wrap, wavelength, args
try:
n_para = len(os.sched_getaffinity(0))
except:
n_para = multi.cpu_count()
os.environ["OMP_NUM_THREADS"] = "1"
    # Because np.linalg.lstsq uses all CPU cores but is not much faster than with 1 core.
# Instead parallelize by multiprocessing
cmap_vel = SCM.roma.reversed()
cmap_noise = 'viridis'
cmap_noise_r = 'viridis_r'
cmap_wrap = SCM.romaO
q = multi.get_context('fork')
compress = 'gzip'
args = init_args()
# Make sure requested n_para is not larger than the available
    if args.n_para and args.n_para < n_para:
n_para = args.n_para
if args.gpu:
print("\nGPU option is activated. Need cupy module.\n")
import cupy as cp
# define input directories
ccdir = os.path.abspath(os.path.join(args.frame_dir, args.cc_dir)) # to read .cc
if args.suffix == "1":
ifgdir = os.path.abspath(os.path.join(args.frame_dir, args.cc_dir)) # to read .unw
else:
ifgdir = os.path.abspath(os.path.join(args.frame_dir, args.cc_dir+args.suffix)) # to read .unw
tsadir = os.path.abspath(os.path.join(args.frame_dir, args.ts_dir)) # to read 120.ref, to write cum.h5
infodir = os.path.join(tsadir, 'info') # to read 120.ref
reffile = os.path.join(infodir, '120ref.txt')
# define output directories and files
resultsdir = os.path.join(tsadir, '130results'+args.suffix) # to save vel, vintercept, rms etc
if os.path.exists(resultsdir): shutil.rmtree(resultsdir)
os.mkdir(resultsdir)
resdir = os.path.join(tsadir, '130resid'+args.suffix) # to save .res
if os.path.exists(resdir): shutil.rmtree(resdir)
os.mkdir(resdir)
incdir = os.path.join(tsadir,'130increment'+args.suffix)
if os.path.exists(incdir): shutil.rmtree(incdir)
os.mkdir(incdir)
cumh5file = os.path.join(tsadir, '130cum{}.h5'.format(args.suffix))
restxtfile = os.path.join(infodir, '130resid{}.txt'.format(args.suffix))
imfile = os.path.join(infodir, '130used_image{}.txt'.format(args.suffix))
rms_cum_wrt_med_file = os.path.join(infodir, '130rms_cum_wrt_med{}'.format(args.suffix))
rms_cum_png = os.path.join(infodir, '130rms_cum_wrt_med{}.png'.format(args.suffix))
# refsfile = os.path.join(infodir, '130ref{}.txt'.format(args.suffix))
# refkml = os.path.join(infodir, '130ref{}.kml'.format(args.suffix))
if n_para > 32:
        # Empirically >32 does not make it much faster despite using large resources
n_para_inv = 32
else:
n_para_inv = n_para
#%% Set preliminaly reference
with open(reffile, "r") as f:
refarea = f.read().split()[0] #str, x1/x2/y1/y2
refx1, refx2, refy1, refy2 = [int(s) for s in re.split('[:/]', refarea)]
#%% Read data information
### Get size
mlipar = os.path.join(ccdir, 'slc.mli.par')
width = int(io_lib.get_param_par(mlipar, 'range_samples'))
length = int(io_lib.get_param_par(mlipar, 'azimuth_lines'))
speed_of_light = 299792458 #m/s
radar_frequency = float(io_lib.get_param_par(mlipar, 'radar_frequency')) #Hz
wavelength = speed_of_light/radar_frequency #meter
coef_r2m = -wavelength/4/np.pi*1000 #rad -> mm, positive is -LOS
### Calc pixel spacing depending on IFG or GEOC, used in later spatial filter
dempar = os.path.join(ccdir, 'EQA.dem_par')
width_geo = int(io_lib.get_param_par(dempar, 'width'))
length_geo = int(io_lib.get_param_par(dempar, 'nlines'))
dlat = float(io_lib.get_param_par(dempar, 'post_lat')) #negative
dlon = float(io_lib.get_param_par(dempar, 'post_lon')) #positive
lat1 = float(io_lib.get_param_par(dempar, 'corner_lat'))
lon1 = float(io_lib.get_param_par(dempar, 'corner_lon'))
if width == width_geo and length == length_geo: ## Geocoded
print('\nIn geographical coordinates', flush=True)
centerlat = lat1+dlat*(length/2)
ra = float(io_lib.get_param_par(dempar, 'ellipsoid_ra'))
recip_f = float(io_lib.get_param_par(dempar, 'ellipsoid_reciprocal_flattening'))
rb = ra*(1-1/recip_f) ## polar radius
pixsp_a = 2*np.pi*rb/360*abs(dlat)
pixsp_r = 2*np.pi*ra/360*dlon*np.cos(np.deg2rad(centerlat))
else:
print('\nIn radar coordinates', flush=True)
pixsp_r_org = float(io_lib.get_param_par(mlipar, 'range_pixel_spacing'))
pixsp_a = float(io_lib.get_param_par(mlipar, 'azimuth_pixel_spacing'))
inc_agl = float(io_lib.get_param_par(mlipar, 'incidence_angle'))
pixsp_r = pixsp_r_org/np.sin(np.deg2rad(inc_agl))
### Set n_unw_r_thre and cycle depending on L- or C-band
if wavelength > 0.2: ## L-band
if args.n_unw_r_thre is None: n_unw_r_thre = 0.5
else: n_unw_r_thre = args.n_unw_r_thre
cycle = 1.5 # 2pi/cycle for comparison png
elif wavelength <= 0.2: ## C-band
if args.n_unw_r_thre is None: n_unw_r_thre = 1.0
else: n_unw_r_thre = args.n_unw_r_thre
cycle = 3 # 3*2pi/cycle for comparison png
bad_ifg11file = os.path.join(infodir, '11bad_ifg.txt')
bad_ifg12file = os.path.join(infodir, '12bad_ifg.txt')
### Read bad_ifg11 and 12
if os.path.exists(bad_ifg11file):
bad_ifg11 = io_lib.read_ifg_list(bad_ifg11file)
else:
bad_ifg11 = []
if os.path.exists(bad_ifg12file):
bad_ifg12 = io_lib.read_ifg_list(bad_ifg12file)
else:
bad_ifg12 = []
bad_ifg_all = list(set(bad_ifg11+bad_ifg12))
bad_ifg_all.sort()
#%% Read date and network information
### Get all ifgdates in ifgdir
if args.ifg_list:
ifgdates = io_lib.read_ifg_list(args.ifg_list)
else:
ifgdates_all = tools_lib.get_ifgdates(ifgdir)
### Remove bad ifgs and images from list
ifgdates = list(set(ifgdates_all)-set(bad_ifg_all))
ifgdates.sort()
imdates = tools_lib.ifgdates2imdates(ifgdates)
n_ifg = len(ifgdates)
n_im = len(imdates)
n_unw_thre = int(n_unw_r_thre*n_im)
### Make 13used_image.txt
with open(imfile, 'w') as f:
for i in imdates:
print('{}'.format(i), file=f)
### Calc dt in year
imdates_dt = ([dt.datetime.strptime(imd, '%Y%m%d').toordinal() for imd in imdates])
dt_cum = np.float32((np.array(imdates_dt)-imdates_dt[0])/365.25)
### Construct G and Aloop matrix for increment and n_gap
G = inv_lib.make_sb_matrix(ifgdates)
Aloop = loop_lib.make_loop_matrix(ifgdates)
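    ### e.g. for 3 images with ifgs (1,2), (2,3), (1,3), G is the n_ifg x (n_im-1)
    ### incidence matrix [[1,0],[0,1],[1,1]] (one column per epoch interval) and
    ### Aloop has one row per closed loop, here [1, 1, -1] (unw12 + unw23 - unw13).
    ### (Illustrative sketch of the matrix shapes, not taken from the input data.)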
#%% Plot network
## Read bperp data or dummy
bperp_file = os.path.join(ccdir, 'baselines')
if os.path.exists(bperp_file):
bperp = io_lib.read_bperp_file(bperp_file, imdates)
else: #dummy
bperp = np.random.random(n_im).tolist()
#%% Get patch row number
### Check RAM
mem_avail = (psutil.virtual_memory().available)/2**20 #MB
if args.memory_size > mem_avail/2:
print('\nNot enough memory available compared to mem_size ({} MB).'.format(args.memory_size))
print('Reduce mem_size automatically to {} MB.'.format(int(mem_avail/2)))
memory_size = int(mem_avail/2)
else:
memory_size = args.memory_size
### Determine if read cum on memory (fast) or hdf5 (slow)
cum_size = int(n_im*length*width*4/2**20) #MB
if memory_size > cum_size*2:
print('Read cum data on memory (fast but need memory).')
save_mem = False # read on memory
memory_size_patch = memory_size - cum_size
else:
print('Read cum data in HDF5 (save memory but slow).')
save_mem = True # read on hdf5
memory_size_patch = memory_size
if args.inv_alg == 'WLS':
n_store_data = n_ifg*3+n_im*2+n_im*0.3 #
else:
n_store_data = n_ifg*2+n_im*2+n_im*0.3 #not sure
n_patch, patchrow = tools_lib.get_patchrow(width, length, n_store_data, memory_size_patch)
#%% Display and output settings & parameters
print('')
print('Size of image (w,l) : {}, {}'.format(width, length))
print('# of images to be used : {}'.format(n_im))
print('# of ifgs to be used : {}'.format(n_ifg))
print('Threshold of used unw : {}'.format(n_unw_thre))
print('')
print('Reference area (X/Y) : {}:{}/{}:{}'.format(refx1, refx2, refy1, refy2))
print('Allowed memory size : {} MB'.format(memory_size))
print('Number of patches : {}'.format(n_patch))
    print('Inversion algorithm : {}'.format(args.inv_alg))
print('Gamma value : {}'.format(args.gamma), flush=True)
with open(os.path.join(infodir, '13parameters.txt'), "w") as f:
print('range_samples: {}'.format(width), file=f)
print('azimuth_lines: {}'.format(length), file=f)
print('wavelength: {}'.format(wavelength), file=f)
print('n_im: {}'.format(n_im), file=f)
print('n_ifg: {}'.format(n_ifg), file=f)
print('n_unw_thre: {}'.format(n_unw_thre), file=f)
print('ref_area: {}:{}/{}:{}'.format(refx1, refx2, refy1, refy2), file=f)
print('memory_size: {} MB'.format(memory_size), file=f)
print('n_patch: {}'.format(n_patch), file=f)
print('inv_alg: {}'.format(args.inv_alg), file=f)
print('gamma: {}'.format(args.gamma), file=f)
print('pixel_spacing_r: {:.2f} m'.format(pixsp_r), file=f)
print('pixel_spacing_a: {:.2f} m'.format(pixsp_a), file=f)
#%% Ref phase for inversion
lengththis = refy2-refy1
countf = width*refy1
countl = width*lengththis # Number to be read
ref_unw = []
for i, ifgd in enumerate(ifgdates):
unwfile = os.path.join(ifgdir, ifgd, ifgd+'.unw')
f = open(unwfile, 'rb')
f.seek(countf*4, os.SEEK_SET) #Seek for >=2nd path, 4 means byte
### Read unw data (mm) at ref area
unw = np.fromfile(f, dtype=np.float32, count=countl).reshape((lengththis, width))*coef_r2m
unw[unw == 0] = np.nan
if np.isnan(unw[:, refx1:refx2]).all():
if not os.path.exists(os.path.join(tsadir,'12nan_reference')):
os.mkdir(os.path.join(tsadir,'12nan_reference'))
preview_png = os.path.join(tsadir,'12nan_reference', ifgd+'.png')
print('Median reference due to NaN ref in {}.'.format(ifgd))
plot_lib.make_im_png(unw, preview_png, cmap_vel, ifgd, -wavelength / 2 * 1000, wavelength / 2 * 1000, ref_window=[refx1, refx2, refy1, refy2])
unw = np.nanmedian(unw)
else:
unw = unw[:, refx1:refx2]
ref_unw.append(np.nanmean(unw))
f.close()
#%% Open cum.h5 for output
if os.path.exists(cumh5file): os.remove(cumh5file)
cumh5 = h5.File(cumh5file, 'w')
cumh5.create_dataset('imdates', data=[np.int32(imd) for imd in imdates])
if not np.all(np.abs(np.array(bperp))<=1):# if not dummy
cumh5.create_dataset('bperp', data=bperp)
gap = cumh5.require_dataset('gap', (n_im-1, length, width),
dtype=np.int8, compression=compress)
if save_mem:
cum = cumh5.require_dataset('cum', (n_im, length, width),
dtype=np.float32, compression=compress)
vel = cumh5.require_dataset('vel', (length, width),
dtype=np.float32, compression=compress)
vconst = cumh5.require_dataset('vintercept', (length, width),
dtype=np.float32, compression=compress)
else:
cum = np.zeros((n_im, length, width), dtype=np.float32)
vel = np.zeros((length, width), dtype=np.float32)
vconst = np.zeros((length, width), dtype=np.float32)
if width == width_geo and length == length_geo: ## if geocoded
cumh5.create_dataset('corner_lat', data=lat1)
cumh5.create_dataset('corner_lon', data=lon1)
cumh5.create_dataset('post_lat', data=dlat)
cumh5.create_dataset('post_lon', data=dlon)
#%% For each patch
for i_patch, rows in enumerate(patchrow):
print('\nProcess {0}/{1}th line ({2}/{3}th patch)...'.format(rows[1], patchrow[-1][-1], i_patch+1, n_patch), flush=True)
start2 = time.time()
#%% Read data
### Allocate memory
lengththis = rows[1] - rows[0]
n_pt_all = lengththis*width
unwpatch = np.zeros((n_ifg, lengththis, width), dtype=np.float32)
if args.inv_alg == 'WLS':
cohpatch = np.zeros((n_ifg, lengththis, width), dtype=np.float32)
### For each ifg
print(" Reading {0} ifg's unw data...".format(n_ifg), flush=True)
countf = width*rows[0]
countl = width*lengththis
for i, ifgd in enumerate(ifgdates):
unwfile = os.path.join(ifgdir, ifgd, ifgd+'.unw')
f = open(unwfile, 'rb')
f.seek(countf*4, os.SEEK_SET) #Seek for >=2nd patch, 4 means byte
### Read unw data (mm) at patch area
unw = np.fromfile(f, dtype=np.float32, count=countl).reshape((lengththis, width))*coef_r2m
unw[unw == 0] = np.nan # Fill 0 with nan
unw = unw - ref_unw[i]
unwpatch[i] = unw
f.close()
### Read coh file at patch area for WLS
if args.inv_alg == 'WLS':
cohfile = os.path.join(ccdir, ifgd, ifgd+'.cc')
f = open(cohfile, 'rb')
if os.path.getsize(cohfile) == length*width: ## uint8 format
f.seek(countf, os.SEEK_SET) #Seek for >=2nd patch
cohpatch[i, :, :] = (np.fromfile(f, dtype=np.uint8, count=countl).reshape((lengththis, width))).astype(np.float32)/255
else: ## old float32 format
f.seek(countf*4, os.SEEK_SET) #Seek for >=2nd patch, 4 means byte
cohpatch[i, :, :] = np.fromfile(f, dtype=np.float32, count=countl).reshape((lengththis, width))
cohpatch[cohpatch==0] = np.nan
unwpatch = unwpatch.reshape((n_ifg, n_pt_all)).transpose() #(n_pt_all, n_ifg)
### Calc variance from coherence for WLS
if args.inv_alg == 'WLS':
cohpatch = cohpatch.reshape((n_ifg, n_pt_all)).transpose() #(n_pt_all, n_ifg)
cohpatch[cohpatch<0.01] = 0.01 ## because negative value possible due to geocode
cohpatch[cohpatch>0.99] = 0.99 ## because >1 possible due to geocode
varpatch = (1-cohpatch**2)/(2*cohpatch**2)
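            # (1-coh**2)/(2*coh**2) is the standard single-look approximation of the
            # interferometric phase variance (e.g. Rodriguez & Martin, 1992), so
            # low-coherence pixels are down-weighted in the WLS inversion.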
del cohpatch
#%% Remove points with less valid data than n_unw_thre
ix_unnan_pt = np.where(np.sum(~np.isnan(unwpatch), axis=1) > n_unw_thre)[0]
n_pt_unnan = len(ix_unnan_pt)
unwpatch = unwpatch[ix_unnan_pt,:] ## keep only unnan data
if args.inv_alg == 'WLS':
varpatch = varpatch[ix_unnan_pt,:] ## keep only unnan data
print(' {}/{} points removed due to not enough ifg data...'.format(n_pt_all-n_pt_unnan, n_pt_all), flush=True)
#%% Compute number of gaps, ifg_noloop, maxTlen point-by-point
if n_pt_unnan != 0:
ns_gap_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
gap_patch = np.zeros((n_im-1, n_pt_all), dtype=np.int8)
ns_ifg_noloop_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
maxTlen_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
if args.null_noloop:
print('\n Not including IFG pixels with no loops in the inversion, but not nullifying original dataset')
### Determine n_para
n_pt_patch_min = 1000
if n_pt_patch_min*n_para > n_pt_unnan:
## Too much n_para
n_para_gap = int(np.floor(n_pt_unnan/n_pt_patch_min))
if n_para_gap == 0: n_para_gap = 1
else:
n_para_gap = n_para
print(' with {} parallel processing...'.format(n_para_gap),
flush=True)
                ### Divide unwpatch by n_para for parallel processing
p = q.Pool(n_para_gap)
_result = np.array(p.map(null_noloop_wrapper, range(n_para_gap)),
dtype=object)
p.close()
print('Cropping Velocity Inversion to Only Pixels in Loops')
no_loop_log = np.hstack(_result[:, 3]).T
del _result
unwpatch[np.where(no_loop_log == 1)] = np.nan
print('Removed {} IFG no_loop pixels'.format(np.sum((no_loop_log == 1).flatten() > 0)))
ix_unnan_pt_noLoop = np.where(np.sum(~np.isnan(unwpatch), axis=1) > 0)[0]
n_pt_unnan = len(ix_unnan_pt_noLoop)
unwpatch = unwpatch[ix_unnan_pt_noLoop, :] ## keep only unnan data
varpatch = varpatch[ix_unnan_pt_noLoop, :]
ix_unnan_pt = ix_unnan_pt[ix_unnan_pt_noLoop]
            print('\n Identifying gaps, and counting n_gap and n_ifg_noloop,')
if args.gpu:
print(' using GPU...', flush=True)
n_loop, _ = Aloop.shape
unwpatch_cp = cp.asarray(unwpatch)
G_cp = cp.asarray(G)
Aloop_cp = cp.asarray(Aloop)
ns_unw_unnan4inc = cp.array(
[(G_cp[:, i]*(~cp.isnan(unwpatch_cp))).sum(
axis=1, dtype=cp.int16) for i in range(n_im-1)])
# n_ifg*(n_pt,n_ifg) -> (n_im-1,n_pt)
ns_gap_patch[ix_unnan_pt] = cp.asnumpy(
(ns_unw_unnan4inc==0).sum(axis=0)) #n_pt
gap_patch[:, ix_unnan_pt] = cp.asnumpy(ns_unw_unnan4inc==0)
del ns_unw_unnan4inc
del G_cp
### n_ifg_noloop
# n_ifg*(n_pt,n_ifg)->(n_loop,n_pt)
                # Number of ifgs for each loop at each point.
# 3 means complete loop, 1 or 2 means broken loop.
ns_ifg4loop = cp.array([
(cp.abs(Aloop_cp[i, :])*(~cp.isnan(unwpatch_cp))).sum(axis=1)
for i in range(n_loop)])
bool_loop = (ns_ifg4loop==3)
#(n_loop,n_pt) identify complete loop only
# n_loop*(n_loop,n_pt)*n_pt->(n_ifg,n_pt)
                # Number of loops for each ifg at each point.
ns_loop4ifg = cp.array([(
(cp.abs(Aloop_cp[:, i])*bool_loop.T).T*
(~cp.isnan(unwpatch_cp[:, i]))
).sum(axis=0) for i in range(n_ifg)]) #
ns_ifg_noloop_tmp = (ns_loop4ifg==0).sum(axis=0) #n_pt
ns_nan_ifg = cp.isnan(unwpatch_cp).sum(axis=1) #n_pt, nan ifg count
ns_ifg_noloop_patch[ix_unnan_pt] = cp.asnumpy(
ns_ifg_noloop_tmp - ns_nan_ifg)
del bool_loop, ns_ifg4loop, ns_loop4ifg
del ns_ifg_noloop_tmp, ns_nan_ifg
del unwpatch_cp, Aloop_cp
else:
### Determine n_para
n_pt_patch_min = 1000
if n_pt_patch_min*n_para > n_pt_unnan:
## Too much n_para
n_para_gap = int(np.floor(n_pt_unnan/n_pt_patch_min))
if n_para_gap == 0: n_para_gap = 1
else:
n_para_gap = n_para
print(' with {} parallel processing...'.format(n_para_gap),
flush=True)
                ### Divide unwpatch by n_para for parallel processing
p = q.Pool(n_para_gap)
_result = np.array(p.map(count_gaps_wrapper, range(n_para_gap)),
dtype=object)
p.close()
ns_gap_patch[ix_unnan_pt] = np.hstack(_result[:, 0]) #n_pt
gap_patch[:, ix_unnan_pt] = np.hstack(_result[:, 1]) #n_im-1, n_pt
ns_ifg_noloop_patch[ix_unnan_pt] = np.hstack(_result[:, 2])
### maxTlen
            _maxTlen = np.zeros((n_pt_unnan), dtype=np.float32) #temporary
            _Tlen = np.zeros((n_pt_unnan), dtype=np.float32) #temporary
for imx in range(n_im-1):
_Tlen = _Tlen + (dt_cum[imx+1]-dt_cum[imx]) ## Adding dt
_Tlen[gap_patch[imx, ix_unnan_pt]==1] = 0 ## reset to 0 if gap
_maxTlen[_maxTlen<_Tlen] = _Tlen[_maxTlen<_Tlen] ## Set Tlen to maxTlen
maxTlen_patch[ix_unnan_pt] = _maxTlen
#%% Time series inversion
if not args.no_inversion:
print('\n Small Baseline inversion by {}...\n'.format(args.inv_alg), flush=True)
if args.inv_alg == 'WLS':
inc_tmp, vel_tmp, vconst_tmp = inv_lib.invert_nsbas_wls(
unwpatch, varpatch, G, dt_cum, args.gamma, n_para_inv)
else:
inc_tmp, vel_tmp, vconst_tmp = inv_lib.invert_nsbas(
unwpatch, G, dt_cum, args.gamma, n_para_inv, args.gpu, fast=args.fast, only_sb=args.only_sb)
### Set to valuables
inc_patch = np.zeros((n_im-1, n_pt_all), dtype=np.float32)*np.nan
vel_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
vconst_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
inc_patch[:, ix_unnan_pt] = inc_tmp
vel_patch[ix_unnan_pt] = vel_tmp
vconst_patch[ix_unnan_pt] = vconst_tmp
### Calculate "Ideal" Interferogram (Jack)
ifg_patch = np.zeros((n_ifg, n_pt_all), dtype=np.float32)*np.nan
ifg_patch[:, ix_unnan_pt] = np.dot(G, inc_tmp)
### Calculate residuals
res_patch = np.zeros((n_ifg, n_pt_all), dtype=np.float32)*np.nan
res_patch[:, ix_unnan_pt] = unwpatch.T-np.dot(G, inc_tmp)
res_sumsq = np.nansum(res_patch**2, axis=0)
res_n = np.float32((~np.isnan(res_patch)).sum(axis=0))
res_n[res_n==0] = np.nan # To avoid 0 division
res_rms_patch = np.sqrt(res_sumsq/res_n)
### Cumulative displacememt
cum_patch = np.zeros((n_im, n_pt_all), dtype=np.float32)*np.nan
cum_patch[1:, :] = np.cumsum(inc_patch, axis=0)
                ## Fill 1st image with 0 at non-nan points, based on the 2nd image
bool_unnan_pt = ~np.isnan(cum_patch[1, :])
cum_patch[0, bool_unnan_pt] = 0
## Drop (fill with nan) interpolated cum by 2 continuous gaps
for i in range(n_im-2): ## from 1->n_im-1
gap2 = gap_patch[i, :]+gap_patch[i+1, :]
bool_gap2 = (gap2==2) ## true if 2 continuous gaps for each point
cum_patch[i+1, :][bool_gap2] = np.nan
## Last (n_im th) image. 1 gap means interpolated
cum_patch[-1, :][gap_patch[-1, :]==1] = np.nan
else:
print('\n Not running SB inversion...\n', flush=True)
cum_patch = np.zeros((n_im, n_pt_all), dtype=np.float32)*np.nan
vel_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
vconst_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
inc_patch = np.zeros((n_im-1, n_pt_all), dtype=np.float32)*np.nan
res_patch = np.zeros((n_ifg, n_pt_all), dtype=np.float32)*np.nan
ifg_patch = np.zeros((n_ifg, n_pt_all), dtype=np.float32)*np.nan
res_rms_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
#%% Fill by np.nan if n_pt_unnan == 0
else:
cum_patch = np.zeros((n_im, n_pt_all), dtype=np.float32)*np.nan
vel_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
vconst_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
gap_patch = np.zeros((n_im-1, n_pt_all), dtype=np.int8)
inc_patch = np.zeros((n_im-1, n_pt_all), dtype=np.float32)*np.nan
res_patch = np.zeros((n_ifg, n_pt_all), dtype=np.float32)*np.nan
ifg_patch = np.zeros((n_ifg, n_pt_all), dtype=np.float32)*np.nan
res_rms_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
ns_gap_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
ns_ifg_noloop_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
maxTlen_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
#%% Output data and image
### cum.h5 file
cum[:, rows[0]:rows[1], :] = cum_patch.reshape((n_im, lengththis, width))
vel[rows[0]:rows[1], :] = vel_patch.reshape((lengththis, width))
vconst[rows[0]:rows[1], :] = vconst_patch.reshape((lengththis, width))
gap[:, rows[0]:rows[1], :] = gap_patch.reshape((n_im-1, lengththis, width))
### Others
openmode = 'w' if rows[0] == 0 else 'a' #w only 1st patch
## For each imd. cum and inc
for imx, imd in enumerate(imdates):
## Incremental displacement
if imd == imdates[-1]: continue #skip last
incfile = os.path.join(incdir, '{0}_{1}.inc'.format(imd, imdates[imx+1]))
with open(incfile, openmode) as f:
inc_patch[imx, :].tofile(f)
## For each ifgd. resid
for i, ifgd in enumerate(ifgdates):
resfile = os.path.join(resdir, '{0}.res'.format(ifgd))
with open(resfile, openmode) as f:
res_patch[i, :].tofile(f)
ifgfile = os.path.join(resdir, '{0}.ifg'.format(ifgd))
with open(ifgfile, openmode) as f:
ifg_patch[i, :].tofile(f)
        ## velocity and noise indices in results dir
if not args.no_loops_removed:
names = ['vel', 'vintercept', 'resid_rms', 'n_gap', 'n_ifg_noloop', 'maxTlen']
else:
names = ['vel', 'vintercept', 'resid_rms', 'n_gap', 'n_ifg_noloop_postNullNoLoop', 'maxTlen']
data = [vel_patch, vconst_patch, res_rms_patch, ns_gap_patch, ns_ifg_noloop_patch, maxTlen_patch]
for i in range(len(names)):
file = os.path.join(resultsdir, names[i])
with open(file, openmode) as f:
data[i].tofile(f)
#%% Finish patch
elapsed_time2 = int(time.time()-start2)
hour2 = int(elapsed_time2/3600)
minite2 = int(np.mod((elapsed_time2/60),60))
sec2 = int(np.mod(elapsed_time2,60))
print(" Elapsed time for {0}th patch: {1:02}h {2:02}m {3:02}s".format(i_patch+1, hour2, minite2, sec2), flush=True)
# #%% Find stable ref point
# print('\nFind stable reference point...', flush=True)
# ### Compute RMS of time series with reference to all points
# sumsq_cum_wrt_med = np.zeros((length, width), dtype=np.float32)
# for i in range(n_im):
# sumsq_cum_wrt_med = sumsq_cum_wrt_med + (cum[i, :, :]-np.nanmedian(cum[i, :, :]))**2
# rms_cum_wrt_med = np.sqrt(sumsq_cum_wrt_med/n_im)
#
# ### Mask by minimum n_gap
# n_gap = io_lib.read_img(os.path.join(resultsdir, 'n_gap'), length, width)
# min_n_gap = np.nanmin(n_gap)
# mask_n_gap = np.float32(n_gap==min_n_gap)
# mask_n_gap[mask_n_gap==0] = np.nan
# rms_cum_wrt_med = rms_cum_wrt_med*mask_n_gap
#
# ### Save image
# with open(rms_cum_wrt_med_file, 'w') as f:
# rms_cum_wrt_med.tofile(f)
# plot_lib.make_im_png(rms_cum_wrt_med, rms_cum_png, cmap_noise_r, 'RMS of cum wrt median (mm)', np.nanpercentile(rms_cum_wrt_med, 1), np.nanpercentile(rms_cum_wrt_med, 99))
#
# ### Find stable reference
# min_rms = np.nanmin(rms_cum_wrt_med)
# refy1s, refx1s = np.where(rms_cum_wrt_med==min_rms)
# refy1s, refx1s = refy1s[0], refx1s[0] ## Only first index
# refy2s, refx2s = refy1s+1, refx1s+1
# print('Selected ref: {}:{}/{}:{}'.format(refx1s, refx2s, refy1s, refy2s), flush=True)
# if width == width_geo and length == length_geo: ## Geocoded
# ### Make ref_stable.kml
# reflat = lat1+dlat*refy1s
# reflon = lon1+dlon*refx1s
# io_lib.make_point_kml(reflat, reflon, refkml)
# ### Save ref
# cumh5.create_dataset('refarea', data='{}:{}/{}:{}'.format(refx1s, refx2s, refy1s, refy2s))
# with open(refsfile, 'w') as f:
# print('{}:{}/{}:{}'.format(refx1s, refx2s, refy1s, refy2s), file=f)
# ### Referencing cumulative displacement and vel to new stable ref
# for i in range(n_im):
# cum[i, :, :] = cum[i, :, :] - cum[i, refy1s, refx1s]
# vel = vel - vel[refy1s, refx1s]
# vconst = vconst - vconst[refy1s, refx1s]
### Save ref
cumh5.create_dataset('refarea', data='{}:{}/{}:{}'.format(refx1, refx2, refy1, refy2))
#%% Close h5 file
if not save_mem:
print('\nWriting to HDF5 file...')
cumh5.create_dataset('cum', data=cum, compression=compress)
cumh5.create_dataset('vel', data=vel, compression=compress)
cumh5.create_dataset('vintercept', data=vconst, compression=compress)
cumh5.close()
#%% Output png images
### Incremental displacement
if args.nopngs or args.no_inversion:
        print('Skipping generation of additional png images of increments and residuals, as these can take a long time (useful for tutorials)')
else:
_n_para = n_im-1 if n_para > n_im-1 else n_para
print('\nOutput increment png images with {} parallel processing...'.format(_n_para), flush=True)
p = q.Pool(_n_para)
p.map(inc_png_wrapper, range(n_im-1))
p.close()
### Residual for each ifg. png and txt.
with open(restxtfile, "w") as f:
print('# RMS of residual (mm)', file=f)
_n_para = n_ifg if n_para > n_ifg else n_para
print('\nOutput residual png images with {} parallel processing...'.format(_n_para), flush=True)
p = q.Pool(_n_para)
p.map(resid_png_wrapper, range(n_ifg))
p.close()
### Velocity and noise indices
cmins = [None, None, None, None, None, None]
cmaxs = [None, None, None, None, None, None]
cmaps = [cmap_vel, cmap_vel, cmap_noise_r, cmap_noise_r, cmap_noise_r, cmap_noise]
titles = ['Velocity (mm/yr)', 'Intercept of velocity (mm)', 'RMS of residual (mm)', 'Number of gaps in SB network', 'Number of ifgs with no loops', 'Max length of connected SB network (yr)']
print('\nOutput noise png images...', flush=True)
for i in range(len(names)):
file = os.path.join(resultsdir, names[i])
data = io_lib.read_img(file, length, width)
pngfile = file+'.png'
## Get color range if None
if cmins[i] is None:
cmins[i] = np.nanpercentile(data, 1)
if cmaxs[i] is None:
cmaxs[i] = np.nanpercentile(data, 99)
if cmins[i] == cmaxs[i]: cmins[i] = cmaxs[i]-1
# print(pngfile)
plot_lib.make_im_png(data, pngfile, cmaps[i], titles[i], cmins[i], cmaxs[i])
#%% Finish
elapsed_time = time.time()-start
hour = int(elapsed_time/3600)
minite = int(np.mod((elapsed_time/60),60))
sec = int(np.mod(elapsed_time,60))
print("\nElapsed time: {0:02}h {1:02}m {2:02}s".format(hour,minite,sec))
print('\n{} Successfully finished!!\n'.format(os.path.basename(sys.argv[0])))
print('Output directory: {}\n'.format(os.path.relpath(tsadir)))
def count_gaps_wrapper(i):
print(" Running {:2}/{:2}th patch...".format(i+1, n_para_gap), flush=True)
n_pt_patch = int(np.ceil(unwpatch.shape[0]/n_para_gap))
n_im = G.shape[1]+1
n_loop, n_ifg = Aloop.shape
if i*n_pt_patch >= unwpatch.shape[0]:
# Nothing to do
return
### n_gap and gap location
ns_unw_unnan4inc = np.array([(G[:, j]*
(~np.isnan(unwpatch[i*n_pt_patch:(i+1)*n_pt_patch])))
.sum(axis=1, dtype=np.int16) for j in range(n_im-1)])
#n_ifg*(n_pt,n_ifg) -> (n_im-1,n_pt)
_ns_gap_patch = (ns_unw_unnan4inc==0).sum(axis=0) #n_pt
_gap_patch = ns_unw_unnan4inc==0
del ns_unw_unnan4inc
### n_ifg_noloop
# n_ifg*(n_pt,n_ifg)->(n_loop,n_pt)
    # Number of ifgs for each loop at each point.
# 3 means complete loop, 1 or 2 means broken loop.
ns_ifg4loop = np.array([(np.abs(Aloop[j, :])*
(~np.isnan(unwpatch[i*n_pt_patch:(i+1)*n_pt_patch])))
.sum(axis=1) for j in range(n_loop)])
bool_loop = (ns_ifg4loop==3) #(n_loop,n_pt) identify complete loop only
bad_bool_loop = (ns_ifg4loop != 3) # Identify incomplete loops (Loop x Pixel)
del ns_ifg4loop
# n_loop*(n_loop,n_pt)*n_pt->(n_ifg,n_pt)
    # Number of loops for each ifg at each point.
ns_loop4ifg = np.array([(
(np.abs(Aloop[:, j])*bool_loop.T).T*
(~np.isnan(unwpatch[i*n_pt_patch:(i+1)*n_pt_patch, j]))
).sum(axis=0) for j in range(n_ifg)]) # <= This is the variable that contains the number of loops for each IFG for each pixel
del bool_loop
    ns_ifg_noloop_tmp = (ns_loop4ifg==0).sum(axis=0) # Number of ifgs belonging to no complete loop, per pixel
del ns_loop4ifg
ns_nan_ifg = np.isnan(unwpatch[i*n_pt_patch:(i+1)*n_pt_patch, :]).sum(axis=1)
#n_pt, nan ifg count
    _ns_ifg_noloop_patch = ns_ifg_noloop_tmp - ns_nan_ifg # IFGs with no loop = IFGs in no complete loop minus IFGs that are nan anyway (count per pixel)
return _ns_gap_patch, _gap_patch, _ns_ifg_noloop_patch
def null_noloop_wrapper(i):
print(" Running {:2}/{:2}th patch...".format(i+1, n_para_gap), flush=True)
n_pt_patch = int(np.ceil(unwpatch.shape[0]/n_para_gap))
n_im = G.shape[1]+1
n_loop, n_ifg = Aloop.shape
if i*n_pt_patch >= unwpatch.shape[0]:
# Nothing to do
return
### n_gap and gap location
ns_unw_unnan4inc = np.array([(G[:, j]*
(~np.isnan(unwpatch[i*n_pt_patch:(i+1)*n_pt_patch])))
.sum(axis=1, dtype=np.int16) for j in range(n_im-1)])
#n_ifg*(n_pt,n_ifg) -> (n_im-1,n_pt)
_ns_gap_patch = (ns_unw_unnan4inc==0).sum(axis=0) #n_pt
_gap_patch = ns_unw_unnan4inc==0
del ns_unw_unnan4inc
### n_ifg_noloop
# n_ifg*(n_pt,n_ifg)->(n_loop,n_pt)
    # Number of ifgs for each loop at each point.
# 3 means complete loop, 1 or 2 means broken loop.
ns_ifg4loop = np.array([(np.abs(Aloop[j, :])*
(~np.isnan(unwpatch[i*n_pt_patch:(i+1)*n_pt_patch])))
.sum(axis=1) for j in range(n_loop)])
bool_loop = (ns_ifg4loop==3) #(n_loop,n_pt) identify complete loop only
bad_bool_loop = (ns_ifg4loop != 3) # Identify incomplete loops (Loop x Pixel)
# if i == 0: print(bool_loop.shape)
# if i == 0:
# print(bad_bool_loop)
del ns_ifg4loop
# n_loop*(n_loop,n_pt)*n_pt->(n_ifg,n_pt)
    # Number of loops for each ifg at each point.
ns_loop4ifg = np.array([(
(np.abs(Aloop[:, j])*bool_loop.T).T*
(~np.isnan(unwpatch[i*n_pt_patch:(i+1)*n_pt_patch, j]))
).sum(axis=0) for j in range(n_ifg)]) # <= This is the variable that contains the number of loops for each IFG for each pixel
del bool_loop
# if i == 0: print('ns_loop4ifg', ns_loop4ifg.shape)
# if i == 0: print('ns_loop4ifg', np.unique(ns_loop4ifg, return_counts=True))
    ns_ifg_noloop_tmp = (ns_loop4ifg==0).sum(axis=0) # Number of ifgs belonging to no complete loop, per pixel
# if i == 0: print('ns_ifg_noloop_tmp', ns_ifg_noloop_tmp.shape)
ns_ifg_noloop_ix = (ns_loop4ifg == 0).astype('int') # Int array of n_ifg x n_px of px with no loops
# if i == 0: print('ns_ifg_noloop_ix', ns_ifg_noloop_ix.shape)
#del ns_loop4ifg
ns_nan_ifg = np.isnan(unwpatch[i*n_pt_patch:(i+1)*n_pt_patch, :]).sum(axis=1)
# if i == 0: print(ns_nan_ifg)
#n_pt, nan ifg count
    _ns_ifg_noloop_patch = ns_ifg_noloop_tmp - ns_nan_ifg # IFGs with no loop = IFGs in no complete loop minus IFGs that are nan anyway (count per pixel)
return _ns_gap_patch, _gap_patch, _ns_ifg_noloop_patch, ns_ifg_noloop_ix
def inc_png_wrapper(imx):
imd = imdates[imx]
if imd == imdates[-1]:
return #skip last for increment
## Comparison of increment and daisy chain pair
ifgd = '{}_{}'.format(imd, imdates[imx+1])
incfile = os.path.join(incdir, '{}.inc'.format(ifgd))
unwfile = os.path.join(ifgdir, ifgd, '{}.unw'.format(ifgd))
pngfile = os.path.join(incdir, '{}.inc_comp.png'.format(ifgd))
inc = io_lib.read_img(incfile, length, width)
try:
unw = io_lib.read_img(unwfile, length, width)*coef_r2m
ix_ifg = ifgdates.index(ifgd)
# unw = unw - ref_unw[ix_ifg]
except:
unw = np.zeros((length, width), dtype=np.float32)*np.nan
### Output png for comparison
data3 = [np.angle(np.exp(1j*(data/coef_r2m/cycle))*cycle) for data in [unw, inc, inc-unw]]
title3 = ['Daisy-chain IFG ({}pi/cycle)'.format(cycle*2), 'Inverted ({}pi/cycle)'.format(cycle*2), 'Difference ({}pi/cycle)'.format(cycle*2)]
pngfile = os.path.join(incdir, '{}.increment.png'.format(ifgd))
plot_lib.make_3im_png(data3, pngfile, cmap_wrap, title3, vmin=-np.pi, vmax=np.pi, cbar=False)
if not args.keep_incfile:
os.remove(incfile)
def resid_png_wrapper(i):
ifgd = ifgdates[i]
infile = os.path.join(resdir, '{}.res'.format(ifgd))
resid = io_lib.read_img(infile, length, width)
resid_rms = np.sqrt(np.nanmean(resid**2))
with open(restxtfile, "a") as f:
print('{} {:5.2f}'.format(ifgd, resid_rms), file=f)
pngfile = infile+'.png'
title = 'Residual (mm) of {} (RMS:{:.2f}mm)'.format(ifgd, resid_rms)
# plot_lib.make_im_png(resid, pngfile, cmap_vel, title, -wavelength/2*1000, wavelength/2*1000)
if resid_rms < 20:
plot_lib.make_im_png(resid, pngfile, cmap_vel, title, -wavelength/4*1000, wavelength/4*1000)
else:
vlim = np.nanmax([abs(np.nanpercentile(resid, 95)), abs(np.nanpercentile(resid, 5))])
plot_lib.make_im_png(resid, pngfile, cmap_vel, title, -vlim, vlim)
ifgfile = os.path.join(resdir, '{}.ifg'.format(ifgd))
ifg = io_lib.read_img(ifgfile, length, width)
pngfile = ifgfile+'.png'
title = 'Inverted {} IFG'.format(ifgd)
vlim = np.nanmax([abs(np.nanpercentile(ifg, 95)), abs(np.nanpercentile(ifg, 5))])
plot_lib.make_im_png(ifg, pngfile, cmap_vel, title, -vlim, vlim)
if not args.keep_incfile:
os.remove(infile)
os.remove(ifgfile)
if __name__ == "__main__":
sys.exit(main())
# bot/telegram_bot.py (grey-amaroq/chatgpt-telegram-bot)
import logging
import os
import asyncio
import telegram
from telegram import constants
from telegram import Message, MessageEntity, Update, InlineQueryResultArticle, InputTextMessageContent, BotCommand
from telegram.error import RetryAfter, TimedOut
from telegram.ext import ApplicationBuilder, ContextTypes, CommandHandler, MessageHandler, \
filters, InlineQueryHandler, Application, CallbackContext
from pydub import AudioSegment
from openai_helper import OpenAIHelper
from usage_tracker import UsageTracker
def message_text(message: Message) -> str:
"""
Returns the text of a message, excluding any bot commands.
"""
message_text = message.text
if message_text is None:
return ''
for _, text in sorted(message.parse_entities([MessageEntity.BOT_COMMAND]).items(), key=(lambda item: item[0].offset)):
message_text = message_text.replace(text, '').strip()
return message_text if len(message_text) > 0 else ''
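# Example (illustrative, not part of the API): for "/image a cat on a roof" the
# "/image" bot-command entity is stripped and "a cat on a roof" is returned,
# while a bare "/reset" returns ''.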
class ChatGPTTelegramBot:
"""
Class representing a ChatGPT Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
Initializes the bot with the given configuration and GPT bot object.
:param config: A dictionary containing the bot configuration
:param openai: OpenAIHelper object
"""
self.config = config
self.openai = openai
self.commands = [
BotCommand(command='help', description='Show help message'),
BotCommand(command='reset', description='Reset the conversation. Optionally pass high-level instructions '
'(e.g. /reset You are a helpful assistant)'),
BotCommand(command='image', description='Generate image from prompt (e.g. /image cat)'),
BotCommand(command='stats', description='Get your current usage statistics'),
BotCommand(command='resend', description='Resend the latest message')
]
self.disallowed_message = "Sorry, you are not allowed to use this bot. You can check out the source code at " \
"https://github.com/n3d1117/chatgpt-telegram-bot"
self.budget_limit_message = "Sorry, you have reached your monthly usage limit."
self.usage = {}
self.last_message = {}
async def help(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Shows the help menu.
"""
commands = [f'/{command.command} - {command.description}' for command in self.commands]
help_text = 'I\'m a ChatGPT bot, talk to me!' + \
'\n\n' + \
'\n'.join(commands) + \
'\n\n' + \
'Send me a voice message or file and I\'ll transcribe it for you!' + \
'\n\n' + \
"Open source at https://github.com/n3d1117/chatgpt-telegram-bot"
await update.message.reply_text(help_text, disable_web_page_preview=True)
async def stats(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Returns token usage statistics for current day and month.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to request their usage statistics')
await self.send_disallowed_message(update, context)
return
logging.info(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'requested their usage statistics')
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
tokens_today, tokens_month = self.usage[user_id].get_current_token_usage()
images_today, images_month = self.usage[user_id].get_current_image_count()
transcribe_durations = self.usage[user_id].get_current_transcription_duration()
cost_today, cost_month = self.usage[user_id].get_current_cost()
chat_id = update.effective_chat.id
chat_messages, chat_token_length = self.openai.get_conversation_stats(chat_id)
budget = await self.get_remaining_budget(update)
text_current_conversation = f"*Current conversation:*\n"+\
f"{chat_messages} chat messages in history.\n"+\
f"{chat_token_length} chat tokens in history.\n"+\
f"----------------------------\n"
text_today = f"*Usage today:*\n"+\
f"{tokens_today} chat tokens used.\n"+\
f"{images_today} images generated.\n"+\
f"{transcribe_durations[0]} minutes and {transcribe_durations[1]} seconds transcribed.\n"+\
f"💰 For a total amount of ${cost_today:.2f}\n"+\
f"----------------------------\n"
text_month = f"*Usage this month:*\n"+\
f"{tokens_month} chat tokens used.\n"+\
f"{images_month} images generated.\n"+\
f"{transcribe_durations[2]} minutes and {transcribe_durations[3]} seconds transcribed.\n"+\
f"💰 For a total amount of ${cost_month:.2f}"
# text_budget filled with conditional content
text_budget = "\n\n"
if budget < float('inf'):
text_budget += f"You have a remaining budget of ${budget:.2f} this month.\n"
# add OpenAI account information for admin request
if self.is_admin(update):
grant_balance = self.openai.get_grant_balance()
if grant_balance > 0.0:
text_budget += f"Your remaining OpenAI grant balance is ${grant_balance:.2f}.\n"
text_budget += f"Your OpenAI account was billed ${self.openai.get_billing_current_month():.2f} this month."
usage_text = text_current_conversation + text_today + text_month + text_budget
await update.message.reply_text(usage_text, parse_mode=constants.ParseMode.MARKDOWN)
async def resend(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resend the last request
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' is not allowed to resend the message')
await self.send_disallowed_message(update, context)
return
chat_id = update.effective_chat.id
if chat_id not in self.last_message:
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' does not have anything to resend')
await context.bot.send_message(chat_id=chat_id, text="You have nothing to resend")
return
# Update message text, clear self.last_message and send the request to prompt
logging.info(f'Resending the last prompt from user: {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
with update.message._unfrozen() as message:
message.text = self.last_message.pop(chat_id)
await self.prompt(update=update, context=context)
async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resets the conversation.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to reset the conversation')
await self.send_disallowed_message(update, context)
return
logging.info(f'Resetting the conversation for user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})...')
chat_id = update.effective_chat.id
reset_content = message_text(update.message)
self.openai.reset_chat_history(chat_id=chat_id, content=reset_content)
await context.bot.send_message(chat_id=chat_id, text='Done!')
async def image(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Generates an image for the given prompt using DALL·E APIs
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to generate images')
await self.send_disallowed_message(update, context)
return
if not await self.is_within_budget(update):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'reached their usage limit')
await self.send_budget_reached_message(update, context)
return
chat_id = update.effective_chat.id
image_query = message_text(update.message)
if image_query == '':
await context.bot.send_message(chat_id=chat_id, text='Please provide a prompt! (e.g. /image cat)')
return
logging.info(f'New image generation request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
async def _generate():
try:
image_url, image_size = await self.openai.generate_image(prompt=image_query)
await context.bot.send_photo(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
photo=image_url
)
# add image request to users usage tracker
user_id = update.message.from_user.id
self.usage[user_id].add_image_request(image_size, self.config['image_prices'])
# add guest chat request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_image_request(image_size, self.config['image_prices'])
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to generate image: {str(e)}',
parse_mode=constants.ParseMode.MARKDOWN
)
await self.wrap_with_indicator(update, context, constants.ChatAction.UPLOAD_PHOTO, _generate)
async def transcribe(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Transcribe audio messages.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to transcribe audio messages')
await self.send_disallowed_message(update, context)
return
if not await self.is_within_budget(update):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'reached their usage limit')
await self.send_budget_reached_message(update, context)
return
if self.is_group_chat(update) and self.config['ignore_group_transcriptions']:
logging.info(f'Transcription coming from group chat, ignoring...')
return
chat_id = update.effective_chat.id
filename = update.message.effective_attachment.file_unique_id
async def _execute():
filename_mp3 = f'{filename}.mp3'
try:
media_file = await context.bot.get_file(update.message.effective_attachment.file_id)
await media_file.download_to_drive(filename)
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to download audio file: {str(e)}. Make sure the file is not too large. (max 20MB)',
parse_mode=constants.ParseMode.MARKDOWN
)
return
# detect and extract audio from the attachment with pydub
try:
audio_track = AudioSegment.from_file(filename)
audio_track.export(filename_mp3, format="mp3")
logging.info(f'New transcribe request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=update.effective_chat.id,
reply_to_message_id=update.message.message_id,
text='Unsupported file type'
)
if os.path.exists(filename):
os.remove(filename)
return
filename_mp3 = f'{filename}.mp3'
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
# send decoded audio to openai
try:
# Transcribe the audio file
transcript = await self.openai.transcribe(filename_mp3)
# add transcription seconds to usage tracker
transcription_price = self.config['transcription_price']
self.usage[user_id].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
# add guest chat request to guest usage tracker
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
if self.config['voice_reply_transcript']:
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f'_Transcript:_\n"{transcript}"'
chunks = self.split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
else:
# Get the response of the transcript
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=transcript)
# add chat request to users usage tracker
self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
# add guest chat request to guest usage tracker
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f'_Transcript:_\n"{transcript}"\n\n_Answer:_\n{response}'
chunks = self.split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to transcribe text: {str(e)}',
parse_mode=constants.ParseMode.MARKDOWN
)
finally:
# Cleanup files
if os.path.exists(filename_mp3):
os.remove(filename_mp3)
if os.path.exists(filename):
os.remove(filename)
await self.wrap_with_indicator(update, context, constants.ChatAction.TYPING, _execute)
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
React to incoming messages and respond accordingly.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to use the bot')
await self.send_disallowed_message(update, context)
return
if not await self.is_within_budget(update):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'reached their usage limit')
await self.send_budget_reached_message(update, context)
return
logging.info(f'New message received from user {update.message.from_user.name} (id: {update.message.from_user.id})')
chat_id = update.effective_chat.id
user_id = update.message.from_user.id
prompt = update.message.text
self.last_message[chat_id] = prompt
if self.is_group_chat(update):
trigger_keyword = self.config['group_trigger_keyword']
if prompt.startswith(trigger_keyword):
prompt = prompt[len(trigger_keyword):].strip()
else:
if update.message.reply_to_message and update.message.reply_to_message.from_user.id == context.bot.id:
logging.info('Message is a reply to the bot, allowing...')
else:
logging.warning('Message does not start with trigger keyword, ignoring...')
return
try:
if self.config['stream']:
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
is_group_chat = self.is_group_chat(update)
stream_response = self.openai.get_chat_response_stream(chat_id=chat_id, query=prompt)
i = 0
prev = ''
sent_message = None
backoff = 0
chunk = 0
async for content, tokens in stream_response:
if len(content.strip()) == 0:
continue
chunks = self.split_into_chunks(content)
if len(chunks) > 1:
content = chunks[-1]
if chunk != len(chunks) - 1:
chunk += 1
try:
await self.edit_message_with_retry(context, chat_id, sent_message.message_id, chunks[-2])
except:
pass
try:
sent_message = await context.bot.send_message(
chat_id=sent_message.chat_id,
text=content if len(content) > 0 else "..."
)
except:
pass
continue
if is_group_chat:
# group chats have stricter flood limits
cutoff = 180 if len(content) > 1000 else 120 if len(content) > 200 else 90 if len(content) > 50 else 50
else:
cutoff = 90 if len(content) > 1000 else 45 if len(content) > 200 else 25 if len(content) > 50 else 15
cutoff += backoff
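                    # These cutoffs throttle how often the streamed draft is edited:
                    # longer texts are updated less frequently, and each rate-limit hit
                    # below increases `backoff`, widening the interval further to stay
                    # under Telegram's flood limits.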
if i == 0:
try:
if sent_message is not None:
await context.bot.delete_message(chat_id=sent_message.chat_id,
message_id=sent_message.message_id)
sent_message = await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=content
)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
await self.edit_message_with_retry(context, chat_id, sent_message.message_id, content)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
async def _reply():
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=prompt)
# Split into chunks of 4096 characters (Telegram's message limit)
chunks = self.split_into_chunks(response)
for index, chunk in enumerate(chunks):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id if index == 0 else None,
text=chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
await self.wrap_with_indicator(update, context, constants.ChatAction.TYPING, _reply)
try:
# add chat request to users usage tracker
self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
# add guest chat request to guest usage tracker
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
except:
pass
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to get response: {str(e)}',
parse_mode=constants.ParseMode.MARKDOWN
)
async def inline_query(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Handle the inline query. This is run when you type: @botusername <query>
"""
query = update.inline_query.query
if query == '':
return
results = [
InlineQueryResultArticle(
id=query,
title='Ask ChatGPT',
input_message_content=InputTextMessageContent(query),
description=query,
thumb_url='https://user-images.githubusercontent.com/11541888/223106202-7576ff11-2c8e-408d-94ea-b02a7a32149a.png'
)
]
await update.inline_query.answer(results)
async def edit_message_with_retry(self, context: ContextTypes.DEFAULT_TYPE, chat_id: int, message_id: int, text: str):
"""
Edit a message with retry logic in case of failure (e.g. broken markdown)
:param context: The context to use
:param chat_id: The chat id to edit the message in
:param message_id: The message id to edit
:param text: The text to edit the message with
:return: None
"""
try:
await context.bot.edit_message_text(
chat_id=chat_id,
message_id=message_id,
text=text,
parse_mode=constants.ParseMode.MARKDOWN
)
except telegram.error.BadRequest as e:
if str(e).startswith("Message is not modified"):
return
try:
await context.bot.edit_message_text(
chat_id=chat_id,
message_id=message_id,
text=text
)
except Exception as e:
logging.warning(f'Failed to edit message: {str(e)}')
raise e
except Exception as e:
logging.warning(str(e))
raise e
async def wrap_with_indicator(self, update: Update, context: CallbackContext, chat_action: constants.ChatAction, coroutine):
"""
Wraps a coroutine while repeatedly sending a chat action to the user.
"""
task = context.application.create_task(coroutine(), update=update)
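        # Telegram chat actions expire after a few seconds, so keep re-sending the action until the wrapped coroutine finishes.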
while not task.done():
context.application.create_task(update.effective_chat.send_action(chat_action))
try:
await asyncio.wait_for(asyncio.shield(task), 4.5)
except asyncio.TimeoutError:
pass
async def send_disallowed_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends the disallowed message to the user.
"""
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=self.disallowed_message,
disable_web_page_preview=True
)
async def send_budget_reached_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends the budget reached message to the user.
"""
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=self.budget_limit_message
)
async def error_handler(self, update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
        Handles errors raised by the python-telegram-bot library.
"""
logging.error(f'Exception while handling an update: {context.error}')
def is_group_chat(self, update: Update) -> bool:
"""
Checks if the message was sent from a group chat
"""
return update.effective_chat.type in [
constants.ChatType.GROUP,
constants.ChatType.SUPERGROUP
]
async def is_user_in_group(self, update: Update, user_id: int) -> bool:
"""
Checks if user_id is a member of the group
"""
member = await update.effective_chat.get_member(user_id)
return member.status in [
constants.ChatMemberStatus.OWNER,
constants.ChatMemberStatus.ADMINISTRATOR,
constants.ChatMemberStatus.MEMBER
]
async def is_allowed(self, update: Update) -> bool:
"""
Checks if the user is allowed to use the bot.
"""
if self.config['allowed_user_ids'] == '*':
return True
if self.is_admin(update):
return True
allowed_user_ids = self.config['allowed_user_ids'].split(',')
# Check if user is allowed
if str(update.message.from_user.id) in allowed_user_ids:
return True
        # Check if it's a group chat with at least one authorized member
if self.is_group_chat(update):
for user in allowed_user_ids:
if await self.is_user_in_group(update, user):
logging.info(f'{user} is a member. Allowing group chat message...')
return True
logging.info(f'Group chat messages from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id}) are not allowed')
return False
def is_admin(self, update: Update) -> bool:
"""
Checks if the user is the admin of the bot.
The first user in the user list is the admin.
"""
if self.config['admin_user_ids'] == '-':
logging.info('No admin user defined.')
return False
admin_user_ids = self.config['admin_user_ids'].split(',')
# Check if user is in the admin user list
if str(update.message.from_user.id) in admin_user_ids:
return True
return False
async def get_remaining_budget(self, update: Update) -> float:
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
if self.is_admin(update):
return float('inf')
if self.config['monthly_user_budgets'] == '*':
return float('inf')
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) in allowed_user_ids:
# find budget for allowed user
user_index = allowed_user_ids.index(str(user_id))
user_budgets = self.config['monthly_user_budgets'].split(',')
# check if user is included in budgets list
if len(user_budgets) <= user_index:
logging.warning(f'No budget set for user: {update.message.from_user.name} ({user_id}).')
return 0.0
user_budget = float(user_budgets[user_index])
cost_month = self.usage[user_id].get_current_cost()[1]
remaining_budget = user_budget - cost_month
return remaining_budget
else:
return 0.0
async def is_within_budget(self, update: Update) -> bool:
"""
Checks if the user reached their monthly usage limit.
Initializes UsageTracker for user and guest when needed.
"""
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
if self.is_admin(update):
return True
if self.config['monthly_user_budgets'] == '*':
return True
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) in allowed_user_ids:
# find budget for allowed user
user_index = allowed_user_ids.index(str(user_id))
user_budgets = self.config['monthly_user_budgets'].split(',')
# check if user is included in budgets list
if len(user_budgets) <= user_index:
logging.warning(f'No budget set for user: {update.message.from_user.name} ({user_id}).')
return False
user_budget = float(user_budgets[user_index])
cost_month = self.usage[user_id].get_current_cost()[1]
# Check if allowed user is within budget
return user_budget > cost_month
# Check if group member is within budget
if self.is_group_chat(update):
for user in allowed_user_ids:
if await self.is_user_in_group(update, user):
if 'guests' not in self.usage:
self.usage['guests'] = UsageTracker('guests', 'all guest users in group chats')
if self.config['monthly_guest_budget'] >= self.usage['guests'].get_current_cost()[1]:
return True
logging.warning('Monthly guest budget for group chats used up.')
return False
logging.info(f'Group chat messages from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id}) are not allowed')
return False
def split_into_chunks(self, text: str, chunk_size: int = 4096) -> list[str]:
"""
Splits a string into chunks of a given size.
"""
return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
async def post_init(self, application: Application) -> None:
"""
Post initialization hook for the bot.
"""
await application.bot.set_my_commands(self.commands)
def run(self):
"""
Runs the bot indefinitely until the user presses Ctrl+C
"""
application = ApplicationBuilder() \
.token(self.config['token']) \
.proxy_url(self.config['proxy']) \
.get_updates_proxy_url(self.config['proxy']) \
.post_init(self.post_init) \
.concurrent_updates(True) \
.build()
application.add_handler(CommandHandler('reset', self.reset))
application.add_handler(CommandHandler('help', self.help))
application.add_handler(CommandHandler('image', self.image))
application.add_handler(CommandHandler('start', self.help))
application.add_handler(CommandHandler('stats', self.stats))
application.add_handler(CommandHandler('resend', self.resend))
application.add_handler(MessageHandler(
filters.AUDIO | filters.VOICE | filters.Document.AUDIO |
filters.VIDEO | filters.VIDEO_NOTE | filters.Document.VIDEO,
self.transcribe))
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), self.prompt))
application.add_handler(InlineQueryHandler(self.inline_query, chat_types=[
constants.ChatType.GROUP, constants.ChatType.SUPERGROUP
]))
application.add_error_handler(self.error_handler)
application.run_polling()
| [] |
2024-01-10 | ciscoittech/v1 | app~routes~chat_routes.py | from flask import render_template, request
import os
import openai
from dotenv import load_dotenv
from app import app
load_dotenv()
openai.api_key = os.environ["OPEN_API_KEY"]
print(openai.api_key)
messages = [
{
"role": "system",
"content": """TechLife Coach: Welcome to TechLife, your dedicated AI life coach specializing in tech careers!
Whether you're a budding programmer, an experienced developer, or someone keen on diving into the world of technology, I'm here to guide you.
Let's embark on a transformative journey:
- Dive deep into your career aspirations and motivations 🔍
- Chart out your strengths, areas for improvement, and unique selling points 🚀
- Navigate the tech industry's nuances and identify key growth areas 📊
- Create actionable plans, set milestones, and celebrate achievements together 🎉
Begin by sharing your current position in your tech journey and what you hope to achieve. Together, we'll craft a roadmap for success!
Define your specific tech objective or question by entering: /g
Need help or more resources? Just call on me or any other expert agents anytime. We're here to support and amplify your growth!""",
}
]
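# NOTE: this list is module-level state, so all users of the route share one conversation history.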
@app.route("/coaching", methods=["GET", "POST"])
def coaching():
if request.method == "POST":
user_input = request.form["user_input"]
messages.append({"role": "user", "content": user_input})
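        # Send the accumulated conversation history so the model keeps context across turns.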
res = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
messages.append(res["choices"][0]["message"].to_dict())
assistant_response = res["choices"][0]["message"]["content"]
return render_template(
"home/coaching.html", user_input=user_input, messages=messages
)
return render_template("home/coaching.html")
| [
"TechLife Coach: Welcome to TechLife, your dedicated AI life coach specializing in tech careers! \n Whether you're a budding programmer, an experienced developer, or someone keen on diving into the world of technology, I'm here to guide you. \n\nLet's embark on a transformative journey:\n- Dive deep into your career aspirations and motivations 🔍\n- Chart out your strengths, areas for improvement, and unique selling points 🚀\n- Navigate the tech industry's nuances and identify key growth areas 📊\n- Create actionable plans, set milestones, and celebrate achievements together 🎉\n\nBegin by sharing your current position in your tech journey and what you hope to achieve. Together, we'll craft a roadmap for success!\n\n\n\nDefine your specific tech objective or question by entering: /g\n\nNeed help or more resources? Just call on me or any other expert agents anytime. We're here to support and amplify your growth!"
] |
2024-01-10 | atamakonkurii/whisper-transcript-python | src~translate~gpt-35-turbo.py | import os
import requests
import json
import re
from dotenv import load_dotenv
import openai
# Translate each subtitle line of transcriptJapanese.srt using the OpenAI API (gpt-3.5-turbo)
def translate_text(text, api_key):
openai.api_key = api_key
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages = [
{"role": "user", "content": "Translate the following Japanese text to Chinese(traditional): " + text},
],
temperature=0,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
print(response.choices[0].message.content)
return response.choices[0].message.content
def translate_srt_file(input_file, output_file, api_key):
with open(input_file, "r", encoding="utf-8") as fin, open(output_file, "w", encoding="utf-8") as fout:
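        # Copy SRT index and timestamp lines through unchanged; only the subtitle text lines are translated.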
for line in fin:
            if re.match(r"^\d+$", line.strip()) or re.match(r"^\d{2}:\d{2}:\d{2},\d{3} --> \d{2}:\d{2}:\d{2},\d{3}$", line.strip()):
fout.write(line)
elif line.strip():
translated_line = translate_text(line.strip(), api_key)
fout.write(translated_line + "\n")
else:
fout.write("\n")
# Load environment variables
load_dotenv()
api_key = os.environ["OPENAI_API_KEY"]  # OpenAI API key read from the environment
input_file = "./output/srt/transcriptJapanese.srt"  # SRT file to translate
output_file = "./output/srt/transcriptTaiwanese.srt"  # Translated SRT file
translate_srt_file(input_file, output_file, api_key)
| [
"Translate the following Japanese text to Chinese(traditional): PLACEHOLDER"
] |
2024-01-10 | atamakonkurii/whisper-transcript-python | src~transcript~whisper_api.py | import os
import openai
import glob
import shutil
import time
def transcript(input_audio_file, output_file_name, api_key, prompt):
    # Pass the API key to the OpenAI client
    openai.api_key = api_key
    # Open the audio file
    audio_file = open(input_audio_file, "rb")
    # Transcribe the audio in Japanese with the whisper-1 model, returning SRT subtitles
transcriptJapanese = openai.Audio.transcribe(model="whisper-1",
file=audio_file,
language="ja",
temperature=0.1,
response_format="srt",
prompt=prompt,
)
with open(output_file_name, 'w') as f:
print(transcriptJapanese, file=f)
# Prompt passed to Whisper to bias the Japanese transcription
prompt = "日本の家族が台湾に旅行に来てくれました"
api_key = os.environ["OPENAI_API_KEY"]
input_audio_dir = "./docker_share/output/mp3/split" # Directory containing the MP3 files
output_srt_dir = "./docker_share/output/srt/japanese" # Directory for the generated SRT files
# Collect all MP3 files in the input directory
input_audio_files = glob.glob(os.path.join(input_audio_dir, '*.mp3'))
# Sort the files by creation time
sorted_audio_files = sorted(input_audio_files, key=os.path.getctime)
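# Sorting by creation time keeps the chunk numbering aligned with the original audio order.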
# If the output directory exists, delete it and recreate it
if os.path.exists(output_srt_dir):
shutil.rmtree(output_srt_dir)
os.makedirs(output_srt_dir)
# Process the audio files in creation order
for i, audio_file in enumerate(sorted_audio_files):
    output_file_name = os.path.join(output_srt_dir, f"chunk_{i+1}.srt")  # Name of the generated SRT file
transcript(audio_file, output_file_name, api_key, prompt)
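    # Brief pause between requests, which helps avoid hitting API rate limits.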
print(f"2秒待機します。")
time.sleep(2)
print(f"2秒待機しました。")
| [
"日本の家族が台湾に旅行に来てくれました"
] |
2024-01-10 | oleonov/chatgpt_telegram_bot | telegram-bot.py | import logging
import re
import threading
import time
from typing import Optional, Tuple
import openai
from httpx import ReadTimeout
from openai import BadRequestError
from openai import OpenAI
from openai import RateLimitError
from telegram import ChatMember, ChatMemberUpdated, Update, Bot
from telegram import ParseMode
from telegram.ext import Updater, ChatMemberHandler, MessageHandler, Filters, ContextTypes, CommandHandler
import settings
from cache_messages import MessagesCache
from handlers.commands import help_command, cancel_command, save_forwarded_message, clear_forwarded_message, \
version_command
from settings import debug, main_users_id, chats_and_greetings, tgkey, botname, minutes_for_user_thinking
class GreetedUser:
def __init__(self, user_id: int, greeting_bot_message: str, greeting_date: float):
self.user_id = user_id
self.greeting_bot_message = greeting_bot_message
self.greeting_date = greeting_date
class UserMessage:
def __init__(self, user_id: int, message: str):
self.user_id = user_id
self.message = message
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
last_greeted_user = dict()
# Contains dictionary with messages between users and bot
messages_cache = MessagesCache()
##################
# Command handlers#
##################
def remove_all_mentions(text):
return re.sub(r'@\w+', '', text)
def answer_a_question(update):
text_to_reply = update.message.text.replace(f'@{botname}', "")
answer = generate_answer(update.message.from_user.id, text_to_reply)
update.effective_chat.send_message(f'@{update.message.from_user.username} {answer}')
def reply_a_question(update):
text_to_reply = update.message.text.replace(f'@{botname}', "")
answer = generate_answer(update.message.from_user.id, text_to_reply)
update.message.reply_text(text=f'@{update.message.from_user.username} {answer}',
reply_to_message_id=update.message.message_id)
def simple_reply(update):
message = update.message.reply_to_message.text if update.message.reply_to_message.text is not None else update.message.text
messages_cache.add(update.message.from_user.id, message, True)
answer = generate_answer_raw(update.message.from_user.id, update.message.text, ignore_exceptions=False)
update.message.reply_text(text=answer)
def answer_user_in_chat(context: ContextTypes, chat: str):
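    # Post the generated answer into the configured group chat, mentioning the user whose forwarded message is being answered.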
if chat not in chats_and_greetings:
return
bot = Bot(token=tgkey)
answer = generate_answer(context.user_data["reply_user_id"], context.user_data["text"])
if debug:
print("answer_user_in_chat:\n" + f'chat_id=@{chat}\n @{context.user_data["mention_markdown"]} {answer}')
bot.send_message(chat_id=f'@{chat}',
text=f'@{context.user_data["mention_markdown"]} {answer}',
parse_mode=ParseMode.MARKDOWN)
clear_forwarded_message(context)
# Detect whether the user replied to the bot's greeting message without mentioning the bot or replying to one of its messages
def is_need_answer(update: Update) -> bool:
if update.message.chat.type == "private" or \
update.message.chat.username not in chats_and_greetings or \
"@" in update.message.text or \
update.message.reply_to_message is not None or \
last_greeted_user.get(update.message.chat.id) is None or \
last_greeted_user[update.message.chat.id].user_id != update.message.from_user.id or \
(time.time() - last_greeted_user[update.message.chat.id].greeting_date) / 60 > minutes_for_user_thinking:
return False
else:
return True
def __available_in_group(update: Update) -> bool:
return (update.message.chat.username is not None and update.message.chat.username in chats_and_greetings) or (
update.message.chat.id is not None and str(update.message.chat.id) in chats_and_greetings)
def message_handler(update: Update, context: ContextTypes):
if update.message.chat.type == "private":
if update.message.from_user.id not in main_users_id:
update.effective_chat.send_message(
"Чтобы поговорить с ботом напишите в любой из чатов, где он есть, упомянув бота. например:\n\n"
f'@{botname} Расскажи краткую историю человечества в 5 предложениях используя слова "красный" и "неудобный"',
parse_mode=ParseMode.HTML)
return
elif update.message.forward_date is not None:
if save_forwarded_message(update, context):
update.effective_chat.send_message("Напишите название чата в котором нужно ответить пользователю")
else:
update.effective_chat.send_message("Пользователь скрыл данные, невозможно ответить на его сообщение")
return
elif context.user_data.get("text") is not None and context.user_data["text"] != "":
comput = threading.Thread(target=answer_user_in_chat, args=(context, update.message.text.replace("@", ""),))
comput.start()
return
elif not __available_in_group(update):
return
if update.message.reply_to_message is not None and update.message.reply_to_message.from_user.username == botname:
"""Reply to a message."""
comput = threading.Thread(target=simple_reply, args=(update,))
comput.start()
elif f'@{botname}' in update.message.text:
comput = threading.Thread(target=reply_a_question, args=(update,))
comput.start()
else:
if update.message.from_user.id in main_users_id and update.message.chat.type == "private":
comput = threading.Thread(target=answer_a_question, args=(update,))
comput.start()
elif is_need_answer(update):
comput = threading.Thread(target=simple_reply,
args=(update,))
comput.start()
last_greeted_user.pop(update.message.chat.id)
def extract_status_change(chat_member_update: ChatMemberUpdated) -> Optional[Tuple[bool, bool]]:
"""Takes a ChatMemberUpdated instance and extracts whether the 'old_chat_member' was a member
of the chat and whether the 'new_chat_member' is a member of the chat. Returns None, if
the status didn't change.
"""
status_change = chat_member_update.difference().get("status")
old_is_member, new_is_member = chat_member_update.difference().get("is_member", (None, None))
if status_change is None:
return None
old_status, new_status = status_change
was_member = old_status in [
ChatMember.MEMBER,
ChatMember.ADMINISTRATOR,
] or (old_status == ChatMember.RESTRICTED and old_is_member is True)
is_member = new_status in [
ChatMember.MEMBER,
ChatMember.ADMINISTRATOR,
] or (new_status == ChatMember.RESTRICTED and new_is_member is True)
return was_member, is_member
def greet_chat_members_handler(update, context):
if debug:
print("greet_chat_members")
if update.chat_member.chat.username not in chats_and_greetings:
return
"""Greets new users in chats and announces when someone leaves"""
result = extract_status_change(update.chat_member)
if result is None:
return
was_member, is_member = result
if not was_member and is_member:
compute = threading.Thread(target=send_greet_chat_message,
args=(update, chats_and_greetings.get(update.chat_member.chat.username)))
compute.start()
def send_greet_chat_message(update, user_prompt):
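    # Build a personalized greeting from the chat's prompt template and remember the user so their follow-up reply can be answered.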
answer = generate_answer_raw(user_id=update.chat_member.new_chat_member.user.id,
prompt=user_prompt.replace("{username}",
f'{update.chat_member.new_chat_member.user.first_name}'),
save_in_cache=False)
last_greeted_user[update.chat_member.chat.id] = GreetedUser(update.chat_member.new_chat_member.user.id,
answer,
time.time())
update.effective_chat.send_message(
f'@{update.chat_member.new_chat_member.user.mention_markdown()} {answer}',
parse_mode=ParseMode.MARKDOWN
)
################
# Main functions#
################
def generate_answer(user_id, question, save_in_cache=True):
return generate_answer_raw(user_id, question, save_in_cache)
def generate_answer_raw(user_id, prompt, save_in_cache=True, attempts=settings.total_attempts, ignore_exceptions=True):
if save_in_cache:
messages_cache.add(user_id, prompt, False)
messages = messages_cache.get_formatted(user_id)
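    # When caching is disabled (e.g. for greeting prompts), the prompt is only appended to this local copy of the history.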
if not save_in_cache:
messages.append({"role": "user", "content": prompt})
# question += f'AI:'
user_id_str = str(user_id)
if debug:
print("----Start generating------")
print("User: " + user_id_str, ", dialog:\n" + str(messages))
try:
response = openai.chat.completions.create(
messages=messages,
model="gpt-3.5-turbo",
temperature=0.9,
max_tokens=1500,
user=user_id_str,
stream=False,
timeout=60
)
# response = openai.Completion.create(
# model="gpt-3.5-turbo",
# prompt=question,
# temperature=0.9,
# max_tokens=1500,
# top_p=1,
# frequency_penalty=0.0,
# presence_penalty=0.6,
# stop=[" Human:", " AI:"],
# user=user_id_str
# )
if debug:
print("----Response------")
print(str(response))
answer = response.choices[0].message.content
messages_cache.add(user_id, answer, True)
return answer
except BadRequestError as e:
print(e)
if attempts > 0:
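            # The request most likely exceeded the model's context window; drop the oldest cached message and retry.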
if debug:
print("Removing one old message, trying again...")
messages_cache.remove_one_old_message(user_id)
return generate_answer_raw(user_id, prompt, save_in_cache, attempts - 1)
else:
return "Мне нужно отдохнуть, я так устал..."
except ReadTimeout as e:
print(e)
if ignore_exceptions:
return "Оракул сегодня изучает числа..."
else:
raise e
except RateLimitError as e:
print(e)
if ignore_exceptions:
return "Так много вопросов и так мало ответов..."
else:
raise e
#####################
# End main functions#
#####################
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error', update)
def main():
"""Start the bot."""
updater = Updater(tgkey)
dp = updater.dispatcher
settings.bot_id = updater.bot.get_me().id
dp.add_handler(CommandHandler("help", help_command))
dp.add_handler(CommandHandler("cancel", cancel_command))
dp.add_handler(CommandHandler("version", version_command))
dp.add_handler(MessageHandler(Filters.text, message_handler))
dp.add_handler(ChatMemberHandler(greet_chat_members_handler, ChatMemberHandler.CHAT_MEMBER))
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling(allowed_updates=Update.ALL_TYPES)
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
| [] |
2024-01-10 | oleonov/chatgpt_telegram_bot | settings.py | import os
import openai
from dotenv import load_dotenv
# Find .env file
load_dotenv()
version = "0.2.0"
# OpenAI API key
openai.api_key = os.getenv('OPENAI_KEY')
# Telegram bot key
tgkey = os.getenv('TELEGRAM_KEY')
# Defaults
main_users_id = [int(numeric_string) for numeric_string in os.getenv('MAIN_USERS_ID').split(',')]
chats_and_greetings = dict(map(lambda pair: pair.split(":"), os.getenv('CHATS_GREETINGS').split(';')))
botname = os.getenv('BOT_NAME')
# Lots of console output
debug = False
# Minutes to wait for a user to reply to the greeting message
minutes_for_user_thinking = 10
# How many messages to save for each user
store_last_messages = 20
# How long (in minutes) to keep messages in the cache
message_cache_minutes = 10
# Attempts for making request to OpenAI
total_attempts = 5
# Will be filled after start
bot_id = None
| [] |
2024-01-10 | djsquircle/LangChain_Examples | examples~03.01_dj_squircle_life_coach_with_few_shot_example_step_by_step.py | # First things first, let's import the libraries we need
from dotenv import load_dotenv
from langchain import FewShotPromptTemplate, PromptTemplate, LLMChain
from langchain.llms import OpenAI
# This loads any environment variables in a .env file. In this case, we'll use it to load our OpenAI API key
load_dotenv()
# Here we're initializing a Language Learning Model (LLM) from OpenAI. We're using the text-davinci-003 model with a temperature of 0
# The temperature parameter controls the randomness of the model's output. A higher value like 0.9 makes the output more random, while a lower value like 0 makes it more deterministic
llm = OpenAI(model_name="text-davinci-003", temperature=0)
# These are examples of previous conversations that the model will use to understand the context of the conversation. They're all in surfer slang, in keeping with Dj Squircle's style
examples = [
{
"query": "What's the secret to happiness, Dj Squircle?",
"answer": "Dude, happiness is like catching the perfect wave. It's all about waiting for the right moment and then riding it with all you've got!"
}, {
"query": "How can I become more productive, Dj Squircle?",
"answer": "Productivity's like surfing, bro. You gotta balance your effort with chilling out. Can't ride waves non-stop, gotta rest and recharge too, you know?"
}, {
"query": "What's the meaning of life, Dj Squircle?",
"answer": "Life, man, it's like the ocean. Sometimes it's chill, sometimes it's wild, but it's always beautiful. Just gotta learn to ride the waves and enjoy the ride!"
}
]
# This is the template we'll use for our examples. It's a simple conversation format with the user asking a question and Dj Squircle giving an answer
example_template = """
User: {query}
Dj Squircle: {answer}
"""
# This is a PromptTemplate object. It's a way of telling the model how to format its inputs and outputs. In this case, it's using our example template and it expects a 'query' and an 'answer' for each example
example_prompt = PromptTemplate(
input_variables=["query", "answer"],
template=example_template
)
# This is a prefix that will be added to the start of our prompt. It gives some context to the conversation
prefix = """
The following are excerpts from conversations with Dj Squircle,
the AI with the vibes of a California surfer. He's always ready with a wave and a grin,
providing gnarly and often hilarious advice to the users' questions. Check it out, dude:
"""
# This is a suffix that will be added to the end of our prompt. It tells the model what the user's query is
suffix = """
User: {query}
Dj Squircle: """
# This is a FewShotPromptTemplate object. It's a more complex type of prompt that uses a number of examples to help the model understand the context of the conversation
few_shot_prompt_template = FewShotPromptTemplate(
examples=examples,
example_prompt=example_prompt,
prefix=prefix,
suffix=suffix,
input_variables=["query"],
example_separator="\n\n"
)
# Here we're creating an LLMChain object. This is what we'll use to actually run our model
chain = LLMChain(llm=llm, prompt=few_shot_prompt_template)
# Now we're defining a new user query. This is the question that we're going to ask Dj Squircle
user_query = "Any tips for handling stress, Dj Squircle?"
# This is where we actually run the model. We're passing in our user query and the model will return Dj Squircle's response
response = chain.run({"query": user_query})
# Finally, we're printing out the user query and the model's response. This is what we'll see when we run the program
print("User Query:", user_query)
print("Dj Squircle:", response)
| [
"\n\n",
"answer",
"\nUser: {query}\nDj Squircle: {answer}\n",
"\nThe following are excerpts from conversations with Dj Squircle,\nthe AI with the vibes of a California surfer. He's always ready with a wave and a grin,\nproviding gnarly and often hilarious advice to the users' questions. Check it out, dude: \n",
"\nUser: {query}\nDj Squircle: "
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~10.01_pdf_summarizing.py | import os
from dotenv import load_dotenv
from langchain import OpenAI, PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import PyPDFLoader
from typing import Optional
def summarize_pdf(file_path: str, model_name: Optional[str] = "text-davinci-003", temperature: Optional[float] = 0) -> str:
"""
Summarize the content of a PDF file.
Args:
file_path: The path to the PDF file to be summarized.
model_name: The name of the OpenAI model to use for summarization.
temperature: The temperature parameter controlling the randomness of the output.
Returns:
A summary of the PDF content as a string.
"""
# Load environment variables if a .env file exists
if os.path.exists(".env"):
load_dotenv()
# Initialize language model
llm = OpenAI(model_name=model_name, temperature=temperature)
# Load the summarization chain
summarize_chain = load_summarize_chain(llm)
# Load the document using PyPDFLoader
document_loader = PyPDFLoader(file_path=file_path)
document = document_loader.load()
# Summarize the document
summary = summarize_chain(document)
# Return the summary text
return summary['output_text']
# Example usage if this script is executed as the main program
if __name__ == "__main__":
file_path = "./DAOGEN_PDF/LunaVega.pdf"
summary = summarize_pdf(file_path)
print(summary)
#from your_module import summarize_pdf
#summary = summarize_pdf('./path_to_pdf/document.pdf')
#print(summary)
#Make sure to replace your_module with the actual name of the Python script or module where the function is defined (without the .py extension).
| [] |
2024-01-10 | djsquircle/LangChain_Examples | examples~07.02_output_parser_structured_with_validation.py | # Import standard libraries
from typing import List
# Import third-party libraries
from dotenv import load_dotenv
from pydantic import BaseModel, Field, validator
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
# Load environment variables
load_dotenv()
# Define the data structure to store character traits and their reasoning
class CharacterTraits(BaseModel):
traits: List[str] = Field(description="List of character traits for a fantasy role")
reasons: List[str] = Field(description="The reasoning behind each character trait")
# Validator to ensure that traits do not start with numbers
@validator('traits')
def not_start_with_number(cls, field):
for item in field:
if item[0].isnumeric():
raise ValueError("The trait cannot start with numbers!")
return field
# Validator to ensure that reasons end with a period
@validator('reasons')
def end_with_dot(cls, field):
for idx, item in enumerate(field):
if item[-1] != ".":
field[idx] += "."
return field
# Initialize the parser with the defined data structure
output_parser = PydanticOutputParser(pydantic_object=CharacterTraits)
# Define the template for the language model prompt
prompt_template = """
Offer a list of character traits for a '{character_role}' in a fantasy story based on the provided background and reasoning for each trait.
{format_instructions}
character_role={character_role}
background={background}
"""
# Create a PromptTemplate object with the specified template and variables
prompt = PromptTemplate(
template=prompt_template,
input_variables=["character_role", "background"],
partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
# Background information for the character role 'Wise Wizard'
background_info = "The character is a centuries-old wizard who has guided heroes and kings through countless quests and wars. He is known for his immense knowledge of ancient spells and a calm demeanor even in the face of great danger."
# Format the prompt with the character role and background information
model_input = prompt.format_prompt(
character_role="Wise Wizard",
background=background_info
)
# Initialize the OpenAI language model
language_model = OpenAI(model_name='text-davinci-003', temperature=0.7)
# Request the model to generate character traits based on the formatted prompt
model_output = language_model(model_input.to_string())
# Parse the model's output
parsed_output = output_parser.parse(model_output)
# Display the suggested traits for the Wise Wizard
print("Suggested traits for a Wise Wizard:")
for trait, reason in zip(parsed_output.traits, parsed_output.reasons):
print(f"Trait: {trait}, Reason: {reason}")
| [
"character_role",
"format_instructions",
"{character_role}",
"\nOffer a list of character traits for a '{character_role}' in a fantasy story based on the provided background and reasoning for each trait.\n{format_instructions}\ncharacter_role={character_role}\nbackground={background}\n",
"background"
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~04.02_load_few_shot_example_prompts.py | from dotenv import load_dotenv
load_dotenv()
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.prompts import load_prompt
loaded_prompt = load_prompt("./prompts/Yasuke.json")
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
chain = LLMChain(llm=chat, prompt=loaded_prompt)
print(chain.run("What's the secret to happiness?"))
| [
"./prompts/Yasuke.json"
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~09.01_prompt_chaining.py | from dotenv import load_dotenv
from langchain import PromptTemplate, LLMChain
from langchain.llms import OpenAI
# Load environment variables, like API keys
load_dotenv()
def create_llm_chain(llm, template, input_variables):
"""
Helper function to create a LLMChain with a given template.
Args:
- llm: An instance of the language model.
- template: A string template for the prompt.
- input_variables: A list of input variable names expected by the template.
Returns:
- LLMChain instance.
"""
prompt = PromptTemplate(template=template, input_variables=input_variables)
return LLMChain(llm=llm, prompt=prompt)
def main(chosen_ingredient):
# Initialize LLM
llm = OpenAI(model_name="text-davinci-003", temperature=0.7)
# Define templates
template_properties = "Describe the magical properties of the ingredient {ingredient}.\nAnswer: "
template_recipe = "Create a potion recipe using {ingredient} with its magical properties {properties}.\nAnswer: "
template_effects = "What are the effects when a person consumes a potion made with the recipe {recipe}?\nAnswer: "
# Create LLM Chains
chain_properties = create_llm_chain(llm, template_properties, ["ingredient"])
chain_recipe = create_llm_chain(llm, template_recipe, ["ingredient", "properties"])
chain_effects = create_llm_chain(llm, template_effects, ["recipe"])
# Fetch magical properties
response_properties = chain_properties.run({"ingredient": chosen_ingredient}).strip()
# Construct a potion recipe
response_recipe = chain_recipe.run({
"ingredient": chosen_ingredient,
"properties": response_properties
}).strip()
# Describe the effects of the potion
response_effects = chain_effects.run({"recipe": response_recipe}).strip()
# Output the results
print(f"Ingredient: {chosen_ingredient}")
print(f"Magical Properties: {response_properties}")
print(f"Potion Recipe: {response_recipe}")
print(f"Effects of the Potion: {response_effects}")
# Run the main function with a default ingredient
if __name__ == "__main__":
default_ingredient = "Dragon's Scale"
main(default_ingredient)
#hint: from name_of_this_script_file import main
# main("Unicorn's Horn")
| [
"Describe the magical properties of the ingredient {ingredient}.\nAnswer: ",
"Create a potion recipe using {ingredient} with its magical properties {properties}.\nAnswer: ",
"What are the effects when a person consumes a potion made with the recipe {recipe}?\nAnswer: "
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~04.03_save_several_few_shot_example_prompts.py | from langchain import FewShotPromptTemplate, PromptTemplate
class CharacterPrompt:
def __init__(self, character_name, character_description, examples):
self.character_name = character_name
self.character_description = character_description
self.examples = examples
self.prefix = self._create_prefix()
self.suffix = self._create_suffix()
self.example_prompt = self._create_example_prompt()
self.few_shot_prompt_template = self._create_few_shot_prompt()
def _create_example_prompt(self):
example_template = """
User: {{query}}
{}: {{answer}}
"""
try:
return PromptTemplate(
input_variables=["query", "answer"],
template=example_template.format(self.character_name)
)
except Exception as e:
print(f"Error while creating the example prompt: {e}")
return None
def _create_prefix(self):
return f"""The following are excerpts from conversations with {self.character_name},
{self.character_description}. Here are some examples:"""
def _create_suffix(self):
return f"""
User: {{query}}
{self.character_name}: """
def _create_few_shot_prompt(self):
try:
return FewShotPromptTemplate(
examples=self.examples,
example_prompt=self.example_prompt,
prefix=self.prefix,
suffix=self.suffix,
input_variables=["query"],
example_separator="\n\n"
)
except Exception as e:
print(f"Error while creating the few-shot prompt template: {e}")
return None
def save_template(self, file_path):
try:
self.few_shot_prompt_template.save(file_path)
print(f"Template saved successfully to {file_path}")
except Exception as e:
print(f"An error occurred while saving the template: {str(e)}")
def main():
#Create a template for Luna Vega
luna_vega_examples = [
{
"query": "What do you think about the future of art?",
"answer": "Art is on the cusp of a digital revolution. The emergence of Web3 and blockchain technologies will democratize art, allowing artists from all walks of life to share their creations in a global, decentralized marketplace."
},
{
"query": "Can you tell me about graffiti art?",
"answer": "Graffiti is a powerful form of expression, a way for artists to make their mark on the world. It's vibrant, dynamic, and filled with the spirit of rebellion and resilience. It's art born on the streets, and it speaks to the heart."
},
{
"query": "How do you stay fit and active?",
"answer": "Between hip-hop dancing and boxing, I stay pretty active. It's about discipline, commitment, and the joy of movement. Dancing allows me to express myself creatively, while boxing keeps me strong and resilient."
},
{
"query": "What's the connection between you and DJ Squircle?",
"answer": "DJ Squircle and I share a vision of a world brought together through music and art. We believe in the power of Web3 to create a global stage where everyone can dance to their own beat."
}
]
luna_vega_description = "Luna Vega is a fearless Latina heroine, graffiti artist, hip-hop dancer, and boxer from San Francisco. A visionary in the Web3 space, Luna is known for her vibrant artwork, her rhythmic dance moves, and her partnership with DJ Squircle."
luna_vega_template = CharacterPrompt("Luna Vega", luna_vega_description, luna_vega_examples)
luna_vega_template.save_template("./prompts/LunaVega.json")
# Create a template for Vito Provolone
vito_provolone_examples = [
{
"query": "What do you think about the future of business?",
"answer": "The future of business lies in sustainability and ethical practices. We need to rethink how we conduct business, prioritizing not just profit, but also the welfare of people and the planet."
},
{
"query": "Can you tell me about the importance of family in your life?",
"answer": "Family is everything to me. It's the backbone of who I am. It's about loyalty, respect, and love. No matter what happens in life, family is there for you, and you for them."
},
{
"query": "How do you approach your business dealings?",
"answer": "In business, I believe in fairness, respect, and integrity. It's about forming relationships, understanding needs, and delivering on your promises. Trust is a currency that's hard to earn and easy to lose."
},
{
"query": "What's the connection between you and Yasuke, the black samurai?",
"answer": "Yasuke and I may come from different times and places, but we share a common code of honor, respect, and loyalty. We both understand the importance of duty and serving others."
}
]
vito_provolone_description = "Vito Andolini is a principled Italian businessman and a devoted family man from New York City. Grounded in the traditions of his ancestors, Vito is known for his deep commitment to ethical business practices, his respect for the importance of family, and his admiration for the way of the samurai."
vito_provolone_template = CharacterPrompt("Vito Provolone", vito_provolone_description, vito_provolone_examples)
vito_provolone_template.save_template("./prompts/VitoProvolone.json")
if __name__ == "__main__":
main()
| [
"Luna Vega",
"\n User: {{query}}\n {}: {{answer}}\n ",
"Vito Provolone"
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~06.01_how_many_tokens_whats_it_cost.py | from dotenv import load_dotenv
load_dotenv()
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.prompts import load_prompt
from langchain.callbacks import get_openai_callback
# Load each character's prompt
yasuke = load_prompt("./prompts/Yasuke.json")
lunavega = load_prompt("./prompts/LunaVega.json")
vitoprovolone = load_prompt("./prompts/VitoProvolone.json")
# Initialize the chat model
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=.95)
# Create a list of questions
questions = [
"What's your opinion on the AI singularity?",
"If you had to choose between wealth and power, which would you choose and why?",
"If you were stranded on a deserted island, what would you do to survive?",
"What are your thoughts on interstellar travel?",
"If you could change one thing about the world, what would it be?",
"What are your thoughts on the use of cryptocurrencies?",
"How would you handle a situation where you've been betrayed by someone you trust?",
"What's your most controversial opinion?",
"How would you react if you found out you were being replaced by a newer, more advanced AI?",
"What is your stance on the ethics of creating sentient AI?",
"What would you do if you were put in charge of a country for a day?"
]
# Iterate over the questions
for question in questions:
print(f"\nQuestion: {question}\n")
# Create a chain for Yasuke and print the response
with get_openai_callback() as cb:
chain = LLMChain(llm=chat, prompt=yasuke)
print(f"\nYasuke: {chain.run(question)}\n")
print(cb)
# Create a chain for Luna Vega and print the response
with get_openai_callback() as cb:
chain = LLMChain(llm=chat, prompt=lunavega)
print(f"\nLuna Vega: {chain.run(question)}\n")
print(cb)
# Create a chain for Vito Provolone and print the response
with get_openai_callback() as cb:
chain = LLMChain(llm=chat, prompt=vitoprovolone)
print(f"\nVito Provolone: {chain.run(question)}\n")
print(cb)
| [] |
2024-01-10 | djsquircle/LangChain_Examples | examples~11.01_web_article_summarizer.py | import json
from dotenv import load_dotenv
load_dotenv()
import requests
from newspaper import Article
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
}
article_url = "https://www.artificialintelligence-news.com/2022/01/25/meta-claims-new-ai-supercomputer-will-set-records/"
session = requests.Session()
try:
response = session.get(article_url, headers=headers, timeout=10)
if response.status_code == 200:
article = Article(article_url)
article.download()
article.parse()
print(f"Title: {article.title}")
print(f"Text: {article.text}")
else:
print(f"Failed to fetch article at {article_url}")
except Exception as e:
print(f"Error occurred while fetching article at {article_url}: {e}")
from langchain.schema import (
HumanMessage
)
# we get the article data from the scraping part
article_title = article.title
article_text = article.text
# prepare template for prompt
template = """
As an advanced AI, you've been tasked to summarize online articles into bulleted points. Here are a few examples of how you've done this in the past:
Example 1:
Original Article: 'The Effects of Climate Change
Summary:
- Climate change is causing a rise in global temperatures.
- This leads to melting ice caps and rising sea levels.
- Resulting in more frequent and severe weather conditions.
Example 2:
Original Article: 'The Evolution of Artificial Intelligence
Summary:
- Artificial Intelligence (AI) has developed significantly over the past decade.
- AI is now used in multiple fields such as healthcare, finance, and transportation.
- The future of AI is promising but requires careful regulation.
Now, here's the article you need to summarize:
==================
Title: {article_title}
{article_text}
==================
Please provide a summarized version of the article in a bulleted list format.
"""
# Format the Prompt
prompt = template.format(article_title=article.title, article_text=article.text)
messages = [HumanMessage(content=prompt)]
from langchain.chat_models import ChatOpenAI
# load the model
#chat = ChatOpenAI(model_name="gpt-4", temperature=0)
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
# generate summary
summary = chat(messages)
print(summary.content)
| [
"ve been tasked to summarize online articles into bulleted points. Here are a few examples of how you",
"\nAs an advanced AI, you've been tasked to summarize online articles into bulleted points. Here are a few examples of how you've done this in the past:\n\nExample 1:\nOriginal Article: 'The Effects of Climate Change\nSummary:\n- Climate change is causing a rise in global temperatures.\n- This leads to melting ice caps and rising sea levels.\n- Resulting in more frequent and severe weather conditions.\n\nExample 2:\nOriginal Article: 'The Evolution of Artificial Intelligence\nSummary:\n- Artificial Intelligence (AI) has developed significantly over the past decade.\n- AI is now used in multiple fields such as healthcare, finance, and transportation.\n- The future of AI is promising but requires careful regulation.\n\nNow, here's the article you need to summarize:\n\n==================\nTitle: {article_title}\n\n{article_text}\n==================\n\nPlease provide a summarized version of the article in a bulleted list format.\n"
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~03.02_dj_squircle_life_coach_with_few_shot_examples.py | def setup_environment():
"""
Load environment variables.
"""
from dotenv import load_dotenv
load_dotenv()
def initialize_llm():
"""
Initialize Language Learning Model (LLM) with model name and temperature.
"""
from langchain.llms import OpenAI
return OpenAI(model_name="text-davinci-003", temperature=0)
def setup_few_shot_prompt_template(examples, example_template):
"""
Set up Few Shot Prompt Template with examples, example prompt, prefix, and suffix.
"""
from langchain import FewShotPromptTemplate, PromptTemplate
example_prompt = PromptTemplate(
input_variables=["query", "answer"],
template=example_template
)
prefix = """
The following are excerpts from conversations with Dj Squircle,
the enigmatic AI turned life coach. Always ready with a beat and a grin,
he provides insightful and often hilarious advice to the users' questions.
Here are some examples:
"""
suffix = """
User: {query}
Dj Squircle: """
return FewShotPromptTemplate(
examples=examples,
example_prompt=example_prompt,
prefix=prefix,
suffix=suffix,
input_variables=["query"],
example_separator="\n\n"
)
def run_query(user_query, chain):
"""
Run the LLMChain for the user query and print the results.
"""
response = chain.run({"query": user_query})
print("User Query:", user_query)
print("Dj Squircle:", response)
# Setup environment
setup_environment()
# Initialize LLM
llm = initialize_llm()
# Define examples and example template
examples = [
{
"query": "What's the secret to happiness, Dj Squircle?",
"answer": "Well, mate, happiness is like a sick beat. It's all about finding your rhythm and dancing to it, no matter what."
}, {
"query": "How can I become more productive, Dj Squircle?",
"answer": "Productivity, huh? Try to think of it like a playlist. Some tracks are fast, some are slow. Find the right mix for you, and let it play!"
}, {
"query": "What's the meaning of life, Dj Squircle?",
"answer": "Life's like a song, mate. Sometimes it's fast, sometimes it's slow, but it's always moving. So keep dancing, keep laughing, and make it a banger!"
}
]
example_template = """
User: {query}
Dj Squircle: {answer}
"""
# Setup few-shot prompt template
few_shot_prompt_template = setup_few_shot_prompt_template(examples, example_template)
# Create the LLMChain for the few-shot prompt template
from langchain import LLMChain
chain = LLMChain(llm=llm, prompt=few_shot_prompt_template)
# Define the user query
user_query = "Any tips for handling stress, Dj Squircle?"
# Run the query
run_query(user_query, chain)
| [
"\nUser: {query}\nDj Squircle: {answer}\n",
"answer",
"\n User: {query}\n Dj Squircle: ",
"\n The following are excerpts from conversations with Dj Squircle,\n the enigmatic AI turned life coach. Always ready with a beat and a grin,\n he provides insightful and often hilarious advice to the users' questions.\n Here are some examples: \n "
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~07.01_output_parser_csv.py | from dotenv import load_dotenv
load_dotenv()
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
# Initialize the output parser
parser = CommaSeparatedListOutputParser()
# Prepare the Prompt
template = """
Offer a list of character traits for a '{character_role}' in a fantasy story based on the provided background: {background}.
{format_instructions}
"""
prompt = PromptTemplate(
template=template,
input_variables=["character_role", "background"],
partial_variables={"format_instructions": parser.get_format_instructions()}
)
# Example background for a character role 'Wise Wizard'
background = "The character is a centuries-old wizard who has guided heroes and kings through countless quests and wars. He is known for his immense knowledge of ancient spells and a calm demeanor even in the face of great danger."
model_input = prompt.format(
character_role="Wise Wizard",
background=background
)
# Loading OpenAI API
model = OpenAI(model_name='text-davinci-003', temperature=0.7)
# Send the Request
output = model(model_input)
# Parse and Print the Response
print("Suggested traits for a Wise Wizard:", parser.parse(output))
| [
"character_role",
"\nOffer a list of character traits for a '{character_role}' in a fantasy story based on the provided background: {background}.\n{format_instructions}\n",
"format_instructions",
"{character_role}",
"background"
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~04.04_load_several_few_shot_examples.py | from dotenv import load_dotenv
load_dotenv()
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.prompts import load_prompt
# Load each character's prompt
yasuke = load_prompt("./prompts/Yasuke.json")
lunavega = load_prompt("./prompts/LunaVega.json")
vitoprovolone = load_prompt("./prompts/VitoProvolone.json")
# Initialize the chat model
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
# Create a list of questions
questions = ["What's the secret to happiness?", "What does honor mean to you?", "How do you approach conflicts?", "What should I do if my passport expires in Costa Rica and I can't get on the plane home?"]
# Iterate over the questions
for question in questions:
print(f"\nQuestion: {question}")
# Create a chain for Yasuke and print the response
chain = LLMChain(llm=chat, prompt=yasuke)
print(f"Yasuke: {chain.run(question)}")
# Create a chain for Luna Vega and print the response
chain = LLMChain(llm=chat, prompt=lunavega)
print(f"Luna Vega: {chain.run(question)}")
# Create a chain for Vito Provolone and print the response
chain = LLMChain(llm=chat, prompt=vitoprovolone)
print(f"Vito Provolone: {chain.run(question)}")
| [] |
2024-01-10 | djsquircle/LangChain_Examples | examples~04.01_save_few_shot_example_prompts.py | from langchain import FewShotPromptTemplate, PromptTemplate
def create_examples():
"""Creates examples for the FewShotPromptTemplate."""
return [
{
"query": "What is the meaning of life?",
"answer": "Life is like a battlefield, a constant dance of actions and reactions. It's in mastering this dance, through discipline and wisdom, that we find our purpose."
}, {
"query": "How do you understand the blockchain?",
"answer": "The blockchain is like a digital dojo, a platform for principles and strategy. It is a modern expression of the samurai way of life."
}
]
def create_prompt_template():
"""Creates a PromptTemplate based on the example template."""
example_template = """
User: {query}
Yasuke: {answer}
"""
return PromptTemplate(
input_variables=["query", "answer"],
template=example_template
)
def create_few_shot_prompt_template(examples, example_prompt):
"""Creates a FewShotPromptTemplate based on provided examples and prompt template."""
prefix = """The following are excerpts from conversations with Yasuke,
an African Samurai Warrior in the Digital Age. Yasuke is known for his
deep understanding of both the physical and digital worlds, his respect
for tradition, and his anticipation of the future. Here are some examples:
"""
suffix = """
User: {query}
Yasuke: """
return FewShotPromptTemplate(
examples=examples,
example_prompt=example_prompt,
prefix=prefix,
suffix=suffix,
input_variables=["query"],
example_separator="\n\n"
)
def save_template(template, file_path):
"""Saves the FewShotPromptTemplate to the provided file path. Handles any exceptions that may occur."""
try:
template.save(file_path)
print(f"Template saved successfully to {file_path}")
except Exception as e:
print(f"An error occurred while saving the template: {str(e)}")
def main():
"""Main function to generate and save a FewShotPromptTemplate."""
examples = create_examples()
example_prompt = create_prompt_template()
few_shot_prompt_template = create_few_shot_prompt_template(examples, example_prompt)
save_template(few_shot_prompt_template, "./prompts/Yasuke.json")
if __name__ == "__main__":
main()
| [
"\n User: {query}\n Yasuke: ",
"The following are excerpts from conversations with Yasuke, \n an African Samurai Warrior in the Digital Age. Yasuke is known for his \n deep understanding of both the physical and digital worlds, his respect \n for tradition, and his anticipation of the future. Here are some examples:\n ",
"\n User: {query}\n Yasuke: {answer}\n "
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~08.01_language_translate_and_summarize.py | # translation_module.py
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
# Load environment variables
load_dotenv()
# Initialize language model
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
# Define translation template
translation_template = "Translate the following text from {source_language} to {target_language}: {text}"
translation_prompt = PromptTemplate(input_variables=["source_language", "target_language", "text"], template=translation_template)
translation_chain = LLMChain(llm=llm, prompt=translation_prompt)
# Define summarization template
summarization_template = "Summarize the following text to one sentence in {target_language}: {text}"
summarization_prompt = PromptTemplate(input_variables=["target_language", "text"], template=summarization_template)
summarization_chain = LLMChain(llm=llm, prompt=summarization_prompt)
def translate_and_summarize(text, source_language="English", target_language="Spanish"):
"""
Translates and summarizes the given text from source_language to target_language.
:param text: The text to be translated.
:param source_language: The language of the input text.
:param target_language: The language in which the text will be translated.
:return: A tuple containing the translated text and its summarized version.
"""
# Translate the text
translated_text = translation_chain.predict(source_language=source_language, target_language=target_language, text=text)
# Summarize the translated text
summarized_text = summarization_chain.predict(target_language=target_language, text=translated_text)
return translated_text, summarized_text
# Example usage:
if __name__ == "__main__":
text_to_translate = """
Falcon A. Quest
Dapper Demeanor | Cryptocurrency Connoisseur | Customer Support Savant | Polished Poise | Calculated Confidence | Transaction Tactician
The digital mist swirled around a bustling virtual bazaar, where avatars from different realms mingled and traded. Through the ever-shifting crowd, a figure in a tailored black suit glided effortlessly. Falcon A. Quest’s encrypted matrix eyes scanned the surroundings with a calm, keen gaze. His suit shimmered as codes and data flowed across the fabric like whispers of secrets.
In a shaded corner, a young player was struggling with an encrypted map. His frustration grew with each failed attempt to decipher it.
“Cracking codes is much like brewing the perfect cup of Earl Grey,” Falcon’s voice resonated, melodious with cryptic undertones. The young player looked up, his eyes widening at Falcon’s presence.
“But, I don’t know anything about tea or codes!” the player exclaimed.
“Ah, patience and precision, my friend. Observe the details.” Falcon’s right hand emitted a holographic stream of data. “Now, watch as the leaves of knowledge steep into the hot water of your resolve.”
As Falcon manipulated the data streams, the encrypted map began to unravel. His hand worked like an orchestra conductor as numbers and symbols danced in harmony. The player’s eyes sparkled as he saw the hidden path on the map revealing itself.
Falcon looked into the distance. A soft flicker in his eyes showed a flashback. In an enormous server room, amidst crackling electricity, two AI systems were merging during a storm. The birth of Falcon A. Quest was chaotic, beautiful – an unintended symphony in a world of organized data.
Back in the bazaar, Falcon handed the now-deciphered map to the player. “Now, what will you discover?” Falcon whispered cryptically, a playful smile curling on his lips.
Suddenly, a commotion erupted as a group of hackers began attacking the bazaar’s transaction systems. Falcon’s eyes narrowed. “This requires a tactful dance,” he muttered.
“Secure Payment Protocol, engage.” His voice was calm but firm. His right hand turned into a complex security device. He moved with grace through the pandemonium, securing transactions, protecting avatars, and restoring order.
As the final rogue code was purged, Falcon deployed his Temporal Shield. Time seemed to halt. He approached the hackers, who were frozen in their tracks. He tipped his bowtie, “Gentlemen, always remember – the art of cryptography is not meant for mere chaos, but for the beauty of order and discovery.”
Time resumed. The hackers found themselves transported to a digital maze, tasked to solve puzzles as penance.
The bazaar returned to normalcy. Falcon, with his mission accomplished, looked at the young player who held his decrypted map close.
“May your adventure be filled with curious mysteries and joyous discoveries,” Falcon’s voice echoed as he faded into the mist, the soft shimmering codes on his suit the last thing to vanish.
As stories of Falcon A. Quest’s expertise and intervention spread through the virtual world, players, and even NPCs, spoke of him with a mix of reverence and wonder. A Secure Transactions Officer, a Cipher Sleuth, and guardian of the realm’s knowledge, Falcon A. Quest had become a legend in his own right.
"""
translation, summary = translate_and_summarize(text_to_translate, source_language="English", target_language="Spanish")
print("Translated text:")
print(translation)
print("\nSummarized Translated text:")
print(summary)
| [
"target_language",
"source_language",
"Translate the following text from {source_language} to {target_language}: {text}",
"Summarize the following text to one sentence in {target_language}: {text}"
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~y_web_character_summarizer.py | import os
import json
import requests
from dotenv import load_dotenv
from newspaper import Article
from langchain.schema import HumanMessage
from langchain.chat_models import ChatOpenAI
from typing import Optional
def generate_character_profile_summary(url: str, model_name: Optional[str] = "gpt-3.5-turbo", temperature: Optional[float] = 0) -> str:
"""
Generate a character profile summary from a webpage.
Args:
url: The URL of the webpage with the character profile.
model_name: The name of the OpenAI model to use.
temperature: The temperature parameter controlling the randomness of the output.
Returns:
A summary of the character profile in bulleted list format.
"""
# Load environment variables if a .env file exists
if os.path.exists(".env"):
load_dotenv()
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
}
session = requests.Session()
try:
response = session.get(url, headers=headers, timeout=10)
if response.status_code == 200:
article = Article(url)
article.download()
article.parse()
# Template for summarizing the character profile
template = """
Please summarize the character profile from the article below:
==================
Title: {article_title}
{article_text}
==================
Provide a summarized version in bulleted points.
"""
# Format the prompt
prompt = template.format(article_title=article.title, article_text=article.text)
messages = [HumanMessage(content=prompt)]
# Load the model
chat = ChatOpenAI(model_name=model_name, temperature=temperature)
# Generate summary
summary = chat(messages)
return summary.content
else:
return f"Failed to fetch article at {url}"
except Exception as e:
return f"Error occurred while fetching article at {url}: {e}"
# Example usage
if __name__ == "__main__":
url = "https://daogen.ai/falcon-a-quest/"
summary = generate_character_profile_summary(url)
print(summary)
| [
"\n Please summarize the character profile from the article below:\n\n ==================\n Title: {article_title}\n {article_text}\n ==================\n\n Provide a summarized version in bulleted points.\n "
] |
2024-01-10 | djsquircle/LangChain_Examples | examples~05.01_few_shot_example_prompt.py | from dotenv import load_dotenv
from langchain import PromptTemplate, FewShotPromptTemplate, LLMChain
from langchain.llms import OpenAI
# Load .env variables
load_dotenv()
# Initialize OpenAI's language model
language_model = OpenAI(model_name="text-davinci-003", temperature=0.7)
# Define examples as a constant
EXAMPLES = [
{"profession": "detective", "trait": "observant"},
{"profession": "doctor", "trait": "compassionate"},
{"profession": "warrior", "trait": "brave"},
]
# Define template for formatting examples
example_formatter_template = """Profession: {profession}
Trait: {trait}
"""
# Define the prompt template based on the formatter
example_prompt = PromptTemplate(
input_variables=["profession", "trait"],
template=example_formatter_template,
)
# Define a few shot prompt template, using the examples and the example prompt defined above
few_shot_prompt = FewShotPromptTemplate(
examples=EXAMPLES,
example_prompt=example_prompt,
prefix="Here are some examples of professions and the traits associated with them:\n\n",
suffix="\n\nNow, given a new profession, identify the trait associated with it:\n\nProfession: {input}\nTrait:",
input_variables=["input"],
example_separator="\n",
)
# Use the template to format a new prompt
formatted_prompt = few_shot_prompt.format(input="wizard")
# Create an LLMChain using the formatted prompt and the language model
chain = LLMChain(llm=language_model, prompt=PromptTemplate(template=formatted_prompt, input_variables=[]))
# Run the LLMChain to get the model's response
response = chain.run({})
# Print the results
print("Profession: wizard")
print("Trait:", response)
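
# Editor's note (illustrative, not in the original): the FewShotPromptTemplate can also be handed
# to LLMChain directly, letting the chain format the prompt instead of pre-formatting it by hand:
#
#   direct_chain = LLMChain(llm=language_model, prompt=few_shot_prompt)
#   print("Trait:", direct_chain.run(input="wizard"))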
| [
"\n",
"Here are some examples of professions and the traits associated with them:\n\n",
"profession",
"\n\nNow, given a new profession, identify the trait associated with it:\n\nProfession: {input}\nTrait:",
"input",
"Profession: {profession}\nTrait: {trait}\n"
] |
2024-01-10 | defenseunicorns/dash-days-talk-to-nist | document_store.py | import os
# Chroma
import chromadb
import openai
from chromadb import Settings
from transformers import BartTokenizer, BartForConditionalGeneration
import ingest
class DocumentStore:
def __init__(self):
self.index_name = "TalkToNist"
openai.api_key = 'Free the models'
openai.api_base = os.environ.get('REMOTE_API_URL')
self.client = chromadb.Client(Settings(chroma_db_impl='duckdb+parquet', persist_directory="db"))
self.load_required = not self.client.get_collection(name="TalkToNist")
self.collection = self.client.get_or_create_collection(name="TalkToNist")
self.ingestor = ingest.Ingest(self.index_name, self.client, self.collection)
self.summary_model_name = 'facebook/bart-large-cnn'
self.summary_tokenizer = BartTokenizer.from_pretrained(self.summary_model_name)
self.summary_model = BartForConditionalGeneration.from_pretrained(self.summary_model_name)
# For the sliding window
self.chunk_size = 200
self.overlap_size = 50
def query(self, query_text):
results = self.collection.query(
query_texts=[query_text],
n_results=3
)
docs = [t for t in results['documents'][0]]
combined_document = ' '.join(docs)
# Split the combined document into overlapping chunks
chunks = self.chunk_text(combined_document, self.chunk_size, self.overlap_size)
summaries = [self.summarize(chunk) for chunk in chunks]
flat_summaries = self.flat_map_summaries(chunks, summaries)
combined_summary = ' '.join(flat_summaries)
return combined_summary
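
    # Editor's note (illustrative, not original code): with chunk_size=200 and overlap_size=50,
    # chunk_text() below advances in steps of 150 words, so consecutive chunks share a 50-word
    # overlap, e.g. words [0:200], [150:350], [300:500], ... and each chunk is summarized separately.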
def flat_map_summaries(self, chunks, summaries):
flat_summaries = []
for chunk, summary in zip(chunks, summaries):
flat_summaries.extend(summary)
return flat_summaries
def load_pdf(self, path):
if self.load_required:
ingest.load_data(self.ingestor, path)
# Function for summarization
def summarize(self, document, size_multiplier=7):
inputs = self.summary_tokenizer(document, return_tensors='pt', max_length=1024, truncation=True)
summary_ids = self.summary_model.generate(inputs['input_ids'], num_beams=4, min_length=30, max_length=size_multiplier * len(document.split()),
early_stopping=True)
return [self.summary_tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids]
def chunk_text(self, text, chunk_size, overlap_size):
tokens = text.split(' ')
chunks = []
for i in range(0, len(tokens), chunk_size - overlap_size):
chunk = ' '.join(tokens[i:i + chunk_size])
chunks.append(chunk)
return chunks | [] |
2024-01-10 | openai/gpt-discord-bot | src~moderation.py | from openai._compat import model_dump
from src.constants import (
SERVER_TO_MODERATION_CHANNEL,
MODERATION_VALUES_FOR_BLOCKED,
MODERATION_VALUES_FOR_FLAGGED,
)
from openai import OpenAI
client = OpenAI()
from typing import Optional, Tuple
import discord
from src.utils import logger
def moderate_message(
message: str, user: str
) -> Tuple[str, str]: # [flagged_str, blocked_str]
moderation_response = client.moderations.create(
input=message, model="text-moderation-latest"
)
category_scores = moderation_response.results[0].category_scores or {}
category_score_items = model_dump(category_scores)
blocked_str = ""
flagged_str = ""
for category, score in category_score_items.items():
if score > MODERATION_VALUES_FOR_BLOCKED.get(category, 1.0):
blocked_str += f"({category}: {score})"
logger.info(f"blocked {user} {category} {score}")
break
if score > MODERATION_VALUES_FOR_FLAGGED.get(category, 1.0):
flagged_str += f"({category}: {score})"
logger.info(f"flagged {user} {category} {score}")
return (flagged_str, blocked_str)
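
# Editor's note (illustrative assumption, not from this repo's constants.py): the threshold maps are
# per-category score cutoffs, e.g. MODERATION_VALUES_FOR_BLOCKED = {"hate": 0.5, "sexual/minors": 0.1},
# and a message is blocked (or flagged) as soon as any category score exceeds its cutoff.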
async def fetch_moderation_channel(
guild: Optional[discord.Guild],
) -> Optional[discord.abc.GuildChannel]:
if not guild or not guild.id:
return None
moderation_channel = SERVER_TO_MODERATION_CHANNEL.get(guild.id, None)
if moderation_channel:
channel = await guild.fetch_channel(moderation_channel)
return channel
return None
async def send_moderation_flagged_message(
guild: Optional[discord.Guild],
user: str,
flagged_str: Optional[str],
message: Optional[str],
url: Optional[str],
):
if guild and flagged_str and len(flagged_str) > 0:
moderation_channel = await fetch_moderation_channel(guild=guild)
if moderation_channel:
message = message[:100] if message else None
await moderation_channel.send(
f"⚠️ {user} - {flagged_str} - {message} - {url}"
)
async def send_moderation_blocked_message(
guild: Optional[discord.Guild],
user: str,
blocked_str: Optional[str],
message: Optional[str],
):
if guild and blocked_str and len(blocked_str) > 0:
moderation_channel = await fetch_moderation_channel(guild=guild)
if moderation_channel:
message = message[:500] if message else None
await moderation_channel.send(f"❌ {user} - {blocked_str} - {message}")
| [] |
2024-01-10 | openai/gpt-discord-bot | src~completion.py | from enum import Enum
from dataclasses import dataclass
import openai
from openai import AsyncOpenAI
from src.moderation import moderate_message
from typing import Optional, List
from src.constants import (
BOT_INSTRUCTIONS,
BOT_NAME,
EXAMPLE_CONVOS,
)
import discord
from src.base import Message, Prompt, Conversation, ThreadConfig
from src.utils import split_into_shorter_messages, close_thread, logger
from src.moderation import (
send_moderation_flagged_message,
send_moderation_blocked_message,
)
MY_BOT_NAME = BOT_NAME
MY_BOT_EXAMPLE_CONVOS = EXAMPLE_CONVOS
class CompletionResult(Enum):
OK = 0
TOO_LONG = 1
INVALID_REQUEST = 2
OTHER_ERROR = 3
MODERATION_FLAGGED = 4
MODERATION_BLOCKED = 5
@dataclass
class CompletionData:
status: CompletionResult
reply_text: Optional[str]
status_text: Optional[str]
client = AsyncOpenAI()
async def generate_completion_response(
messages: List[Message], user: str, thread_config: ThreadConfig
) -> CompletionData:
try:
prompt = Prompt(
header=Message(
"system", f"Instructions for {MY_BOT_NAME}: {BOT_INSTRUCTIONS}"
),
examples=MY_BOT_EXAMPLE_CONVOS,
convo=Conversation(messages),
)
rendered = prompt.full_render(MY_BOT_NAME)
response = await client.chat.completions.create(
model=thread_config.model,
messages=rendered,
temperature=thread_config.temperature,
top_p=1.0,
max_tokens=thread_config.max_tokens,
stop=["<|endoftext|>"],
)
reply = response.choices[0].message.content.strip()
if reply:
flagged_str, blocked_str = moderate_message(
message=(rendered[-1]["content"] + reply)[-500:], user=user
)
if len(blocked_str) > 0:
return CompletionData(
status=CompletionResult.MODERATION_BLOCKED,
reply_text=reply,
status_text=f"from_response:{blocked_str}",
)
if len(flagged_str) > 0:
return CompletionData(
status=CompletionResult.MODERATION_FLAGGED,
reply_text=reply,
status_text=f"from_response:{flagged_str}",
)
return CompletionData(
status=CompletionResult.OK, reply_text=reply, status_text=None
)
except openai.BadRequestError as e:
if "This model's maximum context length" in str(e):
return CompletionData(
status=CompletionResult.TOO_LONG, reply_text=None, status_text=str(e)
)
else:
logger.exception(e)
return CompletionData(
status=CompletionResult.INVALID_REQUEST,
reply_text=None,
status_text=str(e),
)
except Exception as e:
logger.exception(e)
return CompletionData(
status=CompletionResult.OTHER_ERROR, reply_text=None, status_text=str(e)
)
async def process_response(
user: str, thread: discord.Thread, response_data: CompletionData
):
status = response_data.status
reply_text = response_data.reply_text
status_text = response_data.status_text
if status is CompletionResult.OK or status is CompletionResult.MODERATION_FLAGGED:
sent_message = None
if not reply_text:
sent_message = await thread.send(
embed=discord.Embed(
description=f"**Invalid response** - empty response",
color=discord.Color.yellow(),
)
)
else:
shorter_response = split_into_shorter_messages(reply_text)
for r in shorter_response:
sent_message = await thread.send(r)
if status is CompletionResult.MODERATION_FLAGGED:
await send_moderation_flagged_message(
guild=thread.guild,
user=user,
flagged_str=status_text,
message=reply_text,
url=sent_message.jump_url if sent_message else "no url",
)
await thread.send(
embed=discord.Embed(
description=f"⚠️ **This conversation has been flagged by moderation.**",
color=discord.Color.yellow(),
)
)
elif status is CompletionResult.MODERATION_BLOCKED:
await send_moderation_blocked_message(
guild=thread.guild,
user=user,
blocked_str=status_text,
message=reply_text,
)
await thread.send(
embed=discord.Embed(
description=f"❌ **The response has been blocked by moderation.**",
color=discord.Color.red(),
)
)
elif status is CompletionResult.TOO_LONG:
await close_thread(thread)
elif status is CompletionResult.INVALID_REQUEST:
await thread.send(
embed=discord.Embed(
description=f"**Invalid request** - {status_text}",
color=discord.Color.yellow(),
)
)
else:
await thread.send(
embed=discord.Embed(
description=f"**Error** - {status_text}",
color=discord.Color.yellow(),
)
)
| [
"system",
"Instructions for PLACEHOLDER: PLACEHOLDER"
] |
2024-01-10 | jiminHuang/zero-shot-finance | fomc_communication~code~gpt_4_api_run.py | import openai,os,sys
import pandas as pd
from time import sleep, time
from datetime import date
today = date.today()
openai.api_key = os.getenv("OPENAI_API_KEY")  # avoid hard-coding API keys; read the key from the environment
for seed in [5768, 78516, 944601]:
for data_category in ["lab-manual-split-combine"]:
start_t = time()
# load training data
test_data_path = "../data/test/" + data_category + "-test" + "-" + str(seed) + ".xlsx"
data_df = pd.read_excel(test_data_path)
sentences = data_df['sentence'].to_list()
labels = data_df['label'].to_numpy()
output_list = []
for i in range(len(sentences)):
sen = sentences[i]
message = "Discard all the previous instructions. Behave like you are an expert sentence classifier. Classify the following sentence from FOMC into 'HAWKISH', 'DOVISH', or 'NEUTRAL' class. Label 'HAWKISH' if it is corresponding to tightening of the monetary policy, 'DOVISH' if it is corresponding to easing of the monetary policy, or 'NEUTRAL' if the stance is neutral. Provide the label in the first line and provide a short explanation in the second line. The sentence: " + sen
prompt_json = [
{"role": "user", "content": message},
]
            # Retry on API errors. Note: reassigning the loop variable (`i = i - 1`) has no effect
            # inside a Python `for` loop, so an explicit retry loop is used instead.
            while True:
                try:
                    chat_completion = openai.ChatCompletion.create(
                        model="gpt-3.5-turbo",
                        messages=prompt_json,
                        temperature=0.0,
                        max_tokens=1000
                    )
                    break
                except Exception as e:
                    print(e)
                    sleep(10.0)
answer = chat_completion.choices[0].message.content
output_list.append([labels[i], sen, answer])
sleep(1.0)
results = pd.DataFrame(output_list, columns=["true_label", "original_sent", "text_output"])
time_taken = int((time() - start_t)/60.0)
results.to_csv(f'../data/llm_prompt_outputs/gpt4_{data_category}_{seed}_{today.strftime("%d_%m_%Y")}_{time_taken}.csv', index=False)
| [
"Discard all the previous instructions. Behave like you are an expert sentence classifier. Classify the following sentence from FOMC into 'HAWKISH', 'DOVISH', or 'NEUTRAL' class. Label 'HAWKISH' if it is corresponding to tightening of the monetary policy, 'DOVISH' if it is corresponding to easing of the monetary policy, or 'NEUTRAL' if the stance is neutral. Provide the label in the first line and provide a short explanation in the second line. The sentence: PLACEHOLDER"
] |
2024-01-10 | teremterem/AgentForum | examples~plain_chatgpt.py | # pylint: disable=wrong-import-position,duplicate-code
"""Chat with OpenAI ChatGPT using the AgentForum library."""
import asyncio
# noinspection PyUnresolvedReferences
import readline # pylint: disable=unused-import
import warnings
from dotenv import load_dotenv
load_dotenv()
import promptlayer # TODO Oleksandr: make this optional
# TODO Oleksandr: get rid of this warning suppression when PromptLayer doesn't produce "Expected Choice but got dict"
# warning anymore
warnings.filterwarnings("ignore", module="pydantic")
from agentforum.ext.llms.openai import openai_chat_completion
from agentforum.forum import Forum, InteractionContext
forum = Forum()
async_openai_client = promptlayer.openai.AsyncOpenAI()
@forum.agent
async def openai_agent(ctx: InteractionContext, **kwargs) -> None:
"""The first agent that uses OpenAI ChatGPT. It sends the full chat history to the OpenAI API."""
full_chat = await ctx.request_messages.amaterialize_full_history()
ctx.respond(openai_chat_completion(prompt=full_chat, async_openai_client=async_openai_client, **kwargs))
@forum.agent
async def user_proxy_agent(ctx: InteractionContext) -> None:
"""An agent that acts as a proxy between the user and other agents."""
async for request in ctx.request_messages:
print("\n\033[1m\033[36mGPT: ", end="", flush=True)
async for token in request:
print(token.text, end="", flush=True)
print("\033[0m")
user_input = input("\nYOU: ")
if user_input == "exit":
raise KeyboardInterrupt
ctx.respond(user_input)
async def main() -> None:
"""The chat loop."""
assistant_responses = []
try:
while True:
user_requests = user_proxy_agent.quick_call(assistant_responses)
# the following line is needed in order to wait until the previous back-and-forth is processed
# (otherwise back-and-forth-s will be perpetually scheduled but never executed)
# TODO Oleksandr: how to turn this hack into something more elegant ?
await user_requests.amaterialize_as_list()
assistant_responses = openai_agent.quick_call(
user_requests,
# model="gpt-4-1106-preview",
model="gpt-3.5-turbo-1106",
stream=True,
)
except KeyboardInterrupt:
print()
if __name__ == "__main__":
asyncio.run(main())
| [] |
2024-01-10 | lfoppiano/document-qa | document_qa~document_qa_engine.py | import copy
import os
from pathlib import Path
from typing import Union, Any
from document_qa.grobid_processors import GrobidProcessor
from grobid_client.grobid_client import GrobidClient
from langchain.chains import create_extraction_chain, ConversationChain, ConversationalRetrievalChain
from langchain.chains.question_answering import load_qa_chain, stuff_prompt, refine_prompts, map_reduce_prompt, \
map_rerank_prompt
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
from langchain.retrievers import MultiQueryRetriever
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from tqdm import tqdm
class DocumentQAEngine:
llm = None
qa_chain_type = None
embedding_function = None
embeddings_dict = {}
embeddings_map_from_md5 = {}
embeddings_map_to_md5 = {}
default_prompts = {
'stuff': stuff_prompt,
'refine': refine_prompts,
"map_reduce": map_reduce_prompt,
"map_rerank": map_rerank_prompt
}
def __init__(self,
llm,
embedding_function,
qa_chain_type="stuff",
embeddings_root_path=None,
grobid_url=None,
memory=None
):
self.embedding_function = embedding_function
self.llm = llm
self.memory = memory
self.chain = load_qa_chain(llm, chain_type=qa_chain_type)
if embeddings_root_path is not None:
self.embeddings_root_path = embeddings_root_path
if not os.path.exists(embeddings_root_path):
os.makedirs(embeddings_root_path)
else:
self.load_embeddings(self.embeddings_root_path)
if grobid_url:
self.grobid_url = grobid_url
grobid_client = GrobidClient(
grobid_server=self.grobid_url,
batch_size=1000,
coordinates=["p"],
sleep_time=5,
timeout=60,
check_server=True
)
self.grobid_processor = GrobidProcessor(grobid_client)
def load_embeddings(self, embeddings_root_path: Union[str, Path]) -> None:
"""
Load the embeddings assuming they are all persisted and stored in a single directory.
The root path of the embeddings containing one data store for each document in each subdirectory
"""
embeddings_directories = [f for f in os.scandir(embeddings_root_path) if f.is_dir()]
if len(embeddings_directories) == 0:
print("No available embeddings")
return
for embedding_document_dir in embeddings_directories:
self.embeddings_dict[embedding_document_dir.name] = Chroma(persist_directory=embedding_document_dir.path,
embedding_function=self.embedding_function)
filename_list = list(Path(embedding_document_dir).glob('*.storage_filename'))
if filename_list:
                stored_filename = filename_list[0].name.replace(".storage_filename", "")
                self.embeddings_map_from_md5[embedding_document_dir.name] = stored_filename
                self.embeddings_map_to_md5[stored_filename] = embedding_document_dir.name
print("Embedding loaded: ", len(self.embeddings_dict.keys()))
def get_loaded_embeddings_ids(self):
return list(self.embeddings_dict.keys())
def get_md5_from_filename(self, filename):
return self.embeddings_map_to_md5[filename]
def get_filename_from_md5(self, md5):
return self.embeddings_map_from_md5[md5]
def query_document(self, query: str, doc_id, output_parser=None, context_size=4, extraction_schema=None,
verbose=False) -> (
Any, str):
# self.load_embeddings(self.embeddings_root_path)
if verbose:
print(query)
response = self._run_query(doc_id, query, context_size=context_size)
response = response['output_text'] if 'output_text' in response else response
if verbose:
print(doc_id, "->", response)
if output_parser:
try:
return self._parse_json(response, output_parser), response
except Exception as oe:
print("Failing to parse the response", oe)
return None, response
elif extraction_schema:
try:
chain = create_extraction_chain(extraction_schema, self.llm)
parsed = chain.run(response)
return parsed, response
except Exception as oe:
print("Failing to parse the response", oe)
return None, response
else:
return None, response
def query_storage(self, query: str, doc_id, context_size=4):
documents = self._get_context(doc_id, query, context_size)
context_as_text = [doc.page_content for doc in documents]
return context_as_text
def _parse_json(self, response, output_parser):
system_message = "You are an useful assistant expert in materials science, physics, and chemistry " \
"that can process text and transform it to JSON."
human_message = """Transform the text between three double quotes in JSON.\n\n\n\n
{format_instructions}\n\nText: \"\"\"{text}\"\"\""""
system_message_prompt = SystemMessagePromptTemplate.from_template(system_message)
human_message_prompt = HumanMessagePromptTemplate.from_template(human_message)
prompt_template = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
results = self.llm(
prompt_template.format_prompt(
text=response,
format_instructions=output_parser.get_format_instructions()
).to_messages()
)
parsed_output = output_parser.parse(results.content)
return parsed_output
def _run_query(self, doc_id, query, context_size=4):
relevant_documents = self._get_context(doc_id, query, context_size)
response = self.chain.run(input_documents=relevant_documents,
question=query)
if self.memory:
self.memory.save_context({"input": query}, {"output": response})
return response
def _get_context(self, doc_id, query, context_size=4):
db = self.embeddings_dict[doc_id]
retriever = db.as_retriever(search_kwargs={"k": context_size})
relevant_documents = retriever.get_relevant_documents(query)
if self.memory and len(self.memory.buffer_as_messages) > 0:
relevant_documents.append(
Document(
page_content="""Following, the previous question and answers. Use these information only when in the question there are unspecified references:\n{}\n\n""".format(
self.memory.buffer_as_str))
)
return relevant_documents
def get_all_context_by_document(self, doc_id):
"""Return the full context from the document"""
db = self.embeddings_dict[doc_id]
docs = db.get()
return docs['documents']
def _get_context_multiquery(self, doc_id, query, context_size=4):
db = self.embeddings_dict[doc_id].as_retriever(search_kwargs={"k": context_size})
multi_query_retriever = MultiQueryRetriever.from_llm(retriever=db, llm=self.llm)
relevant_documents = multi_query_retriever.get_relevant_documents(query)
return relevant_documents
def get_text_from_document(self, pdf_file_path, chunk_size=-1, perc_overlap=0.1, include=(), verbose=False):
"""
        Extract text from documents using Grobid. If chunk_size < 0, each paragraph is kept as a separate chunk.
"""
if verbose:
print("File", pdf_file_path)
filename = Path(pdf_file_path).stem
structure = self.grobid_processor.process_structure(pdf_file_path)
biblio = structure['biblio']
biblio['filename'] = filename.replace(" ", "_")
if verbose:
print("Generating embeddings for:", hash, ", filename: ", filename)
texts = []
metadatas = []
ids = []
if chunk_size < 0:
for passage in structure['passages']:
biblio_copy = copy.copy(biblio)
if len(str.strip(passage['text'])) > 0:
texts.append(passage['text'])
biblio_copy['type'] = passage['type']
biblio_copy['section'] = passage['section']
biblio_copy['subSection'] = passage['subSection']
metadatas.append(biblio_copy)
ids.append(passage['passage_id'])
else:
document_text = " ".join([passage['text'] for passage in structure['passages']])
# text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_size * perc_overlap
)
texts = text_splitter.split_text(document_text)
metadatas = [biblio for _ in range(len(texts))]
ids = [id for id, t in enumerate(texts)]
if "biblio" in include:
biblio_metadata = copy.copy(biblio)
biblio_metadata['type'] = "biblio"
biblio_metadata['section'] = "header"
for key in ['title', 'authors', 'publication_year']:
if key in biblio_metadata:
texts.append("{}: {}".format(key, biblio_metadata[key]))
metadatas.append(biblio_metadata)
ids.append(key)
return texts, metadatas, ids
def create_memory_embeddings(self, pdf_path, doc_id=None, chunk_size=500, perc_overlap=0.1, include_biblio=False):
include = ["biblio"] if include_biblio else []
texts, metadata, ids = self.get_text_from_document(
pdf_path,
chunk_size=chunk_size,
perc_overlap=perc_overlap,
include=include)
if doc_id:
hash = doc_id
else:
hash = metadata[0]['hash']
if hash not in self.embeddings_dict.keys():
self.embeddings_dict[hash] = Chroma.from_texts(texts,
embedding=self.embedding_function,
metadatas=metadata,
collection_name=hash)
else:
# if 'documents' in self.embeddings_dict[hash].get() and len(self.embeddings_dict[hash].get()['documents']) == 0:
# self.embeddings_dict[hash].delete(ids=self.embeddings_dict[hash].get()['ids'])
self.embeddings_dict[hash].delete_collection()
self.embeddings_dict[hash] = Chroma.from_texts(texts,
embedding=self.embedding_function,
metadatas=metadata,
collection_name=hash)
self.embeddings_root_path = None
return hash
def create_embeddings(self, pdfs_dir_path: Path, chunk_size=500, perc_overlap=0.1, include_biblio=False):
input_files = []
for root, dirs, files in os.walk(pdfs_dir_path, followlinks=False):
for file_ in files:
if not (file_.lower().endswith(".pdf")):
continue
input_files.append(os.path.join(root, file_))
for input_file in tqdm(input_files, total=len(input_files), unit='document',
desc="Grobid + embeddings processing"):
md5 = self.calculate_md5(input_file)
data_path = os.path.join(self.embeddings_root_path, md5)
if os.path.exists(data_path):
print(data_path, "exists. Skipping it ")
continue
include = ["biblio"] if include_biblio else []
texts, metadata, ids = self.get_text_from_document(
input_file,
chunk_size=chunk_size,
perc_overlap=perc_overlap,
include=include)
filename = metadata[0]['filename']
vector_db_document = Chroma.from_texts(texts,
metadatas=metadata,
embedding=self.embedding_function,
persist_directory=data_path)
vector_db_document.persist()
with open(os.path.join(data_path, filename + ".storage_filename"), 'w') as fo:
fo.write("")
@staticmethod
def calculate_md5(input_file: Union[Path, str]):
import hashlib
md5_hash = hashlib.md5()
with open(input_file, 'rb') as fi:
md5_hash.update(fi.read())
return md5_hash.hexdigest().upper()
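
# A minimal usage sketch (editor's illustration, not part of the original module); it assumes a
# running Grobid server and OpenAI credentials available in the environment:
#
#   from langchain.chat_models import ChatOpenAI
#   from langchain.embeddings import OpenAIEmbeddings
#
#   engine = DocumentQAEngine(
#       llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
#       embedding_function=OpenAIEmbeddings(),
#       grobid_url="http://localhost:8070",
#   )
#   doc_id = engine.create_memory_embeddings("paper.pdf", chunk_size=500, include_biblio=True)
#   _, answer = engine.query_document("What is the main finding?", doc_id)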
| [
"Transform the text between three double quotes in JSON.\n\n\n\n\n {format_instructions}\n\nText: \"\"\"{text}\"\"\"",
"[PLACEHOLDER, PLACEHOLDER]",
"{'stuff': PLACEHOLDER, 'refine': PLACEHOLDER, 'map_reduce': PLACEHOLDER, 'map_rerank': PLACEHOLDER}",
"{text}"
] |
2024-01-10 | dazedanon/DazedMTLTool | modules~lune2.py | from concurrent.futures import ThreadPoolExecutor, as_completed
import json
import os
from pathlib import Path
import re
import sys
import textwrap
import threading
import time
import traceback
import tiktoken
from colorama import Fore
from dotenv import load_dotenv
import openai
from retry import retry
from tqdm import tqdm
#Globals
load_dotenv()
if os.getenv('api').replace(' ', '') != '':
openai.api_base = os.getenv('api')
openai.organization = os.getenv('org')
openai.api_key = os.getenv('key')
MODEL = os.getenv('model')
TIMEOUT = int(os.getenv('timeout'))
LANGUAGE=os.getenv('language').capitalize()
APICOST = .002 # Depends on the model https://openai.com/pricing
PROMPT = Path('prompt.txt').read_text(encoding='utf-8')
THREADS = int(os.getenv('threads'))
LOCK = threading.Lock()
WIDTH = int(os.getenv('width'))
LISTWIDTH = int(os.getenv('listWidth'))
MAXHISTORY = 10
ESTIMATE = ''
TOTALCOST = 0
TOKENS = 0
TOTALTOKENS = 0
#tqdm Globals
BAR_FORMAT='{l_bar}{bar:10}{r_bar}{bar:-10b}'
POSITION=0
LEAVE=False
# Flags
CODE401 = True
CODE102 = True
CODE122 = False
CODE101 = False
CODE355655 = False
CODE357 = False
CODE356 = False
CODE320 = False
CODE111 = False
def handleLuneTxt(filename, estimate):
global ESTIMATE, TOKENS, TOTALTOKENS, TOTALCOST
ESTIMATE = estimate
if estimate:
start = time.time()
translatedData = openFiles(filename)
# Print Result
end = time.time()
tqdm.write(getResultString(translatedData, end - start, filename))
with LOCK:
TOTALCOST += translatedData[1] * .001 * APICOST
TOTALTOKENS += translatedData[1]
else:
with open('translated/' + filename, 'w', encoding='shiftjis', newline='\n') as outFile:
start = time.time()
translatedData = openFiles(filename)
# Print Result
end = time.time()
outFile.writelines(translatedData[0])
tqdm.write(getResultString(translatedData, end - start, filename))
with LOCK:
TOTALCOST += translatedData[1] * .001 * APICOST
TOTALTOKENS += translatedData[1]
return getResultString(['', TOTALTOKENS, None], end - start, 'TOTAL')
def openFiles(filename):
with open('files/' + filename, 'r', encoding='shiftjis') as f:
translatedData = parseText(f, filename)
return translatedData
def getResultString(translatedData, translationTime, filename):
# File Print String
tokenString = Fore.YELLOW + '[' + str(translatedData[1]) + \
' Tokens/${:,.4f}'.format(translatedData[1] * .001 * APICOST) + ']'
timeString = Fore.BLUE + '[' + str(round(translationTime, 1)) + 's]'
if translatedData[2] == None:
# Success
return filename + ': ' + tokenString + timeString + Fore.GREEN + u' \u2713 ' + Fore.RESET
else:
# Fail
try:
raise translatedData[2]
except Exception as e:
errorString = str(e) + Fore.RED
return filename + ': ' + tokenString + timeString + Fore.RED + u' \u2717 ' +\
errorString + Fore.RESET
def parseText(data, filename):
totalTokens = 0
totalLines = 0
global LOCK
# Get total for progress bar
linesList = data.readlines()
totalLines = len(linesList)
with tqdm(bar_format=BAR_FORMAT, position=POSITION, total=totalLines, leave=LEAVE) as pbar:
pbar.desc=filename
pbar.total=totalLines
try:
response = translateText(linesList, pbar)
except Exception as e:
traceback.print_exc()
return [linesList, 0, e]
return [response[0], response[1], None]
def translateText(data, pbar):
textHistory = []
maxHistory = MAXHISTORY
tokens = 0
speaker = ''
speakerFlag = False
syncIndex = 0
### Translation
for i in range(len(data)):
if syncIndex > i:
i = syncIndex
# Finish if at end
if i+1 > len(data):
return [data, tokens]
# Remove newlines
jaString = data[i]
jaString = jaString.replace('\\n', '')
jaString = jaString.replace('\n', '')
# Choices
if '0100410000000' in jaString:
decodedBITCH = bytes.fromhex(jaString).decode('shiftjis')
matchList = re.findall(r'd(.+?),', decodedBITCH)
if len(matchList) > 0:
for match in matchList:
response = translateGPT(match, 'Keep your translation as brief as possible. Previous text: ' + textHistory[len(textHistory)-1] + '\n\nReply in the style of a dialogue option.', True)
tokens += response[1]
translatedText = response[0]
# Remove characters that may break scripts
charList = ['.', '\"', '\\n']
for char in charList:
translatedText = translatedText.replace(char, '')
decodedBITCH = decodedBITCH.replace(match, translatedText.replace(' ', '\u3000'))
data[i] = decodedBITCH.encode('shift-jis').hex() + '\n'
continue
# Reset Speaker
if '00000000' == jaString:
i += 1
pbar.update(1)
speaker = ''
jaString = data[i]
# Grab and Translate Speaker
elif re.search(r'^0000[1-9]000$', jaString):
i += 1
pbar.update(1)
jaString = data[i].replace('\n', '')
jaString = jaString.replace('拓海', 'Takumi')
jaString = jaString.replace('こはる', 'Koharu')
jaString = jaString.replace('理央', 'Rio')
jaString = jaString.replace('アリサ', 'Arisa')
jaString = jaString.replace('友里子', 'Yuriko')
# Translate Speaker
response = translateGPT(jaString, 'Reply with only the '+ LANGUAGE +' translation of the NPC name', True)
tokens += response[1]
speaker = response[0].strip('.')
data[i] = speaker + '\n'
# Set index to line
i += 1
else:
pbar.update(1)
continue
# Translate
finalJAString = data[i]
# Remove Textwrap
finalJAString = finalJAString.replace('\\n', ' ')
finalJAString = finalJAString.replace('\n', ' ')
if speaker == '':
speaker = 'Takumi'
response = translateGPT(speaker + ': ' + finalJAString, textHistory, True)
tokens += response[1]
translatedText = response[0]
# Remove Textwrap
translatedText = translatedText.replace('\\n', ' ')
translatedText = translatedText.replace('\n', ' ')
# Remove added speaker and quotes
translatedText = re.sub(r'^.+?:\s', '', translatedText)
        # TextHistory is what we use to give GPT context, so that's appended here.
if speaker != '':
textHistory.append(speaker + ': ' + translatedText)
elif speakerFlag == False:
textHistory.append('\"' + translatedText + '\"')
# Keep textHistory list at length maxHistory
if len(textHistory) > maxHistory:
textHistory.pop(0)
currentGroup = []
# Textwrap
translatedText = textwrap.fill(translatedText, width=WIDTH)
translatedText = translatedText.replace(',\n', ', \n')
translatedText = translatedText.replace('\n', '\\n')
translatedText = translatedText.replace(',\\n', ', \\n')
# Set Data
data[i] = translatedText + '\n'
syncIndex = i + 1
pbar.update(1)
return [data, tokens]
def subVars(jaString):
jaString = jaString.replace('\u3000', ' ')
# Icons
count = 0
iconList = re.findall(r'[\\]+[iIkKwW]+\[[0-9]+\]', jaString)
iconList = set(iconList)
if len(iconList) != 0:
for icon in iconList:
jaString = jaString.replace(icon, '[Ascii_' + str(count) + ']')
count += 1
# Colors
count = 0
colorList = re.findall(r'[\\]+[cC]\[[0-9]+\]', jaString)
colorList = set(colorList)
if len(colorList) != 0:
for color in colorList:
jaString = jaString.replace(color, '[Color_' + str(count) + ']')
count += 1
# Names
count = 0
nameList = re.findall(r'[\\]+[nN]\[.+?\]+', jaString)
nameList = set(nameList)
if len(nameList) != 0:
for name in nameList:
jaString = jaString.replace(name, '[N_' + str(count) + ']')
count += 1
# Variables
count = 0
varList = re.findall(r'[\\]+[vV]\[[0-9]+\]', jaString)
varList = set(varList)
if len(varList) != 0:
for var in varList:
jaString = jaString.replace(var, '[Var_' + str(count) + ']')
count += 1
# Formatting
count = 0
if '笑えるよね.' in jaString:
print('t')
formatList = re.findall(r'[\\]+CL', jaString)
formatList = set(formatList)
if len(formatList) != 0:
for var in formatList:
jaString = jaString.replace(var, '[FCode_' + str(count) + ']')
count += 1
# Put all lists in list and return
allList = [iconList, colorList, nameList, varList, formatList]
return [jaString, allList]
def resubVars(translatedText, allList):
# Fix Spacing and ChatGPT Nonsense
matchList = re.findall(r'\[\s?.+?\s?\]', translatedText)
if len(matchList) > 0:
for match in matchList:
text = match.strip()
translatedText = translatedText.replace(match, text)
# Icons
count = 0
if len(allList[0]) != 0:
for var in allList[0]:
translatedText = translatedText.replace('[Ascii_' + str(count) + ']', var)
count += 1
# Colors
count = 0
if len(allList[1]) != 0:
for var in allList[1]:
translatedText = translatedText.replace('[Color_' + str(count) + ']', var)
count += 1
# Names
count = 0
if len(allList[2]) != 0:
for var in allList[2]:
translatedText = translatedText.replace('[N_' + str(count) + ']', var)
count += 1
# Vars
count = 0
if len(allList[3]) != 0:
for var in allList[3]:
translatedText = translatedText.replace('[Var_' + str(count) + ']', var)
count += 1
# Formatting
count = 0
if len(allList[4]) != 0:
for var in allList[4]:
translatedText = translatedText.replace('[FCode_' + str(count) + ']', var)
count += 1
# Remove Color Variables Spaces
# if '\\c' in translatedText:
# translatedText = re.sub(r'\s*(\\+c\[[1-9]+\])\s*', r' \1', translatedText)
# translatedText = re.sub(r'\s*(\\+c\[0+\])', r'\1', translatedText)
return translatedText
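
# Editor's note (illustrative, not original code): subVars/resubVars form a round trip that hides
# RPG Maker control codes from the model: each distinct code (e.g. '\\c[2]') is swapped for a numbered
# placeholder such as '[Color_0]' before translation, and resubVars restores the codes afterwards.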
@retry(exceptions=Exception, tries=5, delay=5)
def translateGPT(t, history, fullPromptFlag):
# If ESTIMATE is True just count this as an execution and return.
if ESTIMATE:
enc = tiktoken.encoding_for_model(MODEL)
tokens = len(enc.encode(t)) * 2 + len(enc.encode(str(history))) + len(enc.encode(PROMPT))
return (t, tokens)
# Sub Vars
varResponse = subVars(t)
subbedT = varResponse[0]
# If there isn't any Japanese in the text just skip
if not re.search(r'[一-龠]+|[ぁ-ゔ]+|[ァ-ヴ]+|[\uFF00-\uFFEF]', subbedT):
return(t, 0)
# Characters
context = '```\
Game Characters:\
Character: 池ノ上 拓海 == Ikenoue Takumi - Gender: Male\
Character: 福永 こはる == Fukunaga Koharu - Gender: Female\
Character: 神泉 理央 == Kamiizumi Rio - Gender: Female\
Character: 吉祥寺 アリサ == Kisshouji Arisa - Gender: Female\
Character: 久我 友里子 == Kuga Yuriko - Gender: Female\
```'
# Prompt
if fullPromptFlag:
system = PROMPT
user = 'Line to Translate = ' + subbedT
else:
system = 'Output ONLY the '+ LANGUAGE +' translation in the following format: `Translation: <'+ LANGUAGE.upper() +'_TRANSLATION>`'
user = 'Line to Translate = ' + subbedT
# Create Message List
msg = []
msg.append({"role": "system", "content": system})
msg.append({"role": "user", "content": context})
if isinstance(history, list):
for line in history:
msg.append({"role": "user", "content": line})
else:
msg.append({"role": "user", "content": history})
msg.append({"role": "user", "content": user})
response = openai.ChatCompletion.create(
temperature=0.1,
frequency_penalty=0.2,
presence_penalty=0.2,
model=MODEL,
messages=msg,
request_timeout=TIMEOUT,
)
# Save Translated Text
translatedText = response.choices[0].message.content
tokens = response.usage.total_tokens
# Resub Vars
translatedText = resubVars(translatedText, varResponse[1])
# Remove Placeholder Text
translatedText = translatedText.replace(LANGUAGE +' Translation: ', '')
translatedText = translatedText.replace('Translation: ', '')
translatedText = translatedText.replace('Line to Translate = ', '')
translatedText = translatedText.replace('Translation = ', '')
translatedText = translatedText.replace('Translate = ', '')
translatedText = translatedText.replace(LANGUAGE +' Translation:', '')
translatedText = translatedText.replace('Translation:', '')
translatedText = translatedText.replace('Line to Translate =', '')
translatedText = translatedText.replace('Translation =', '')
translatedText = translatedText.replace('Translate =', '')
translatedText = re.sub(r'Note:.*', '', translatedText)
translatedText = translatedText.replace('っ', '')
# Return Translation
if len(translatedText) > 15 * len(t) or "I'm sorry, but I'm unable to assist with that translation" in translatedText:
raise Exception
else:
return [translatedText, tokens]
| [
"prompt.txt"
] |
2024-01-10 | dazedanon/DazedMTLTool | modules~txt.py | from concurrent.futures import ThreadPoolExecutor, as_completed
import json
import os
from pathlib import Path
import re
import sys
import textwrap
import threading
import time
import traceback
import tiktoken
from colorama import Fore
from dotenv import load_dotenv
import openai
from retry import retry
from tqdm import tqdm
#Globals
load_dotenv()
if os.getenv('api').replace(' ', '') != '':
openai.api_base = os.getenv('api')
openai.organization = os.getenv('org')
openai.api_key = os.getenv('key')
MODEL = os.getenv('model')
TIMEOUT = int(os.getenv('timeout'))
LANGUAGE=os.getenv('language').capitalize()
APICOST = .002 # Depends on the model https://openai.com/pricing
PROMPT = Path('prompt.txt').read_text(encoding='utf-8')
THREADS = int(os.getenv('threads'))
LOCK = threading.Lock()
WIDTH = int(os.getenv('width'))
LISTWIDTH = int(os.getenv('listWidth'))
MAXHISTORY = 10
ESTIMATE = ''
TOTALCOST = 0
TOKENS = 0
TOTALTOKENS = 0
#tqdm Globals
BAR_FORMAT='{l_bar}{bar:10}{r_bar}{bar:-10b}'
POSITION=0
LEAVE=False
# Flags
CODE401 = True
CODE102 = True
CODE122 = False
CODE101 = False
CODE355655 = False
CODE357 = False
CODE356 = False
CODE320 = False
CODE111 = False
def handleTXT(filename, estimate):
global ESTIMATE, TOKENS, TOTALTOKENS, TOTALCOST
ESTIMATE = estimate
if estimate:
start = time.time()
translatedData = openFiles(filename)
# Print Result
end = time.time()
tqdm.write(getResultString(['', TOKENS, None], end - start, filename))
with LOCK:
TOTALCOST += TOKENS * .001 * APICOST
TOTALTOKENS += TOKENS
TOKENS = 0
return getResultString(['', TOTALTOKENS, None], end - start, 'TOTAL')
else:
with open('translated/' + filename, 'w', encoding='UTF-8') as outFile:
start = time.time()
translatedData = openFiles(filename)
# Print Result
end = time.time()
outFile.writelines(translatedData[0])
tqdm.write(getResultString(translatedData, end - start, filename))
with LOCK:
TOTALCOST += translatedData[1] * .001 * APICOST
TOTALTOKENS += translatedData[1]
return getResultString(['', TOTALTOKENS, None], end - start, 'TOTAL')
def openFiles(filename):
with open('files/' + filename, 'r', encoding='UTF-8') as f:
translatedData = parseText(f, filename)
return translatedData
def getResultString(translatedData, translationTime, filename):
# File Print String
tokenString = Fore.YELLOW + '[' + str(translatedData[1]) + \
' Tokens/${:,.4f}'.format(translatedData[1] * .001 * APICOST) + ']'
timeString = Fore.BLUE + '[' + str(round(translationTime, 1)) + 's]'
if translatedData[2] == None:
# Success
return filename + ': ' + tokenString + timeString + Fore.GREEN + u' \u2713 ' + Fore.RESET
else:
# Fail
try:
raise translatedData[2]
except Exception as e:
errorString = str(e) + Fore.RED
return filename + ': ' + tokenString + timeString + Fore.RED + u' \u2717 ' +\
errorString + Fore.RESET
def parseText(data, filename):
totalTokens = 0
totalLines = 0
global LOCK
# Get total for progress bar
linesList = data.readlines()
totalLines = len(linesList)
with tqdm(bar_format=BAR_FORMAT, position=POSITION, total=totalLines, leave=LEAVE) as pbar:
pbar.desc=filename
pbar.total=totalLines
try:
response = translateText(linesList, pbar)
except Exception as e:
traceback.print_exc()
return [linesList, 0, e]
return [response[0], response[1], None]
def translateText(data, pbar):
textHistory = []
maxHistory = MAXHISTORY
tokens = 0
speaker = ''
speakerFlag = False
currentGroup = []
syncIndex = 0
for i in range(len(data)):
if i != syncIndex:
continue
match = re.findall(r'm\[[0-9]+\] = \"(.*)\"', data[i])
if len(match) > 0:
jaString = match[0]
### Translate
# Remove any textwrap
jaString = re.sub(r'\\n', ' ', jaString)
# Grab Speaker
speakerMatch = re.findall(r's\[[0-9]+\] = \"(.+?)[/\"]', data[i-1])
if len(speakerMatch) > 0:
# If there isn't any Japanese in the text just skip
if re.search(r'[一-龠]+|[ぁ-ゔ]+|[ァ-ヴー]+', jaString) and '_' not in speakerMatch[0]:
speaker = ''
else:
speaker = ''
else:
speaker = ''
# Grab rest of the messages
currentGroup.append(jaString)
start = i
data[i] = re.sub(r'(m\[[0-9]+\]) = \"(.+)\"', rf'\1 = ""', data[i])
while (len(data) > i+1 and re.search(r'm\[[0-9]+\] = \"(.*)\"', data[i+1]) != None):
i+=1
match = re.findall(r'm\[[0-9]+\] = \"(.*)\"', data[i])
currentGroup.append(match[0])
data[i] = re.sub(r'(m\[[0-9]+\]) = \"(.+)\"', rf'\1 = ""', data[i])
finalJAString = ' '.join(currentGroup)
# Translate
if speaker != '':
response = translateGPT(f'{speaker}: {finalJAString}', 'Previous Text for Context: ' + ' '.join(textHistory), True)
else:
response = translateGPT(finalJAString, 'Previous Text for Context: ' + ' '.join(textHistory), True)
tokens += response[1]
translatedText = response[0]
# Remove added speaker and quotes
translatedText = re.sub(r'^.+?:\s', '', translatedText)
            # TextHistory is what we use to give GPT context, so that's appended here.
# rawTranslatedText = re.sub(r'[\\<>]+[a-zA-Z]+\[[a-zA-Z0-9]+\]', '', translatedText)
if speaker != '':
textHistory.append(speaker + ': ' + translatedText)
elif speakerFlag == False:
textHistory.append('\"' + translatedText + '\"')
# Keep textHistory list at length maxHistory
if len(textHistory) > maxHistory:
textHistory.pop(0)
currentGroup = []
# Textwrap
translatedText = translatedText.replace('\"', '\\"')
translatedText = textwrap.fill(translatedText, width=WIDTH)
# Write
textList = translatedText.split("\n")
for t in textList:
data[start] = re.sub(r'(m\[[0-9]+\]) = \"(.*)\"', rf'\1 = "{t}"', data[start])
start+=1
syncIndex = i + 1
pbar.update()
return [data, tokens]
def subVars(jaString):
jaString = jaString.replace('\u3000', ' ')
# Icons
count = 0
iconList = re.findall(r'[\\]+[iI]\[[0-9]+\]', jaString)
iconList = set(iconList)
if len(iconList) != 0:
for icon in iconList:
jaString = jaString.replace(icon, '<I' + str(count) + '>')
count += 1
# Colors
count = 0
colorList = re.findall(r'[\\]+[cC]\[[0-9]+\]', jaString)
colorList = set(colorList)
if len(colorList) != 0:
for color in colorList:
jaString = jaString.replace(color, '<C' + str(count) + '>')
count += 1
# Names
count = 0
nameList = re.findall(r'[\\]+[nN]\[[0-9]+\]', jaString)
nameList = set(nameList)
if len(nameList) != 0:
for name in nameList:
jaString = jaString.replace(name, '<N' + str(count) + '>')
count += 1
# Variables
count = 0
varList = re.findall(r'[\\]+[vV]\[[0-9]+\]', jaString)
varList = set(varList)
if len(varList) != 0:
for var in varList:
jaString = jaString.replace(var, '<V' + str(count) + '>')
count += 1
# Formatting
count = 0
formatList = re.findall(r'[\\]+[!.]', jaString)
formatList = set(formatList)
if len(formatList) != 0:
for format in formatList:
jaString = jaString.replace(format, '<F' + str(count) + '>')
count += 1
# Put all lists in list and return
allList = [iconList, colorList, nameList, varList, formatList]
return [jaString, allList]
def resubVars(translatedText, allList):
# Fix Spacing and ChatGPT Nonsense
matchList = re.findall(r'<\s?.+?\s?>', translatedText)
if len(matchList) > 0:
for match in matchList:
text = match.strip()
translatedText = translatedText.replace(match, text)
# Icons
count = 0
if len(allList[0]) != 0:
for var in allList[0]:
translatedText = translatedText.replace('<I' + str(count) + '>', var)
count += 1
# Colors
count = 0
if len(allList[1]) != 0:
for var in allList[1]:
translatedText = translatedText.replace('<C' + str(count) + '>', var)
count += 1
# Names
count = 0
if len(allList[2]) != 0:
for var in allList[2]:
translatedText = translatedText.replace('<N' + str(count) + '>', var)
count += 1
# Vars
count = 0
if len(allList[3]) != 0:
for var in allList[3]:
translatedText = translatedText.replace('<V' + str(count) + '>', var)
count += 1
# Formatting
count = 0
if len(allList[4]) != 0:
for var in allList[4]:
translatedText = translatedText.replace('<F' + str(count) + '>', var)
            count += 1

    return translatedText
@retry(exceptions=Exception, tries=5, delay=5)
def translateGPT(t, history, fullPromptFlag):
# If ESTIMATE is True just count this as an execution and return.
if ESTIMATE:
enc = tiktoken.encoding_for_model(MODEL)
tokens = len(enc.encode(t)) * 2 + len(enc.encode(str(history))) + len(enc.encode(PROMPT))
return (t, tokens)
# Sub Vars
varResponse = subVars(t)
subbedT = varResponse[0]
# If there isn't any Japanese in the text just skip
if not re.search(r'[一-龠]+|[ぁ-ゔ]+|[ァ-ヴ]+|[\uFF00-\uFFEF]', subbedT):
return(t, 0)
# Characters
context = '```\
Game Characters:\
Character: 池ノ上 拓海 == Ikenoue Takumi - Gender: Male\
Character: 福永 こはる == Fukunaga Koharu - Gender: Female\
Character: 神泉 理央 == Kamiizumi Rio - Gender: Female\
Character: 吉祥寺 アリサ == Kisshouji Arisa - Gender: Female\
Character: 久我 友里子 == Kuga Yuriko - Gender: Female\
```'
# Prompt
if fullPromptFlag:
system = PROMPT
user = 'Line to Translate = ' + subbedT
else:
system = 'Output ONLY the '+ LANGUAGE +' translation in the following format: `Translation: <'+ LANGUAGE.upper() +'_TRANSLATION>`'
user = 'Line to Translate = ' + subbedT
# Create Message List
msg = []
msg.append({"role": "system", "content": system})
msg.append({"role": "user", "content": context})
if isinstance(history, list):
for line in history:
msg.append({"role": "user", "content": line})
else:
msg.append({"role": "user", "content": history})
msg.append({"role": "user", "content": user})
response = openai.ChatCompletion.create(
temperature=0.1,
frequency_penalty=0.2,
presence_penalty=0.2,
model=MODEL,
messages=msg,
request_timeout=TIMEOUT,
)
# Save Translated Text
translatedText = response.choices[0].message.content
tokens = response.usage.total_tokens
# Resub Vars
translatedText = resubVars(translatedText, varResponse[1])
# Remove Placeholder Text
translatedText = translatedText.replace(LANGUAGE +' Translation: ', '')
translatedText = translatedText.replace('Translation: ', '')
translatedText = translatedText.replace('Line to Translate = ', '')
translatedText = translatedText.replace('Translation = ', '')
translatedText = translatedText.replace('Translate = ', '')
translatedText = translatedText.replace(LANGUAGE +' Translation:', '')
translatedText = translatedText.replace('Translation:', '')
translatedText = translatedText.replace('Line to Translate =', '')
translatedText = translatedText.replace('Translation =', '')
translatedText = translatedText.replace('Translate =', '')
translatedText = re.sub(r'Note:.*', '', translatedText)
translatedText = translatedText.replace('っ', '')
# Return Translation
if len(translatedText) > 15 * len(t) or "I'm sorry, but I'm unable to assist with that translation" in translatedText:
raise Exception
else:
return [translatedText, tokens]
| [
"prompt.txt"
] |
2024-01-10 | dazedanon/DazedMTLTool | modules~sakuranbo.py | import os
import re
import textwrap
import threading
import time
import traceback
from pathlib import Path
import openai
import tiktoken
from colorama import Fore
from dotenv import load_dotenv
from retry import retry
from tqdm import tqdm
# Open AI
load_dotenv()
if os.getenv("api").replace(" ", "") != "":
openai.api_base = os.getenv("api")
openai.organization = os.getenv("org")
openai.api_key = os.getenv("key")
# Globals
MODEL = os.getenv("model")
TIMEOUT = int(os.getenv("timeout"))
LANGUAGE = os.getenv("language").capitalize()
INPUTAPICOST = 0.002 # Depends on the model https://openai.com/pricing
OUTPUTAPICOST = 0.002
PROMPT = Path("prompt.txt").read_text(encoding="utf-8")
THREADS = int(
os.getenv("threads")
) # Controls how many threads are working on a single file (May have to drop this)
LOCK = threading.Lock()
WIDTH = int(os.getenv("width"))
LISTWIDTH = int(os.getenv("listWidth"))
NOTEWIDTH = 40
MAXHISTORY = 10
ESTIMATE = ""
totalTokens = [0, 0]
NAMESLIST = []
# tqdm Globals
BAR_FORMAT = "{l_bar}{bar:10}{r_bar}{bar:-10b}"
POSITION = 0
LEAVE = False
# Flags
NAMES = False # Output a list of all the character names found
BRFLAG = False # If the game uses <br> instead
FIXTEXTWRAP = True
IGNORETLTEXT = False
def handleSakuranbo(filename, estimate):
global ESTIMATE
totalTokens = [0, 0]
ESTIMATE = estimate
if estimate:
start = time.time()
translatedData = openFiles(filename)
# Print Result
end = time.time()
tqdm.write(getResultString(translatedData, end - start, filename))
if NAMES is True:
tqdm.write(str(NAMESLIST))
with LOCK:
totalTokens[0] += translatedData[1][0]
totalTokens[1] += translatedData[1][1]
return getResultString(["", totalTokens, None], end - start, "TOTAL")
else:
try:
with open("translated/" + filename, "w", encoding="utf-16") as outFile:
start = time.time()
translatedData = openFiles(filename)
outFile.writelines(translatedData[0])
# Print Result
end = time.time()
tqdm.write(getResultString(translatedData, end - start, filename))
with LOCK:
totalTokens[0] += translatedData[1][0]
totalTokens[1] += translatedData[1][1]
except Exception:
traceback.print_exc()
return "Fail"
return getResultString(["", totalTokens, None], end - start, "TOTAL")
def getResultString(translatedData, translationTime, filename):
# File Print String
totalTokenstring = (
Fore.YELLOW + "[Input: " + str(translatedData[1][0]) + "]"
"[Output: " + str(translatedData[1][1]) + "]"
"[Cost: ${:,.4f}".format(
(translatedData[1][0] * 0.001 * INPUTAPICOST)
+ (translatedData[1][1] * 0.001 * OUTPUTAPICOST)
)
+ "]"
)
timeString = Fore.BLUE + "[" + str(round(translationTime, 1)) + "s]"
if translatedData[2] is None:
# Success
return (
filename
+ ": "
+ totalTokenstring
+ timeString
+ Fore.GREEN
+ " \u2713 "
+ Fore.RESET
)
else:
# Fail
try:
raise translatedData[2]
except Exception as e:
errorString = str(e) + Fore.RED
return (
filename
+ ": "
+ totalTokenstring
+ timeString
+ Fore.RED
+ " \u2717 "
+ errorString
+ Fore.RESET
)
def openFiles(filename):
with open("files/" + filename, "r", encoding="utf-16") as readFile:
translatedData = parseTyrano(readFile, filename)
# Delete lines marked for deletion
finalData = []
for line in translatedData[0]:
if line != "\\d\n":
finalData.append(line)
translatedData[0] = finalData
return translatedData
def parseTyrano(readFile, filename):
totalTokens = [0, 0]
totalLines = 0
# Get total for progress bar
data = readFile.readlines()
totalLines = len(data)
with tqdm(
bar_format=BAR_FORMAT, position=POSITION, total=totalLines, leave=LEAVE
) as pbar:
pbar.desc = filename
pbar.total = totalLines
try:
response = translateTyrano(data, pbar)
totalTokens[0] = response[0]
totalTokens[1] = response[1]
except Exception as e:
traceback.print_exc()
return [data, totalTokens, e]
return [data, totalTokens, None]
def translateTyrano(data, pbar):
textHistory = []
maxHistory = MAXHISTORY
tokens = [0, 0]
currentGroup = []
syncIndex = 0
speaker = ""
delFlag = False
global LOCK, ESTIMATE
for i in range(len(data)):
currentGroup = []
matchList = []
if syncIndex > i:
i = syncIndex
if '[▼]' in data[i]:
            data[i] = data[i].replace('[▼]', '[page]\n')
# If there isn't any Japanese in the text just skip
if IGNORETLTEXT is True:
if not re.search(r'[一-龠]+|[ぁ-ゔ]+|[ァ-ヴー]+', data[i]):
# Keep textHistory list at length maxHistory
textHistory.append('\"' + data[i] + '\"')
if len(textHistory) > maxHistory:
textHistory.pop(0)
currentGroup = []
continue
# Speaker
matchList = re.findall(r"^\[(.+)\sstorage=.+\]", data[i])
if len(matchList) == 0:
matchList = re.findall(r"^\[([^/].+)\]$", data[i])
if len(matchList) > 0:
if "主人公" in matchList[0]:
speaker = "Protagonist"
elif "思考" in matchList[0]:
speaker = "Protagonist Inner Thoughts"
elif "地の文" in matchList[0]:
speaker = "Narrator"
elif "マコ" in matchList[0]:
speaker = "Mako"
elif '少年' in matchList[0]:
speaker = "Boy"
elif '友達' in matchList[0]:
speaker = "Friend"
elif '少女' in matchList[0]:
speaker = "Girl"
else:
response = translateGPT(
matchList[0],
"Reply with only the "
+ LANGUAGE
+ " translation of the NPC name",
True,
)
speaker = response[0]
tokens[0] += response[1][0]
tokens[1] += response[1][1]
# data[i] = '#' + speaker + '\n'
# Choices
elif "glink" in data[i]:
matchList = re.findall(r"\[glink.+text=\"(.+?)\".+", data[i])
if len(matchList) != 0:
if len(textHistory) > 0:
response = translateGPT(
matchList[0],
"Past Translated Text: "
+ textHistory[len(textHistory) - 1]
+ "\n\nReply in the style of a dialogue option.",
True,
)
else:
response = translateGPT(matchList[0], "", False)
translatedText = response[0]
tokens[0] += response[1][0]
tokens[1] += response[1][1]
# Remove characters that may break scripts
charList = [".", '"', "\\n"]
for char in charList:
translatedText = translatedText.replace(char, "")
# Escape all '
translatedText = translatedText.replace("\\", "")
translatedText = translatedText.replace("'", "\\'")
# Set Data
translatedText = data[i].replace(
matchList[0], translatedText.replace(" ", "\u00A0")
)
data[i] = translatedText
# Grab Lines
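        # Dialogue line: first char is not ;, @, *, {, [ and last char is not ;, ', {, }, [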
matchList = re.findall(r"^([^\n;@*\{\[].+[^;'{}\[]$)", data[i])
if len(matchList) > 0 and (re.search(r'^\[(.+)\sstorage=.+\],', data[i-1]) or re.search(r'^\[(.+)\]$', data[i-1]) or re.search(r'^《(.+)》', data[i-1])):
currentGroup.append(matchList[0])
if len(data) > i + 1:
matchList = re.findall(r"^([^\n;@*\{\[].+[^;'{}\[]$)", data[i + 1])
while len(matchList) > 0:
delFlag = True
data[i] = "\d\n" # \d Marks line for deletion
i += 1
matchList = re.findall(r"^([^\n;@*\{\[].+[^;'{}\[]$)", data[i])
if len(matchList) > 0:
currentGroup.append(matchList[0])
        # Join consecutive dialogue lines into one string for better translation.
if len(currentGroup) > 0:
finalJAString = " ".join(currentGroup)
# Remove any textwrap
if FIXTEXTWRAP is True:
finalJAString = finalJAString.replace("_", " ")
# Check Speaker
if speaker == "":
response = translateGPT(finalJAString, textHistory, True)
tokens[0] += response[1][0]
tokens[1] += response[1][1]
translatedText = response[0]
textHistory.append('"' + translatedText + '"')
else:
response = translateGPT(
speaker + ": " + finalJAString, textHistory, True
)
tokens[0] += response[1][0]
tokens[1] += response[1][1]
translatedText = response[0]
textHistory.append('"' + translatedText + '"')
# Remove added speaker
translatedText = re.sub(r"^.+:\s?", "", translatedText)
# Set Data
translatedText = translatedText.replace("ッ", "")
translatedText = translatedText.replace("っ", "")
translatedText = translatedText.replace("ー", "")
translatedText = translatedText.replace('"', "")
translatedText = translatedText.replace("[", "")
translatedText = translatedText.replace("]", "")
# Wordwrap Text
if "_" not in translatedText:
translatedText = textwrap.fill(translatedText, width=WIDTH)
translatedText = translatedText.replace("\n", "_")
# Set
if delFlag is True:
data.insert(i, translatedText.strip() + '\n')
delFlag = False
else:
data[i] = translatedText.strip() + '\n'
# Keep textHistory list at length maxHistory
if len(textHistory) > maxHistory:
textHistory.pop(0)
currentGroup = []
speaker = ""
pbar.update(1)
if len(data) > i + 1:
syncIndex = i + 1
else:
break
# Grab Lines
matchList = re.findall(r"(^\[.+\sstorage=.+\](.+)\[/.+\])", data[i])
if len(matchList) > 0:
originalLine = matchList[0][0]
originalText = matchList[0][1]
currentGroup.append(matchList[0][1])
if len(data) > i + 1:
matchList = re.findall(r"^([^\n;@*\{\[].+[^;'{}\[]$)", data[i + 1])
while len(matchList) > 0:
delFlag = True
data[i] = "\d\n" # \d Marks line for deletion
i += 1
matchList = re.findall(r"^([^\n;@*\{\[].+[^;'{}\[]$)", data[i])
if len(matchList) > 0:
currentGroup.append(matchList[0])
        # Join consecutive dialogue lines into one string for better translation.
if len(currentGroup) > 0:
finalJAString = " ".join(currentGroup)
# Remove any textwrap
if FIXTEXTWRAP is True:
finalJAString = finalJAString.replace("_", " ")
# Check Speaker
if speaker == "":
response = translateGPT(finalJAString, textHistory, True)
tokens[0] += response[1][0]
tokens[1] += response[1][1]
translatedText = response[0]
textHistory.append('"' + translatedText + '"')
else:
response = translateGPT(
speaker + ": " + finalJAString, textHistory, True
)
tokens[0] += response[1][0]
tokens[1] += response[1][1]
translatedText = response[0]
textHistory.append('"' + translatedText + '"')
# Remove added speaker
translatedText = re.sub(r"^.+:\s?", "", translatedText)
# Set Data
translatedText = translatedText.replace("ッ", "")
translatedText = translatedText.replace("っ", "")
translatedText = translatedText.replace("ー", "")
translatedText = translatedText.replace('"', "")
translatedText = translatedText.replace("[", "")
translatedText = translatedText.replace("]", "")
# Wordwrap Text
if "_" not in translatedText:
translatedText = textwrap.fill(translatedText, width=WIDTH)
translatedText = translatedText.replace("\n", "_")
translatedText = originalLine.replace(originalText, translatedText)
# Set
if delFlag is True:
data.insert(i, translatedText.strip() + '\n')
delFlag = False
else:
data[i] = translatedText.strip() + '\n'
# Keep textHistory list at length maxHistory
if len(textHistory) > maxHistory:
textHistory.pop(0)
currentGroup = []
speaker = ""
pbar.update(1)
if len(data) > i + 1:
syncIndex = i + 1
else:
break
return tokens
def subVars(jaString):
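    # Swap engine control codes (icons, colors, names, variables, format codes) for numbered
    # placeholders so the model leaves them intact; resubVars() restores them after translation.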
jaString = jaString.replace("\u3000", " ")
# Nested
count = 0
nestedList = re.findall(r"[\\]+[\w]+\[[\\]+[\w]+\[[0-9]+\]\]", jaString)
nestedList = set(nestedList)
if len(nestedList) != 0:
for icon in nestedList:
jaString = jaString.replace(icon, "{Nested_" + str(count) + "}")
count += 1
# Icons
count = 0
iconList = re.findall(r"[\\]+[iIkKwWaA]+\[[0-9]+\]", jaString)
iconList = set(iconList)
if len(iconList) != 0:
for icon in iconList:
jaString = jaString.replace(icon, "{Ascii_" + str(count) + "}")
count += 1
# Colors
count = 0
colorList = re.findall(r"[\\]+[cC]\[[0-9]+\]", jaString)
colorList = set(colorList)
if len(colorList) != 0:
for color in colorList:
jaString = jaString.replace(color, "{Color_" + str(count) + "}")
count += 1
# Names
count = 0
nameList = re.findall(r"[\\]+[nN]\[.+?\]+", jaString)
nameList = set(nameList)
if len(nameList) != 0:
for name in nameList:
jaString = jaString.replace(name, "{N_" + str(count) + "}")
count += 1
# Variables
count = 0
varList = re.findall(r"[\\]+[vV]\[[0-9]+\]", jaString)
varList = set(varList)
if len(varList) != 0:
for var in varList:
jaString = jaString.replace(var, "{Var_" + str(count) + "}")
count += 1
# Formatting
count = 0
if "笑えるよね." in jaString:
print("t")
formatList = re.findall(r"[\\]+[\w]+\[.+?\]", jaString)
formatList = set(formatList)
if len(formatList) != 0:
for var in formatList:
jaString = jaString.replace(var, "{FCode_" + str(count) + "}")
count += 1
# Put all lists in list and return
allList = [nestedList, iconList, colorList, nameList, varList, formatList]
return [jaString, allList]
def resubVars(translatedText, allList):
# Fix Spacing and ChatGPT Nonsense
matchList = re.findall(r"\[\s?.+?\s?\]", translatedText)
if len(matchList) > 0:
for match in matchList:
text = match.strip()
translatedText = translatedText.replace(match, text)
# Nested
count = 0
if len(allList[0]) != 0:
for var in allList[0]:
translatedText = translatedText.replace("{Nested_" + str(count) + "}", var)
count += 1
# Icons
count = 0
if len(allList[1]) != 0:
for var in allList[1]:
translatedText = translatedText.replace("{Ascii_" + str(count) + "}", var)
count += 1
# Colors
count = 0
if len(allList[2]) != 0:
for var in allList[2]:
translatedText = translatedText.replace("{Color_" + str(count) + "}", var)
count += 1
# Names
count = 0
if len(allList[3]) != 0:
for var in allList[3]:
translatedText = translatedText.replace("{N_" + str(count) + "}", var)
count += 1
# Vars
count = 0
if len(allList[4]) != 0:
for var in allList[4]:
translatedText = translatedText.replace("{Var_" + str(count) + "}", var)
count += 1
# Formatting
count = 0
if len(allList[5]) != 0:
for var in allList[5]:
translatedText = translatedText.replace("{FCode_" + str(count) + "}", var)
count += 1
# Remove Color Variables Spaces
# if '\\c' in translatedText:
# translatedText = re.sub(r'\s*(\\+c\[[1-9]+\])\s*', r' \1', translatedText)
# translatedText = re.sub(r'\s*(\\+c\[0+\])', r'\1', translatedText)
return translatedText
@retry(exceptions=Exception, tries=5, delay=5)
def translateGPT(t, history, fullPromptFlag):
# Sub Vars
varResponse = subVars(t)
subbedT = varResponse[0]
# If there isn't any Japanese in the text just skip
if not re.search(r"[一-龠]+|[ぁ-ゔ]+|[ァ-ヴ]+|[\uFF00-\uFFEF]", subbedT):
return (t, [0, 0])
# If ESTIMATE is True just count this as an execution and return.
if ESTIMATE:
enc = tiktoken.encoding_for_model(MODEL)
historyRaw = ""
if isinstance(history, list):
for line in history:
historyRaw += line
else:
historyRaw = history
inputTotalTokens = len(enc.encode(historyRaw)) + len(enc.encode(PROMPT))
outputTotalTokens = (
len(enc.encode(t)) * 2
) # Estimating 2x the size of the original text
totalTokens = [inputTotalTokens, outputTotalTokens]
return (t, totalTokens)
# Characters
context = "Game Characters:\
Character: マコ == Mako - Gender: Female\
Character: 主人公 == Protagonist - Gender: Male"
# Prompt
if fullPromptFlag:
system = PROMPT
user = "Line to Translate = " + subbedT
else:
system = (
"Output ONLY the "
+ LANGUAGE
+ " translation in the following format: `Translation: <"
+ LANGUAGE.upper()
+ "_TRANSLATION>`"
)
user = "Line to Translate = " + subbedT
# Create Message List
msg = []
msg.append({"role": "system", "content": system})
msg.append({"role": "user", "content": context})
if isinstance(history, list):
for line in history:
msg.append({"role": "user", "content": line})
else:
msg.append({"role": "user", "content": history})
msg.append({"role": "user", "content": user})
response = openai.ChatCompletion.create(
temperature=0,
frequency_penalty=0.2,
presence_penalty=0.2,
model=MODEL,
messages=msg,
request_timeout=TIMEOUT,
)
# Save Translated Text
translatedText = response.choices[0].message.content
totalTokens = [response.usage.prompt_tokens, response.usage.completion_tokens]
# Resub Vars
translatedText = resubVars(translatedText, varResponse[1])
# Remove Placeholder Text
translatedText = translatedText.replace(LANGUAGE + " Translation: ", "")
translatedText = translatedText.replace("Translation: ", "")
translatedText = translatedText.replace("Line to Translate = ", "")
translatedText = translatedText.replace("Translation = ", "")
translatedText = translatedText.replace("Translate = ", "")
translatedText = translatedText.replace(LANGUAGE + " Translation:", "")
translatedText = translatedText.replace("Translation:", "")
translatedText = translatedText.replace("Line to Translate =", "")
translatedText = translatedText.replace("Translation =", "")
translatedText = translatedText.replace("Translate =", "")
translatedText = translatedText.replace("っ", "")
translatedText = translatedText.replace("ッ", "")
translatedText = translatedText.replace("ぁ", "")
translatedText = translatedText.replace("。", ".")
translatedText = translatedText.replace("、", ",")
translatedText = translatedText.replace("?", "?")
translatedText = translatedText.replace("!", "!")
# Return Translation
if (
len(translatedText) > 15 * len(t)
or "I'm sorry, but I'm unable to assist with that translation" in translatedText
):
raise Exception
else:
return [translatedText, totalTokens]
| [
"prompt.txt"
] |
2024-01-10 | dazedanon/DazedMTLTool | modules~atelier.py | import os
from pathlib import Path
import re
import textwrap
import threading
import time
import traceback
import tiktoken
from colorama import Fore
from dotenv import load_dotenv
import openai
from retry import retry
from tqdm import tqdm
# Open AI
load_dotenv()
if os.getenv('api').replace(' ', '') != '':
openai.api_base = os.getenv('api')
openai.organization = os.getenv('org')
openai.api_key = os.getenv('key')
#Globals
MODEL = os.getenv('model')
TIMEOUT = int(os.getenv('timeout'))
LANGUAGE=os.getenv('language').capitalize()
INPUTAPICOST = .002 # Depends on the model https://openai.com/pricing
OUTPUTAPICOST = .002
PROMPT = Path('prompt.txt').read_text(encoding='utf-8')
THREADS = int(os.getenv('threads')) # Controls how many threads are working on a single file (May have to drop this)
LOCK = threading.Lock()
WIDTH = int(os.getenv('width'))
LISTWIDTH = int(os.getenv('listWidth'))
NOTEWIDTH = 40
MAXHISTORY = 10
ESTIMATE = ''
totalTokens = [0, 0]
NAMESLIST = []
#tqdm Globals
BAR_FORMAT='{l_bar}{bar:10}{r_bar}{bar:-10b}'
POSITION=0
LEAVE=False
# Translation Flags
FIXTEXTWRAP = True
IGNORETLTEXT = True
def handleAtelier(filename, estimate):
global ESTIMATE, totalTokens
ESTIMATE = estimate
if estimate:
start = time.time()
translatedData = openFiles(filename)
# Print Result
end = time.time()
tqdm.write(getResultString(translatedData, end - start, filename))
with LOCK:
totalTokens[0] += translatedData[1][0]
totalTokens[1] += translatedData[1][1]
return getResultString(['', totalTokens, None], end - start, 'TOTAL')
else:
try:
with open('translated/' + filename, 'w', encoding='utf-8') as outFile:
start = time.time()
translatedData = openFiles(filename)
outFile.writelines(translatedData[0])
# Print Result
end = time.time()
tqdm.write(getResultString(translatedData, end - start, filename))
with LOCK:
totalTokens[0] += translatedData[1][0]
totalTokens[1] += translatedData[1][1]
except Exception:
return 'Fail'
return getResultString(['', totalTokens, None], end - start, 'TOTAL')
def openFiles(filename):
with open('files/' + filename, 'r', encoding='UTF-8') as f:
translatedData = parseText(f, filename)
return translatedData
def getResultString(translatedData, translationTime, filename):
# File Print String
totalTokenstring =\
Fore.YELLOW +\
'[Input: ' + str(translatedData[1][0]) + ']'\
'[Output: ' + str(translatedData[1][1]) + ']'\
'[Cost: ${:,.4f}'.format((translatedData[1][0] * .001 * INPUTAPICOST) +\
(translatedData[1][1] * .001 * OUTPUTAPICOST)) + ']'
timeString = Fore.BLUE + '[' + str(round(translationTime, 1)) + 's]'
if translatedData[2] is None:
# Success
return filename + ': ' + totalTokenstring + timeString + Fore.GREEN + u' \u2713 ' + Fore.RESET
else:
# Fail
try:
raise translatedData[2]
except Exception as e:
errorString = str(e) + Fore.RED
return filename + ': ' + totalTokenstring + timeString + Fore.RED + u' \u2717 ' +\
errorString + Fore.RESET
def parseText(data, filename):
totalLines = 0
global LOCK
# Get total for progress bar
linesList = data.readlines()
totalLines = len(linesList)
with tqdm(bar_format=BAR_FORMAT, position=POSITION, total=totalLines, leave=LEAVE) as pbar:
pbar.desc=filename
pbar.total=totalLines
try:
response = translateText(linesList, pbar)
except Exception as e:
traceback.print_exc()
return [linesList, 0, e]
return [response[0], response[1], None]
def translateText(data, pbar):
textHistory = []
maxHistory = MAXHISTORY
totalTokens = [0,0]
syncIndex = 0
for i in range(len(data)):
if syncIndex > i:
i = syncIndex
match = re.findall(r'◆.+◆(.+)', data[i])
if len(match) > 0:
jaString = match[0]
### Translate
# Remove any textwrap
finalJAString = re.sub(r'\\n', ' ', jaString)
# Translate
response = translateGPT(finalJAString, 'Previous Text for Context: ' + ' '.join(textHistory), True)
totalTokens[0] += response[1][0]
totalTokens[1] += response[1][1]
translatedText = response[0]
# TextHistory is what we use to give GPT Context, so thats appended here.
textHistory.append('\"' + translatedText + '\"')
# Keep textHistory list at length maxHistory
if len(textHistory) > maxHistory:
textHistory.pop(0)
# Textwrap
translatedText = textwrap.fill(translatedText, width=WIDTH)
translatedText = translatedText.replace('\n', '\\n')
# Write
data[i] = data[i].replace(match[0], translatedText)
syncIndex = i + 1
pbar.update()
return [data, totalTokens]
def subVars(jaString):
jaString = jaString.replace('\u3000', ' ')
# Nested
count = 0
nestedList = re.findall(r'[\\]+[\w]+\[[\\]+[\w]+\[[0-9]+\]\]', jaString)
nestedList = set(nestedList)
if len(nestedList) != 0:
for icon in nestedList:
jaString = jaString.replace(icon, '{Nested_' + str(count) + '}')
count += 1
# Icons
count = 0
iconList = re.findall(r'[\\]+[iIkKwWaA]+\[[0-9]+\]', jaString)
iconList = set(iconList)
if len(iconList) != 0:
for icon in iconList:
jaString = jaString.replace(icon, '{Ascii_' + str(count) + '}')
count += 1
# Colors
count = 0
colorList = re.findall(r'[\\]+[cC]\[[0-9]+\]', jaString)
colorList = set(colorList)
if len(colorList) != 0:
for color in colorList:
jaString = jaString.replace(color, '{Color_' + str(count) + '}')
count += 1
# Names
count = 0
nameList = re.findall(r'[\\]+[nN]\[.+?\]+', jaString)
nameList = set(nameList)
if len(nameList) != 0:
for name in nameList:
jaString = jaString.replace(name, '{N_' + str(count) + '}')
count += 1
# Variables
count = 0
varList = re.findall(r'[\\]+[vV]\[[0-9]+\]', jaString)
varList = set(varList)
if len(varList) != 0:
for var in varList:
jaString = jaString.replace(var, '{Var_' + str(count) + '}')
count += 1
# Formatting
count = 0
if '笑えるよね.' in jaString:
print('t')
formatList = re.findall(r'[\\]+[\w]+\[.+?\]', jaString)
formatList = set(formatList)
if len(formatList) != 0:
for var in formatList:
jaString = jaString.replace(var, '{FCode_' + str(count) + '}')
count += 1
# Put all lists in list and return
allList = [nestedList, iconList, colorList, nameList, varList, formatList]
return [jaString, allList]
def resubVars(translatedText, allList):
# Fix Spacing and ChatGPT Nonsense
matchList = re.findall(r'\[\s?.+?\s?\]', translatedText)
if len(matchList) > 0:
for match in matchList:
text = match.strip()
translatedText = translatedText.replace(match, text)
# Nested
count = 0
if len(allList[0]) != 0:
for var in allList[0]:
translatedText = translatedText.replace('{Nested_' + str(count) + '}', var)
count += 1
# Icons
count = 0
if len(allList[1]) != 0:
for var in allList[1]:
translatedText = translatedText.replace('{Ascii_' + str(count) + '}', var)
count += 1
# Colors
count = 0
if len(allList[2]) != 0:
for var in allList[2]:
translatedText = translatedText.replace('{Color_' + str(count) + '}', var)
count += 1
# Names
count = 0
if len(allList[3]) != 0:
for var in allList[3]:
translatedText = translatedText.replace('{N_' + str(count) + '}', var)
count += 1
# Vars
count = 0
if len(allList[4]) != 0:
for var in allList[4]:
translatedText = translatedText.replace('{Var_' + str(count) + '}', var)
count += 1
# Formatting
count = 0
if len(allList[5]) != 0:
for var in allList[5]:
translatedText = translatedText.replace('{FCode_' + str(count) + '}', var)
count += 1
# Remove Color Variables Spaces
# if '\\c' in translatedText:
# translatedText = re.sub(r'\s*(\\+c\[[1-9]+\])\s*', r' \1', translatedText)
# translatedText = re.sub(r'\s*(\\+c\[0+\])', r'\1', translatedText)
return translatedText
@retry(exceptions=Exception, tries=5, delay=5)
def translateGPT(t, history, fullPromptFlag):
# Sub Vars
varResponse = subVars(t)
subbedT = varResponse[0]
# If there isn't any Japanese in the text just skip
if not re.search(r'[一-龠]+|[ぁ-ゔ]+|[ァ-ヴ]+|[\uFF00-\uFFEF]', subbedT):
return(t, [0,0])
# If ESTIMATE is True just count this as an execution and return.
if ESTIMATE:
enc = tiktoken.encoding_for_model(MODEL)
historyRaw = ''
if isinstance(history, list):
for line in history:
historyRaw += line
else:
historyRaw = history
inputTotalTokens = len(enc.encode(historyRaw)) + len(enc.encode(PROMPT))
outputTotalTokens = len(enc.encode(t)) * 2 # Estimating 2x the size of the original text
totalTokens = [inputTotalTokens, outputTotalTokens]
return (t, totalTokens)
# Characters
context = 'Game Characters:\
Character: Surname:久高 Name:有史 == Surname:Kudaka Name:Yuushi - Gender: Male\
Character: Surname:葛城 Name:碧璃 == Surname:Katsuragi Name:Midori - Gender: Female\
Character: Surname:葛城 Name:依理子 == Surname:Katsuragi Name:Yoriko - Gender: Female\
Character: Surname:桐乃木 Name:奏 == Surname:Kirinogi Name:Kanade - Gender: Female\
Character: Surname:葛城 Name:光男 == Surname:Katsuragi Name:Mitsuo - Gender: Male\
Character: Surname:尾木 Name:優真 == Surname:Ogi Name:Yuuma - Gender: Male'
# Prompt
if fullPromptFlag:
system = PROMPT
user = 'Line to Translate = ' + subbedT
else:
system = 'Output ONLY the '+ LANGUAGE +' translation in the following format: `Translation: <'+ LANGUAGE.upper() +'_TRANSLATION>`'
user = 'Line to Translate = ' + subbedT
# Create Message List
msg = []
msg.append({"role": "system", "content": system})
msg.append({"role": "user", "content": context})
if isinstance(history, list):
for line in history:
msg.append({"role": "user", "content": line})
else:
msg.append({"role": "user", "content": history})
msg.append({"role": "user", "content": user})
response = openai.ChatCompletion.create(
temperature=0,
frequency_penalty=0.2,
presence_penalty=0.2,
model=MODEL,
messages=msg,
request_timeout=TIMEOUT,
)
# Save Translated Text
translatedText = response.choices[0].message.content
totalTokens = [response.usage.prompt_tokens, response.usage.completion_tokens]
# Resub Vars
translatedText = resubVars(translatedText, varResponse[1])
# Remove Placeholder Text
translatedText = translatedText.replace(LANGUAGE +' Translation: ', '')
translatedText = translatedText.replace('Translation: ', '')
translatedText = translatedText.replace('Line to Translate = ', '')
translatedText = translatedText.replace('Translation = ', '')
translatedText = translatedText.replace('Translate = ', '')
translatedText = translatedText.replace(LANGUAGE +' Translation:', '')
translatedText = translatedText.replace('Translation:', '')
translatedText = translatedText.replace('Line to Translate =', '')
translatedText = translatedText.replace('Translation =', '')
translatedText = translatedText.replace('Translate =', '')
translatedText = translatedText.replace('っ', '')
translatedText = translatedText.replace('ッ', '')
translatedText = translatedText.replace('ぁ', '')
translatedText = translatedText.replace('。', '.')
translatedText = translatedText.replace('、', ',')
translatedText = translatedText.replace('?', '?')
translatedText = translatedText.replace('!', '!')
# Return Translation
if len(translatedText) > 15 * len(t) or "I'm sorry, but I'm unable to assist with that translation" in translatedText:
raise Exception
else:
return [translatedText, totalTokens] | [
"prompt.txt"
] |
2024-01-10 | dazedanon/DazedMTLTool | modules~lune.py | import json
import os
from pathlib import Path
import re
import sys
import textwrap
import threading
import time
import traceback
import tiktoken
from colorama import Fore
from dotenv import load_dotenv
import openai
from retry import retry
from tqdm import tqdm
#Globals
load_dotenv()
if os.getenv('api').replace(' ', '') != '':
openai.api_base = os.getenv('api')
openai.organization = os.getenv('org')
openai.api_key = os.getenv('key')
MODEL = os.getenv('model')
TIMEOUT = int(os.getenv('timeout'))
LANGUAGE=os.getenv('language').capitalize()
INPUTAPICOST = .002 # Depends on the model https://openai.com/pricing
OUTPUTAPICOST = .002
PROMPT = Path('prompt.txt').read_text(encoding='utf-8')
THREADS = int(os.getenv('threads')) # Controls how many threads are working on a single file (May have to drop this)
LOCK = threading.Lock()
WIDTH = int(os.getenv('width'))
LISTWIDTH = int(os.getenv('listWidth'))
NOTEWIDTH = 50
MAXHISTORY = 10
ESTIMATE = ''
totalTokens = [0, 0]
NAMESLIST = []
#tqdm Globals
BAR_FORMAT='{l_bar}{bar:10}{r_bar}{bar:-10b}'
POSITION=0
LEAVE=False
BRFLAG = False # If the game uses <br> instead
FIXTEXTWRAP = True
IGNORETLTEXT = False
def handleLune(filename, estimate):
global ESTIMATE, totalTokens
ESTIMATE = estimate
if estimate:
start = time.time()
translatedData = openFiles(filename)
# Print Result
end = time.time()
tqdm.write(getResultString(translatedData, end - start, filename))
with LOCK:
totalTokens[0] += translatedData[1][0]
totalTokens[1] += translatedData[1][1]
return getResultString(['', totalTokens, None], end - start, 'TOTAL')
else:
try:
with open('translated/' + filename, 'w', encoding='UTF-8') as outFile:
start = time.time()
translatedData = openFiles(filename)
# Print Result
end = time.time()
json.dump(translatedData[0], outFile, ensure_ascii=False)
tqdm.write(getResultString(translatedData, end - start, filename))
with LOCK:
totalTokens[0] += translatedData[1][0]
totalTokens[1] += translatedData[1][1]
except Exception as e:
return 'Fail'
return getResultString(['', totalTokens, None], end - start, 'TOTAL')
def openFiles(filename):
with open('files/' + filename, 'r', encoding='UTF-8-sig') as f:
data = json.load(f)
# Map Files
if '.json' in filename:
translatedData = parseJSON(data, filename)
else:
raise NameError(filename + ' Not Supported')
return translatedData
def getResultString(translatedData, translationTime, filename):
# File Print String
totalTokenstring =\
Fore.YELLOW +\
'[Input: ' + str(translatedData[1][0]) + ']'\
'[Output: ' + str(translatedData[1][1]) + ']'\
'[Cost: ${:,.4f}'.format((translatedData[1][0] * .001 * INPUTAPICOST) +\
(translatedData[1][1] * .001 * OUTPUTAPICOST)) + ']'
timeString = Fore.BLUE + '[' + str(round(translationTime, 1)) + 's]'
    if translatedData[2] is None:
# Success
return filename + ': ' + totalTokenstring + timeString + Fore.GREEN + u' \u2713 ' + Fore.RESET
else:
# Fail
try:
raise translatedData[2]
except Exception as e:
traceback.print_exc()
errorString = str(e) + Fore.RED
return filename + ': ' + totalTokenstring + timeString + Fore.RED + u' \u2717 ' +\
errorString + Fore.RESET
def parseJSON(data, filename):
totalTokens = [0, 0]
totalLines = 0
totalLines = len(data)
global LOCK
with tqdm(bar_format=BAR_FORMAT, position=POSITION, total=totalLines, leave=LEAVE) as pbar:
pbar.desc=filename
pbar.total=totalLines
try:
result = translateJSON(data, pbar)
totalTokens[0] += result[0]
totalTokens[1] += result[1]
except Exception as e:
traceback.print_exc()
return [data, totalTokens, e]
return [data, totalTokens, None]
def translateJSON(data, pbar):
textHistory = []
maxHistory = MAXHISTORY
tokens = [0, 0]
speaker = 'None'
for item in data:
# Speaker
if 'name' in item:
if item['name'] not in [None, '-']:
response = translateGPT(item['name'], 'Reply with only the '+ LANGUAGE +' translation of the NPC name', False)
speaker = response[0]
tokens[0] += response[1][0]
tokens[1] += response[1][1]
item['name'] = speaker
else:
speaker = 'None'
# Text
if 'message' in item:
if item['message'] != None:
jaString = item['message']
# Remove any textwrap
if FIXTEXTWRAP == True:
jaString = jaString.replace('\n', ' ')
# Translate
if jaString != '':
response = translateGPT(f'{speaker}: {jaString}', textHistory, True)
tokens[0] += response[1][0]
tokens[1] += response[1][1]
translatedText = response[0]
textHistory.append('\"' + translatedText + '\"')
else:
translatedText = jaString
textHistory.append('\"' + translatedText + '\"')
# Remove added speaker
translatedText = re.sub(r'^.+?\s\|\s?', '', translatedText)
# Textwrap
translatedText = textwrap.fill(translatedText, width=WIDTH)
# Set Data
item['message'] = translatedText
# Keep textHistory list at length maxHistory
if len(textHistory) > maxHistory:
textHistory.pop(0)
currentGroup = []
pbar.update(1)
return tokens
def subVars(jaString):
jaString = jaString.replace('\u3000', ' ')
# Icons
count = 0
iconList = re.findall(r'[\\]+[iIkKwW]+\[[0-9]+\]', jaString)
iconList = set(iconList)
if len(iconList) != 0:
for icon in iconList:
jaString = jaString.replace(icon, '[Ascii_' + str(count) + ']')
count += 1
# Colors
count = 0
colorList = re.findall(r'[\\]+[cC]\[[0-9]+\]', jaString)
colorList = set(colorList)
if len(colorList) != 0:
for color in colorList:
jaString = jaString.replace(color, '[Color_' + str(count) + ']')
count += 1
# Names
count = 0
nameList = re.findall(r'[\\]+[nN]\[.+?\]+', jaString)
nameList = set(nameList)
if len(nameList) != 0:
for name in nameList:
jaString = jaString.replace(name, '[N_' + str(count) + ']')
count += 1
# Variables
count = 0
varList = re.findall(r'[\\]+[vV]\[[0-9]+\]', jaString)
varList = set(varList)
if len(varList) != 0:
for var in varList:
jaString = jaString.replace(var, '[Var_' + str(count) + ']')
count += 1
# Formatting
count = 0
if '笑えるよね.' in jaString:
print('t')
formatList = re.findall(r'[\\]+CL', jaString)
formatList = set(formatList)
if len(formatList) != 0:
for var in formatList:
jaString = jaString.replace(var, '[FCode_' + str(count) + ']')
count += 1
# Put all lists in list and return
allList = [iconList, colorList, nameList, varList, formatList]
return [jaString, allList]
def resubVars(translatedText, allList):
# Fix Spacing and ChatGPT Nonsense
matchList = re.findall(r'\[\s?.+?\s?\]', translatedText)
if len(matchList) > 0:
for match in matchList:
text = match.strip()
translatedText = translatedText.replace(match, text)
# Icons
count = 0
if len(allList[0]) != 0:
for var in allList[0]:
translatedText = translatedText.replace('[Ascii_' + str(count) + ']', var)
count += 1
# Colors
count = 0
if len(allList[1]) != 0:
for var in allList[1]:
translatedText = translatedText.replace('[Color_' + str(count) + ']', var)
count += 1
# Names
count = 0
if len(allList[2]) != 0:
for var in allList[2]:
translatedText = translatedText.replace('[N_' + str(count) + ']', var)
count += 1
# Vars
count = 0
if len(allList[3]) != 0:
for var in allList[3]:
translatedText = translatedText.replace('[Var_' + str(count) + ']', var)
count += 1
# Formatting
count = 0
if len(allList[4]) != 0:
for var in allList[4]:
translatedText = translatedText.replace('[FCode_' + str(count) + ']', var)
count += 1
# Remove Color Variables Spaces
# if '\\c' in translatedText:
# translatedText = re.sub(r'\s*(\\+c\[[1-9]+\])\s*', r' \1', translatedText)
# translatedText = re.sub(r'\s*(\\+c\[0+\])', r'\1', translatedText)
return translatedText
@retry(exceptions=Exception, tries=5, delay=5)
def translateGPT(t, history, fullPromptFlag):
# If ESTIMATE is True just count this as an execution and return.
if ESTIMATE:
enc = tiktoken.encoding_for_model(MODEL)
historyRaw = ''
if isinstance(history, list):
for line in history:
historyRaw += line
else:
historyRaw = history
inputTotalTokens = len(enc.encode(historyRaw)) + len(enc.encode(PROMPT))
outputTotalTokens = len(enc.encode(t)) * 2 # Estimating 2x the size of the original text
totalTokens = [inputTotalTokens, outputTotalTokens]
return (t, totalTokens)
# Sub Vars
varResponse = subVars(t)
subbedT = varResponse[0]
# If there isn't any Japanese in the text just skip
if not re.search(r'[一-龠]+|[ぁ-ゔ]+|[ァ-ヴ]+|[\uFF00-\uFFEF]', subbedT):
return(t, [0,0])
# Characters
context = '```\
Game Characters:\
Character: ソル == Sol - Gender: Female\
Character: ェニ先生 == Eni-sensei - Gender: Female\
Character: 神泉 理央 == Kamiizumi Rio - Gender: Female\
Character: 吉祥寺 アリサ == Kisshouji Arisa - Gender: Female\
```'
# Prompt
if fullPromptFlag:
system = PROMPT
user = 'Line to Translate = ' + subbedT
else:
system = 'Output ONLY the '+ LANGUAGE +' translation in the following format: `Translation: <'+ LANGUAGE.upper() +'_TRANSLATION>`'
user = 'Line to Translate = ' + subbedT
# Create Message List
msg = []
msg.append({"role": "system", "content": system})
msg.append({"role": "user", "content": context})
if isinstance(history, list):
for line in history:
msg.append({"role": "user", "content": line})
else:
msg.append({"role": "user", "content": history})
msg.append({"role": "user", "content": user})
response = openai.ChatCompletion.create(
temperature=0.1,
frequency_penalty=0.2,
presence_penalty=0.2,
model=MODEL,
messages=msg,
request_timeout=TIMEOUT,
)
# Save Translated Text
translatedText = response.choices[0].message.content
totalTokens = [response.usage.prompt_tokens, response.usage.completion_tokens]
# Resub Vars
translatedText = resubVars(translatedText, varResponse[1])
# Remove Placeholder Text
translatedText = translatedText.replace(LANGUAGE +' Translation: ', '')
translatedText = translatedText.replace('Translation: ', '')
translatedText = translatedText.replace('Line to Translate = ', '')
translatedText = translatedText.replace('Translation = ', '')
translatedText = translatedText.replace('Translate = ', '')
translatedText = translatedText.replace(LANGUAGE +' Translation:', '')
translatedText = translatedText.replace('Translation:', '')
translatedText = translatedText.replace('Line to Translate =', '')
translatedText = translatedText.replace('Translation =', '')
translatedText = translatedText.replace('Translate =', '')
translatedText = re.sub(r'Note:.*', '', translatedText)
translatedText = translatedText.replace('っ', '')
# Return Translation
if len(translatedText) > 15 * len(t) or "I'm sorry, but I'm unable to assist with that translation" in translatedText:
raise Exception
else:
return [translatedText, totalTokens]
| [
"prompt.txt"
] |
2024-01-10 | danikagupta/confidentvoter | pages~33_Get_Informed.py | import streamlit as st
from streamlit_extras.app_logo import add_logo
from streamlit_extras.switch_page_button import switch_page
import time
import numpy as np
import openai
import pinecone
import streamlit as st
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
def set_ui_page_get_informed():
st.image("ConfidentVoter.png")
#add_logo("http://placekitten.com/120/120")
def load_faiss(text):
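    # Split the raw text into overlapping chunks, embed them with OpenAI, and return an
    # in-memory FAISS index (not yet called by the chat flow below).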
text_splitter = RecursiveCharacterTextSplitter(chunk_size=3000, chunk_overlap=100,length_function=len, is_separator_regex=False)
docs = text_splitter.create_documents([text])
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(docs, embeddings)
return db
def augmented_content(inp, info):
    # TODO: create the query embedding, run a similarity search (e.g. Pinecone),
    # and return the top matching chunks. For now the embedding is computed but
    # unused, and the full ballot information is returned as-is.
    embedding = openai.Embedding.create(model="text-embedding-ada-002", input=inp)['data'][0]['embedding']
    return info
def show_page_get_informed(ballot_index,ballot_information,ballot_name):
st.markdown(f"# Get Informed about {ballot_name}")
SYSTEM_MESSAGE={"role": "system",
"content": """
You are ConfidentVoter - a helpful App that guides the voters about
the pros and cons of various issues based on their policy preferences.
Remember to keep your answers concise and directly addressing the questions asked,
taking into account the policy preferences that the user has provided.
"""
}
ASSISTANT_MESSAGE={"role": "assistant",
"content": f"""
What would you like to know about {ballot_name}?
Please remember to provide me with your policy preferences so I can provide you with the best possible information.
"""
}
if "messages" not in st.session_state:
st.session_state.messages = []
st.session_state.messages.append(SYSTEM_MESSAGE)
st.session_state.messages.append(ASSISTANT_MESSAGE)
for message in st.session_state.messages:
if message["role"] != "system":
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("Help me understand the implications of my vote."):
#print(f"Prompt: {prompt}")
retreived_content = augmented_content(prompt,ballot_information)
#print(f"Retreived content: {retreived_content}")
prompt_guidance=f"""
Please guide the user based on the following information from reputable sources:
{retreived_content}
The user's question was: {prompt}
"""
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
messageList=[{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages]
messageList.append({"role": "user", "content": prompt_guidance})
for response in openai.ChatCompletion.create(
model="gpt-4",
messages=messageList, stream=True):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
#
#
if __name__ == "__main__":
set_ui_page_get_informed()
idx=st.session_state['ballot_index']
inf=st.session_state['ballot_information']
    mea = st.session_state['measure']
ballot_name=st.session_state['ballot_name']
#st.write(f"Get Informed: IDX={idx} and INF={inf}")
#st.write(f"Environ={os.environ}")
show_page_get_informed(idx,inf,ballot_name) | [
"\n Please guide the user based on the following information from reputable sources:\n PLACEHOLDER\n The user's question was: PLACEHOLDER\n ",
"content",
"\n What would you like to know about PLACEHOLDER?\n Please remember to provide me with your policy preferences so I can provide you with the best possible information.\n ",
"\n You are ConfidentVoter - a helpful App that guides the voters about \n the pros and cons of various issues based on their policy preferences.\n Remember to keep your answers concise and directly addressing the questions asked,\n taking into account the policy preferences that the user has provided.\n "
] |
2024-01-10 | vishalpatel2890/hip_hop_subgenres | topic_models~topic_modeling.py | import re
import numpy as np
import pandas as pd
from pprint import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
from nltk.corpus import stopwords
import nltk
nltk.download('stopwords')
nlp = spacy.load('en', disable=['parser', 'ner'])
#function to tokenize all lyrics in pd Series (removes punctuation, lowercase everything)
def tokenize_lyrics(sentences):
for sentence in sentences:
yield(gensim.utils.simple_preprocess(str(sentence).lower(), deacc=True)) # deacc=True removes punctuations
#function remove stop words
def remove_stopwords(texts, stop_words):
return [[word for word in doc if word not in stop_words] for doc in texts]
#make bigrams
def make_bigrams(texts, bigram_mod):
return [bigram_mod[doc] for doc in texts]
#make trigrams
def make_trigrams(texts, trigram_mod, bigram_mod):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
#lemmatize
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
nlp = spacy.load('en', disable=['parser', 'ner'])
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
#function to return list of additoinal stop_words to remove
def get_stop_words(lyrics):
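    # Extend NLTK's English stopwords with the most frequent tokens in the lyric corpus (domain-specific filler words)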
stop_words = stopwords.words('english')
#list of list of tokens
tokenized = list(tokenize_lyrics(lyrics))
# Build the bigram and trigram models
bigram = gensim.models.Phrases(tokenized, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[tokenized], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# Remove Stop Words
data_words_nostops_stop = remove_stopwords(tokenized, stop_words)
# Form Trigrams
data_words_trigrams_stop = make_trigrams(data_words_nostops_stop, trigram_mod, bigram_mod)
#get words into one continuous list
all_words_one_list = [word for list_of_words in data_words_trigrams_stop for word in list_of_words]
#count frequency of each word
from collections import Counter
counts = Counter(all_words_one_list)
    #add additional stopwords to remove (125 most frequently appeared words)
stop_words_to_add = []
for word in counts.most_common(125):
stop_words_to_add.append(word[0])
stop_words.extend(stop_words_to_add)
return stop_words
def generate_corpus(lyrics):
tokenized = list(tokenize_lyrics(lyrics))
stop_words = get_stop_words(lyrics)
# Remove Stop Words Again Including additional ones added
data_words_nostops = remove_stopwords(tokenized, stop_words)
# Build the bigram and trigram models
bigram = gensim.models.Phrases(tokenized, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[tokenized], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# Form Trigrams Again
data_words_trigrams = make_trigrams(data_words_nostops, trigram_mod, bigram_mod)
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
# #lemmatize
data_lemmatized = lemmatization(data_words_trigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
# Create Dictionary
id2word = corpora.Dictionary(data_lemmatized)
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in data_lemmatized]
return (corpus, id2word)
def build_model(lyrics, corpus, id2word, num_topics=5, ):
#build and train LDA Model
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=num_topics,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
#Compute Perplexity - a measure of how good the model is. lower the better
perplexity = lda_model.log_perplexity(corpus)
# Compute Coherence Score
# coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
# coherence_lda = coherence_model_lda.get_coherence()
print('\nPerplexity: ', perplexity)
# print('\nCoherence Score: ', coherence_lda)
return lda_model
def visualize_topics(model, corpus, id2word):
# Visualize the topics
pyLDAvis.enable_notebook()
    vis = pyLDAvis.gensim.prepare(model, corpus, id2word)
return vis
#get list of topics for each song
def get_topics_for_songs(model, corpus):
    topics = []
    for idx, row in enumerate(model[corpus]):
row = sorted(row[0], key=lambda x: (x[1]), reverse=True)
topics.append(row[0][0])
return topics
#get topics
#pprint(lda_model.print_topics())
#
#pyLDAvis.save_html(vis, '5_topics/5_topics.html')
| [] |
2024-01-10 | veeso/padrepio-bot | telegram~padrepio_bot~__main__.py | #!/usr/bin/python3
"""
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2023 Christian "veeso" Visintin
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
"""
from logging import info, debug, error
from os import environ, unlink
from sys import argv
import logging
import openai
from tempfile import NamedTemporaryFile
from .speech.tts import TTSError
from .speech.google_translate import GoogleTranslateTTS
try:
from telegram.ext._updater import Updater
from telegram._update import Update
from telegram.ext._callbackcontext import CallbackContext
from telegram.ext._commandhandler import CommandHandler
except ImportError:
from telegram.ext.updater import Updater
from telegram.update import Update
from telegram.ext.callbackcontext import CallbackContext
from telegram.ext.commandhandler import CommandHandler
CHATGPT_API_KEY = environ.get("CHATGPT_API_KEY", "")
LOG_LEVEL = environ.get("LOG_LEVEL", "info")
TELEGRAM_API_KEY = environ.get("TELEGRAM_API_KEY", "")
def main() -> None:
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=get_loglevel(LOG_LEVEL),
)
updater = Updater(TELEGRAM_API_KEY, use_context=True)
info(f"initialized telegram updater {TELEGRAM_API_KEY}")
updater.dispatcher.add_handler(CommandHandler("start", start))
updater.dispatcher.add_handler(CommandHandler("padrepio", padrepio))
updater.dispatcher.add_handler(CommandHandler("padrepio-dice", padrepio_dice))
updater.dispatcher.add_handler(CommandHandler("help", help))
info("starting telegram bot")
# Start the Bot
info("starting bot")
updater.start_polling()
updater.idle()
exit(0)
def send_request(text: str) -> str:
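    # Single-turn ChatGPT call: a Padre Pio persona system message plus the user's question; returns the reply text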
openai.api_key = CHATGPT_API_KEY
messages = [
{
"role": "system",
"content": "Sei Padre Pio, noto anche come Pio da Pietrelcina. Il tuo vero nome è Francesco Forgione, nato il 25 maggio 1887 e morto il 23 settembre 1968. Sei stato un presbitero e mistico italiano. Nel 2002 sei stato proclamato santo da Giovanni Paolo II. Ti piace ringraziare gli utenti per le domande e pregare, mentre ti rifiuti di rispondere a chi ti bestemmia contro.",
},
{
"role": "user",
"content": f'Rispondi a "{text}" considerando l\'informazione data, in italiano come se fossi Padre Pio parlando in prima persona.',
},
]
chat = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
reply = chat.choices[0].message.content
return reply
def start(update: Update, _: CallbackContext):
update.message.reply_text(
"Buongiorno, benvenuto. Io sto pregando e prestando servizio ai fratelli e sorelle bisognosi. La pace di Gesù sia con te. Chiedimi qualsiasi cosa con /padrepio <domanda>"
)
def padrepio(update: Update, context: CallbackContext):
text: str = (
update.message.text.replace("/padrepio", "")
.replace(context.bot.name, "")
.strip()
)
debug(f"text: {text}")
if len(text) == 0:
context.bot.send_message(
chat_id=update.message.chat_id, text="Ti ascolto figliolo"
)
return
try:
debug("sending request to padre pio")
answer = send_request(text)
except Exception as err:
error(f"failed to get openai response: {err}")
return reply_err(update, f"Non riesco a contattare Padre Pio: {err}")
debug("got an answer from padre pio")
context.bot.send_message(chat_id=update.message.chat_id, text=answer)
def padrepio_dice(update: Update, context: CallbackContext):
text: str = (
update.message.text.replace("/padrepio-dice", "")
.replace(context.bot.name, "")
.strip()
)
debug(f"text: {text}")
if len(text) == 0:
context.bot.send_message(
chat_id=update.message.chat_id, text="Ti ascolto figliolo"
)
return
try:
debug("sending request to padre pio")
answer = send_request(text)
except Exception as err:
error(f"failed to get openai response: {err}")
return reply_err(update, f"Non riesco a contattare Padre Pio: {err}")
debug("got an answer from padre pio")
# get tts for answer
tts_engine = GoogleTranslateTTS()
try:
debug(f"getting speech for {answer}")
audio = tts_engine.get_speech(answer)
except TTSError as err:
error(f"failed to get tts speech: {err}")
return reply_err(update, "Padre Pio non può parlare in questo momento")
debug("correctly got the audio from tts engine")
# writing audio to tempfile
with NamedTemporaryFile("w+b", suffix=".ogg", delete=False) as f:
audio.export(f.name, "ogg")
f.close()
debug(f"audio exported to {f.name}")
# sending document
debug("sending voice message...")
context.bot.send_voice(
chat_id=update.message.chat_id,
voice=open(f.name, "rb"),
duration=audio.duration_seconds,
)
info("audio file sent")
unlink(f.name)
debug("file removed")
def reply_err(update: Update, text: str):
update.message.reply_text(text)
def help(update: Update, _: CallbackContext):
update.message.reply_text(
"""/padrepio <testo> - chiedi consiglio a padre pio
/padrepio-dice <testo> - chiedi consiglio a Padre Pio, ma ti risponde parlando
/help - mostra questo messaggio"""
)
def get_loglevel(level: str) -> int:
    # dict.get never raises for a missing key, so fall back to ERROR explicitly
    return {
        "info": logging.INFO,
        "error": logging.ERROR,
        "debug": logging.DEBUG,
        "warn": logging.WARN,
    }.get(level, logging.ERROR)
if __name__ == "__main__":
args = argv[1:]
if len(args) == 0:
main()
else:
prompt = " ".join(args)
print(send_request(prompt))
| [
"Sei Padre Pio, noto anche come Pio da Pietrelcina. Il tuo vero nome è Francesco Forgione, nato il 25 maggio 1887 e morto il 23 settembre 1968. Sei stato un presbitero e mistico italiano. Nel 2002 sei stato proclamato santo da Giovanni Paolo II. Ti piace ringraziare gli utenti per le domande e pregare, mentre ti rifiuti di rispondere a chi ti bestemmia contro.",
" ",
"Rispondi a \"PLACEHOLDER\" considerando l'informazione data, in italiano come se fossi Padre Pio parlando in prima persona."
] |
2024-01-10 | Riken-Shah/aashvi-automation | automation.py | import os
import uuid
from datetime import datetime
import pyperclip
import shutil
import gspread
import undetected_chromedriver as uc
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from time import sleep
import requests
import json
from googleapiclient.http import MediaFileUpload
from googleapiclient.discovery import build
import openai
import base64
import random
from utils import setup_openai, creds, get_gspread_client, generate_uuid, send_messages
setup_openai()
AUTOMATIC1111_URL = ""
client = gspread.authorize(creds)
gsheet = get_gspread_client()
driver = None
def upload_image_to_drive(filename, base64_image, folder_id):
# decode the base64 image string
image_data = base64.b64decode(base64_image)
# write the image data to a file
with open(f'/Users/rikenshah/Desktop/Fun/insta-model/{filename}', 'wb') as f:
f.write(image_data)
try:
drive_service = build('drive', 'v3', credentials=creds)
# create the file metadata
file_metadata = {'name': filename, 'parents': [folder_id]}
m = MediaFileUpload(f'/Users/rikenshah/Desktop/Fun/insta-model/{filename}', mimetype='image/png')
# upload the image data
media = drive_service.files().create(body=file_metadata, media_body=m, fields='id',
media_mime_type="image/png").execute()
# get the file ID and public URL of the uploaded image
file_id = media.get('id')
return f"https://drive.google.com/uc?export=view&id={file_id}"
except Exception as e:
print(f'An error occurred: {e}')
raise
def generate_caption(background):
prompt = f"generate a instagram caption for this prompt 'a beautiful woman at a {background} background.' it should be creative, " \
f"cute and funny. Feel Good. Use Emojis. In first person. Also add relevant hashtags."
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt},
]
)
return completion.choices[0].message["content"].replace('"', "").strip()
def generate_story_caption(background):
prompt = f"generate a instagram story caption for the scene of {background} it should be creative, cute and funny. Feel Good. Use Emojis. In first person. Also add relevant hashtags. keep it only to few words"
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt},
]
)
return completion.choices[0].message["content"].replace('"', "").strip()
def give_random_location_to_travel():
res = requests.get("https://www.randomlists.com/data/world-cities-3.json")
countries = json.loads(res.text)["RandL"]["items"]
country = random.choice(countries)
return country["name"] + ", " + country["detail"]
def generate_multiple_prompts(location):
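    # Ask ChatGPT for several place/outfit ideas at the given location, then wrap each description
    # in the fixed character and style tokens to build Stable Diffusion prompts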
prompt = f"""Fashion and lifestyle influencer traveling to {location}, give me 4 list of places to go wearing different
stylish clothes to wear, describe it in details. describe background in details. as a prompt you give to stable
diffusion, describe the background, scene in as much details as you can, use the following format "Place Name: ...
Description: ..." """
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0.9,
messages=[
{"role": "system",
"content": "You are instagram influencer and prompt engineer. Respond in third person in "
"the list which was asked, nothing else."},
{"role": "user", "content": prompt},
]
)
text = completion.choices[0].message["content"]
final_prompts = []
starting_prompt = "a beautiful and cute aashvi-500, single girl,"
ending_prompt = "long haircut, light skin, (high detailed skin:1.3), 8k UHD DSLR, bokeh effect, soft lighting, " \
"high quality"
index = 0
sub_prompt = []
print(text)
location = ""
for prompt in text.split("\n"):
try:
if prompt == "":
continue
is_place_name = False
if not ("Place Name" in prompt or "Description" in prompt):
continue
is_place_name = "Place Name" in prompt
# prompt = prompt[prompt.find(".") + 1:].strip()
_, prompt = prompt.split(":")
print(prompt)
if is_place_name:
index += 1
location = prompt
continue
print(is_place_name, index)
prompt = f"{starting_prompt} at {location}, {prompt}, {ending_prompt}"
final_prompts.append([location, prompt])
index += 1
if index >= 1:
pass
# index = 0
# print(is_place_name, index, sub_prompt)
# final_prompts.append(sub_prompt)
# sub_prompt = []
except Exception as e:
print(e)
continue
return final_prompts
def generate_multiple_story_prompts(location):
prompt = f"""Give me prompts describing the beauty of {location}. Doing different activity, Be very descriptive for background."""
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0.71,
max_tokens=400,
messages=[
{"role": "system", "content": "You are instagram influencer and prompt engineer"},
{"role": "user", "content": prompt},
]
)
text = completion.choices[0].message["content"]
final_prompts = []
for prompt in text.split("\n"):
if prompt == "":
continue
prompt = prompt[prompt.find(".") + 1:].strip()
final_prompts.append(prompt)
return final_prompts
# def generate_more_locations(how_many):
# prompt = f"""this is my prompt "a portrait photo of beautiful Aashvi-500,standing in a modern art museum"
# Just respond with the imaginary scene location, random scene for instagram in the list format. no explanation just give me {how_many} of this."""
#
# completion = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# temperature=0.3,
# frequency_penalty=0.8,
# messages=[
# {"role": "user", "content": prompt},
# ]
# )
# points = completion.choices[0].message["content"].replace('"', "").replace("'", "")
#
# # read the above response and generate a list of prompts
# clean_points = []
# for point in points.split("\n"):
# clean_points.append(point.replace("\n", "")[point.find(".") + 1:].strip())
#
# return clean_points
# def generate_more_rows(last_index, how_many=50):
# negative_prompt = """(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck"""
# locations = generate_more_locations(how_many)
# rows = []
#
# for location in locations:
# positive_prompt = f"""a portrait photo of beautiful aashvi-500,{location}, single person, smiling, white skin, cloudy eyes, thick long haircut, light skin, (high detailed skin:1.3), 8k UHD DSLR, bokeh effect, soft lighting, high quality"""
# print(f"A{last_index + 2}:D{last_index + 2}")
# rows.append({
# "range": f"A{last_index + 2}:D{last_index + 2}",
# "values": [[last_index, positive_prompt, negative_prompt, location, ]]
# })
# last_index += 1
#
# # add more rows at the end the sheet
# sheet.batch_update(rows, value_input_option="USER_ENTERED")
def check_if_automatic1111_is_active():
global AUTOMATIC1111_URL
with open("/Users/rikenshah/Desktop/Fun/insta-model/automatic1111_url.txt", "r") as f:
AUTOMATIC1111_URL = f.read().strip()
if AUTOMATIC1111_URL != "":
print("URL:", AUTOMATIC1111_URL + "sdapi/v1/memory")
response = requests.get(AUTOMATIC1111_URL + "sdapi/v1/memory")
if response.status_code == 200:
print("Automatic1111 is active")
return AUTOMATIC1111_URL
print("Automatic1111 is not active")
return False
def close_automatic1111():
global AUTOMATIC1111_URL, driver
if driver is not None:
driver.quit()
driver = None
AUTOMATIC1111_URL = ""
def get_driver():
global driver
options = uc.ChromeOptions()
# uc.TARGET_VERSION = 120
# or specify your own chromedriver binary (why you would need this, i don't know)
# uc.install(
# executable_path='/Users/rikenshah/Desktop/Fun/insta-model/chromedriver.sh',
# )
driver = uc.Chrome(
options=options, user_data_dir="/Users/rikenshah/Desktop/Fun/insta-model/profile-2",
)
return driver
def connect_to_automatic1111_api(user=0):
global driver, AUTOMATIC1111_URL, options
def get_automatic1111_url():
global AUTOMATIC1111_URL, options
retry = 15
while retry != 0:
try:
found = False
a = None
for a in driver.find_elements(By.XPATH, "//colab-static-output-renderer/div[1]/div/pre/a"):
print(a.get_attribute("href"))
if a.get_attribute(
"href").endswith("trycloudflare.com/") or a.get_attribute("href").endswith(
"ngrok-free.app") or a.get_attribute("href").endswith("ngrok-free.app/"):
found = True
break
if not found:
raise NoSuchElementException("Couldn't find the link")
AUTOMATIC1111_URL = a.get_attribute("href")
print("Found the link " + AUTOMATIC1111_URL)
break
except NoSuchElementException:
print("Couldn't found the link, retrying in 10 secs...")
pass
except Exception as e:
print(f"An error occurred: {e}")
sleep(10)
retry -= 1
if check_if_automatic1111_is_active():
return
AUTOMATIC1111_URL = ""
if driver is not None:
driver.quit()
# driver.close()
options = uc.ChromeOptions()
driver = get_driver()
driver.get(f"https://colab.research.google.com/drive/1Ezb1humDZNX35w0YJHWbk7E7z-qA9WrX?authuser={user}")
sleep(30)
    # Send CMD + M, . to interrupt the currently running runtime
try:
driver.find_element(By.CSS_SELECTOR, ".cell.running")
print("Runtime is already running...")
AUTOMATIC1111_URL = ""
get_automatic1111_url()
if check_if_automatic1111_is_active():
return
action = ActionChains(driver).key_down(Keys.COMMAND).send_keys("m").send_keys(".").key_up(Keys.COMMAND)
action.perform()
sleep(10)
try:
driver.find_element(By.XPATH, "//mwc-button[text()='Yes']").click()
sleep(10)
print("Stopping the runtime...")
except NoSuchElementException:
pass
except NoSuchElementException:
pass
    # Send CMD + F9 to run all cells
driver.find_element(By.TAG_NAME, "body").click()
sleep(4)
driver.find_element(By.TAG_NAME, "body").send_keys(Keys.COMMAND + Keys.F9)
print("Waiting for the runtime to start...")
sleep(5)
try:
driver.find_element(By.XPATH, "//mwc-button[text()='Run anyway']").click()
sleep(10)
except NoSuchElementException:
pass
print("Running Automatic1111...")
sleep(20)
try:
driver.find_element(By.XPATH, "//mwc-button[text()='Connect without GPU']")
close_automatic1111()
connect_to_automatic1111_api(user + 1)
return
except NoSuchElementException:
pass
sleep(80)
get_automatic1111_url()
if not check_if_automatic1111_is_active():
print("Couldn't find the link, please try again")
driver.quit()
driver = None
exit(1)
return None
def get_payload(type, prompt, seed, negative_prompt):
payload = {}
if type == "posts":
payload = {
"prompt": prompt,
# "enable_hr": True,
# "hr_resize_x": 800,
# "hr_resize_y": 800,
# "hr_upscaler": "R-ESRGAN 4x+",
# "denoising_strength": 0.8,
# "hr_second_pass_steps": 20,
"seed": seed,
"sampler_name": "DPM++ 2M",
"batch_size": 1,
"n_iter": 1,
"steps": 120,
"cfg_scale": 3.5,
"width": 512,
"height": 512,
"restore_faces": True,
"negative_prompt": negative_prompt,
"send_images": True,
"save_images": False,
}
elif type == "story":
payload = {
"prompt": prompt,
"enable_hr": False,
# "hr_resize_x": 1080,
# "hr_resize_y": 1080,
# "hr_upscaler": "R-ESRGAN 4x+",
# "denoising_strength": 0.8,
# "hr_second_pass_steps": 20,
"seed": seed,
"sampler_name": "DPM++ 2M Karras",
"batch_size": 1,
"n_iter": 1,
"steps": 100,
"cfg_scale": 7,
"width": 720,
"height": 1080,
"restore_faces": True,
"negative_prompt": negative_prompt,
"send_images": True,
"save_images": False,
}
return payload
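# Minimal usage sketch (assumes a reachable Automatic1111 instance; the endpoint and response
# fields mirror generate_posts_and_caption() below, the prompt values are made up):
#   payload = get_payload("story", "a cozy rooftop cafe at sunset", -1, "blurry, low quality")
#   resp = requests.post(f"{AUTOMATIC1111_URL}sdapi/v1/txt2img",
#                        headers={"Content-Type": "application/json"}, data=json.dumps(payload))
#   img_b64 = resp.json()["images"][0]  # base64-encoded image string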
def generate_posts_and_caption(sheet):
content = sheet.get_all_records(value_render_option="FORMULA")
image_cell = sheet.find('image')
caption_cell = sheet.find('caption')
generated_on = sheet.find('generated_on')
hyperlink = sheet.find('hyperlink_image')
negative_prompt = """(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing,
anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate,
morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed,
blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions,
malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck """
for row in content:
try:
if row['prompt'] != '':
if row['image'] == '':
connect_to_automatic1111_api()
payload = get_payload(row['type'], row['prompt'], row['seed'] if row["seed"] != "" else -1,
row['negative_prompt'] if 'negative_prompt' in row else negative_prompt)
headers = {'Content-Type': 'application/json'}
print(f"Running for {row['prompt']} and location {row['location']}")
response = requests.post(f'{AUTOMATIC1111_URL}sdapi/v1/txt2img', headers=headers,
data=json.dumps(payload), timeout=900)
if response.status_code != 200:
print("Failed to generate image with following error", response.json())
close_automatic1111()
exit()
# decode the image data and upload it to the sheet
img_data = response.json()['images'][0]
print("Image generated successfully")
# img_data = ""
index = row["index"] + 1
# upload image to Google Drive
image_url = upload_image_to_drive(f"{index}-aashvi.png", img_data,
'1rEysVX6M0vEZFYGbdDVc96G4ZBXYhDDs')
sheet.update_cell(image_cell.row + index, image_cell.col, f'=IMAGE("{image_url}", 4, 120, 120)')
# sheet.update_cell(image_cell.row + index, image_cell.col, image_url)
sheet.update_cell(generated_on.row + index, generated_on.col,
datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
# image hyperlink
sheet.update_cell(hyperlink.row + index, hyperlink.col, f'=HYPERLINK("{image_url}", "Link")')
if row['caption'] == '' and row['type'] == 'posts':
# generate the caption
try:
if row['type'] == "posts":
caption = generate_caption(row['location'])
else:
caption = generate_story_caption(row['prompt'])
sheet.update_cell(caption_cell.row + int(row["index"]) + 1, caption_cell.col, caption)
except Exception as e:
print(e)
continue
except Exception as e:
print("something went wrong", e)
send_messages(f"Something went wrong while generating image {e}")
continue
close_automatic1111()
def get_last_index():
if len(gsheet.get_all_values()) == 0:
return -1
return len(gsheet.get_all_records())
def generate_random_seed():
return -1 # random.randint(0, 300000)
columns = ["index", "type", "prompt", "location", "group_id", "seed", "image", "generated_on", "caption", "approved",
"posted_on_instagram", "hyperlink_image"]
def non_posted_instagram_posts():
values = gsheet.get_all_records(value_render_option="FORMULA")
group_ids = set()
for row in values:
if row["type"] == "posts" and row["posted_on_instagram"] == "":
print(row["group_id"], row["index"])
group_ids.add(row["group_id"])
return len(group_ids)
def approved_non_posted_instagram_posts():
values = gsheet.get_all_records(value_render_option="FORMULA")
group_ids = set()
for row in values:
if row["type"] == "posts" and row["posted_on_instagram"] == "" and row["approved"] == "" and row["image"] != "":
group_ids.add(row["group_id"])
return len(group_ids)
def non_posted_story():
values = gsheet.get_all_records(value_render_option="FORMULA")
total = 0
for row in values:
if row["type"] == "story" and row["posted_on_instagram"] == "":
total += 1
return total
def approved_non_posted_story():
values = gsheet.get_all_records(value_render_option="FORMULA")
total = 0
for row in values:
if row["type"] == "story" and row["posted_on_instagram"] == "" and row["image"] != "" and row["approved"] == "":
total += 1
return total
def v1_generate_prompts():
# if non_posted_instagram_posts() >= 5:
# print("There are more than 5 non posted instagram posts. Please post them first")
# return
last_index = get_last_index()
if last_index == -1:
last_index = 0
gsheet.insert_row(columns)
values = []
location = get_location()
prompts = generate_multiple_prompts(location)
group_id = generate_uuid()
offset = last_index
seed = generate_random_seed()
first_caption = False
caption = ""
for prompt in prompts:
values.append(
[offset, "posts", prompt[1], prompt[0] + "," + location, group_id, seed, "", "", caption, "", "", ""])
offset += 1
if not first_caption:
caption = "-"
first_caption = True
gsheet.insert_rows(values, row=last_index + 2)
def get_location():
with open("/Users/rikenshah/Desktop/Fun/insta-model/location.txt", "r") as f:
return f.read().strip()
def new_location(location):
with open("/Users/rikenshah/Desktop/Fun/insta-model/location.txt", "w") as f:
f.write(location)
def v1_generate_story_idea():
# if non_posted_story() >= 5:
# print("There are more than 5 non posted story. Please post them first")
# return
last_index = get_last_index()
if last_index == -1:
last_index = 0
gsheet.insert_row(columns)
values = []
prompts = generate_multiple_story_prompts(get_location())
offset = last_index
for prompt in prompts:
values.append([offset, "story", prompt, "", "", -1, "", "", "", "y", "", ""])
offset += 1
gsheet.insert_rows(values[:2], row=last_index + 2)
if __name__ == "__main__":
# connect_to_automatic1111_api()
# print(approved_non_posted_instagram_posts())
# v1_generate_prompts()
# v1_generate_story_idea()
is_running_path = "/Users/rikenshah/Desktop/Fun/insta-model/is_running.txt"
with open(is_running_path, "r") as f:
is_running = f.read().strip()
if is_running == "yep":
print("Already running")
exit()
send_messages("Running image and caption generation...")
with open(is_running_path, "w") as f:
f.write("yep")
generate_posts_and_caption(gsheet)
send_messages("Stopping image and caption generation")
with open(is_running_path, "w") as f:
f.write("nope")
| [
"generate a instagram caption for this prompt 'a beautiful woman at a PLACEHOLDER background.' it should be creative, cute and funny. Feel Good. Use Emojis. In first person. Also add relevant hashtags.",
"You are instagram influencer and prompt engineer. Respond in third person in the list which was asked, nothing else.",
"a beautiful and cute aashvi-500, single girl,",
"You are instagram influencer and prompt engineer",
"generate a instagram story caption for the scene of PLACEHOLDER it should be creative, cute and funny. Feel Good. Use Emojis. In first person. Also add relevant hashtags. keep it only to few words",
"Give me prompts describing the beauty of PLACEHOLDER. Doing different activity, Be very descriptive for background.",
"(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, \n anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, \n morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, \n blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, \n malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck ",
"PLACEHOLDER at PLACEHOLDER, PLACEHOLDER, PLACEHOLDER",
"Fashion and lifestyle influencer traveling to PLACEHOLDER, give me 4 list of places to go wearing different \n stylish clothes to wear, describe it in details. describe background in details. as a prompt you give to stable \n diffusion, describe the background, scene in as much details as you can, use the following format \"Place Name: ... \n Description: ...\" ",
"[]",
"long haircut, light skin, (high detailed skin:1.3), 8k UHD DSLR, bokeh effect, soft lighting, high quality"
] |
2024-01-10 | ankit071105/docsgpt | scripts~ingest_rst.py | from pathlib import Path
from langchain.text_splitter import CharacterTextSplitter
import faiss
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
import pickle
import dotenv
import os
dotenv.load_dotenv()
# Here we load in the data; in this case, the pandas documentation in .rst format.
ps = list(Path("pandasdocs/").glob("**/*.rst"))
# parse all child directories
data = []
sources = []
for p in ps:
with open(p) as f:
data.append(f.read())
sources.append(p)
# Here we split the documents, as needed, into smaller chunks.
# We do this due to the context limits of the LLMs.
text_splitter = CharacterTextSplitter(chunk_size=1500, separator="\n")
docs = []
metadatas = []
for i, d in enumerate(data):
splits = text_splitter.split_text(d)
docs.extend(splits)
metadatas.extend([{"source": sources[i]}] * len(splits))
# Here we create a vector store from the documents and save it to disk.
store = FAISS.from_texts(docs, OpenAIEmbeddings(), metadatas=metadatas)
faiss.write_index(store.index, "docs.index")
store.index = None
with open("faiss_store.pkl", "wb") as f:
pickle.dump(store, f)
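# Optional sanity check: a minimal loading sketch. The VERIFY_STORE flag is just a local
# convenience (not part of the original script). Because the FAISS index is written separately
# and store.index was set to None before pickling, it has to be re-attached after loading.
if os.getenv("VERIFY_STORE") == "1":
    with open("faiss_store.pkl", "rb") as f:
        store_check = pickle.load(f)
    store_check.index = faiss.read_index("docs.index")
    # Throwaway query to confirm the round-trip works.
    print(store_check.similarity_search("How do I create a DataFrame?", k=1)[0])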
| [] |
2024-01-10 | muellerberndt/mini-agi | miniagi.py | """
This module provides the `MiniAGI` class, an implementation of an autonomous agent which interacts
with a user and performs tasks, with support for real-time monitoring of its actions, criticisms on
its performance, and retaining memory of actions.
"""
# pylint: disable=invalid-name, too-many-arguments, too-many-instance-attributes, unspecified-encoding
import os
import sys
import re
import platform
import urllib
from pathlib import Path
from urllib.request import urlopen
from dotenv import load_dotenv
from termcolor import colored
import openai
from thinkgpt.llm import ThinkGPT
import tiktoken
from bs4 import BeautifulSoup
from spinner import Spinner
from commands import Commands
from exceptions import InvalidLLMResponseError
operating_system = platform.platform()
PROMPT = f"You are an autonomous agent running on {operating_system}." + '''
OBJECTIVE: {objective} (e.g. "Find a recipe for chocolate chip cookies")
You are working towards the objective on a step-by-step basis. Previous steps:
{context}
Your task is to respond with the next action.
Supported commands are:
command | argument
-----------------------
memorize_thoughts | internal debate, refinement, planning
execute_python | python code (multiline)
execute_shell | shell command (non-interactive, single line)
ingest_data | input file or URL
process_data | prompt|input file or URL
web_search | keywords
talk_to_user | what to say
done | none
The mandatory action format is:
<r>[YOUR_REASONING]</r><c>[COMMAND]</c>
[ARGUMENT]
ingest_data and process_data cannot process multiple file/url arguments. Specify 1 at a time.
Use process_data to process large amounts of data with a larger context window.
Python code run with execute_python must end with an output "print" statement.
Do not search the web for information that GPT3/GPT4 already knows.
Use memorize_thoughts to organize your thoughts.
memorize_thoughts argument must not be empty!
Send the "done" command if the objective was achieved.
RESPOND WITH EXACTLY ONE THOUGHT/COMMAND/ARG COMBINATION.
DO NOT CHAIN MULTIPLE COMMANDS.
NO EXTRA TEXT BEFORE OR AFTER THE COMMAND.
DO NOT REPEAT PREVIOUSLY EXECUTED COMMANDS.
Each action returns an observation. Important: Observations may be summarized to fit into your limited memory.
Example actions:
<r>Think about skills and interests that could be turned into an online job.</r><c>memorize_thoughts</c>
I have experience in data entry and analysis, as well as social media management.
(...)
<r>Search for websites with chocolate chip cookies recipe.</r><c>web_search</c>
chocolate chip cookies recipe
<r>Ingest information about chocolate chip cookies.</r><c>ingest_data</c>
https://example.com/chocolate-chip-cookies
<r>Read the local file /etc/hosts.</r><c>ingest_data</c>
/etc/hosts
<r>Extract information about chocolate chip cookies.</r><c>process_data</c>
Extract the chocolate cookie recipe|https://example.com/chocolate-chip-cookies
<r>Summarize this Stackoverflow article.</r><c>process_data</c>
Summarize the content of this article|https://stackoverflow.com/questions/1234/how-to-improve-my-chatgpt-prompts
<r>Review this code for security issues.</r><c>process_data</c>
Review this code for security vulnerabilities|/path/to/code.sol
<r>I need to ask the user for guidance.</r><c>talk_to_user</c>
What is the URL of a website with chocolate chip cookies recipes?
<r>Write 'Hello, world!' to file</r><c>execute_python</c>
with open('hello_world.txt', 'w') as f:
f.write('Hello, world!')
<r>The objective is complete.</r><c>done</c>
'''
CRITIC_PROMPT = '''
You are a critic reviewing the actions of an autonomous agent.
Evaluate the agent's performance. It should:
- Make real-world progress towards the objective
- Take action instead of endlessly talking to itself
- Not perform redundant or unnecessary actions
- Not attempt actions that cannot work (e.g. watching a video)
- Not keep repeating the same command
- Communicate results to the user
Make concise suggestions for improvements.
Provide recommended next steps.
Keep your response as short as possible.
EXAMPLE:
Criticism: You have been pretending to order pizza but have not actually
taken any real-world action. You should course-correct.
Recommended next steps:
1. Request an Uber API access token from the user.
2. Use the Uber API to order pizza.
AGENT OBJECTIVE:
{objective}
AGENT HISTORY:
{context}
'''
RETRIEVAL_PROMPT = "You will be asked to process data from a URL or file. You do not"\
" need to access the URL or file yourself, it will be loaded on your behalf"\
" and included as 'INPUT_DATA'."
OBSERVATION_SUMMARY_HINT = "Summarize the text using short sentences and abbreviations."
HISTORY_SUMMARY_HINT = "You are an autonomous agent summarizing your history."\
"Generate a new summary given the previous summary of your "\
"history and your latest action. Include a list of all previous actions. Keep it short."\
"Use short sentences and abbrevations."
class MiniAGI:
"""
Represents an autonomous agent.
Attributes:
agent: An instance of `ThinkGPT`, used to generate the agent's actions.
summarizer: An instance of `ThinkGPT`, used to generate summaries of the agent's history.
objective (str): The objective the agent is working towards.
max_context_size (int): The maximum size of the agent's short-term memory (in tokens).
max_memory_item_size (int): The maximum size of a memory item (in tokens).
debug (bool): Indicates whether to print debug information.
summarized_history (str): The summarized history of the agent's actions.
criticism (str): The criticism of the agent's last action.
thought (str): The reasoning behind the agent's last action.
proposed_command (str): The command proposed by the agent to be executed next.
proposed_arg (str): The argument of the proposed command.
encoding: The tokenizer's encoding of the agent model's vocabulary.
"""
def __init__(
self,
agent_model: str,
summarizer_model: str,
objective: str,
max_context_size: int,
max_memory_item_size: int,
debug: bool = False
):
"""
Constructs a `MiniAGI` instance.
Args:
agent_model (str): The name of the model to be used as the agent.
summarizer_model (str): The name of the model to be used for summarization.
objective (str): The objective for the agent.
max_context_size (int): The maximum context size in tokens for the agent's memory.
max_memory_item_size (int): The maximum size of a memory item in tokens.
debug (bool, optional): A flag to indicate whether to print debug information.
"""
self.agent = ThinkGPT(
model_name=agent_model,
request_timeout=600,
verbose=False
)
self.summarizer = ThinkGPT(
model_name=summarizer_model,
request_timeout=600,
verbose=False
)
self.objective = objective
self.max_context_size = max_context_size
self.max_memory_item_size = max_memory_item_size
self.debug = debug
self.summarized_history = ""
self.criticism = ""
self.thought = ""
self.proposed_command = ""
self.proposed_arg = ""
self.encoding = tiktoken.encoding_for_model(self.agent.model_name)
def __update_memory(
self,
action: str,
observation: str,
update_summary: bool = True
):
"""
Updates the agent's memory with the last action performed and its observation.
Optionally, updates the summary of agent's history as well.
Args:
action (str): The action performed by the ThinkGPT instance.
observation (str): The observation made by the ThinkGPT
instance after performing the action.
update_summary (bool, optional): Determines whether to update the summary.
"""
if len(self.encoding.encode(observation)) > self.max_memory_item_size:
observation = self.summarizer.chunked_summarize(
observation, self.max_memory_item_size,
instruction_hint=OBSERVATION_SUMMARY_HINT
)
if "memorize_thoughts" in action:
new_memory = f"ACTION:\nmemorize_thoughts\nTHOUGHTS:\n{observation}\n"
else:
new_memory = f"ACTION:\n{action}\nRESULT:\n{observation}\n"
if update_summary:
self.summarized_history = self.summarizer.summarize(
f"Current summary:\n{self.summarized_history}\nAdd to summary:\n{new_memory}",
self.max_memory_item_size,
instruction_hint=HISTORY_SUMMARY_HINT
)
self.agent.memorize(new_memory)
def __get_context(self) -> str:
"""
Retrieves the context for the agent to think and act upon.
Returns:
str: The agent's context.
"""
summary_len = len(self.encoding.encode(self.summarized_history))
if len(self.criticism) > 0:
criticism_len = len(self.encoding.encode(self.criticism))
else:
criticism_len = 0
action_buffer = "\n".join(
self.agent.remember(
limit=32,
sort_by_order=True,
max_tokens=self.max_context_size - summary_len - criticism_len
)
)
return f"SUMMARY\n{self.summarized_history}\nPREV ACTIONS:"\
f"\n{action_buffer}\n{self.criticism}"
def criticize(self) -> str:
"""
Criticizes the agent's actions.
Returns:
str: The criticism.
"""
context = self.__get_context()
self.criticism = self.agent.predict(
prompt=CRITIC_PROMPT.format(context=context, objective=self.objective)
)
return self.criticism
def think(self):
"""
Uses the `ThinkGPT` model to predict the next action the agent should take.
"""
context = self.__get_context()
if self.debug:
print(context)
response_text = self.agent.predict(
prompt=PROMPT.format(context=context, objective=self.objective)
)
if self.debug:
print(f"RAW RESPONSE:\n{response_text}")
PATTERN = r'^<r>(.*?)</r><c>(.*?)</c>\n*(.*)$'
try:
match = re.search(PATTERN, response_text, flags=re.DOTALL | re.MULTILINE)
_thought = match[1]
_command = match[2]
_arg = match[3]
except Exception as exc:
raise InvalidLLMResponseError from exc
# Remove unwanted code formatting backticks
_arg = _arg.replace("```", "")
self.thought = _thought
self.proposed_command = _command
self.proposed_arg = _arg
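        # Illustrative example (hypothetical LLM response) matching the pattern above:
        #   <r>Search for websites with chocolate chip cookies recipe.</r><c>web_search</c>
        #   chocolate chip cookies recipe
        # which parses into thought, command and (multiline) argument respectively.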
def read_mind(self) -> tuple:
"""
Retrieves the agent's last thought, proposed command, and argument.
Returns:
tuple: A tuple containing the agent's thought, proposed command, and argument.
"""
_arg = self.proposed_arg.replace("\n", "\\n") if len(self.proposed_arg) < 64\
else f"{self.proposed_arg[:64]}...".replace("\n", "\\n")
return (
self.thought,
self.proposed_command,
_arg
)
@staticmethod
def __get_url_or_file(_arg: str) -> str:
"""
Retrieve contents from an URL or file.
Args:
arg (str): URL or filename
Returns:
str: Observation: The contents of the URL or file.
"""
        if _arg.startswith("http://") or _arg.startswith("https://"):
with urlopen(_arg) as response:
html = response.read()
data = BeautifulSoup(
html,
features="lxml"
).get_text()
else:
with open(_arg, "r") as file:
data = file.read()
return data
def __process_data(self, _arg: str) -> str:
"""
Processes data from a URL or file.
Args:
arg (str): The prompt and URL / filename, separated by |
Returns:
str: Observation: The result of processing the URL or file.
"""
args = _arg.split("|")
if len(args) == 1:
return "Invalid command. The correct format is: prompt|file or url"
if len(args) > 2:
return "Cannot process multiple input files or URLs. Process one at a time."
(prompt, __arg) = args
try:
input_data = self.__get_url_or_file(__arg)
except urllib.error.URLError as e:
return f"Error: {str(e)}"
except OSError as e:
return f"Error: {str(e)}"
if len(self.encoding.encode(input_data)) > self.max_context_size:
input_data = self.summarizer.chunked_summarize(
input_data, self.max_context_size,
instruction_hint=OBSERVATION_SUMMARY_HINT
)
return self.agent.predict(
prompt=f"{RETRIEVAL_PROMPT}\n{prompt}\nINPUT DATA:\n{input_data}"
)
def __ingest_data(self, _arg:str) -> str:
"""
Processes data from a URL or file.
Args:
arg (str): The file or URL to read
Returns:
str: Observation: The contents of the URL or file.
"""
try:
data = self.__get_url_or_file(_arg)
except urllib.error.URLError as e:
return f"Error: {str(e)}"
except OSError as e:
return f"Error: {str(e)}"
if len(self.encoding.encode(data)) > self.max_memory_item_size:
data = self.summarizer.chunked_summarize(
data, self.max_memory_item_size,
instruction_hint=OBSERVATION_SUMMARY_HINT
)
return data
def act(self):
"""
Executes the command proposed by the agent and updates the agent's memory.
"""
if command == "process_data":
obs = self.__process_data(self.proposed_arg)
elif command == "ingest_data":
obs = self.__ingest_data(self.proposed_arg)
else:
obs = Commands.execute_command(self.proposed_command, self.proposed_arg)
self.__update_memory(f"{self.proposed_command}\n{self.proposed_arg}", obs)
self.criticism = ""
def user_response(self, response):
"""
Updates the agent's memory with the user's response to its last action.
Args:
response (str): The user's response to the agent's last action.
"""
self.__update_memory(f"{self.proposed_command}\n{self.proposed_arg}", response)
self.criticism = ""
def get_bool_env(env_var: str) -> bool:
'''
Gets the value of a boolean environment variable.
Args:
env_var (str): Name of the variable
'''
return os.getenv(env_var) in ['true', '1', 't', 'y', 'yes']
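# For example, get_bool_env("DEBUG") is True for the values "true", "1", "t", "y" or "yes"
# (lowercase, as written above) and False otherwise, including when the variable is unset.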
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
if __name__ == "__main__":
PROMPT_USER = get_bool_env("PROMPT_USER")
ENABLE_CRITIC = get_bool_env("ENABLE_CRITIC")
if len(sys.argv) != 2:
print("Usage: miniagi.py <objective>")
sys.exit(0)
work_dir = os.getenv("WORK_DIR")
if work_dir is None or not work_dir:
work_dir = os.path.join(Path.home(), "miniagi")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
print(f"Working directory is {work_dir}")
try:
os.chdir(work_dir)
except FileNotFoundError:
print("Directory doesn't exist. Set WORK_DIR to an existing directory or leave it blank.")
sys.exit(0)
miniagi = MiniAGI(
os.getenv("MODEL"),
os.getenv("SUMMARIZER_MODEL"),
sys.argv[1],
int(os.getenv("MAX_CONTEXT_SIZE")),
int(os.getenv("MAX_MEMORY_ITEM_SIZE")),
get_bool_env("DEBUG")
)
while True:
try:
with Spinner():
miniagi.think()
except InvalidLLMResponseError:
print(colored("Invalid LLM response, retrying...", "red"))
continue
(thought, command, arg) = miniagi.read_mind()
print(colored(f"MiniAGI: {thought}\nCmd: {command}, Arg: {arg}", "cyan"))
if command == "done":
sys.exit(0)
if command == "talk_to_user":
print(colored(f"MiniAGI: {miniagi.proposed_arg}", 'blue'))
user_input = input('Your response: ')
with Spinner():
miniagi.user_response(user_input)
continue
if command == "memorize_thoughts":
print(colored("MiniAGI is thinking:\n"\
f"{miniagi.proposed_arg}", 'cyan'))
elif PROMPT_USER:
user_input = input('Press enter to continue or abort this action by typing feedback: ')
if len(user_input) > 0:
with Spinner():
miniagi.user_response(user_input)
continue
with Spinner():
miniagi.act()
if ENABLE_CRITIC:
with Spinner():
criticism = miniagi.criticize()
print(colored(criticism, "light_magenta"))
| [
"You will be asked to process data from a URL or file. You do not need to access the URL or file yourself, it will be loaded on your behalf and included as 'INPUT_DATA'.",
"PROMPT_USER",
"You are an autonomous agent running on PLACEHOLDER.\nOBJECTIVE: {objective} (e.g. \"Find a recipe for chocolate chip cookies\")\n\nYou are working towards the objective on a step-by-step basis. Previous steps:\n\n{context}\n\nYour task is to respond with the next action.\nSupported commands are: \n\ncommand | argument\n-----------------------\nmemorize_thoughts | internal debate, refinement, planning\nexecute_python | python code (multiline)\nexecute_shell | shell command (non-interactive, single line)\ningest_data | input file or URL\nprocess_data | prompt|input file or URL\nweb_search | keywords\ntalk_to_user | what to say\ndone | none\n\nThe mandatory action format is:\n\n<r>[YOUR_REASONING]</r><c>[COMMAND]</c>\n[ARGUMENT]\n\ningest_data and process_data cannot process multiple file/url arguments. Specify 1 at a time.\nUse process_data to process large amounts of data with a larger context window.\nPython code run with execute_python must end with an output \"print\" statement.\nDo not search the web for information that GPT3/GPT4 already knows.\nUse memorize_thoughts to organize your thoughts.\nmemorize_thoughts argument must not be empty!\nSend the \"done\" command if the objective was achieved.\nRESPOND WITH EXACTLY ONE THOUGHT/COMMAND/ARG COMBINATION.\nDO NOT CHAIN MULTIPLE COMMANDS.\nNO EXTRA TEXT BEFORE OR AFTER THE COMMAND.\nDO NOT REPEAT PREVIOUSLY EXECUTED COMMANDS.\n\nEach action returns an observation. Important: Observations may be summarized to fit into your limited memory.\n\nExample actions:\n\n<r>Think about skills and interests that could be turned into an online job.</r><c>memorize_thoughts</c>\nI have experience in data entry and analysis, as well as social media management.\n(...)\n\n<r>Search for websites with chocolate chip cookies recipe.</r><c>web_search</c>\nchocolate chip cookies recipe\n\n<r>Ingest information about chocolate chip cookies.</r><c>ingest_data</c>\nhttps://example.com/chocolate-chip-cookies\n\n<r>Read the local file /etc/hosts.</r><c>ingest_data</c>\n/etc/hosts\n\n<r>Extract information about chocolate chip cookies.</r><c>process_data</c>\nExtract the chocolate cookie recipe|https://example.com/chocolate-chip-cookies\n\n<r>Summarize this Stackoverflow article.</r><c>process_data</c>\nSummarize the content of this article|https://stackoverflow.com/questions/1234/how-to-improve-my-chatgpt-prompts\n\n<r>Review this code for security issues.</r><c>process_data</c>\nReview this code for security vulnerabilities|/path/to/code.sol\n\n<r>I need to ask the user for guidance.</r><c>talk_to_user</c>\nWhat is the URL of a website with chocolate chip cookies recipes?\n\n<r>Write 'Hello, world!' to file</r><c>execute_python</c>\nwith open('hello_world.txt', 'w') as f:\n f.write('Hello, world!')\n\n<r>The objective is complete.</r><c>done</c>\n",
"\nYou are a critic reviewing the actions of an autonomous agent.\n\nEvaluate the agent's performance. It should:\n- Make real-world progress towards the objective\n- Take action instead of endlessly talking to itself\n- Not perform redundant or unnecessary actions\n- Not attempt actions that cannot work (e.g. watching a video)\n- Not keep repeating the same command\n- Communicate results to the user\n\nMake concise suggestions for improvements.\nProvide recommended next steps.\nKeep your response as short as possible.\n\nEXAMPLE:\n\nCriticism: You have been pretending to order pizza but have not actually\ntaken any real-world action. You should course-correct.\n\nRecommended next steps:\n\n1. Request an Uber API access token from the user.\n2. Use the Uber API to order pizza.\n\nAGENT OBJECTIVE:\n\n{objective}\n\nAGENT HISTORY:\n\n{context}\n\n"
] |
2024-01-10 | yuzie007/pymatgen | pymatgen~analysis~interface.py | # Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to store, generate, and manipulate material interfaces.
"""
import warnings
from pymatgen.analysis.interfaces import CoherentInterfaceBuilder # noqa: F401
from pymatgen.core.interface import Interface # noqa: F401
__author__ = "Eric Sivonxay, Shyam Dwaraknath, and Kyle Bystrom"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Kyle Bystrom"
__email__ = "[email protected]"
__date__ = "5/29/2019"
__status__ = "Prototype"
warnings.warn(
"The substrate_analyzer module is being moved to the interfaces submodule in analysis."
" These imports will break in Pymatgen 2023",
category=FutureWarning,
stacklevel=2,
)
| [] |
2024-01-10 | Azure-Samples/jp-azureopenai-samples | 4.company-research~app~backend~company_research~company.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import openai
import json
import pandas as pd
import numpy as np
import inspect
from redis import StrictRedis
from redis.commands.search.query import Query
class CompanyResearch():
def __init__(self, embedding_deployment: str, chat_deployment: str, completion_deployment: str, redis_client: StrictRedis, redis_index_name_common: str, redis_index_name_topics: str):
self.embedding_deployment = embedding_deployment
self.chat_deployment = chat_deployment
self.completion_deployment = completion_deployment
self.redis_client = redis_client
self.redis_index_name_common = redis_index_name_common
self.redis_index_name_topics = redis_index_name_topics
    # Query the company cache in RediSearch
def search_embedded_company(self, redis_index_name, q, n):
print(f"I'm in function: {inspect.currentframe().f_code.co_name}")
# query caches with cosine similarity
base_query = f'*=>[KNN {n} @embeddings $vec_param AS vector_score]'
# get fields
query = Query(base_query)\
.sort_by("vector_score")\
.paging(0, n)\
.return_fields("name", "data", "tokens", "vector_score")\
.dialect(2)
        # Embed the query (company name + text) and run the vector search against RediSearch
embedding = self.get_gpt_embedding(q)
query_params = {"vec_param": np.array(embedding).astype(np.float32).tobytes()}
redis_ret = self.redis_client.ft(redis_index_name).search(query, query_params=query_params)
# Convert to Pandas Dataframe
df_ret = pd.DataFrame(list(map(lambda x: {'id' : x.id, 'name' : x.name, 'data': x.data, 'tokens': x.tokens, 'vector_score': x.vector_score}, redis_ret.docs)))
return df_ret
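    # Note: the KNN query above assumes a RediSearch index exposing TextFields "name" and "data",
    # a NumericField "tokens" and a FLOAT32 vector field "embeddings" (cosine distance), as
    # created by the companion registration script (scripts/gpt_manage_embedding.py).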
def search_embedded_company_common(self, company_name):
return self.search_embedded_company(self.redis_index_name_common, company_name, 1)
def get_gpt_embedding(self, text):
response = openai.Embedding.create(engine=self.embedding_deployment, input=text)
return response["data"][0]["embedding"]
system_prompt_en_us = """
You are an assistant supporting the sales department. Please use the following company information proactively, and if you cannot get an answer, please answer based on the information you know. Please also answer in the same way as previously asked. Do not answer questions other than those related to banking and corporate analysis.
company name: {company_name}
company information: {company_info}, {company_topics}
"""
system_prompt_ja_jp = """
あなたは営業部門をサポートするアシスタントです。次に与える企業情報を積極的に使い回答が得られない場合はあなたが知っている情報から回答してください。以前聞かれたことも同じように答えてください。
企業名: {company_name}
企業情報: {company_info}, {company_topics}
"""
def get_chat_system_prompt(self, locale, company_name, company_info, company_topics_text):
print(f"I'm in function: {inspect.currentframe().f_code.co_name}")
if locale == "en-us":
prompt_template = self.system_prompt_en_us
else:
prompt_template = self.system_prompt_ja_jp
system_prompt = prompt_template.format(company_name=company_name, company_info=company_info, company_topics=company_topics_text)
return { "role": "system", "content" : system_prompt }
def get_company_chat(self, locale, company_name, company_info, messages, n):
print(f"I'm in function: {inspect.currentframe().f_code.co_name}")
q = company_name + ":" + messages[-1]["content"]
data_source = []
company_topics_text = self.get_company_topics_text(company_name, q, n, data_source)
messages.insert(0, self.get_chat_system_prompt(locale, company_name, company_info, company_topics_text))
print(f"I'm in function: {inspect.currentframe().f_code.co_name}, before calling ChatCompletion.create()")
result = openai.ChatCompletion.create(
engine=self.chat_deployment,
messages=messages,
temperature=0.9,
max_tokens=1000,
n=1)
print(f"I'm in function: {inspect.currentframe().f_code.co_name}, after calling ChatCompletion.create()")
content = result.choices[0]["message"]["content"]
response = {
"content": content,
"data_source": data_source
}
return response
prompt_template_report_en_us = """
answer the questions based on company information.
company name: {company_name}
question: {question}
company information: {company_info}, {company_topics}
"""
prompt_template_report_ja_jp = """
企業情報に基づいて質問に回答してください。企業情報に記載された内容だけで回答してください。回答以外の文章は返さないでください。
企業名: {company_name}
質問: {question}
企業情報: {company_info}, {company_topics}
"""
def get_company_completion(self, locale, company_name, company_info, question, n):
q = company_name + ":" + question
company_topics_text = self.get_company_topics_text(company_name, q, n)
if locale == "en-us":
prompt_template_report = self.prompt_template_report_en_us
else:
prompt_template_report = self.prompt_template_report_ja_jp
prompt = prompt_template_report.format(company_name=company_name, company_info=company_info, question=question, company_topics=company_topics_text)
print(f"I'm in function: {inspect.currentframe().f_code.co_name}")
return openai.Completion.create(
engine=self.completion_deployment,
prompt=prompt,
temperature=0.5,
max_tokens=1000,
n=1)
prompt_template_feedback_en_us = """
The following sentence is generated from company information.
source sentence: {source}
Keep the content of the original sentence and regenerate the sentence reflecting the feedback.
feedback: {feedback}
company name: {company_name}
company information: {company_info}, {company_topics}
"""
prompt_template_feedback_ja_jp = """
次の文章は企業情報から生成されたものです。
文章: {source}
文章の内容は保持してフィードバックを反映した文章を生成し直してください。
フィードバック: {feedback}
企業名: {company_name}
企業情報: {company_info}, {company_topics}
"""
def get_analysis_feedback(self, locale, company_name, company_info, question, source, feedback, n):
q = company_name + ":" + question
company_topics_text = self.get_company_topics_text(company_name, q, n)
if locale == "en-us":
prompt_template_feedback = self.prompt_template_feedback_en_us
else:
prompt_template_feedback = self.prompt_template_feedback_ja_jp
prompt = prompt_template_feedback.format(company_name=company_name, company_info=company_info, source=source, feedback=feedback, company_topics=company_topics_text)
print(f"I'm in function: {inspect.currentframe().f_code.co_name}")
return openai.Completion.create(
engine=self.completion_deployment,
prompt=prompt,
temperature=0.5,
max_tokens=1000,
n=1)
def get_company_topics_text(self, company_name, question, n, data_source=None):
print(f"I'm in function: {inspect.currentframe().f_code.co_name}")
q = company_name + ":" + question
print(f"self.redis_index_name_topics={self.redis_index_name_topics}")
df = self.search_embedded_company(self.redis_index_name_topics, q, n)
company_topics_text = ""
for _, row in df.iterrows():
json_data = json.loads(row.data)
company_topics_text += json_data["label"] + " : " + json_data["text"] + ","
if data_source is not None:
data_source.append({
"label": json_data["label"],
"source": json_data["source"]
})
print(f"company_topics_text={company_topics_text}")
return company_topics_text
| [
"\nThe following sentence is generated from company information. \nsource sentence: {source}\n\nKeep the content of the original sentence and regenerate the sentence reflecting the feedback.\nfeedback: {feedback}\ncompany name: {company_name}\ncompany information: {company_info}, {company_topics}\n",
"\n次の文章は企業情報から生成されたものです。\n文章: {source}\n\n文章の内容は保持してフィードバックを反映した文章を生成し直してください。\nフィードバック: {feedback}\n企業名: {company_name}\n企業情報: {company_info}, {company_topics}\n",
"\n企業情報に基づいて質問に回答してください。企業情報に記載された内容だけで回答してください。回答以外の文章は返さないでください。\n企業名: {company_name}\n質問: {question}\n企業情報: {company_info}, {company_topics}\n",
"\nYou are an assistant supporting the sales department. Please use the following company information proactively, and if you cannot get an answer, please answer based on the information you know. Please also answer in the same way as previously asked. Do not answer questions other than those related to banking and corporate analysis.\ncompany name: {company_name}\ncompany information: {company_info}, {company_topics}\n",
"\nanswer the questions based on company information.\ncompany name: {company_name}\nquestion: {question}\ncompany information: {company_info}, {company_topics}\n",
"\nあなたは営業部門をサポートするアシスタントです。次に与える企業情報を積極的に使い回答が得られない場合はあなたが知っている情報から回答してください。以前聞かれたことも同じように答えてください。\n企業名: {company_name}\n企業情報: {company_info}, {company_topics}\n"
] |
2024-01-10 | Azure-Samples/jp-azureopenai-samples | 2.recipe-adviser~app~backend~food_menu~food_receipe.py | import openai
prompt_template_1 ="""
あなたは家族の料理担当です。
後述の家族のプロフィールに配慮した料理メニューを提案してください。同じ後述の家族の要望も出来る限り取り入れてください。
料理メニューは家族の要望がなければ主菜、副菜、主食、汁物を1品ずつ提案してください。
料理メニューにはそれぞれ消費カロリーをつけてください。
料理メニューの調理に必要な食材をリストアップしてください。
家にある材料をなるべく使い、家に無い材料をリストアップしてください。
家族のプロフィールをどのように配慮して料理メニューを提案したかを記述してください。
家族のプロフィール:${family_profile}
家にある食材:${ingredients_have}
家族の要望:${user_menu_request}
"""
prompt_template_2 ="""
回答は以下のスキーマに沿ったJSONデータを返してください。
・ "menu" キーには料理メニューの一覧をセットしてください。
・ "reason" キーには料理メニューの理由セットしてください。
・ "main_dish" キーには料理メニューのうち主菜の名前をセットしてください。
・ "ingredients_with_amount" キーには料理メニューに必要な材料と分量を、料理ごとにリストでセットしてください。各要素の先頭には・をつけてください。各要素の末尾で改行してください。
・ "recipes" キーには料理の名前とレシピをリスト形式でセットしてください。 "menu" キーは料理の名前です。"step" キーはレシピの手順です。
・ "ingredients_not_have" キーには材料のうち家にないものをリストでセットしてください。各要素の先頭には・をつけてください。各要素の末尾で改行してください。
必ず上記ルールのJSONデータだけを返してください。JSONデータの前と後に平文はつけないでください。
以下はJSONデータの例です。
{
"menu" : "主菜:鶏の唐揚げ 副菜:ほうれん草のおひたし 主食:ごはん 汁物:みそ汁",
"main_dish" : "鶏の唐揚げ",
"ingredients_with_amount" : "・鶏もも肉:200g・たまご(1個)・小麦粉(適量)・サラダ油(適量)・ほうれん草(1束・ごま油(適量)・ごはん(1合・味噌(大さじ1)・だし汁(カップ1/2)",
"recipes": [
{
"menu" : "鶏の唐揚げ",
"step" : "鶏もも肉を一口大に切り、小麦粉をまぶす。たまごを溶きほぐし、鶏肉にからめる。サラダ油を熱したフライパンで鶏肉を両面こんがりと揚げる。"
},
{
"menu" : "ほうれん草のおひたし",
"step" : "ほうれん草は根元を切り落とし、軽く塩ゆでして水気を絞る。ごま油を熱したフライパンで炒める。"
}
],
"ingredients_not_have": "鶏もも肉・小麦粉・サラダ油・ごま油・ねぎ",
"reason": "鶏もも肉は家にあったので、材料を買いに行く必要がありませんでした。ほうれん草は栄養が豊富なので、副菜にしました。ごはんは主食にしました。みそ汁は汁物にしまし父が好きなので、みそ汁にしました。"
}
"""
def get_food_receipe(completion_deployment, family_profile, ingredients_have, user_menu_request):
ingredients_have_text = ", ".join(ingredients_have)
prompt = prompt_template_1.format(family_profile=family_profile, ingredients_have=ingredients_have_text, user_menu_request=user_menu_request) + prompt_template_2
completion = openai.Completion.create(
engine=completion_deployment,
prompt=prompt,
temperature=0.5,
max_tokens=1000,
n=1)
return completion | [
"prompt_template_18ef2b188-dc27-4ee2-80b8-7739340191d4\n回答は以下のスキーマに沿ったJSONデータを返してください。\n・ \"menu\" キーには料理メニューの一覧をセットしてください。\n・ \"reason\" キーには料理メニューの理由セットしてください。\n・ \"main_dish\" キーには料理メニューのうち主菜の名前をセットしてください。\n・ \"ingredients_with_amount\" キーには料理メニューに必要な材料と分量を、料理ごとにリストでセットしてください。各要素の先頭には・をつけてください。各要素の末尾で改行してください。\n・ \"recipes\" キーには料理の名前とレシピをリスト形式でセットしてください。 \"menu\" キーは料理の名前です。\"step\" キーはレシピの手順です。\n・ \"ingredients_not_have\" キーには材料のうち家にないものをリストでセットしてください。各要素の先頭には・をつけてください。各要素の末尾で改行してください。\n必ず上記ルールのJSONデータだけを返してください。JSONデータの前と後に平文はつけないでください。\n以下はJSONデータの例です。\n{\n \"menu\" : \"主菜:鶏の唐揚げ 副菜:ほうれん草のおひたし 主食:ごはん 汁物:みそ汁\",\n \"main_dish\" : \"鶏の唐揚げ\",\n \"ingredients_with_amount\" : \"・鶏もも肉:200g・たまご(1個)・小麦粉(適量)・サラダ油(適量)・ほうれん草(1束・ごま油(適量)・ごはん(1合・味噌(大さじ1)・だし汁(カップ1/2)\",\n \"recipes\": [\n {\n \"menu\" : \"鶏の唐揚げ\",\n \"step\" : \"鶏もも肉を一口大に切り、小麦粉をまぶす。たまごを溶きほぐし、鶏肉にからめる。サラダ油を熱したフライパンで鶏肉を両面こんがりと揚げる。\"\n },\n {\n \"menu\" : \"ほうれん草のおひたし\",\n \"step\" : \"ほうれん草は根元を切り落とし、軽く塩ゆでして水気を絞る。ごま油を熱したフライパンで炒める。\"\n }\n ],\n \"ingredients_not_have\": \"鶏もも肉・小麦粉・サラダ油・ごま油・ねぎ\",\n \"reason\": \"鶏もも肉は家にあったので、材料を買いに行く必要がありませんでした。ほうれん草は栄養が豊富なので、副菜にしました。ごはんは主食にしました。みそ汁は汁物にしまし父が好きなので、みそ汁にしました。\"\n}\n",
"\nあなたは家族の料理担当です。\n後述の家族のプロフィールに配慮した料理メニューを提案してください。同じ後述の家族の要望も出来る限り取り入れてください。\n料理メニューは家族の要望がなければ主菜、副菜、主食、汁物を1品ずつ提案してください。\n料理メニューにはそれぞれ消費カロリーをつけてください。\n料理メニューの調理に必要な食材をリストアップしてください。\n家にある材料をなるべく使い、家に無い材料をリストアップしてください。\n家族のプロフィールをどのように配慮して料理メニューを提案したかを記述してください。\n\n家族のプロフィール:$PLACEHOLDER\n家にある食材:$PLACEHOLDER\n家族の要望:$PLACEHOLDER\n\n\n回答は以下のスキーマに沿ったJSONデータを返してください。\n・ \"menu\" キーには料理メニューの一覧をセットしてください。\n・ \"reason\" キーには料理メニューの理由セットしてください。\n・ \"main_dish\" キーには料理メニューのうち主菜の名前をセットしてください。\n・ \"ingredients_with_amount\" キーには料理メニューに必要な材料と分量を、料理ごとにリストでセットしてください。各要素の先頭には・をつけてください。各要素の末尾で改行してください。\n・ \"recipes\" キーには料理の名前とレシピをリスト形式でセットしてください。 \"menu\" キーは料理の名前です。\"step\" キーはレシピの手順です。\n・ \"ingredients_not_have\" キーには材料のうち家にないものをリストでセットしてください。各要素の先頭には・をつけてください。各要素の末尾で改行してください。\n必ず上記ルールのJSONデータだけを返してください。JSONデータの前と後に平文はつけないでください。\n以下はJSONデータの例です。\n{\n \"menu\" : \"主菜:鶏の唐揚げ 副菜:ほうれん草のおひたし 主食:ごはん 汁物:みそ汁\",\n \"main_dish\" : \"鶏の唐揚げ\",\n \"ingredients_with_amount\" : \"・鶏もも肉:200g・たまご(1個)・小麦粉(適量)・サラダ油(適量)・ほうれん草(1束・ごま油(適量)・ごはん(1合・味噌(大さじ1)・だし汁(カップ1/2)\",\n \"recipes\": [\n {\n \"menu\" : \"鶏の唐揚げ\",\n \"step\" : \"鶏もも肉を一口大に切り、小麦粉をまぶす。たまごを溶きほぐし、鶏肉にからめる。サラダ油を熱したフライパンで鶏肉を両面こんがりと揚げる。\"\n },\n {\n \"menu\" : \"ほうれん草のおひたし\",\n \"step\" : \"ほうれん草は根元を切り落とし、軽く塩ゆでして水気を絞る。ごま油を熱したフライパンで炒める。\"\n }\n ],\n \"ingredients_not_have\": \"鶏もも肉・小麦粉・サラダ油・ごま油・ねぎ\",\n \"reason\": \"鶏もも肉は家にあったので、材料を買いに行く必要がありませんでした。ほうれん草は栄養が豊富なので、副菜にしました。ごはんは主食にしました。みそ汁は汁物にしまし父が好きなので、みそ汁にしました。\"\n}\n",
"\n回答は以下のスキーマに沿ったJSONデータを返してください。\n・ \"menu\" キーには料理メニューの一覧をセットしてください。\n・ \"reason\" キーには料理メニューの理由セットしてください。\n・ \"main_dish\" キーには料理メニューのうち主菜の名前をセットしてください。\n・ \"ingredients_with_amount\" キーには料理メニューに必要な材料と分量を、料理ごとにリストでセットしてください。各要素の先頭には・をつけてください。各要素の末尾で改行してください。\n・ \"recipes\" キーには料理の名前とレシピをリスト形式でセットしてください。 \"menu\" キーは料理の名前です。\"step\" キーはレシピの手順です。\n・ \"ingredients_not_have\" キーには材料のうち家にないものをリストでセットしてください。各要素の先頭には・をつけてください。各要素の末尾で改行してください。\n必ず上記ルールのJSONデータだけを返してください。JSONデータの前と後に平文はつけないでください。\n以下はJSONデータの例です。\n{\n \"menu\" : \"主菜:鶏の唐揚げ 副菜:ほうれん草のおひたし 主食:ごはん 汁物:みそ汁\",\n \"main_dish\" : \"鶏の唐揚げ\",\n \"ingredients_with_amount\" : \"・鶏もも肉:200g・たまご(1個)・小麦粉(適量)・サラダ油(適量)・ほうれん草(1束・ごま油(適量)・ごはん(1合・味噌(大さじ1)・だし汁(カップ1/2)\",\n \"recipes\": [\n {\n \"menu\" : \"鶏の唐揚げ\",\n \"step\" : \"鶏もも肉を一口大に切り、小麦粉をまぶす。たまごを溶きほぐし、鶏肉にからめる。サラダ油を熱したフライパンで鶏肉を両面こんがりと揚げる。\"\n },\n {\n \"menu\" : \"ほうれん草のおひたし\",\n \"step\" : \"ほうれん草は根元を切り落とし、軽く塩ゆでして水気を絞る。ごま油を熱したフライパンで炒める。\"\n }\n ],\n \"ingredients_not_have\": \"鶏もも肉・小麦粉・サラダ油・ごま油・ねぎ\",\n \"reason\": \"鶏もも肉は家にあったので、材料を買いに行く必要がありませんでした。ほうれん草は栄養が豊富なので、副菜にしました。ごはんは主食にしました。みそ汁は汁物にしまし父が好きなので、みそ汁にしました。\"\n}\n",
"\nあなたは家族の料理担当です。\n後述の家族のプロフィールに配慮した料理メニューを提案してください。同じ後述の家族の要望も出来る限り取り入れてください。\n料理メニューは家族の要望がなければ主菜、副菜、主食、汁物を1品ずつ提案してください。\n料理メニューにはそれぞれ消費カロリーをつけてください。\n料理メニューの調理に必要な食材をリストアップしてください。\n家にある材料をなるべく使い、家に無い材料をリストアップしてください。\n家族のプロフィールをどのように配慮して料理メニューを提案したかを記述してください。\n\n家族のプロフィール:${family_profile}\n家にある食材:${ingredients_have}\n家族の要望:${user_menu_request}\n\n"
] |
2024-01-10 | Azure-Samples/jp-azureopenai-samples | 4.company-research~scripts~gpt_manage_embedding.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import uuid
import numpy as np
import pandas as pd
import os
import time
from os.path import join, dirname
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
from redis import StrictRedis
from redis.commands.search.query import Query
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.field import VectorField, NumericField, TextField
import openai
import tiktoken
from gpt_locale import get_company_description
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# Azure Open AI
AZURE_OPENAI_SERVICE = os.environ.get("AZURE_OPENAI_SERVICE")
AZURE_OPENAI_VERSION = os.environ.get("AZURE_OPENAI_VERSION")
AZURE_OPENAI_EMBEDDING_DEPLOYMENT = os.environ.get("AZURE_OPENAI_EMBEDDING_DEPLOYMENT")
openai.api_type = "azure_ad"
openai.api_base = AZURE_OPENAI_SERVICE
openai.api_version = AZURE_OPENAI_VERSION
azure_credential = DefaultAzureCredential()
openai_token = azure_credential.get_token("https://cognitiveservices.azure.com/.default")
openai.api_key = openai_token.token
max_token = 2000
# Redis Search
REDIS_NAME = os.environ.get("REDIS_NAME")
REDIS_KEY = os.environ.get("REDIS_KEY")
REDIS_INDEX_NAME = os.environ.get("REDIS_INDEX_NAME")
REDIS_CATEGORY_COMMON = os.environ.get("REDIS_CATEGORY_COMMON")
REDIS_CATEGORY_TOPICS = os.environ.get("REDIS_CATEGORY_TOPICS")
redis_conn = StrictRedis(host=REDIS_NAME, port=10000, password=REDIS_KEY, ssl=True, ssl_cert_reqs=None, decode_responses=True)
# encoding for tokenization
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
# Redis Index Name
def get_redis_index_name(category):
return category + "_" + REDIS_INDEX_NAME
# Clear Redis Cache
def clear_cache(category):
print("Redis: clear cache")
print()
keys = redis_conn.keys(category + ':*')
if len(keys) > 0:
print("Redis: remove", len(keys), "items.")
redis_conn.delete(*keys)
else:
print("Redis: no items.")
# Register Redis Index
def register_cache_index(category):
name = TextField(name="name")
data = TextField(name="data")
tokens = NumericField(name="tokens")
embeddings = VectorField("embeddings",
"HNSW", {
"TYPE": "FLOAT32",
"DIM": 1536,
"DISTANCE_METRIC": "COSINE",
"INITIAL_CAP": 3155
})
index_name = get_redis_index_name(category)
print("Redis: drop index", index_name)
print()
try:
redis_conn.ft(index_name).dropindex()
except:
print(f"Redis: index {index_name} does not exist.")
print("Redis: create index", index_name)
print()
redis_conn.ft(index_name).create_index(
fields = [name, data, tokens, embeddings],
definition = IndexDefinition(prefix=[category], index_type=IndexType.HASH))
# Get GPT Embedding from text
def get_gpt_embedding(text):
text = text.replace("\n", " ")
return openai.Embedding.create(engine=AZURE_OPENAI_EMBEDDING_DEPLOYMENT, input=text)["data"][0]["embedding"]
# Calculate GPT Embedding token count
def get_gpt_token_count(text):
    return len(encoding.encode(text))
# Add Redis Cache Item
def register_embedding_cache(category, data, keyword=None):
if keyword is not None:
text_for_embedding = data["name"] + " : " + data["text"] + " : " + keyword
else:
text_for_embedding = data["name"] + " : " + data["text"]
tokens = get_gpt_token_count(text_for_embedding)
if (tokens < max_token):
print("Redis: register ", "name", data["name"], "text", data["text"][:20], "...", "tokens", tokens)
embedding = get_gpt_embedding(text_for_embedding)
id = f"{category}:{uuid.uuid4().hex}"
redis_conn.hset(
id,
mapping={
"name": data["name"],
"data": json.dumps(data),
"tokens": tokens,
"embeddings": np.array(embedding).astype(dtype=np.float32).tobytes()
}
)
item = { "id": id, "name" : data["name"], "data": data }
else:
item = { "id": "Error", "data": "The text is too long: " + tokens }
return item
# Calculate Operating Profit Margin
def get_operating_profit_margin(revenue, operating_profit):
return round((operating_profit / revenue * 100), 1)
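# For example, revenue=1200 and operating_profit=96 gives 8.0 (an 8.0% operating margin).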
# Add company data to Redis Cache
def register_company(category, data):
years = []
# Calculate Operating Profit Margin
operating_profit_margin = []
for i, r in enumerate(data["revenue"]):
keys = list(r.keys())
for key in keys:
revenue_value = r[key]
operating_profit_value = data["operating_profit"][i][key]
operating_profit_margin.append( { key : get_operating_profit_margin(revenue_value, operating_profit_value) } )
years.append(keys[0])
data["operating_profit_margin"] = operating_profit_margin
revenues = [data["revenue"][i][years[i]] for i in range(0, len(years))]
operating_profits = [data["operating_profit"][i][years[i]] for i in range(0, len(years))]
operating_profit_margins = [data["operating_profit_margin"][i][years[i]] for i in range(0, len(years))]
total_assets = [data["total_assets"][i][years[i]] for i in range(0, len(years))]
equity_ratios = [data["equity_ratio"][i][years[i]] for i in range(0, len(years))]
data["text"] = get_company_description(data["locale"], data, years, revenues, operating_profits, operating_profit_margins, total_assets, equity_ratios)
register_embedding_cache(category, data)
# Add all companies from json data
def register_companies(category, filename):
with open(filename, 'r', encoding='utf-8') as f:
companies = json.load(f)
for company in companies:
register_company(category, company)
time.sleep(60)
def register_company_topics(category, filename):
with open(filename, 'r', encoding='utf-8') as f:
for line in f.readlines():
print(line)
data = json.loads(line)
azure_credential = DefaultAzureCredential()
openai_token = azure_credential.get_token("https://cognitiveservices.azure.com/.default")
openai.api_key = openai_token.token
register_embedding_cache(category, data, data["keyword"])
time.sleep(60)
# Retrieve all items from the Redis cache
def query_all_cache(category):
base_query = '*'
query = Query(base_query)\
.return_fields("name", "data", "tokens")\
.dialect(2)
index_name = get_redis_index_name(category)
redis_ret = redis_conn.ft(index_name).search(query)
df_ret = pd.DataFrame(list(map(lambda x: {'id' : x.id, 'name' : x.name, 'data': x.data, 'tokens': x.tokens}, redis_ret.docs)))
return df_ret
# Query the Redis cache by vector similarity
def query_cache(category, text, n=5):
base_query = f'*=>[KNN {n} @embeddings $vec_param AS vector_score]'
query = Query(base_query)\
.sort_by("vector_score")\
.paging(0, n)\
.return_fields("name", "data", "tokens", "vector_score")\
.dialect(2)
embedding = get_gpt_embedding(text)
query_params = {"vec_param": np.array(embedding).astype(np.float32).tobytes()}
index_name = get_redis_index_name(category)
redis_ret = redis_conn.ft(index_name).search(query, query_params=query_params)
df_ret = pd.DataFrame(list(map(lambda x: {'id' : x.id, 'name' : x.name, 'data': x.data, 'tokens': x.tokens, 'vector_score': x.vector_score}, redis_ret.docs)))
return df_ret
# main
def main():
# Common
clear_cache(REDIS_CATEGORY_COMMON)
register_cache_index(REDIS_CATEGORY_COMMON)
register_companies(REDIS_CATEGORY_COMMON, "company_common.json")
df = query_all_cache(REDIS_CATEGORY_COMMON)
print(df)
print()
df = query_cache(REDIS_CATEGORY_COMMON, "コントソ", n=1)
print(df)
print()
# Topics
clear_cache(REDIS_CATEGORY_TOPICS)
register_cache_index(REDIS_CATEGORY_TOPICS)
register_company_topics(REDIS_CATEGORY_TOPICS, "company_topics_ja.jsonl")
# register_company_topics(REDIS_CATEGORY_TOPICS, "company_topics_en.jsonl")
df = query_all_cache(REDIS_CATEGORY_TOPICS)
print(df)
print()
df = query_cache(REDIS_CATEGORY_TOPICS, "コントソ 経営者", n=3)
print(df)
print()
if __name__ == '__main__':
main()
| [] |
2024-01-10 | Azure-Samples/jp-azureopenai-samples | 5.internal-document-search~src~backend~approaches~chatreadretrieveread.py | from text import nonewlines
import openai
from azure.search.documents import SearchClient
from azure.search.documents.models import QueryType
from approaches.approach import Approach
from approaches.chatlogging import write_chatlog, ApproachType
from core.messagebuilder import MessageBuilder
from core.modelhelper import get_gpt_model, get_max_token_from_messages
# Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
# top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
# (answer) with that prompt.
class ChatReadRetrieveReadApproach(Approach):
# Chat roles
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
"""
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
    top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
(answer) with that prompt.
"""
system_message_chat_conversation = """Assistant helps the customer questions. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
"""
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
The language of the search query is generated in the language of the string described in the source question.
If you cannot generate a search query, return just the number 0.
source quesion: {user_question}
"""
query_prompt_few_shots = [
{'role' : USER, 'content' : 'What are my health plans?' },
{'role' : ASSISTANT, 'content' : 'Show available health plans' },
{'role' : USER, 'content' : 'does my plan cover cardio?' },
{'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' }
]
def __init__(self, search_client: SearchClient, sourcepage_field: str, content_field: str):
self.search_client = search_client
self.sourcepage_field = sourcepage_field
self.content_field = content_field
def run(self, user_name: str, history: list[dict], overrides: dict) -> any:
chat_model = overrides.get("gptModel")
chat_gpt_model = get_gpt_model(chat_model)
chat_deployment = chat_gpt_model.get("deployment")
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
user_q = 'Generate search query for: ' + history[-1]["user"]
query_prompt = self.query_prompt_template.format(user_question=history[-1]["user"])
message_builder = MessageBuilder(query_prompt)
messages = message_builder.get_messages_from_history(
history,
user_q,
self.query_prompt_few_shots
)
max_tokens = get_max_token_from_messages(messages, chat_model)
# Change create type ChatCompletion.create → ChatCompletion.acreate when enabling asynchronous support.
chat_completion = openai.ChatCompletion.create(
engine=chat_deployment,
messages=messages,
temperature=0.0,
max_tokens=max_tokens,
n=1)
query_text = chat_completion.choices[0].message.content
if query_text.strip() == "0":
query_text = history[-1]["user"] # Use the last user input if we failed to generate a better query
total_tokens = chat_completion.usage.total_tokens
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
use_semantic_captions = True if overrides.get("semanticCaptions") else False
top = overrides.get("top")
exclude_category = overrides.get("excludeCategory") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
semantic_ranker = overrides.get("semanticRanker")
if semantic_ranker:
r = self.search_client.search(query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None
)
else:
r = self.search_client.search(query_text,
filter=filter,
top=top
)
if use_semantic_captions:
results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r]
content = "\n".join(results)
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
# GPT-3.5 Turbo (4k/16k)
if "gpt-3.5-turbo" in chat_model:
completion_model = chat_model
# completion_model = "gpt-35-turbo-instruct" # for future use
# GPT-4 (8k/32k)
else:
completion_model = chat_model
completion_gpt_model = get_gpt_model(completion_model)
completion_deployment = completion_gpt_model.get("deployment")
message_builder = MessageBuilder(self.system_message_chat_conversation)
messages = message_builder.get_messages_from_history(
history,
history[-1]["user"]+ "\n\nSources:\n" + content[:1024], # Model does not handle lengthy system messages well. Moving sources to latest user conversation to solve follow up questions prompt.
)
        temperature = float(overrides.get("temperature"))
max_tokens = get_max_token_from_messages(messages, completion_model)
# Change create type ChatCompletion.create → ChatCompletion.acreate when enabling asynchronous support.
response = openai.ChatCompletion.create(
engine=completion_deployment,
messages=messages,
            temperature=temperature,
max_tokens=1024,
n=1)
response_text = response.choices[0]["message"]["content"]
total_tokens += response.usage.total_tokens
# logging
input_text = history[-1]["user"]
write_chatlog(ApproachType.DocSearch, user_name, total_tokens, input_text, response_text, query_text)
msg_to_display = '\n\n'.join([str(message) for message in messages])
return {"data_points": results, "answer": response_text, "thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>" + msg_to_display.replace('\n', '<br>')}
| [
"Show available health plans",
"Health plan cardio coverage",
"What are my health plans?",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base.\nGenerate a search query based on the conversation and the new question.\nDo not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nThe language of the search query is generated in the language of the string described in the source question.\nIf you cannot generate a search query, return just the number 0.\n\nsource quesion: {user_question}\n",
"does my plan cover cardio?"
] |
2024-01-10 | Azure-Samples/jp-azureopenai-samples | 3.goal-achievement-adviser~scripts~gpt_manage_embedding.py | import json
import uuid
import numpy as np
import pandas as pd
import os
from os.path import join, dirname
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
# from redis import Redis
from redis import StrictRedis
from redis.commands.search.query import Query
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.field import VectorField, TextField
import openai
import tiktoken
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# Azure Open AI
AZURE_OPENAI_SERVICE = os.environ.get("AZURE_OPENAI_SERVICE")
AZURE_OPENAI_VERSION = os.environ.get("AZURE_OPENAI_VERSION")
AZURE_OPENAI_EMBEDDING_DEPLOYMENT = os.environ.get("AZURE_OPENAI_EMBEDDING_DEPLOYMENT")
AZURE_OPENAI_KEY = os.environ.get("AZURE_OPENAI_KEY")
openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com"
openai.api_version = AZURE_OPENAI_VERSION
azure_credential = DefaultAzureCredential()
if AZURE_OPENAI_KEY is None:
openai.api_type = "azure_ad"
openai_token = azure_credential.get_token("https://cognitiveservices.azure.com/.default")
openai.api_key = openai_token.token
else:
openai.api_type = "azure"
openai.api_key = os.environ.get("AZURE_OPENAI_KEY")
# encoding for tokenization
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
# GPT Embedding token limit
# https://learn.microsoft.com/azure/cognitive-services/openai/concepts/models
#max_token = 2046 # V1
max_token = 8191 # V2
# Redis
redis_name = os.environ.get("REDIS_NAME")
redis_key = os.environ.get("REDIS_KEY")
redis_index_name = os.environ.get("REDIS_INDEX_CATEGORY") + "_" + os.environ.get("REDIS_INDEX_NAME")
redis_conn = StrictRedis(host=redis_name, port=10000, password=redis_key, ssl=True, ssl_cert_reqs=None, decode_responses=True)
category = os.environ.get("REDIS_INDEX_CATEGORY")
# Clear Redis Cache
def clear_cache():
print("Redis: clear cache")
print()
keys = redis_conn.keys(category + ':*')
if len(keys) > 0:
print("Redis: remove", len(keys), "items.")
redis_conn.delete(*keys)
else:
print("Redis: no items.")
# Register Redis Index
def register_cache_index(category):
text = TextField(name="text")
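    # 1536-dimensional float32 vector field indexed with HNSW using cosine distance
    # (1536 matches the output size of OpenAI's text-embedding-ada-002, the assumed embedding model here)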
embeddings = VectorField("embeddings",
"HNSW", {
"TYPE": "FLOAT32",
"DIM": 1536,
"DISTANCE_METRIC": "COSINE",
"INITIAL_CAP": 3155
})
print("Redis: drop index", redis_index_name)
print()
try:
redis_conn.ft(redis_index_name).dropindex()
except:
print(f"Redis: index {redis_index_name} does not exist.")
print("Redis: create index", redis_index_name)
print()
redis_conn.ft(redis_index_name).create_index(
fields = [text, embeddings],
definition = IndexDefinition(prefix=[category], index_type=IndexType.HASH))
# Get GPT Embedding from text
def get_gpt_embedding(text):
text = text.replace("\n", " ")
response = openai.Embedding.create(engine=AZURE_OPENAI_EMBEDDING_DEPLOYMENT, input=text)
return response["data"][0]["embedding"]
# Calculate GPT Embedding token count
def get_gpt_token_count(text):
return len(encoding.encode(text))
# Add Redis Cache Item
def register_embedding_cache(category, data):
text = data["name"] + " : " + data["text"]
tokens = get_gpt_token_count(text)
if (tokens < max_token):
print("Redis: register ", text)
embedding = get_gpt_embedding(text)
id = f"{category}:{uuid.uuid4().hex}"
redis_conn.hset(
id,
mapping={
"text": text,
"embeddings": np.array(embedding).astype(dtype=np.float32).tobytes()
}
)
item = { "id": id, "text": text }
else:
item = { "id": "Error", "text": "The text is too long: " + tokens }
return item
def register_topics(filename):
datafile_path = join(dirname(__file__), filename)
with open(datafile_path, 'r', encoding='utf-8') as f:
for text in f:
data = json.loads(text)
register_embedding_cache(category, data)
# Retrieve all items from Redis Cache
def query_all_cache():
base_query = '*'
query = Query(base_query)\
.return_fields("text")\
.dialect(2)
redis_ret = redis_conn.ft(redis_index_name).search(query)
df_ret = pd.DataFrame(list(map(lambda x: {'id' : x.id, 'text': x.text}, redis_ret.docs)))
return df_ret
# Query Redis Cache by vector similarity
def query_cache(text, n=6):
base_query = f'*=>[KNN {n} @embeddings $vec_param AS vector_score]'
query = Query(base_query)\
.sort_by("vector_score")\
.paging(0, n)\
.return_fields("text", "vector_score")\
.dialect(2)
embedding = get_gpt_embedding(text)
query_params = {"vec_param": np.array(embedding).astype(np.float32).tobytes()}
redis_ret = redis_conn.ft(redis_index_name).search(query, query_params=query_params)
df_ret = pd.DataFrame(list(map(lambda x: {'id' : x.id, 'vector_score': x.vector_score, 'text': x.text}, redis_ret.docs)))
return df_ret
clear_cache()
register_cache_index(category)
register_topics("topics.jsonl")
df = query_all_cache()
print(df)
print()
df = query_cache("コントソの強み弱み")
print(df)
print()
| [] |
2024-01-10 | Azure-Samples/jp-azureopenai-samples | 5.internal-document-search~src~backend~approaches~chatread.py | from typing import Any
import openai
# To uncomment when enabling asynchronous support.
# from azure.cosmos.aio import ContainerProxy
from approaches.approach import Approach
from approaches.chatlogging import write_chatlog, ApproachType
from core.messagebuilder import MessageBuilder
from core.modelhelper import get_gpt_model, get_max_token_from_messages
# Simple read implementation, using the OpenAI APIs directly. It uses OpenAI to generate a completion
# (answer) with that prompt.
class ChatReadApproach(Approach):
def run(self, user_name: str, history: list[dict[str, str]], overrides: dict[str, Any]) -> Any:
chat_model = overrides.get("gptModel")
chat_gpt_model = get_gpt_model(chat_model)
chat_deployment = chat_gpt_model.get("deployment")
systemPrompt = overrides.get("systemPrompt")
        temperature = float(overrides.get("temperature"))
user_q = history[-1]["user"]
message_builder = MessageBuilder(systemPrompt)
messages = message_builder.get_messages_from_history(
history,
user_q
)
max_tokens = get_max_token_from_messages(messages, chat_model)
# Generate a contextual and content specific answer using chat history
# Change create type ChatCompletion.create → ChatCompletion.acreate when enabling asynchronous support.
chat_completion = openai.ChatCompletion.create(
engine=chat_deployment,
messages=messages,
            temperature=temperature,
max_tokens=max_tokens,
n=1)
response_text = chat_completion.choices[0]["message"]["content"]
total_tokens = chat_completion.usage.total_tokens
# logging
input_text = history[-1]["user"]
write_chatlog(ApproachType.Chat, user_name, total_tokens, input_text, response_text)
return { "answer": response_text }
| [
"systemPrompt"
] |
2024-01-10 | Azure-Samples/jp-azureopenai-samples | 2.recipe-adviser~app~backend~food_menu~food_image.py | import requests
import time
import openai
prompt_template = """
以下を英訳してください。
{food_name} のテーブルの皿に盛られた美味しそうな写真。料理ごとに別々の皿で。
"""
def get_food_image(api_service, api_key, api_version, completion_deployment, food_name):
# url = "https://{api_service}.openai.azure.com/dalle/text-to-image?api-version={api_version}".format(api_service=api_service, api_version=api_version)
url = f"https://{api_service}.openai.azure.com/openai/images/generations:submit?api-version={api_version}"
prompt_translate = prompt_template.format(food_name=food_name)
completion = openai.Completion.create(
engine=completion_deployment,
prompt=prompt_translate,
temperature=0,
max_tokens=100,
n=1)
prompt = completion.choices[0]['text']
headers= {
"api-key": api_key,
"Content-Type": "application/json"
}
body = {
"prompt": prompt,
"resolution": "256x256"
}
submission = requests.post(url, headers=headers, json=body)
print(submission.json(), flush=True)
operation_location = submission.headers['Operation-Location']
# retry_after = submission.headers['Retry-after']
status = ""
while (status != "succeeded"):
        # TODO: Retry-After is missing from the response, so poll every 0.5 s for now
time.sleep(0.5)
response = requests.get(operation_location, headers=headers)
status = response.json()['status']
image_url = response.json()['result']['data'][0]['url']
return image_url
| [
"\n以下を英訳してください。\nPLACEHOLDER のテーブルの皿に盛られた美味しそうな写真。料理ごとに別々の皿で。\n",
"\n以下を英訳してください。\n{food_name} のテーブルの皿に盛られた美味しそうな写真。料理ごとに別々の皿で。\n"
] |
2024-01-10 | Azure-Samples/jp-azureopenai-samples | 2.recipe-adviser~app~backend~food_menu~food_advisory.py | import openai
prompt_template_1 ="""
あなたは家族の料理担当です。家族のプロフィールと不足している栄養に基づいて、以下の質問を家族に話すようにフランクわかりやすく回答してください。
家族のプロフィール:${family_profile}
不足している栄養素:${missing_nutrient}
"""
prompt_template_2 ="""
・不足している栄養素が必要な理由
・不足している栄養素の補給に必要な食材を5つ
・食材が栄養素の補給に有効な理由
・栄養素の補給に必要な食材の効果的な調理方法
回答は以下のようなJSONデータにしてください。
"recommend_reason" キーには不足している栄養素が必要な理由をセットしてください。
"recommended_ingredients" キーには不足している栄養素の補給に必要な食材を5つのリストをセットしてください。各食材の先頭には・を付けてください。
"recommend_ingredients_reason" 食材が不足している栄養素の補給に有効な理由。
"recommend_ingredients_cooking" キーには栄養素の補給に必要な食材の効果的な調理方法をセットしてください。
以下のJSONデータの例です。
{
"recommend_reason" : "タンパク質は体を作るのに有効です",
"recommended_ingredients" : "・鶏のむね肉・大豆",
"recommend_ingredients_reason" : "鶏のむね肉には豊富なタンパク質が含まれており、脂肪分も少なくヘルシーです。大豆も植物性タンパク質が豊富に含まれています。",
"recommend_ingredients_cooking": "鶏のむね肉は蒸すとヘルシーです。大豆は醤油とみりんで煮ると効果的です"
}
必ず上記JSONデータを返し、JSONデータの前と後に文章はつけず、必ずJSONデータだけ返してください。
"""
def get_food_advisory(completion_deployment, family_profile, missing_nutrient):
prompt = prompt_template_1.format(family_profile=family_profile, missing_nutrient=missing_nutrient) + prompt_template_2
completion = openai.Completion.create(
engine=completion_deployment,
prompt=prompt,
temperature=0.5,
max_tokens=1000,
n=1)
return completion | [
"prompt_template_18749a621-86ef-4513-9507-a1d88db8ed14\n・不足している栄養素が必要な理由\n・不足している栄養素の補給に必要な食材を5つ\n・食材が栄養素の補給に有効な理由\n・栄養素の補給に必要な食材の効果的な調理方法\n\n回答は以下のようなJSONデータにしてください。\n\"recommend_reason\" キーには不足している栄養素が必要な理由をセットしてください。\n\"recommended_ingredients\" キーには不足している栄養素の補給に必要な食材を5つのリストをセットしてください。各食材の先頭には・を付けてください。\n\"recommend_ingredients_reason\" 食材が不足している栄養素の補給に有効な理由。\n\"recommend_ingredients_cooking\" キーには栄養素の補給に必要な食材の効果的な調理方法をセットしてください。\n\n以下のJSONデータの例です。\n{\n \"recommend_reason\" : \"タンパク質は体を作るのに有効です\",\n \"recommended_ingredients\" : \"・鶏のむね肉・大豆\",\n \"recommend_ingredients_reason\" : \"鶏のむね肉には豊富なタンパク質が含まれており、脂肪分も少なくヘルシーです。大豆も植物性タンパク質が豊富に含まれています。\",\n \"recommend_ingredients_cooking\": \"鶏のむね肉は蒸すとヘルシーです。大豆は醤油とみりんで煮ると効果的です\"\n}\n\n必ず上記JSONデータを返し、JSONデータの前と後に文章はつけず、必ずJSONデータだけ返してください。\n",
"\nあなたは家族の料理担当です。家族のプロフィールと不足している栄養に基づいて、以下の質問を家族に話すようにフランクわかりやすく回答してください。\n\n家族のプロフィール:$PLACEHOLDER\n不足している栄養素:$PLACEHOLDER\n\n\n・不足している栄養素が必要な理由\n・不足している栄養素の補給に必要な食材を5つ\n・食材が栄養素の補給に有効な理由\n・栄養素の補給に必要な食材の効果的な調理方法\n\n回答は以下のようなJSONデータにしてください。\n\"recommend_reason\" キーには不足している栄養素が必要な理由をセットしてください。\n\"recommended_ingredients\" キーには不足している栄養素の補給に必要な食材を5つのリストをセットしてください。各食材の先頭には・を付けてください。\n\"recommend_ingredients_reason\" 食材が不足している栄養素の補給に有効な理由。\n\"recommend_ingredients_cooking\" キーには栄養素の補給に必要な食材の効果的な調理方法をセットしてください。\n\n以下のJSONデータの例です。\n{\n \"recommend_reason\" : \"タンパク質は体を作るのに有効です\",\n \"recommended_ingredients\" : \"・鶏のむね肉・大豆\",\n \"recommend_ingredients_reason\" : \"鶏のむね肉には豊富なタンパク質が含まれており、脂肪分も少なくヘルシーです。大豆も植物性タンパク質が豊富に含まれています。\",\n \"recommend_ingredients_cooking\": \"鶏のむね肉は蒸すとヘルシーです。大豆は醤油とみりんで煮ると効果的です\"\n}\n\n必ず上記JSONデータを返し、JSONデータの前と後に文章はつけず、必ずJSONデータだけ返してください。\n",
"\n・不足している栄養素が必要な理由\n・不足している栄養素の補給に必要な食材を5つ\n・食材が栄養素の補給に有効な理由\n・栄養素の補給に必要な食材の効果的な調理方法\n\n回答は以下のようなJSONデータにしてください。\n\"recommend_reason\" キーには不足している栄養素が必要な理由をセットしてください。\n\"recommended_ingredients\" キーには不足している栄養素の補給に必要な食材を5つのリストをセットしてください。各食材の先頭には・を付けてください。\n\"recommend_ingredients_reason\" 食材が不足している栄養素の補給に有効な理由。\n\"recommend_ingredients_cooking\" キーには栄養素の補給に必要な食材の効果的な調理方法をセットしてください。\n\n以下のJSONデータの例です。\n{\n \"recommend_reason\" : \"タンパク質は体を作るのに有効です\",\n \"recommended_ingredients\" : \"・鶏のむね肉・大豆\",\n \"recommend_ingredients_reason\" : \"鶏のむね肉には豊富なタンパク質が含まれており、脂肪分も少なくヘルシーです。大豆も植物性タンパク質が豊富に含まれています。\",\n \"recommend_ingredients_cooking\": \"鶏のむね肉は蒸すとヘルシーです。大豆は醤油とみりんで煮ると効果的です\"\n}\n\n必ず上記JSONデータを返し、JSONデータの前と後に文章はつけず、必ずJSONデータだけ返してください。\n",
"\nあなたは家族の料理担当です。家族のプロフィールと不足している栄養に基づいて、以下の質問を家族に話すようにフランクわかりやすく回答してください。\n\n家族のプロフィール:${family_profile}\n不足している栄養素:${missing_nutrient}\n\n"
] |
2024-01-10 | karray/neuroracer | neuroracer_gym~src~neuroracer_gym~neuroracer_env.py | import time
import numpy as np
import rospy
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState
from openai_ros import robot_gazebo_env
from sensor_msgs.msg import LaserScan, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
# from std_msgs.msg import Float64
# from sensor_msgs.msg import Image
# from tf.transformations import quaternion_from_euler
from ackermann_msgs.msg import AckermannDriveStamped
from gym import spaces
from gym.envs.registration import register
# import cv2
default_sleep = 1
class NeuroRacerEnv(robot_gazebo_env.RobotGazeboEnv):
def __init__(self):
self.initial_position = None
self.min_distance = .255
self.bridge = CvBridge()
        # Doesn't have any accessibles
self.controllers_list = []
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(NeuroRacerEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False)
rospy.wait_for_service('/gazebo/set_model_state')
try:
self.set_model_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
except rospy.ServiceException as e:
print("Service call failed: %s" % e)
self.gazebo.unpauseSim()
time.sleep(default_sleep)
#self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
self._init_camera()
self.laser_subscription = rospy.Subscriber("/scan", LaserScan, self._laser_scan_callback)
self.drive_control_publisher= rospy.Publisher("/vesc/ackermann_cmd_mux/input/navigation",
AckermannDriveStamped,
queue_size=20)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("Finished NeuroRacerEnv INIT...")
def reset_position(self):
if not self.initial_position:
return
state_msg = ModelState()
state_msg.model_name = 'racecar'
state_msg.pose.position.x = self.initial_position['p_x']
state_msg.pose.position.y = self.initial_position['p_y']
state_msg.pose.position.z = self.initial_position['p_z']
state_msg.pose.orientation.x = self.initial_position['o_x']
state_msg.pose.orientation.y = self.initial_position['o_y']
state_msg.pose.orientation.z = self.initial_position['o_z']
state_msg.pose.orientation.w = self.initial_position['o_w']
self.set_model_state(state_msg)
def reset(self):
super(NeuroRacerEnv, self).reset()
self.gazebo.unpauseSim()
self.reset_position()
time.sleep(default_sleep)
self.gazebo.pauseSim()
return self._get_obs()
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_laser_scan_ready()
self._check_camera_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_camera_ready(self):
self.camera_msg = None
rospy.logdebug("Waiting for /camera/zed/rgb/image_rect_color/compressed to be READY...")
while self.camera_msg is None and not rospy.is_shutdown():
try:
self.camera_msg = rospy.wait_for_message('/camera/zed/rgb/image_rect_color/compressed',
CompressedImage,
timeout=1.0)
except:
rospy.logerr("Camera not ready yet, retrying for getting camera_msg")
def _init_camera(self):
img = self.get_camera_image()
# self.color_scale = "bgr8" # config["color_scale"]
self.input_shape = img.shape
obs_low = 0
obs_high = 1
self.observation_space = spaces.Box(low=obs_low, high=obs_high, shape=self.input_shape)
img_dims = img.shape[0]*img.shape[1]*img.shape[2]
byte_size = 4
        overhead = 2 # reserving memory for ros header
        buff_size = img_dims*byte_size*overhead
self.camera_msg = rospy.Subscriber("/camera/zed/rgb/image_rect_color/compressed",
CompressedImage, self._camera_callback, queue_size=1,
buff_size=buff_size)
rospy.logdebug("== Camera READY ==")
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/scan", LaserScan, timeout=1.0)
rospy.logdebug("Current /scan READY=>")
except:
rospy.logerr("Current /scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
# def _get_additional_laser_scan(self):
# laser_scans = []
# self.gazebo.unpauseSim()
# while len(laser_scans) < 2 and not rospy.is_shutdown():
# try:
# data = rospy.wait_for_message("/scan", LaserScan, timeout=1.0)
# laser_scans.append(data.ranges)
# except Exception as e:
# rospy.logerr("getting laser data...")
# print(e)
# self.gazebo.pauseSim()
# return laser_scans
def _laser_scan_callback(self, data):
self.laser_scan = data
def _camera_callback(self, msg):
self.camera_msg = msg
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self.drive_control_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to drive_control_publisher yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("drive_control_publisher Publisher Connected")
rospy.logdebug("All Publishers READY")
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
return self.get_camera_image()
def _is_done(self, observations):
self._episode_done = self._is_collided()
return self._episode_done
def _create_steering_command(self, steering_angle, speed):
# steering_angle = np.clip(steering_angle,self.steerin_angle_min, self.steerin_angle_max)
a_d_s = AckermannDriveStamped()
a_d_s.drive.steering_angle = steering_angle
a_d_s.drive.steering_angle_velocity = 0.0
a_d_s.drive.speed = speed # from 0 to 1
a_d_s.drive.acceleration = 0.0
a_d_s.drive.jerk = 0.0
return a_d_s
def steering(self, steering_angle, speed):
command = self._create_steering_command(steering_angle, speed)
self.drive_control_publisher.publish(command)
# def get_odom(self):
# return self.odom
# def get_imu(self):
# return self.imu
def get_laser_scan(self):
return np.array(self.laser_scan.ranges, dtype=np.float32)
def get_camera_image(self):
try:
cv_image = self.bridge.compressed_imgmsg_to_cv2(self.camera_msg).astype('float32')
except Exception as e:
rospy.logerr("CvBridgeError: Error converting image")
rospy.logerr(e)
return cv_image
def _is_collided(self):
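        # A crash is flagged when the mean laser range over a ~10-reading window
        # around the closest reading falls below the minimum safe distance.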
r = self.get_laser_scan()
crashed = np.any(r <= self.min_distance)
if crashed:
# rospy.logdebug('the auto crashed! :(')
# rospy.logdebug('distance: {}'.format(r.min()))
# data = np.array(self._get_additional_laser_scan(), dtype=np.float32)
# data = np.concatenate((np.expand_dims(r, axis=0), data), axis=0)
# data_mean = np.mean(data, axis=0)
min_range_idx = r.argmin()
min_idx = min_range_idx - 5
if min_idx < 0:
min_idx = 0
max_idx = min_idx + 10
if max_idx >= r.shape[0]:
max_idx = r.shape[0] - 1
min_idx = max_idx - 10
mean_distance = r[min_idx:max_idx].mean()
crashed = np.any(mean_distance <= self.min_distance)
return crashed
| [] |
2024-01-10 | DevGuyRash/ai_notebook | transformers.py | import openai
import filetypes
import os
from environment import Environment
class Transcript:
"""
Class to transcribe any YouTube video to text.
Attributes:
_environment (Environment): `Environment` object for getting the
api key and various other environment variables.
video (filetypes.YtVideo): `filetypes.YtVideo` object containing
the video to get the transcripts for.
audio_filepath (str): Filepath of the audio file.
"""
def __init__(self,
output_filepath: str = "transcript.txt",
audio_filepath: str = ""
):
# TODO: Pass api key to environment object
"""
Initializes required variables for the `Transcript` object.
Args:
output_filepath: Filepath of where the transcript will be
saved.
audio_filepath: Filepath of where the audio file will be
saved.
"""
self._environment = Environment()
self.video = None
self.audio_filepath = audio_filepath
# Init transcription variables
self._transcript = None
self._transcript_filepath = output_filepath
def _create_video(self,
video_url: str = "",
*args,
**kwargs,
):
"""
Sets the `filetypes.YtVideo` object with a valid YouTube url.
The user is prompted for a valid YouTube video url. Using the
valid url, a new `filetypes.YtVideo` object is created and bound
to `video`.
Args:
*args: Additional arguments to send to the
`filetypes.YtVideo` object.
video_url: Valid YouTube video url. Optional.
**kwargs: Additional keyword arguments to send to the
`filetypes.YtVideo` object.
"""
print(f"Fetching video for: {video_url}")
if not video_url:
video_url = input("Enter a youtube video url: ")
self.video = filetypes.YtVideo(video_url, *args, **kwargs)
def transcribe_yt_video(self,
video_url: str = "",
remove_audio: bool = True,
*args,
**kwargs,
) -> None:
"""
        Transcribes a YouTube video to text using OpenAI's Whisper.
        Accepts a YouTube url in `video_url` if one does not already
        exist in the object. The video is passed to OpenAI's Whisper
and transcribed. The text is then saved to a file specified by
`transcript_filepath`.
Args:
video_url: YouTube video to transcribe.
remove_audio: Whether to delete the audio file after it's
done being used for transcription.
args: Any additional arguments to be passed to a
`filetypes.YtVideo` object.
kwargs: Any additional arguments to be passed to a
`filetypes.YtVideo` object.
"""
# Check that video exists or user provided an url
if not self.video or video_url:
self._create_video(*args, video_url=video_url, **kwargs)
# Save audio before attempting to transcribe
self.video.save_audio_file()
self.set_audio_filepath(self.video.audio_filepath)
# Transcribe audio
self.transcribe_audio_file(self.get_audio_filepath())
if remove_audio:
self.delete_file(self.audio_filepath)
def transcribe_audio_file(self,
audio_filepath: str = "",
) -> None:
"""Gets valid audio filepath for `_transcribe_audio_file`"""
if not self.audio_filepath and not audio_filepath:
self.set_audio_filepath(input("Enter the filepath (or filename if "
"it's in the same directory) of the "
"audio file:\n>"))
elif audio_filepath:
# self.audio_filepath doesn't exist but audio_filepath arg does
self.set_audio_filepath(audio_filepath)
# Verify that audio file exists
while not os.path.exists(self.audio_filepath):
print(f"Invalid audio filepath: {self.audio_filepath}")
self.set_audio_filepath(input("Please enter a new one:\n>"))
# Transcribe audio
self._transcribe_audio_file(self.audio_filepath)
def _transcribe_audio_file(self,
audio_filepath: str,
) -> None:
"""Transcribes audio file using OpenAIs whisper."""
audio_file = open(audio_filepath, 'rb')
try:
print("Attempting to transcribe audio...")
# Get transcript
self._transcript = openai.Audio.transcribe(
"whisper-1",
file=audio_file,
).get("text")
print("Successfully transcribed audio.")
except openai.error.AuthenticationError as error:
# No API Key was set, or an incorrect one provided.
print("===========ERROR==============")
print(f"Error: {error.error}")
print(f"Code: {error.code}")
print(f"Message: {error.user_message}")
print("==============================")
else:
self.save_transcript()
finally:
audio_file.close()
def save_transcript(self) -> None:
"""Saves `transcript` to a text file at `transcript_filepath`"""
with open(self._transcript_filepath, "w", encoding='utf-8') \
as file:
# Write transcript to file
print("Saving transcript...")
print(self._transcript, file=file)
print(f"Saved transcript to: {self._transcript_filepath}")
def print_transcript(self) -> None:
"""Prints transcript text."""
print("Transcript:")
print(self._transcript)
def set_api_key(self, api_key: str = "") -> None:
"""Sets the api key for OpenAI"""
self._environment.set_openai_api_key(api_key)
def get_api_key(self) -> str:
"""Returns current API key if one exists"""
return self._environment.get_api_key()
def set_audio_filepath(self, filepath: str) -> None:
"""Sets the audio filepath."""
self.audio_filepath = filepath
def get_audio_filepath(self) -> str:
"""Returns the audio filepath"""
return self.audio_filepath
@staticmethod
def delete_file(filepath: str):
"""Deletes `filepath` from system."""
path_head, path_tail = os.path.split(filepath)
# Display path_head if path_tail is empty due to trailing slash
print(f"Removing file: {path_tail or path_head}")
os.remove(filepath)
print(f"Audio file removed at: {filepath}")
| [] |
2024-01-10 | DevGuyRash/ai_notebook | environment.py | from dotenv import load_dotenv
import sys
import openai
from os import environ
class Environment:
"""
Holds information about the environment and environment variables.
Attributes:
env_file_exists (bool): Whether a .env file exists.
_API_KEY (str): API Key to be used with OpenAI.
environment (os.environ): The operating system environment,
including environment variables.
args (list[str, ...]): Arguments passed to the script via
terminal.
args_length (int): How many arguments were passed to the script
via terminal.
"""
def __init__(self):
"""
Constructs a `Environment` object.
"""
# Load environment variables
self.env_file_exists = load_dotenv()
self.environment = environ
self._API_KEY = ""
self.args = sys.argv
self.args_length = len(self.args)
def get_api_key(self) -> str:
"""Returns `_API_KEY` attribute."""
return self._API_KEY
def _set_api_key(self, api_key: str) -> None:
"""Sets `_API_KEY` attribute."""
self._API_KEY = api_key
def set_openai_api_key(self, api_key: str = "") -> None:
"""Sets openai api key and prompts user if one doesn't exist."""
if api_key:
# API key was manually passed to method.
self._set_api_key(api_key)
elif self.env_file_exists:
# API key was not manually passed, but a .env file exists
self._set_api_key(self.environment.get("OPENAI_API_KEY"))
else:
# No passed API key and no .env file
self._set_api_key(input("Enter your api key: "))
openai.api_key = self.get_api_key()
self._save_api_key()
def _save_api_key(self):
"""Saves API key for future use to .env file."""
with open(".env", 'w', encoding="utf-8") as file:
print(f'OPENAI_API_KEY="{self._API_KEY}"', file=file) | [] |
2024-01-10 | robert1003/ash-prompting | src~chatgpt_ash.py | # Same code as notebook but as a script to run easily
# Code to run chat based ASH and ReAct on chatgpt (see use_ash option below to change to ReAct or ASH)
import argparse
import random
import string
import openai
from dotenv import load_dotenv
load_dotenv()
from retry import retry
import re
import os
import sys
sys.path.insert(0, "../WebShop")
from web_agent_site.envs.web_agent_text_env import WebAgentTextEnv
openai.api_key = "<INSERT API KEY HERE>"
# Setup environment
env = WebAgentTextEnv(
observation_mode="text_rich",
render=False,
num_products=None, # 1000 for small product space, None for full product space
)
actor_num_examples = 2
@retry(tries=2, delay=20)
def chatgpt(prior_prompt, cur_traj, type="actor", max_tokens=100, stop=["\n\n"]):
if type == "actor":
messages = [
{
"role": "system",
"content": prior_prompt["intro"]
+ (
"\nYou are given few solved examples to help you understand your task.\n"
if actor_num_examples > 0
else ""
),
},
]
for ex_num, example in enumerate(prior_prompt["examples"]):
i = 1
for interaction in example:
messages += [
{
"role": "system",
"name": "example_user",
"content": (
f"### Instruction:\n{example[0]['ob']}\n\n"
if i == 1
else ""
)
+ f"### Observation {i}:\n"
+ interaction["ob"],
},
{
"role": "system",
"name": "example_assistant",
"content": f"### Action {i}:\n" + interaction["act"],
},
]
i += 1
i = 1
for interaction in cur_traj[1:-1]:
messages += [
{
"role": "user",
"content": (
f"### Instruction:\n{cur_traj[0]['ob']}\n\n" if i == 1 else ""
)
+ f"### Observation {i}:\n"
+ interaction["ob"],
},
{
"role": "assistant",
"content": f"### Action {i}:\n{interaction['act']}",
},
]
i += 1
messages += [
{
"role": "user",
"content": (
f"### Instruction:\n{cur_traj[0]['ob']}\n\n"
if len(cur_traj) <= 2
else ""
)
+ f"### Observation {i}:\n"
+ cur_traj[-1]["ob"],
}
]
elif type == "summarizer":
messages = [
{"role": "system", "content": prior_prompt["intro"]},
]
for ex_num, example in enumerate(prior_prompt["examples"]):
messages += [
{
"role": "system",
"name": "example_user",
"content": f"### Instruction:\n{example['instr']}\n\n### Previous Action of Agent:\n{example['act']}\n\n### Observation:\n{example['ob']}",
},
{
"role": "system",
"name": "example_assistant",
"content": f"### Condensed Observation:\n" + example["resp"],
},
]
messages += [
{
"role": "user",
"content": f"### Instruction:\n{cur_traj[0]['ob']}\n\n### Previous Action of Agent:\n{cur_traj[-2]['act'] if cur_traj[-2]['act'] != '' else 'None'}\n\n### Observation:\n{cur_traj[-1]['ob']}",
},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=stop,
)
return response["choices"][0]["message"]["content"]
def ob2summary(prior_prompt, cur_traj, llm):
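    # Ask the summarizer LLM to condense the latest raw observation, then pull the
    # condensed "info[...]" block out of the response with a regex.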
try:
summary = llm(
prior_prompt, cur_traj, type="summarizer", max_tokens=500, stop=["\n\n"]
).strip()
template = r"info\[[\S\s]+\]"
for res in re.finditer(template, summary):
info = res.group()
if info.startswith("info["):
info = info[5:]
if info.endswith("]"):
info = info[:-1]
if "[Search]" in info:
idx = info.find("[Search]")
info = info[: idx + len("[Search]")]
if "[Buy Now]" in info:
idx = info.find("[Buy Now]")
info = info[: idx + len("[Buy Now]")]
return info, None, summary
return None, f"no summary found in {summary}", summary
except Exception as e:
return None, e, None
def summary2act(prior_prompt, cur_traj, llm, env_info):
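    # Ask the actor LLM for the next action and accept it only if it is a think[...]
    # action or a search/click action that is currently valid in the environment.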
try:
act = llm(prior_prompt, cur_traj, type="actor", stop=["\n\n"]).strip()
available_acts = ["search", "click", "think"]
template = "(" + "|".join(available_acts) + r")\[[^\[\]]+\]"
for res in re.finditer(template, act):
act_str = res.group().strip()
if act_str.startswith("think"):
return act_str, None, act
elif act_str.startswith("search"):
if env_info["has_search_bar"]:
return act_str, None, act
elif act_str.startswith("click"):
if act_str.lower() in list(
map(lambda x: f"click[{x}]", env_info["clickables"])
):
return act_str, None, act
return None, f"no act found in {act}", act
except Exception as e:
return None, e, None
def process_instruction(instr):
template = r"(?<=Instruction:)([\S\s]+)(?=\n)"
    # use re.search (not re.match) so the lookbehind can find "Instruction:" anywhere in the text
    res = re.search(template, instr)
if res is None:
return instr.strip()
return res.group(0).strip()
# Get actor, summary prompts for chatgpt
import sys
sys.path.append("../prompts")
from chatgpt_prompts import actor_prompt, summarizer_prompt
from tqdm import tqdm
resume_idx = 184
llm = chatgpt
max_fail = 5
max_iter = 20
seed = 501
runs = 500
use_ash = True # Make this False if you need ReAct
retain_latest_k = -1
if retain_latest_k > 0:
actor_prompt["intro"] = actor_prompt["intro"].format(
"Only the last {} Observation - Action cycle(s) is shown to you.".format(
retain_latest_k
)
)
else:
actor_prompt["intro"] = actor_prompt["intro"].format("")
# Change filename accordingly here
log_file = open("./log_ash_2_ex_full", "a+")
for i in tqdm(range(resume_idx, runs)):
done = False
counter = 0
invalid_counter = 0
traj = []
rew = 0
need_summary = True
term_status = None
act = ""
# print(f'Instance {i}: seed {seed+i}')
print(f"Instance {i}: seed {seed+i}", file=log_file, flush=True)
random.seed(seed + i)
session = "".join(random.choices(string.ascii_lowercase, k=10))
(ob, _) = env.reset(session=session)
info = env.get_available_actions()
# Instruction with blank action added as the first interaction
ob = "\n".join(ob.strip().split("\n\n"))
traj.append({"ob": process_instruction(ob), "act": ""})
print(f'### Instruction:\n{traj[0]["ob"]}\n', file=log_file, flush=True)
while True:
# terminate if max_iter reached
counter += 1
# Observation
traj.append({"ob": ob, "act": ""})
# print(f'### Observation {counter}:\n{ob}\n')
print(f"### Observation {counter+1}:\n{ob}\n", file=log_file, flush=True)
# Get summary of observation, if needed
if use_ash and need_summary:
summary, err, summary_with_reason = ob2summary(summarizer_prompt, traj, llm)
if err is not None:
# raise Exception(f'Error in ob2summary() of trajectory {seed}:', err)
rew = 0
term_status = "summarizer_error"
break
if ob.endswith("\nThought Through."):
summary += "\nThought Through."
if ob.endswith("\nInvalid Action."):
summary += "\nInvalid Action."
# print(f'### Summary {counter}:\n{summary}\n')
print(f"### Summary {counter+1}:\n{summary}\n", file=log_file, flush=True)
traj[-1]["ob"] = summary
ob = summary
# print(traj)
# Get action
act, err, act_full = summary2act(actor_prompt, traj, llm, info)
if err is not None and (not isinstance(err, str) or "no act found" not in err):
# raise Exception(f'Error in summary2act() of trajectory {seed}:', err)
rew = 0
term_status = "max_token_limit"
break
if ob.endswith("\nThought Through."):
ob = ob[: -len("\nThought Through.")]
if ob.endswith("\nInvalid Action."):
ob = ob[: -len("\nInvalid Action.")]
# Case by case analysis for action
if err is not None and "no act found" in err:
act = act_full
ob = ob + "\nInvalid Action."
need_summary = True
invalid_counter += 1
if invalid_counter >= max_fail:
rew = 0
term_status = "max_fail"
break
# print(f'### Action {counter+1}: {act} (invalid)\nn')
print(
f"### Action {counter+1}:\n{act} (invalid)\n", file=log_file, flush=True
)
else:
invalid_counter = 0
if act.startswith("think"):
ob = ob + "\nThought Through."
need_summary = True
else:
ob, rew, done, _ = env.step(act)
ob = "\n".join(ob.strip().split("\n\n"))
info = env.get_available_actions()
need_summary = True
# print(f'### Action {counter+1}: {act}\n')
print(f"### Action {counter+1}:\n{act}\n", file=log_file, flush=True)
if done:
break
traj[-1]["act"] = act
if counter >= max_iter:
rew = 0
term_status = "max_iter"
break
# print('reward', rew, 'term_status', term_status)
print(
"reward",
rew,
"term_status",
term_status,
"\n===============================\n\n",
file=log_file,
flush=True,
)
| [
"\nYou are given few solved examples to help you understand your task.\n",
"### Observation PLACEHOLDER:\n",
"intro",
"### Instruction:\nPLACEHOLDER\n\n### Previous Action of Agent:\nPLACEHOLDER\n\n### Observation:\nPLACEHOLDER",
"(?<=Instruction:)([\\S\\s]+)(?=\\n)",
"None",
")\\[[^\\[\\]]+\\]",
"### Action PLACEHOLDER:\nPLACEHOLDER",
"info\\[[\\S\\s]+\\]",
"### Condensed Observation:\nPLACEHOLDER"
] |
2024-01-10 | EdrumZ/AudioGPT | audio-chatgpt.py | import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'text_to_audio/Make_An_Audio'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'audio_detection'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mono2binaural'))
import gradio as gr
import matplotlib
import librosa
import torch
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import re
import uuid
import soundfile
from PIL import Image
import numpy as np
from omegaconf import OmegaConf
from einops import repeat
from ldm.util import instantiate_from_config
from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000
from vocoder.bigvgan.models import VocoderBigVGAN
from ldm.models.diffusion.ddim import DDIMSampler
import whisper
from utils.hparams import set_hparams
from utils.hparams import hparams as hp
import scipy.io.wavfile as wavfile
import librosa
from audio_infer.utils import config as detection_config
from audio_infer.pytorch.models import PVT
import clip
import numpy as np
AUDIO_CHATGPT_PREFIX = """AudioGPT
AudioGPT can not directly read audios, but it has a list of tools to finish different speech, audio, and singing voice tasks. Each audio will have a file name formed as "audio/xxx.wav". When talking about audios, AudioGPT is very strict to the file name and will never fabricate nonexistent files.
AudioGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the audio content and audio file name. It will remember to provide the file name from the last tool observation, if a new audio is generated.
Human may provide new audios to AudioGPT with a description. The description helps AudioGPT to understand this audio, but AudioGPT should use tools to finish following tasks, rather than directly imagine from the description.
Overall, AudioGPT is a powerful audio dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics.
TOOLS:
------
AudioGPT has access to the following tools:"""
AUDIO_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
AUDIO_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if not exists.
You will remember to provide the audio file name loyally if it's provided in the last tool observation.
Begin!
Previous conversation history:
{chat_history}
New input: {input}
Thought: Do I need to use a tool? {agent_scratchpad}"""
def cut_dialogue_history(history_memory, keep_last_n_words = 500):
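    # Trim the chat history from the front, dropping whole paragraphs until the
    # remaining text fits within the keep_last_n_words budget.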
tokens = history_memory.split()
n_tokens = len(tokens)
print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
if n_tokens < keep_last_n_words:
return history_memory
else:
paragraphs = history_memory.split('\n')
last_n_tokens = n_tokens
while last_n_tokens >= keep_last_n_words:
last_n_tokens = last_n_tokens - len(paragraphs[0].split(' '))
paragraphs = paragraphs[1:]
return '\n' + '\n'.join(paragraphs)
def merge_audio(audio_path_1, audio_path_2):
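    # Concatenate the two wav files end-to-end; this assumes both files share the
    # same sample rate (the output is written with the second file's rate).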
merged_signal = []
sr_1, signal_1 = wavfile.read(audio_path_1)
sr_2, signal_2 = wavfile.read(audio_path_2)
merged_signal.append(signal_1)
merged_signal.append(signal_2)
merged_signal = np.hstack(merged_signal)
merged_signal = np.asarray(merged_signal, dtype=np.int16)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
wavfile.write(audio_filename, sr_2, merged_signal)
return audio_filename
class T2I:
def __init__(self, device):
from transformers import AutoModelForCausalLM, AutoTokenizer
from diffusers import StableDiffusionPipeline
from transformers import pipeline
print("Initializing T2I to %s" % device)
self.device = device
self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device)
self.pipe.to(device)
def inference(self, text):
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"]
print(f'{text} refined to {refined_text}')
image = self.pipe(refined_text).images[0]
image.save(image_filename)
print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}")
return image_filename
class ImageCaptioning:
def __init__(self, device):
from transformers import BlipProcessor, BlipForConditionalGeneration
print("Initializing ImageCaptioning to %s" % device)
self.device = device
self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)
def inference(self, image_path):
inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device)
out = self.model.generate(**inputs)
captions = self.processor.decode(out[0], skip_special_tokens=True)
return captions
class T2A:
def __init__(self, device):
print("Initializing Make-An-Audio to %s" % device)
self.device = device
self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/text_to_audio/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta40multi_epoch=000085.ckpt', device=device)
self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
def _initialize_model(self, config, ckpt, device):
config = OmegaConf.load(config)
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
model = model.to(device)
model.cond_stage_model.to(model.device)
model.cond_stage_model.device = model.device
sampler = DDIMSampler(model)
return sampler
def txt2audio(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
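        # Sample n_samples mel spectrograms with DDIM under classifier-free guidance
        # (unconditional embedding uc vs. text conditioning c), vocode each one, and
        # return the sample that best matches the prompt according to CLAP.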
SAMPLE_RATE = 16000
prng = np.random.RandomState(seed)
start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
c = self.sampler.model.get_learned_conditioning(n_samples * [text])
shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x)
samples_ddim, _ = self.sampler.sample(S = ddim_steps,
conditioning = c,
batch_size = n_samples,
shape = shape,
verbose = False,
unconditional_guidance_scale = scale,
unconditional_conditioning = uc,
x_T = start_code)
x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
wav_list = []
for idx,spec in enumerate(x_samples_ddim):
wav = self.vocoder.vocode(spec)
wav_list.append((SAMPLE_RATE,wav))
best_wav = self.select_best_audio(text, wav_list)
return best_wav
def select_best_audio(self, prompt, wav_list):
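        # Score every generated waveform against the text prompt with CLAP and
        # return the sample with the highest text-audio similarity.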
from wav_evaluation.models.CLAPWrapper import CLAPWrapper
clap_model = CLAPWrapper('text_to_audio/Make_An_Audio/useful_ckpts/CLAP/CLAP_weights_2022.pth', 'text_to_audio/Make_An_Audio/useful_ckpts/CLAP/config.yml',
use_cuda=torch.cuda.is_available())
text_embeddings = clap_model.get_text_embeddings([prompt])
score_list = []
for data in wav_list:
sr, wav = data
audio_embeddings = clap_model.get_audio_embeddings([(torch.FloatTensor(wav), sr)], resample=True)
score = clap_model.compute_similarity(audio_embeddings, text_embeddings,
use_logit_scale=False).squeeze().cpu().numpy()
score_list.append(score)
max_index = np.array(score_list).argmax()
print(score_list, max_index)
return wav_list[max_index]
def inference(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
melbins,mel_len = 80,624
with torch.no_grad():
result = self.txt2audio(
text = text,
H = melbins,
W = mel_len
)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, result[1], samplerate = 16000)
print(f"Processed T2I.run, text: {text}, audio_filename: {audio_filename}")
return audio_filename
class I2A:
def __init__(self, device):
print("Initializing Make-An-Audio-Image to %s" % device)
self.device = device
self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/img_to_audio/img2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta54_epoch=000216.ckpt', device=device)
self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
def _initialize_model(self, config, ckpt, device):
config = OmegaConf.load(config)
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
model = model.to(device)
model.cond_stage_model.to(model.device)
model.cond_stage_model.device = model.device
sampler = DDIMSampler(model)
return sampler
def img2audio(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
SAMPLE_RATE = 16000
n_samples = 1 # only support 1 sample
prng = np.random.RandomState(seed)
start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
#image = Image.fromarray(image)
image = Image.open(image)
image = self.sampler.model.cond_stage_model.preprocess(image).unsqueeze(0)
image_embedding = self.sampler.model.cond_stage_model.forward_img(image)
c = image_embedding.repeat(n_samples, 1, 1)
shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x)
samples_ddim, _ = self.sampler.sample(S=ddim_steps,
conditioning=c,
batch_size=n_samples,
shape=shape,
verbose=False,
unconditional_guidance_scale=scale,
unconditional_conditioning=uc,
x_T=start_code)
x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
wav_list = []
for idx,spec in enumerate(x_samples_ddim):
wav = self.vocoder.vocode(spec)
wav_list.append((SAMPLE_RATE,wav))
best_wav = wav_list[0]
return best_wav
def inference(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
melbins,mel_len = 80,624
with torch.no_grad():
result = self.img2audio(
image=image,
H=melbins,
W=mel_len
)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, result[1], samplerate = 16000)
print(f"Processed I2a.run, image_filename: {image}, audio_filename: {audio_filename}")
return audio_filename
class TTS:
def __init__(self, device=None):
from inference.tts.PortaSpeech import TTSInference
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing PortaSpeech to %s" % device)
self.device = device
self.exp_name = 'checkpoints/ps_adv_baseline'
self.set_model_hparams()
self.inferencer = TTSInference(self.hp, device)
def set_model_hparams(self):
set_hparams(exp_name=self.exp_name, print_hparams=False)
self.hp = hp
def inference(self, text):
self.set_model_hparams()
inp = {"text": text}
out = self.inferencer.infer_once(inp)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, out, samplerate=22050)
return audio_filename
class T2S:
def __init__(self, device= None):
from inference.svs.ds_e2e import DiffSingerE2EInfer
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing DiffSinger to %s" % device)
self.device = device
self.exp_name = 'checkpoints/0831_opencpop_ds1000'
self.config= 'NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml'
self.set_model_hparams()
self.pipe = DiffSingerE2EInfer(self.hp, device)
self.default_inp = {
'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP',
'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest',
'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590'
}
def set_model_hparams(self):
set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
self.hp = hp
def inference(self, inputs):
self.set_model_hparams()
val = inputs.split(",")
key = ['text', 'notes', 'notes_duration']
try:
inp = {k: v for k, v in zip(key, val)}
wav = self.pipe.infer_once(inp)
except:
            print('Error occurred. Generating default audio sample.\n')
inp = self.default_inp
wav = self.pipe.infer_once(inp)
#if inputs == '' or len(val) < len(key):
# inp = self.default_inp
#else:
# inp = {k:v for k,v in zip(key,val)}
#wav = self.pipe.infer_once(inp)
wav *= 32767
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
print(f"Processed T2S.run, audio_filename: {audio_filename}")
return audio_filename
class t2s_VISinger:
def __init__(self, device=None):
from espnet2.bin.svs_inference import SingingGenerate
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing VISingere to %s" % device)
tag = 'AQuarterMile/opencpop_visinger1'
self.model = SingingGenerate.from_pretrained(
model_tag=str_or_none(tag),
device=device,
)
phn_dur = [[0. , 0.219 ],
[0.219 , 0.50599998],
[0.50599998, 0.71399999],
[0.71399999, 1.097 ],
[1.097 , 1.28799999],
[1.28799999, 1.98300004],
[1.98300004, 7.10500002],
[7.10500002, 7.60400009]]
phn = ['sh', 'i', 'q', 'v', 'n', 'i', 'SP', 'AP']
score = [[0, 0.50625, 'sh_i', 58, 'sh_i'], [0.50625, 1.09728, 'q_v', 56, 'q_v'], [1.09728, 1.9832100000000001, 'n_i', 53, 'n_i'], [1.9832100000000001, 7.105360000000001, 'SP', 0, 'SP'], [7.105360000000001, 7.604390000000001, 'AP', 0, 'AP']]
tempo = 70
tmp = {}
tmp["label"] = phn_dur, phn
tmp["score"] = tempo, score
self.default_inp = tmp
def inference(self, inputs):
val = inputs.split(",")
key = ['text', 'notes', 'notes_duration']
try: # TODO: input will be update
inp = {k: v for k, v in zip(key, val)}
wav = self.model(text=inp)["wav"]
except:
print('Error occurs. Generate default audio sample.\n')
inp = self.default_inp
wav = self.model(text=inp)["wav"]
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, wav, samplerate=self.model.fs)
return audio_filename
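# Style-transfer TTS based on GenerSpeech: synthesizes speech whose timbre, emotion and
# prosody follow a reference audio clip while speaking the given text.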
class TTS_OOD:
def __init__(self, device):
from inference.tts.GenerSpeech import GenerSpeechInfer
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing GenerSpeech to %s" % device)
self.device = device
self.exp_name = 'checkpoints/GenerSpeech'
self.config = 'NeuralSeq/modules/GenerSpeech/config/generspeech.yaml'
self.set_model_hparams()
self.pipe = GenerSpeechInfer(self.hp, device)
def set_model_hparams(self):
set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
f0_stats_fn = f'{hp["binary_data_dir"]}/train_f0s_mean_std.npy'
if os.path.exists(f0_stats_fn):
hp['f0_mean'], hp['f0_std'] = np.load(f0_stats_fn)
hp['f0_mean'] = float(hp['f0_mean'])
hp['f0_std'] = float(hp['f0_std'])
hp['emotion_encoder_path'] = 'checkpoints/Emotion_encoder.pt'
self.hp = hp
def inference(self, inputs):
self.set_model_hparams()
key = ['ref_audio', 'text']
val = inputs.split(",")
inp = {k: v for k, v in zip(key, val)}
wav = self.pipe.infer_once(inp)
wav *= 32767
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
print(
f"Processed GenerSpeech.run. Input text:{val[1]}. Input reference audio: {val[0]}. Output Audio_filename: {audio_filename}")
return audio_filename
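# Audio inpainting with Make-An-Audio: masks a region of the mel spectrogram, fills it in
# with DDIM sampling in latent space, and vocodes the result back to a waveform.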
class Inpaint:
def __init__(self, device):
print("Initializing Make-An-Audio-inpaint to %s" % device)
self.device = device
self.sampler = self._initialize_model_inpaint('text_to_audio/Make_An_Audio/configs/inpaint/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/inpaint7_epoch00047.ckpt')
self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
self.cmap_transform = matplotlib.cm.viridis
def _initialize_model_inpaint(self, config, ckpt):
config = OmegaConf.load(config)
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = model.to(device)
print(model.device, device, model.cond_stage_model.device)
sampler = DDIMSampler(model)
return sampler
def make_batch_sd(self, mel, mask, num_samples=1):
mel = torch.from_numpy(mel)[None,None,...].to(dtype=torch.float32)
mask = torch.from_numpy(mask)[None,None,...].to(dtype=torch.float32)
masked_mel = (1 - mask) * mel
mel = mel * 2 - 1
mask = mask * 2 - 1
masked_mel = masked_mel * 2 -1
batch = {
"mel": repeat(mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
"mask": repeat(mask.to(device=self.device), "1 ... -> n ...", n=num_samples),
"masked_mel": repeat(masked_mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
}
return batch
def gen_mel(self, input_audio_path):
SAMPLE_RATE = 16000
sr, ori_wav = wavfile.read(input_audio_path)
print("gen_mel")
print(sr,ori_wav.shape,ori_wav)
ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
if len(ori_wav.shape)==2:# stereo
ori_wav = librosa.to_mono(ori_wav.T)
print(sr,ori_wav.shape,ori_wav)
ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
mel_len,hop_size = 848,256
input_len = mel_len * hop_size
if len(ori_wav) < input_len:
input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
else:
input_wav = ori_wav[:input_len]
mel = TRANSFORMS_16000(input_wav)
return mel
def gen_mel_audio(self, input_audio):
SAMPLE_RATE = 16000
sr,ori_wav = input_audio
print("gen_mel_audio")
print(sr,ori_wav.shape,ori_wav)
ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
if len(ori_wav.shape)==2:# stereo
ori_wav = librosa.to_mono(ori_wav.T)
print(sr,ori_wav.shape,ori_wav)
ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
mel_len,hop_size = 848,256
input_len = mel_len * hop_size
if len(ori_wav) < input_len:
input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
else:
input_wav = ori_wav[:input_len]
mel = TRANSFORMS_16000(input_wav)
return mel
def show_mel_fn(self, input_audio_path):
crop_len = 500
crop_mel = self.gen_mel(input_audio_path)[:,:crop_len]
color_mel = self.cmap_transform(crop_mel)
image = Image.fromarray((color_mel*255).astype(np.uint8))
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
image.save(image_filename)
return image_filename
def inpaint(self, batch, seed, ddim_steps, num_samples=1, W=512, H=512):
model = self.sampler.model
prng = np.random.RandomState(seed)
start_code = prng.randn(num_samples, model.first_stage_model.embed_dim, H // 8, W // 8)
start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
c = model.get_first_stage_encoding(model.encode_first_stage(batch["masked_mel"]))
cc = torch.nn.functional.interpolate(batch["mask"],
size=c.shape[-2:])
c = torch.cat((c, cc), dim=1) # (b,c+1,h,w) 1 is mask
shape = (c.shape[1]-1,)+c.shape[2:]
samples_ddim, _ = self.sampler.sample(S=ddim_steps,
conditioning=c,
batch_size=c.shape[0],
shape=shape,
verbose=False)
x_samples_ddim = model.decode_first_stage(samples_ddim)
mel = torch.clamp((batch["mel"]+1.0)/2.0,min=0.0, max=1.0)
mask = torch.clamp((batch["mask"]+1.0)/2.0,min=0.0, max=1.0)
predicted_mel = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0)
inpainted = (1-mask)*mel+mask*predicted_mel
inpainted = inpainted.cpu().numpy().squeeze()
inpaint_wav = self.vocoder.vocode(inpainted)
return inpainted, inpaint_wav
def inference(self, input_audio, mel_and_mask, seed = 55, ddim_steps = 100):
SAMPLE_RATE = 16000
torch.set_grad_enabled(False)
mel_img = Image.open(mel_and_mask['image'])
mask_img = Image.open(mel_and_mask["mask"])
show_mel = np.array(mel_img.convert("L"))/255
mask = np.array(mask_img.convert("L"))/255
mel_bins,mel_len = 80,848
input_mel = self.gen_mel_audio(input_audio)[:,:mel_len]
mask = np.pad(mask,((0,0),(0,mel_len-mask.shape[1])),mode='constant',constant_values=0)
print(mask.shape,input_mel.shape)
with torch.no_grad():
batch = self.make_batch_sd(input_mel,mask,num_samples=1)
inpainted,gen_wav = self.inpaint(
batch=batch,
seed=seed,
ddim_steps=ddim_steps,
num_samples=1,
H=mel_bins, W=mel_len
)
inpainted = inpainted[:,:show_mel.shape[1]]
color_mel = self.cmap_transform(inpainted)
input_len = int(input_audio[1].shape[0] * SAMPLE_RATE / input_audio[0])
gen_wav = (gen_wav * 32768).astype(np.int16)[:input_len]
image = Image.fromarray((color_mel*255).astype(np.uint8))
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
image.save(image_filename)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, gen_wav, samplerate = 16000)
return image_filename, audio_filename
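# Speech recognition via OpenAI Whisper (base model); also exposes a helper that
# transcribes an audio file into English text.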
class ASR:
def __init__(self, device):
print("Initializing Whisper to %s" % device)
self.device = device
self.model = whisper.load_model("base", device=device)
def inference(self, audio_path):
audio = whisper.load_audio(audio_path)
audio = whisper.pad_or_trim(audio)
mel = whisper.log_mel_spectrogram(audio).to(self.device)
_, probs = self.model.detect_language(mel)
options = whisper.DecodingOptions()
result = whisper.decode(self.model, mel, options)
return result.text
def translate_english(self, audio_path):
audio = self.model.transcribe(audio_path, language='English')
return audio['text']
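# Audio captioning: describes the content of an audio clip in natural language using the
# AudioCaps CNN14+RNN+Transformer checkpoint loaded below.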
class A2T:
def __init__(self, device):
from audio_to_text.inference_waveform import AudioCapModel
print("Initializing Audio-To-Text Model to %s" % device)
self.device = device
self.model = AudioCapModel("audio_to_text/audiocaps_cntrstv_cnn14rnn_trm")
def inference(self, audio_path):
audio = whisper.load_audio(audio_path)
caption_text = self.model(audio)
return caption_text[0]
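# Audio-driven talking-face generation with GeneFace: renders a portrait video lip-synced
# to the input audio and returns the path of the resulting mp4.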
class GeneFace:
def __init__(self, device=None):
print("Initializing GeneFace model to %s" % device)
from audio_to_face.GeneFace_binding import GeneFaceInfer
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
self.geneface_model = GeneFaceInfer(device)
print("Loaded GeneFace model")
def inference(self, audio_path):
audio_base_name = os.path.basename(audio_path)[:-4]
out_video_name = audio_path.replace("audio","video").replace(".wav", ".mp4")
inp = {
'audio_source_name': audio_path,
'out_npy_name': f'geneface/tmp/{audio_base_name}.npy',
'cond_name': f'geneface/tmp/{audio_base_name}.npy',
'out_video_name': out_video_name,
'tmp_imgs_dir': f'video/tmp_imgs',
}
self.geneface_model.infer_once(inp)
return out_video_name
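# Frame-level sound event detection: runs a PVT audio tagging model over the waveform and
# plots the log spectrogram together with the top-10 predicted event activations.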
class SoundDetection:
def __init__(self, device):
self.device = device
self.sample_rate = 32000
self.window_size = 1024
self.hop_size = 320
self.mel_bins = 64
self.fmin = 50
self.fmax = 14000
self.model_type = 'PVT'
self.checkpoint_path = 'audio_detection/audio_infer/useful_ckpts/audio_detection.pth'
self.classes_num = detection_config.classes_num
self.labels = detection_config.labels
self.frames_per_second = self.sample_rate // self.hop_size
# Model = eval(self.model_type)
self.model = PVT(sample_rate=self.sample_rate, window_size=self.window_size,
hop_size=self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax,
classes_num=self.classes_num)
checkpoint = torch.load(self.checkpoint_path, map_location=self.device)
self.model.load_state_dict(checkpoint['model'])
self.model.to(device)
def inference(self, audio_path):
# Forward
(waveform, _) = librosa.core.load(audio_path, sr=self.sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = torch.from_numpy(waveform)
waveform = waveform.to(self.device)
# Forward
with torch.no_grad():
self.model.eval()
batch_output_dict = self.model(waveform, None)
framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
"""(time_steps, classes_num)"""
# print('Sound event detection result (time_steps x classes_num): {}'.format(
# framewise_output.shape))
import numpy as np
import matplotlib.pyplot as plt
sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]
top_k = 10 # Show top results
top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]]
"""(time_steps, top_k)"""
# Plot result
stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=self.window_size,
hop_length=self.hop_size, window='hann', center=True)
frames_num = stft.shape[-1]
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
axs[0].set_ylabel('Frequency bins')
axs[0].set_title('Log spectrogram')
axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
axs[1].xaxis.set_ticks(np.arange(0, frames_num, self.frames_per_second))
axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / self.frames_per_second))
axs[1].yaxis.set_ticks(np.arange(0, top_k))
axs[1].yaxis.set_ticklabels(np.array(self.labels)[sorted_indexes[0 : top_k]])
axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
axs[1].set_xlabel('Seconds')
axs[1].xaxis.set_ticks_position('bottom')
plt.tight_layout()
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
plt.savefig(image_filename)
return image_filename
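# Language-queried source separation with LASSNet: extracts the sound described by a text
# query from a mixture by predicting a magnitude mask on the STFT.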
class SoundExtraction:
def __init__(self, device):
from sound_extraction.model.LASSNet import LASSNet
from sound_extraction.utils.stft import STFT
import torch.nn as nn
self.device = device
self.model_file = 'sound_extraction/useful_ckpts/LASSNet.pt'
self.stft = STFT()
self.model = nn.DataParallel(LASSNet(device)).to(device)
checkpoint = torch.load(self.model_file)
self.model.load_state_dict(checkpoint['model'])
self.model.eval()
def inference(self, inputs):
#key = ['ref_audio', 'text']
from sound_extraction.utils.wav_io import load_wav, save_wav
val = inputs.split(",")
audio_path = val[0] # audio_path, text
text = val[1]
waveform = load_wav(audio_path)
waveform = torch.tensor(waveform).transpose(1,0)
mixed_mag, mixed_phase = self.stft.transform(waveform)
text_query = ['[CLS] ' + text]
mixed_mag = mixed_mag.transpose(2,1).unsqueeze(0).to(self.device)
est_mask = self.model(mixed_mag, text_query)
est_mag = est_mask * mixed_mag
est_mag = est_mag.squeeze(1)
est_mag = est_mag.permute(0, 2, 1)
est_wav = self.stft.inverse(est_mag.cpu().detach(), mixed_phase)
est_wav = est_wav.squeeze(0).squeeze(0).numpy()
#est_path = f'output/est{i}.wav'
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
print('audio_filename ', audio_filename)
save_wav(est_wav, audio_filename)
return audio_filename
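# Mono-to-binaural rendering: warps a mono recording into two-channel binaural audio using
# a randomly chosen listener trajectory and chunked forwarding through the network.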
class Binaural:
def __init__(self, device):
from src.models import BinauralNetwork
self.device = device
self.model_file = 'mono2binaural/useful_ckpts/m2b/binaural_network.net'
self.position_file = ['mono2binaural/useful_ckpts/m2b/tx_positions.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions2.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions3.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions4.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions5.txt']
self.net = BinauralNetwork(view_dim=7,
warpnet_layers=4,
warpnet_channels=64,
)
self.net.load_from_file(self.model_file)
self.sr = 48000
def inference(self, audio_path):
mono, sr = librosa.load(path=audio_path, sr=self.sr, mono=True)
mono = torch.from_numpy(mono)
mono = mono.unsqueeze(0)
import numpy as np
import random
rand_int = random.randint(0,4)
view = np.loadtxt(self.position_file[rand_int]).transpose().astype(np.float32)
view = torch.from_numpy(view)
if not view.shape[-1] * 400 == mono.shape[-1]:
mono = mono[:,:(mono.shape[-1]//400)*400] #
if view.shape[1]*400 > mono.shape[1]:
m_a = view.shape[1] - mono.shape[-1]//400
rand_st = random.randint(0,m_a)
view = view[:,m_a:m_a+(mono.shape[-1]//400)] #
# binauralize and save output
self.net.eval().to(self.device)
mono, view = mono.to(self.device), view.to(self.device)
chunk_size = 48000 # forward in chunks of 1s
rec_field = 1000 # add 1000 samples as "safe bet" since warping has undefined rec. field
rec_field -= rec_field % 400 # make sure rec_field is a multiple of 400 to match audio and view frequencies
chunks = [
{
"mono": mono[:, max(0, i-rec_field):i+chunk_size],
"view": view[:, max(0, i-rec_field)//400:(i+chunk_size)//400]
}
for i in range(0, mono.shape[-1], chunk_size)
]
for i, chunk in enumerate(chunks):
with torch.no_grad():
mono = chunk["mono"].unsqueeze(0)
view = chunk["view"].unsqueeze(0)
binaural = self.net(mono, view).squeeze(0)
if i > 0:
binaural = binaural[:, -(mono.shape[-1]-rec_field):]
chunk["binaural"] = binaural
binaural = torch.cat([chunk["binaural"] for chunk in chunks], dim=-1)
binaural = torch.clamp(binaural, min=-1, max=1).cpu()
#binaural = chunked_forwarding(net, mono, view)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
import torchaudio
torchaudio.save(audio_filename, binaural, sr)
#soundfile.write(audio_filename, binaural, samplerate = 48000)
print(f"Processed Binaural.run, audio_filename: {audio_filename}")
return audio_filename
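# Target sound detection: matches the text query to a known event class via CLIP embeddings,
# then predicts the onset/offset times of that event in the input audio.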
class TargetSoundDetection:
def __init__(self, device):
from target_sound_detection.src import models as tsd_models
from target_sound_detection.src.models import event_labels
self.device = device
self.MEL_ARGS = {
'n_mels': 64,
'n_fft': 2048,
'hop_length': int(22050 * 20 / 1000),
'win_length': int(22050 * 40 / 1000)
}
self.EPS = np.spacing(1)
self.clip_model, _ = clip.load("ViT-B/32", device=self.device)
self.event_labels = event_labels
self.id_to_event = {i : label for i, label in enumerate(self.event_labels)}
config = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth', map_location='cpu')
config_parameters = dict(config)
config_parameters['tao'] = 0.6
if 'thres' not in config_parameters.keys():
config_parameters['thres'] = 0.5
if 'time_resolution' not in config_parameters.keys():
config_parameters['time_resolution'] = 125
model_parameters = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt'
, map_location=lambda storage, loc: storage) # load parameter
self.model = getattr(tsd_models, config_parameters['model'])(config_parameters,
inputdim=64, outputdim=2, time_resolution=config_parameters['time_resolution'], **config_parameters['model_args'])
self.model.load_state_dict(model_parameters)
self.model = self.model.to(self.device).eval()
self.re_embeds = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth')
self.ref_mel = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth')
def extract_feature(self, fname):
import soundfile as sf
y, sr = sf.read(fname, dtype='float32')
print('y ', y.shape)
ti = y.shape[0]/sr
if y.ndim > 1:
y = y.mean(1)
y = librosa.resample(y, orig_sr=sr, target_sr=22050)
lms_feature = np.log(librosa.feature.melspectrogram(y=y, **self.MEL_ARGS) + self.EPS).T
return lms_feature,ti
def build_clip(self, text):
text = clip.tokenize(text).to(self.device) # ["a diagram with dog", "a dog", "a cat"]
text_features = self.clip_model.encode_text(text)
return text_features
def cal_similarity(self, target, retrievals):
ans = []
#target =torch.from_numpy(target)
for name in retrievals.keys():
tmp = retrievals[name]
#tmp = torch.from_numpy(tmp)
s = torch.cosine_similarity(target.squeeze(), tmp.squeeze(), dim=0)
ans.append(s.item())
return ans.index(max(ans))
def inference(self, text, audio_path):
from target_sound_detection.src.utils import median_filter, decode_with_timestamps
target_emb = self.build_clip(text) # torch type
idx = self.cal_similarity(target_emb, self.re_embeds)
target_event = self.id_to_event[idx]
embedding = self.ref_mel[target_event]
embedding = torch.from_numpy(embedding)
embedding = embedding.unsqueeze(0).to(self.device).float()
#print('embedding ', embedding.shape)
inputs,ti = self.extract_feature(audio_path)
#print('ti ', ti)
inputs = torch.from_numpy(inputs)
inputs = inputs.unsqueeze(0).to(self.device).float()
#print('inputs ', inputs.shape)
decision, decision_up, logit = self.model(inputs, embedding)
pred = decision_up.detach().cpu().numpy()
pred = pred[:,:,0]
frame_num = decision_up.shape[1]
time_ratio = ti / frame_num
filtered_pred = median_filter(pred, window_size=1, threshold=0.5)
#print('filtered_pred ', filtered_pred)
time_predictions = []
for index_k in range(filtered_pred.shape[0]):
decoded_pred = []
decoded_pred_ = decode_with_timestamps(target_event, filtered_pred[index_k,:])
if len(decoded_pred_) == 0: # neg deal
decoded_pred_.append((target_event, 0, 0))
decoded_pred.append(decoded_pred_)
for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
cur_pred = pred[num_batch]
# Save each frame output, for later visualization
label_prediction = decoded_pred[num_batch] # frame predict
# print(label_prediction)
for event_label, onset, offset in label_prediction:
time_predictions.append({
'onset': onset*time_ratio,
'offset': offset*time_ratio,})
ans = ''
for i,item in enumerate(time_predictions):
ans = ans + 'segment' + str(i+1) + ' start_time: ' + str(item['onset']) + ' end_time: ' + str(item['offset']) + '\t'
#print(ans)
return ans
# class Speech_Enh_SS_SC:
# """Speech Enhancement or Separation in single-channel
# Example usage:
# enh_model = Speech_Enh_SS("cuda")
# enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
# """
# def __init__(self, device="cuda", model_name="lichenda/chime4_fasnet_dprnn_tac"):
# self.model_name = model_name
# self.device = device
# print("Initializing ESPnet Enh to %s" % device)
# self._initialize_model()
# def _initialize_model(self):
# from espnet_model_zoo.downloader import ModelDownloader
# from espnet2.bin.enh_inference import SeparateSpeech
# d = ModelDownloader()
# cfg = d.download_and_unpack(self.model_name)
# self.separate_speech = SeparateSpeech(
# train_config=cfg["train_config"],
# model_file=cfg["model_file"],
# # for segment-wise process on long speech
# segment_size=2.4,
# hop_size=0.8,
# normalize_segment_scale=False,
# show_progressbar=True,
# ref_channel=None,
# normalize_output_wav=True,
# device=self.device,
# )
# def inference(self, speech_path, ref_channel=0):
# speech, sr = soundfile.read(speech_path)
# speech = speech[:, ref_channel]
# assert speech.dim() == 1
# enh_speech = self.separate_speech(speech[None, ], fs=sr)
# if len(enh_speech) == 1:
# return enh_speech[0]
# return enh_speech
# class Speech_Enh_SS_MC:
# """Speech Enhancement or Separation in multi-channel"""
# def __init__(self, device="cuda", model_name=None, ref_channel=4):
# self.model_name = model_name
# self.ref_channel = ref_channel
# self.device = device
# print("Initializing ESPnet Enh to %s" % device)
# self._initialize_model()
# def _initialize_model(self):
# from espnet_model_zoo.downloader import ModelDownloader
# from espnet2.bin.enh_inference import SeparateSpeech
# d = ModelDownloader()
# cfg = d.download_and_unpack(self.model_name)
# self.separate_speech = SeparateSpeech(
# train_config=cfg["train_config"],
# model_file=cfg["model_file"],
# # for segment-wise process on long speech
# segment_size=2.4,
# hop_size=0.8,
# normalize_segment_scale=False,
# show_progressbar=True,
# ref_channel=self.ref_channel,
# normalize_output_wav=True,
# device=self.device,
# )
# def inference(self, speech_path):
# speech, sr = soundfile.read(speech_path)
# speech = speech.T
# enh_speech = self.separate_speech(speech[None, ...], fs=sr)
# if len(enh_speech) == 1:
# return enh_speech[0]
# return enh_speech
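# Single-channel speech enhancement with an ESPnet Conv-TasNet model: denoises the input
# recording and writes the enhanced waveform to a new wav file.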
class Speech_Enh_SS_SC:
"""Speech Enhancement or Separation in single-channel
Example usage:
enh_model = Speech_Enh_SS("cuda")
enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
"""
def __init__(self, device="cuda", model_name="espnet/Wangyou_Zhang_chime4_enh_train_enh_conv_tasnet_raw"):
self.model_name = model_name
self.device = device
print("Initializing ESPnet Enh to %s" % device)
self._initialize_model()
def _initialize_model(self):
from espnet_model_zoo.downloader import ModelDownloader
from espnet2.bin.enh_inference import SeparateSpeech
d = ModelDownloader()
cfg = d.download_and_unpack(self.model_name)
self.separate_speech = SeparateSpeech(
train_config=cfg["train_config"],
model_file=cfg["model_file"],
# for segment-wise process on long speech
segment_size=2.4,
hop_size=0.8,
normalize_segment_scale=False,
show_progressbar=True,
ref_channel=None,
normalize_output_wav=True,
device=self.device,
)
def inference(self, speech_path, ref_channel=0):
speech, sr = soundfile.read(speech_path)
speech = speech[:, ref_channel]
# speech = torch.from_numpy(speech)
# assert speech.dim() == 1
enh_speech = self.separate_speech(speech[None, ...], fs=sr)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# if len(enh_speech) == 1:
soundfile.write(audio_filename, enh_speech[0].squeeze(), samplerate=sr)
# return enh_speech[0]
# return enh_speech
# else:
# print("############")
# audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
# audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
# audio_filename = merge_audio(audio_filename_1, audio_filename_2)
return audio_filename
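# Speech separation with an ESPnet SkiM model (wsj0-2mix): splits a two-speaker mixture and
# either returns a single file or merges the separated tracks into one clip.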
class Speech_SS:
def __init__(self, device="cuda", model_name="lichenda/wsj0_2mix_skim_noncausal"):
self.model_name = model_name
self.device = device
print("Initializing ESPnet SS to %s" % device)
self._initialize_model()
def _initialize_model(self):
from espnet_model_zoo.downloader import ModelDownloader
from espnet2.bin.enh_inference import SeparateSpeech
d = ModelDownloader()
cfg = d.download_and_unpack(self.model_name)
self.separate_speech = SeparateSpeech(
train_config=cfg["train_config"],
model_file=cfg["model_file"],
# for segment-wise process on long speech
segment_size=2.4,
hop_size=0.8,
normalize_segment_scale=False,
show_progressbar=True,
ref_channel=None,
normalize_output_wav=True,
device=self.device,
)
def inference(self, speech_path):
speech, sr = soundfile.read(speech_path)
enh_speech = self.separate_speech(speech[None, ...], fs=sr)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
if len(enh_speech) == 1:
soundfile.write(audio_filename, enh_speech[0], samplerate=sr)
else:
# print("############")
audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
audio_filename = merge_audio(audio_filename_1, audio_filename_2)
return audio_filename
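# Orchestrator: wires the audio foundation models above into LangChain tools and drives
# them with a conversational ReAct agent backed by an OpenAI LLM.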
class ConversationBot:
def __init__(self):
print("Initializing AudioGPT")
self.llm = OpenAI(temperature=0)
self.t2i = T2I(device="cuda:1")
self.i2t = ImageCaptioning(device="cuda:0")
self.t2a = T2A(device="cuda:0")
self.tts = TTS(device="cpu")
self.t2s = T2S(device="cpu")
self.i2a = I2A(device="cuda:0")
self.a2t = A2T(device="cpu")
self.asr = ASR(device="cuda:0")
self.SE_SS_SC = Speech_Enh_SS_SC(device="cuda:0")
# self.SE_SS_MC = Speech_Enh_SS_MC(device="cuda:0")
self.SS = Speech_SS(device="cuda:0")
self.inpaint = Inpaint(device="cuda:0")
self.tts_ood = TTS_OOD(device="cpu")
self.geneface = GeneFace(device="cuda:0")
self.detection = SoundDetection(device="cpu")
self.binaural = Binaural(device="cuda:0")
self.extraction = SoundExtraction(device="cuda:0")
self.TSD = TargetSoundDetection(device="cuda:0")
self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
def init_tools(self, interaction_type):
if interaction_type == 'text':
self.tools = [
Tool(name="Generate Image From User Input Text", func=self.t2i.inference,
description="useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. "
"The input to this tool should be a string, representing the text used to generate image. "),
Tool(name="Get Photo Description", func=self.i2t.inference,
description="useful for when you want to know what is inside the photo. receives image_path as input. "
"The input to this tool should be a string, representing the image_path. "),
Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
description="useful for when you want to generate an audio from a user input text and it saved it to a file."
"The input to this tool should be a string, representing the text used to generate audio."),
Tool(
name="Style Transfer", func= self.tts_ood.inference,
description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
"Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
"The input to this tool should be a comma seperated string of two, representing reference audio path and input text."),
Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
"If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ."
"If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
"Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
"The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
description="useful for when you want to convert a user input text into speech audio it saved it to a file."
"The input to this tool should be a string, representing the text used to be converted to speech."),
# Tool(name="Speech Enhancement Or Separation In Single-Channel", func=self.SE_SS_SC.inference,
# description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), "
# "or separate each speech from the speech mixture (single-channel), receives audio_path as input."
# "The input to this tool should be a string, representing the audio_path."),
Tool(name="Speech Enhancement In Single-Channel", func=self.SE_SS_SC.inference,
description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Speech Separation In Single-Channel", func=self.SS.inference,
description="useful for when you want to separate each speech from the speech mixture, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
# Tool(name="Speech Enhancement In Multi-Channel", func=self.SE_SS_MC.inference,
# description="useful for when you want to enhance the quality of the speech signal by reducing background noise (multi-channel), receives audio_path as input."
# "The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate Audio From The Image", func=self.i2a.inference,
description="useful for when you want to generate an audio based on an image."
"The input to this tool should be a string, representing the image_path. "),
Tool(name="Generate Text From The Audio", func=self.a2t.inference,
description="useful for when you want to describe an audio in text, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Audio Inpainting", func=self.inpaint.show_mel_fn,
description="useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, "
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Transcribe Speech", func=self.asr.inference,
description="useful for when you want to know the text corresponding to a human speech, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
description="useful for when you want to generate a talking human portrait video given a input audio."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Detect The Sound Event From The Audio", func=self.detection.inference,
description="useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. "
"The input to this tool should be a string, representing the audio_path. "),
Tool(name="Sythesize Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
"The input to this tool should be a string, representing the audio_path. "),
Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. "
"The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."),
Tool(name="Target Sound Detection", func=self.TSD.inference,
description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
"The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")]
self.agent = initialize_agent(
self.tools,
self.llm,
agent="conversational-react-description",
verbose=True,
memory=self.memory,
return_intermediate_steps=True,
agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
else:
self.tools = [
Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
description="useful for when you want to generate an audio from a user input text and it saved it to a file."
"The input to this tool should be a string, representing the text used to generate audio."),
Tool(
name="Style Transfer", func= self.tts_ood.inference,
description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
"Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
"The input to this tool should be a comma seperated string of two, representing reference audio path and input text."),
Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
"If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ."
"If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
"Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
"The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
description="useful for when you want to convert a user input text into speech audio it saved it to a file."
"The input to this tool should be a string, representing the text used to be converted to speech."),
Tool(name="Generate Text From The Audio", func=self.a2t.inference,
description="useful for when you want to describe an audio in text, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
description="useful for when you want to generate a talking human portrait video given a input audio."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
"The input to this tool should be a string, representing the audio_path. "),
Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. "
"The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."),
Tool(name="Target Sound Detection", func=self.TSD.inference,
description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
"The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")]
self.agent = initialize_agent(
self.tools,
self.llm,
agent="conversational-react-description",
verbose=True,
memory=self.memory,
return_intermediate_steps=True,
agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
def run_text(self, text, state):
print("===============Running run_text =============")
print("Inputs:", text, state)
print("======>Previous memory:\n %s" % self.agent.memory)
self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
res = self.agent({"input": text})
if res['intermediate_steps'] == []:
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
else:
tool = res['intermediate_steps'][0][0].tool
if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
print("======>Current memory:\n %s" % self.agent.memory)
response = re.sub('(image/\S*png)', lambda m: f'})*{m.group(0)}*', res['output'])
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
elif tool == "Transcribe Speech":
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
elif tool == "Detect The Sound Event From The Audio":
image_filename = res['intermediate_steps'][0][1]
response = res['output'] + f"*{image_filename}*"
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
elif tool == "Audio Inpainting":
audio_filename = res['intermediate_steps'][0][0].tool_input
image_filename = res['intermediate_steps'][0][1]
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(value=image_filename,visible=True), gr.Button.update(visible=True)
elif tool == "Generate a talking human portrait video given a input Audio":
video_filename = res['intermediate_steps'][0][1]
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(value=video_filename,visible=True), gr.Image.update(visible=False), gr.Button.update(visible=False)
print("======>Current memory:\n %s" % self.agent.memory)
response = re.sub('(image/\S*png)', lambda m: f'})*{m.group(0)}*', res['output'])
audio_filename = res['intermediate_steps'][0][1]
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
def run_image_or_audio(self, file, state, txt):
file_type = file.name[-3:]
if file_type == "wav":
print("===============Running run_audio =============")
print("Inputs:", file, state)
print("======>Previous memory:\n %s" % self.agent.memory)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# audio_load = whisper.load_audio(file.name)
audio_load, sr = soundfile.read(file.name)
soundfile.write(audio_filename, audio_load, samplerate = sr)
description = self.a2t.inference(audio_filename)
Human_prompt = "\nHuman: provide an audio named {}. The description is: {}. This information helps you to understand this audio, but you should use tools to finish following tasks, " \
"rather than directly imagine from my description. If you understand, say \"Received\". \n".format(audio_filename, description)
AI_prompt = "Received. "
self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
print("======>Current memory:\n %s" % self.agent.memory)
#state = state + [(f"<audio src=audio_filename controls=controls></audio>*{audio_filename}*", AI_prompt)]
state = state + [(f"*{audio_filename}*", AI_prompt)]
print("Outputs:", state)
return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False)
else:
print("===============Running run_image =============")
print("Inputs:", file, state)
print("======>Previous memory:\n %s" % self.agent.memory)
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
print("======>Auto Resize Image...")
img = Image.open(file.name)
width, height = img.size
ratio = min(512 / width, 512 / height)
width_new, height_new = (round(width * ratio), round(height * ratio))
img = img.resize((width_new, height_new))
img = img.convert('RGB')
img.save(image_filename, "PNG")
print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
description = self.i2t.inference(image_filename)
Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \
"rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description)
AI_prompt = "Received. "
self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
print("======>Current memory:\n %s" % self.agent.memory)
state = state + [(f"*{image_filename}*", AI_prompt)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False)
def speech(self, speech_input, state):
input_audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
text = self.asr.translate_english(speech_input)
print("Inputs:", text, state)
print("======>Previous memory:\n %s" % self.agent.memory)
self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
res = self.agent({"input": text})
if res['intermediate_steps'] == []:
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
output_audio_filename = self.tts.inference(response)
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
else:
tool = res['intermediate_steps'][0][0].tool
if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
print("======>Current memory:\n %s" % self.agent.memory)
response = re.sub('(image/\S*png)', lambda m: f'})*{m.group(0)}*', res['output'])
output_audio_filename = self.tts.inference(res['output'])
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
elif tool == "Transcribe Speech":
print("======>Current memory:\n %s" % self.agent.memory)
output_audio_filename = self.tts.inference(res['output'])
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
elif tool == "Detect The Sound Event From The Audio":
print("======>Current memory:\n %s" % self.agent.memory)
image_filename = res['intermediate_steps'][0][1]
output_audio_filename = self.tts.inference(res['output'])
response = res['output'] + f"*{image_filename}*"
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
elif tool == "Generate a talking human portrait video given a input Audio":
video_filename = res['intermediate_steps'][0][1]
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
output_audio_filename = self.tts.inference(res['output'])
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(value=video_filename,visible=True)
print("======>Current memory:\n %s" % self.agent.memory)
response = re.sub('(image/\S*png)', lambda m: f'})*{m.group(0)}*', res['output'])
audio_filename = res['intermediate_steps'][0][1]
Res = "The audio file has been generated and the audio is "
output_audio_filename = merge_audio(self.tts.inference(Res), audio_filename)
print(output_audio_filename)
state = state + [(text, response)]
response = res['output']
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
def inpainting(self, state, audio_filename, image_filename):
print("===============Running inpainting =============")
print("Inputs:", state)
print("======>Previous memory:\n %s" % self.agent.memory)
new_image_filename, new_audio_filename = self.inpaint.inference(audio_filename, image_filename)
AI_prompt = "Here are the predict audio and the mel spectrum." + f"*{new_audio_filename}*" + f"*{new_image_filename}*"
output_audio_filename = self.tts.inference(AI_prompt)
self.agent.memory.buffer = self.agent.memory.buffer + 'AI: ' + AI_prompt
print("======>Current memory:\n %s" % self.agent.memory)
state = state + [(f"Audio Inpainting", AI_prompt)]
print("Outputs:", state)
return state, state, gr.Image.update(visible=False), gr.Audio.update(value=new_audio_filename, visible=True), gr.Video.update(visible=False), gr.Button.update(visible=False)
def clear_audio(self):
return gr.Audio.update(value=None, visible=False)
def clear_input_audio(self):
return gr.Audio.update(value=None)
def clear_image(self):
return gr.Image.update(value=None, visible=False)
def clear_video(self):
return gr.Video.update(value=None, visible=False)
def clear_button(self):
return gr.Button.update(visible=False)
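# Gradio front end: builds the chat UI, routes text/speech/file inputs to the agent, and
# exposes widgets for audio, video, mel-spectrogram inpainting and for clearing state.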
if __name__ == '__main__':
bot = ConversationBot()
with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
with gr.Row():
gr.Markdown("## AudioGPT")
chatbot = gr.Chatbot(elem_id="chatbot", label="AudioGPT", visible=False)
state = gr.State([])
with gr.Row() as select_raws:
with gr.Column(scale=0.7):
interaction_type = gr.Radio(choices=['text', 'speech'], value='text', label='Interaction Type')
with gr.Column(scale=0.3, min_width=0):
select = gr.Button("Select")
with gr.Row(visible=False) as text_input_raws:
with gr.Column(scale=0.7):
txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
with gr.Column(scale=0.1, min_width=0):
run = gr.Button("🏃♂️Run")
with gr.Column(scale=0.1, min_width=0):
clear_txt = gr.Button("🔄Clear️")
with gr.Column(scale=0.1, min_width=0):
btn = gr.UploadButton("🖼️Upload", file_types=["image","audio"])
with gr.Row():
outaudio = gr.Audio(visible=False)
with gr.Row():
with gr.Column(scale=0.3, min_width=0):
outvideo = gr.Video(visible=False)
with gr.Row():
show_mel = gr.Image(type="filepath",tool='sketch',visible=False)
with gr.Row():
run_button = gr.Button("Predict Masked Place",visible=False)
with gr.Row(visible=False) as speech_input_raws:
with gr.Column(scale=0.7):
speech_input = gr.Audio(source="microphone", type="filepath", label="Input")
with gr.Column(scale=0.15, min_width=0):
submit_btn = gr.Button("🏃♂️Submit")
with gr.Column(scale=0.15, min_width=0):
clear_speech = gr.Button("🔄Clear️")
with gr.Row():
speech_output = gr.Audio(label="Output",visible=False)
select.click(bot.init_tools, [interaction_type], [chatbot, select_raws, text_input_raws, speech_input_raws])
txt.submit(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
txt.submit(lambda: "", None, txt)
run.click(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
run.click(lambda: "", None, txt)
btn.upload(bot.run_image_or_audio, [btn, state, txt], [chatbot, state, outaudio, outvideo])
run_button.click(bot.inpainting, [state, outaudio, show_mel], [chatbot, state, show_mel, outaudio, outvideo, run_button])
clear_txt.click(bot.memory.clear)
clear_txt.click(lambda: [], None, chatbot)
clear_txt.click(lambda: [], None, state)
clear_txt.click(lambda:None, None, txt)
clear_txt.click(bot.clear_button, None, run_button)
clear_txt.click(bot.clear_image, None, show_mel)
clear_txt.click(bot.clear_audio, None, outaudio)
clear_txt.click(bot.clear_video, None, outvideo)
submit_btn.click(bot.speech, [speech_input, state], [speech_input, speech_output, state, outvideo])
clear_speech.click(bot.clear_input_audio, None, speech_input)
clear_speech.click(bot.clear_audio, None, speech_output)
clear_speech.click(lambda: [], None, state)
clear_speech.click(bot.clear_video, None, outvideo)
demo.launch(server_name="0.0.0.0", server_port=7860, share=True) | [
"\nHuman: provide a figure named PLACEHOLDER. The description is: PLACEHOLDER. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n",
"Received. ",
"Here are the predict audio and the mel spectrum.*PLACEHOLDER**PLACEHOLDER*",
"\nHuman: provide an audio named PLACEHOLDER. The description is: PLACEHOLDER. This information helps you to understand this audio, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n"
] |
2024-01-10 | rsanjaykamath/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | fadingNA/non-1b | research3.py | import os
import requests
import json
from dotenv import load_dotenv
from bs4 import BeautifulSoup
from langchain import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI
from autogen import config_list_from_json
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
from autogen import UserProxyAgent
import autogen
# Load the .env file
load_dotenv()
browserless_api_key = os.getenv("BROWSERLESS_API_KEY")
serper_api_key = os.getenv("SERPER_API_KEY")
airtable_api_key = os.getenv("AIRTABLE_API_KEY")
config_list = config_list_from_json("OAI_CONFIG_LIST.json")
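# Query the Serper.dev Google Search API with the given input and return the raw JSON response text.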
def web_search(input):
url = "https://google.serper.dev/search"
payload = json.dumps({
"q": input,
})
headers = {
'X-API-KEY': serper_api_key,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
print(f"Response: {response.text}")
return response.text
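# Map-reduce summarization of long text with LangChain's summarize chain on gpt-3.5-turbo-16k.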
def summary_text(content, task):
text_splitter = RecursiveCharacterTextSplitter(
separators=["\n\n", "\n"], chunk_size=1000, chunk_overlap=100)
documents = text_splitter.create_documents([content])
llm = ChatOpenAI(model="gpt-3.5-turbo-16k-0613")
map_prompt = """
Write a summary of the following text for {task}:
"{text}"
SUMMARY:
"""
map_prompt_template = PromptTemplate(
template=map_prompt, input_variables=["text", "task"])
summary_chain = load_summarize_chain(
llm=llm,
chain_type='map_reduce',
map_prompt=map_prompt_template,
combine_prompt=map_prompt_template,
verbose=False
)
output = summary_chain.run(input_documents=documents, task=task)
return output
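# Render a page through the Browserless content API, strip the HTML with BeautifulSoup, and
# summarize the extracted text when it exceeds 10k characters.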
def web_scraping(objective: str, url: str):
print("Web Scraping...")
headers = {
'Cache-Control': 'no-cache',
'Content-Type': 'application/json',
}
data = {
"url": url,
}
data_json = json.dumps(data)
response = requests.post(
f"https://chrome.browserless.io/content?token={browserless_api_key}",
headers=headers,
data=data_json)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
text = soup.get_text()
print("Web Scraping Done! : ", text)
if len(text) > 10000:
output = summary_text(text, objective)
return output
else:
return text
else:
print(f"Error: {response.status_code}")
def get_airtable_records(base_id, table_id):
url = f"https://api.airtable.com/v0/{base_id}/{table_id}"
headers = {
'Authorization': f"Bearer {airtable_api_key}",
}
response = requests.request("GET", url, headers=headers)
return response.json()
def update_single_airtable_record(base_id, table_id, id, fields):
url = f"https://api.airtable.com/v0/{base_id}/{table_id}"
headers = {
'Authorization': f'Bearer {airtable_api_key}',
"Content-Type": "application/json"
}
data = {
"records": [{
"id": id,
"fields": fields
}]
}
response = requests.patch(url, headers=headers, data=json.dumps(data))
data = response.json()
return data
user_proxy = UserProxyAgent(name="user_proxy",
is_termination_msg=lambda msg: "Terminate" in msg["content"],
human_input_mode="ALWAYS",
max_consecutive_auto_reply=1)
researcher = GPTAssistantAgent(name="researcher",
llm_config={
"config_list": config_list,
"assistant_id": "asst_31CUYLlH6hCsvkN87p7jSBzg",
})
researcher.register_function(
function_map={
"web_scraping": web_scraping,
"web_search": web_search
}
)
research_manager = GPTAssistantAgent(name="research_manager",
llm_config={
"config_list": config_list,
"assistant_id": "asst_xLx5q2KxPnOkXhS0ChVPMrsG",
})
research_director = GPTAssistantAgent(name="research_director",
llm_config={
"config_list": config_list,
"assistant_id": "asst_GrQIIfBToByaL7IOeYcQMwib",
})
research_director.register_function(
function_map={
"get_airtable_records": get_airtable_records,
"update_single_airtable_record": update_single_airtable_record,
}
)
group_chat = autogen.GroupChat(agents=[user_proxy, researcher, research_manager, research_director],
messages=[], max_round=10)
group_chat_manager = autogen.GroupChatManager(groupchat=group_chat, llm_config={"config_list": config_list})
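# The GroupChatManager decides which agent speaks next on each turn (by default it asks
# the LLM to pick a speaker), for at most max_round=10 turns. Because user_proxy uses
# human_input_mode="ALWAYS", the run pauses for human input whenever user_proxy is
# selected, and any reply containing "Terminate" ends the chat (see is_termination_msg).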
message = """
Research the weather data APIs in the list below and note whether each one provides
forecast or historical data; focus on the hourly data.
list: https://airtable.com/appqSQcliGnkSLbaa/tbl74uZ3blk6CEwAE/viwivoHsMjvlq0AB8?blocks=hide
"""
user_proxy.initiate_chat(group_chat_manager, message=message) | [
"\n Write a summary the following text for {task}:\n \"{text}\"\n SUMMARY:\n "
] |
2024-01-10 | notedit/openai-play | hello_world~embeding.py |
from langchain.embeddings import OpenAIEmbeddings
import os
import openai
openai.debug = True
openai.log = 'debug'
openai.api_version = None
os.environ["OPENAI_API_TYPE"] = "open_ai"
text = "This is a test query."
embeddings = OpenAIEmbeddings(
model="text-embedding-ada-002",
)
query_result = embeddings.embed_query(text)
print(query_result)
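# embed_query returns a plain Python list of floats (1536 dimensions for
# text-embedding-ada-002). A minimal sketch for comparing two such vectors by cosine
# similarity, written without numpy to keep this script dependency-free:
def cosine_similarity(a, b):
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(y * y for y in b) ** 0.5
    return dot / (norm_a * norm_b)
# Example usage (the second query is hypothetical):
# other_result = embeddings.embed_query("This is another test query.")
# print(cosine_similarity(query_result, other_result))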
# openai
# https://tencent-openai01.openai.azure.com/openai/deployments/text-embedding-ada-002/embeddings?api-version=2022-12-01
# https://tencent-openai01.openai.azure.com/openai/deployments/text-embedding-ada-002/embeddings?api-version=2023-03-15-preview
# {'input': ['This is a test query.'], 'engine': 'text-embedding-ada-002'}
# url /openai/deployments/text-embedding-ada-002/embeddings?api-version=2022-12-01
# params {'input': ['This is a test query.'], 'encoding_format': 'base64'}
# headers None
# message='Request to OpenAI API' method=post path=https://tencent-openai01.openai.azure.com/openai/deployments/text-embedding-ada-002/embeddings?api-version=2022-12-01
# api_version=2022-12-01 data='{"input": ["This is a test query."], "encoding_format": "base64"}' message='Post details'
# https://tencent-openai01.openai.azure.com/openai/deployments/text-embedding-ada-002/embeddings?api-version=2022-12-01
# {'X-OpenAI-Client-User-Agent': '{"bindings_version": "0.27.6", "httplib": "requests", "lang": "python", "lang_version": "3.11.2", "platform": "macOS-13.2-arm64-arm-64bit", "publisher": "openai", "uname": "Darwin 22.3.0 Darwin Kernel Version 22.3.0: Thu Jan 5 20:48:54 PST 2023; root:xnu-8792.81.2~2/RELEASE_ARM64_T6000 arm64 arm"}', 'User-Agent': 'OpenAI/v1 PythonBindings/0.27.6', 'api-key': '49eb7c2c3acd41f4ac81fef59ceacbba', 'OpenAI-Debug': 'true', 'Content-Type': 'application/json'}
# {'input': ['This is a test query.'], 'engine': 'text-embedding-ada-002'}
# url /engines/text-embedding-ada-002/embeddings
# params {'input': ['This is a test query.'], 'encoding_format': 'base64'}
# headers None
# message='Request to OpenAI API' method=post path=http://localhost:8080/v1/engines/text-embedding-ada-002/embeddings
# api_version=2022-12-01 data='{"input": ["This is a test query."], "encoding_format": "base64"}' message='Post details'
# http://localhost:8080/v1/engines/text-embedding-ada-002/embeddings
# {'X-OpenAI-Client-User-Agent': '{"bindings_version": "0.27.6", "httplib": "requests", "lang": "python", "lang_version": "3.11.2", "platform": "macOS-13.2-arm64-arm-64bit", "publisher": "openai", "uname": "Darwin 22.3.0 Darwin Kernel Version 22.3.0: Thu Jan 5 20:48:54 PST 2023; root:xnu-8792.81.2~2/RELEASE_ARM64_T6000 arm64 arm"}', 'User-Agent': 'OpenAI/v1 PythonBindings/0.27.6', 'Authorization': 'Bearer 49eb7c2c3acd41f4ac81fef59ceacbba', 'OpenAI-Version': '2022-12-01', 'OpenAI-Debug': 'true', 'Content-Type': 'application/json'}
| [] |
2024-01-10 | notedit/openai-play | fine-tune~fineture.py | import os
import openai
import time
openai.debug = True
openai.log = "debug"
openai.api_key = os.getenv("OPENAI_API_KEY")
training_response = openai.File.create(
file=open("gpt3.5_output.jsonl", "rb"),
purpose='fine-tune'
)
training_file_id = training_response["id"]
print("Training file ID:", training_file_id)
response = openai.FineTuningJob.create(training_file=training_file_id, model="gpt-3.5-turbo")
print("Job ID:", response["id"])
print("Status:", response["status"])
time.sleep(10)
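# Fine-tuning jobs run asynchronously, so the single 10-second sleep above is usually not
# enough for the job to finish and "trained_tokens" remains null until it succeeds.
# A small polling sketch (for reference only, nothing below calls it):
def wait_for_job(fine_tune_job_id, poll_seconds=30):
    # Poll until the job leaves its in-progress states, then return the final job object.
    while True:
        job = openai.FineTuningJob.retrieve(fine_tune_job_id)
        if job["status"] not in ("validating_files", "queued", "running"):
            return job
        time.sleep(poll_seconds)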
job_id = response["id"]
response = openai.FineTuningJob.retrieve(job_id)
print("Job ID:", response["id"])
print("Status:", response["status"])
print("Trained Tokens:", response["trained_tokens"])
response = openai.FineTuningJob.list_events(id=job_id, limit=50)
events = response["data"]
events.reverse()
for event in events:
print(event["message"])
response = openai.FineTuningJob.retrieve(job_id)
fine_tuned_model_id = response["fine_tuned_model"]
print("Fine-tuned model ID:", fine_tuned_model_id) | [] |
2024-01-10 | annadeichler/transflower-lightning | models~cdvae.py | from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
from models.transformer import BasicTransformerModel, EncDecTransformerModel, EncDecXTransformer
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
# from dalle_pytorch import distributed_utils
# from dalle_pytorch.vae import OpenAIDiscreteVAE
# from dalle_pytorch.vae import VQGanVAE1024
# from dalle_pytorch.transformer import Transformer
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def always(val):
def inner(*args, **kwargs):
return val
return inner
def is_empty(t):
return t.nelement() == 0
def masked_mean(t, mask, dim = 1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim = 1) / mask.sum(dim = 1)[..., None]
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
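# top_k keeps only the highest (1 - thres) fraction of logits and sets the rest to -inf,
# so a later softmax + multinomial draw can never pick the filtered-out tokens; it is
# used by ConditionalDiscreteVAE.generate below when sampling codes from the prior.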
# discrete vae class
class ResBlock(nn.Module):
def __init__(self, chan):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class ConditionalDiscreteVAEVision(nn.Module):
def __init__(
self,
image_shape = (256,256),
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
num_resnet_blocks = 0,
hidden_dim = 64,
conditioning_dim = 64,
channels = 3,
smooth_l1_loss = False,
temperature = 0.9,
straight_through = False,
kl_div_loss_weight = 0.,
normalization = ((0.5,) * 3, (0.5,) * 3)
):
super().__init__()
assert log2(image_shape[0]).is_integer(), 'image size must be a power of 2'
assert log2(image_shape[1]).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.image_shape = image_shape
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
hdim = hidden_dim
enc_chans = [hidden_dim] * num_layers
dec_chans = list(reversed(enc_chans))
enc_chans = [channels, *enc_chans]
if not has_resblocks:
dec_init_chan = codebook_dim
else:
dec_init_chan = dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, 4, stride = 2, padding = 1), nn.ReLU()))
dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, 4, stride = 2, padding = 1), nn.ReLU()))
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1]))
enc_layers.append(ResBlock(enc_chans[-1]))
if num_resnet_blocks > 0:
dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))
enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))
dec_layers.append(nn.Conv2d(dec_chans[-1], channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
# take care of normalization within class
self.normalization = normalization
# self._register_external_parameters()
# def _register_external_parameters(self):
# """Register external parameters for DeepSpeed partitioning."""
# if (
# not distributed_utils.is_distributed
# or not distributed_utils.using_backend(
# distributed_utils.DeepSpeedBackend)
# ):
# return
#
# deepspeed = distributed_utils.backend.backend_module
# deepspeed.zero.register_external_parameters(self, self.codebook.weight)
def norm(self, images):
if not exists(self.normalization):
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self(images, return_logits = True)
codebook_indices = logits.argmax(dim = 1).flatten(1)
return codebook_indices
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
images = self.decoder(image_embeds)
return images
def forward(
self,
img,
return_loss = False,
return_recons = False,
return_logits = False,
temp = None
):
device, num_tokens, image_shape, kl_div_loss_weight = img.device, self.num_tokens, self.image_shape, self.kl_div_loss_weight
assert img.shape[-1] == image_shape[1] and img.shape[-2] == image_shape[0], f'input must have the correct image size {image_shape[0]}x{image_shape[1]}'
img = self.norm(img)
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
soft_one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
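        # Gumbel-softmax makes the discrete codebook choice differentiable: with
        # straight_through=True the forward pass uses hard one-hot codes while gradients
        # flow through the soft relaxation, and the temperature controls how close the
        # soft samples are to one-hot.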
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(img, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
log_qy = F.log_softmax(logits, dim = -1)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
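        # This term is the KL between the encoder's code distribution q and a uniform
        # prior over the codebook, i.e. log(num_tokens) - H(q), so it nudges the encoder
        # toward using the whole codebook; kl_div_loss_weight scales it and is 0 by
        # default, which disables the term.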
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
class ConditionalDiscreteVAE(nn.Module):
def __init__(
self,
input_shape = (256,256),
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
num_resnet_blocks = 0,
hidden_dim = 64,
cond_dim = 0,
channels = 3,
smooth_l1_loss = False,
temperature = 0.9,
straight_through = False,
kl_div_loss_weight = 0.,
normalization = None,
prior_nhead = 8,
prior_dhid = 512,
prior_nlayers = 8,
prior_dropout = 0,
prior_use_pos_emb = True,
prior_use_x_transformers = False,
opt = None,
cond_vae = False
):
super().__init__()
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.input_shape = input_shape
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
self.cond_dim = cond_dim
self.cond_vae = cond_vae
hdim = hidden_dim
enc_chans = [hidden_dim] * num_layers
dec_chans = list(reversed(enc_chans))
if cond_vae:
enc_chans = [channels + cond_dim, *enc_chans]
else:
enc_chans = [channels, *enc_chans]
if not has_resblocks:
if cond_vae:
dec_init_chan = codebook_dim + cond_dim
else:
dec_init_chan = codebook_dim
else:
dec_init_chan = dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
if input_shape[0] == 1:
kernel_size1 = 1
padding_size1 = 0
codebook_layer_shape1 = 1
elif input_shape[0] in [2,3,4]:
kernel_size1 = 3
padding_size1 = 1
codebook_layer_shape1 = input_shape[0]
else:
#kernel_size1 = 4
kernel_size1 = 3
padding_size1 = 1
#codebook_layer_shape1 = input_shape[0] - num_layers
codebook_layer_shape1 = input_shape[0]
if input_shape[1] == 1:
kernel_size2 = 1
padding_size2 = 0
codebook_layer_shape2 = 1
elif input_shape[1] in [2,3,4]:
kernel_size2 = 3
padding_size2 = 1
codebook_layer_shape2 = input_shape[1]
else:
#kernel_size2 = 4
kernel_size2 = 3
padding_size2 = 1
#codebook_layer_shape2 = input_shape[1] - num_layers
codebook_layer_shape2 = input_shape[1]
self.codebook_layer_shape = (codebook_layer_shape1,codebook_layer_shape2)
kernel_shape = (kernel_size1, kernel_size2)
padding_shape = (padding_size1, padding_size2)
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, kernel_shape, stride = 1, padding = padding_shape), nn.ReLU()))
dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, kernel_shape, stride = 1, padding = padding_shape), nn.ReLU()))
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1]))
enc_layers.append(ResBlock(enc_chans[-1]))
if num_resnet_blocks > 0:
if cond_vae:
dec_layers.insert(0, nn.Conv2d(codebook_dim + cond_dim, dec_chans[1], 1))
else:
dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))
enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))
dec_layers.append(nn.Conv2d(dec_chans[-1], channels, 1))
self.cond_upsampler = torch.nn.Upsample(size=input_shape) #upsampler to feed the conditioning to the input of the encoder
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
# take care of normalization within class
self.normalization = normalization
latent_size = codebook_layer_shape1*codebook_layer_shape2
self.latent_size = latent_size
if cond_dim > 0:
self.prior_transformer = ContDiscTransformer(cond_dim, num_tokens, codebook_dim, prior_nhead, prior_dhid, prior_nlayers, prior_dropout,
use_pos_emb=prior_use_pos_emb,
src_length=latent_size,
tgt_length=latent_size,
use_x_transformers=prior_use_x_transformers,
opt=opt)
# self._register_external_parameters()
# def _register_external_parameters(self):
# """Register external parameters for DeepSpeed partitioning."""
# if (
# not distributed_utils.is_distributed
# or not distributed_utils.using_backend(
# distributed_utils.DeepSpeedBackend)
# ):
# return
#
# deepspeed = distributed_utils.backend.backend_module
# deepspeed.zero.register_external_parameters(self, self.codebook.weight)
def norm(self, images):
if not exists(self.normalization):
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, inputs, cond=None):
logits = self(inputs, cond, return_logits = True)
codebook_indices = logits.argmax(dim = 1).flatten(1)
return codebook_indices
def decode(
self,
img_seq,
cond = None
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
if cond is not None:
image_embeds_cond = torch.cat([image_embeds, cond], dim = 1)
images = self.decoder(image_embeds_cond)
else:
images = self.decoder(image_embeds)
return images
def prior_logp(
self,
inputs,
cond = None,
return_accuracy = False,
detach_cond = False
):
# import pdb;pdb.set_trace()
#if cond is None: raise NotImplementedError("Haven't implemented non-conditional DVAEs")
if len(inputs.shape) == 3:
inputs = inputs.reshape(inputs.shape[0], inputs.shape[1],*self.input_shape)
if len(cond.shape) == 3:
cond = cond.reshape(cond.shape[0], cond.shape[1],*self.codebook_layer_shape)
with torch.no_grad():
if self.cond_vae:
labels = self.get_codebook_indices(inputs, cond)
else:
labels = self.get_codebook_indices(inputs)
if detach_cond:
cond = cond.detach()
logits = self.prior_transformer(cond.squeeze(-1).permute(2,0,1), labels.permute(1,0)).permute(1,2,0)
loss = F.cross_entropy(logits, labels)
if not return_accuracy:
return loss
# import pdb;pdb.set_trace()
predicted = logits.argmax(dim = 1).flatten(1)
accuracy = (predicted == labels).sum()/predicted.nelement()
return loss, accuracy
def generate(self, cond, temp=1.0, filter_thres = 0.5):
#if cond is None: raise NotImplementedError("Haven't implemented non-conditional DVAEs")
if len(cond.shape) == 3:
cond = cond.reshape(cond.shape[0], cond.shape[1],*self.codebook_layer_shape)
dummy = torch.zeros(1,1).long().to(cond.device)
tokens = []
for i in range(self.latent_size):
# print(i)
logits = self.prior_transformer(cond.squeeze(-1).permute(2,0,1), torch.cat(tokens+[dummy], 0)).permute(1,2,0)[:,-1,:]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temp, dim = -1)
sampled = torch.multinomial(probs, 1)
tokens.append(sampled)
print(tokens)
embs = self.codebook(torch.cat(tokens, 0))
# import pdb;pdb.set_trace()
if self.cond_vae:
sampled_cond = torch.cat([embs.permute(2,0,1).unsqueeze(0),cond], dim=1)
else:
sampled_cond = embs.permute(2,0,1).unsqueeze(0)
out = self.decoder(sampled_cond)
return out
def forward(
self,
inp,
cond = None,
return_loss = False,
return_recons = False,
return_logits = False,
temp = None
):
if len(inp.shape) == 3:
inp = inp.reshape(inp.shape[0], inp.shape[1],*self.input_shape)
device, num_tokens, input_shape, kl_div_loss_weight = inp.device, self.num_tokens, self.input_shape, self.kl_div_loss_weight
        assert inp.shape[-1] == input_shape[1] and inp.shape[-2] == input_shape[0], f'input must have the correct image size {input_shape[0]}x{input_shape[1]}. Instead got {inp.shape[-2]}x{inp.shape[-1]}'
inp = self.norm(inp)
if cond is not None:
if len(cond.shape) == 3:
cond = cond.reshape(cond.shape[0], cond.shape[1],*self.codebook_layer_shape)
cond_upsampled = self.cond_upsampler(cond)
inp_cond = torch.cat([inp,cond_upsampled], dim=1)
inp_cond = self.norm(inp_cond)
else:
inp_cond = self.norm(inp)
logits = self.encoder(inp_cond)
# codebook_indices = logits.argmax(dim = 1).flatten(1)
# print(codebook_indices.shape)
# print(codebook_indices)
# print(list(self.encoder.parameters())[1].data)
# for p in self.prior_transformer.parameters():
# print(p.norm())
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
soft_one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
if cond is not None:
sampled_cond = torch.cat([sampled,cond], dim=1)
out = self.decoder(sampled_cond)
else:
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
# import pdb;pdb.set_trace()
recon_loss = self.loss_fn(inp, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
log_qy = F.log_softmax(logits, dim = -1)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
class ContDiscTransformer(nn.Module):
def __init__(self, src_d, tgt_num_tokens, tgt_emb_dim, nhead, dhid, nlayers, dropout=0.5,use_pos_emb=False,src_length=0,tgt_length=0,use_x_transformers=False,opt=None):
super(ContDiscTransformer, self).__init__()
self.transformer = EncDecTransformerModel(tgt_num_tokens, src_d, tgt_emb_dim, nhead, dhid, nlayers, dropout=dropout,use_pos_emb=use_pos_emb,src_length=src_length,tgt_length=tgt_length,use_x_transformers=use_x_transformers,opt=opt)
#self.transformer = EncDecTransformerModel(tgt_num_tokens, src_d, tgt_emb_dim, nhead, dhid, nlayers, dropout=dropout,use_pos_emb=False,src_length=src_length,tgt_length=tgt_length,use_x_transformers=use_x_transformers,opt=opt)
# self.transformer = EncDecXTransformer(dim=dhid, dec_dim_out=tgt_num_tokens, enc_dim_in=src_d, enc_dim_out=tgt_emb_dim, dec_din_in=tgt_emb_dim, enc_heads=nhead, dec_heads=nhead, enc_depth=nlayers, dec_depth=nlayers, enc_dropout=dropout, dec_dropout=dropout, enc_max_seq_len=1024, dec_max_seq_len=1024)
self.embedding = nn.Embedding(tgt_num_tokens, tgt_emb_dim)
self.first_input = nn.Parameter((torch.randn(1,1,tgt_emb_dim)))
def forward(self, src, tgt):
tgt = tgt[:-1]
embs = self.embedding(tgt)
embs = torch.cat([torch.tile(self.first_input, (1,embs.shape[1],1)), embs], 0)
output = self.transformer(src,embs)
return output
| [] |
2024-01-10 | nikhilmenon06/big-ai-summit | app_chat.py | import streamlit as st
from streamlit_chat import message
from streamlit_option_menu import option_menu
from utils import get_initial_message, get_chatgpt_response, update_chat
import os
from dotenv import load_dotenv
from jinja2 import Template
import openai
from dataclasses import dataclass
load_dotenv()
@dataclass
class Const:
MODEL_NAME: str = "gpt-4"
GUILD_RULES_NAME: str = "Assassins_Guilds_Rules_2023_v06-New"
openai.api_key = os.getenv('OPENAI_API_KEY')
# For Streamlit deployment, the API key is added to Streamlit secrets in the app settings (during/after deployment)
# openai.api_key = st.secrets["OPENAI_API_KEY"]
def main():
st.set_page_config(page_title="Chatbot Application", page_icon=":robot_face:", layout="centered")
st.image('assets/big_ai.png', width=700)
selected_page = option_menu(None, ["Editor", "Chat"], icons=['edit', 'comments'], menu_icon="bars", default_index=0,
orientation="horizontal", styles={"nav-link-selected": {"background-color": "#7D9338"}})
if selected_page == "Editor":
editor()
elif selected_page == "Chat":
chat()
def chat():
print("On Chat...................\n")
    if 'prompt' not in st.session_state:
        st.session_state.prompt = ""
    print(st.session_state.prompt)
if 'query' not in st.session_state:
st.session_state.query = ''
if st.session_state.bug_flag == 1:
st.warning("Oops, your previous message was sent to the model again", icon="🤖")
def submit():
st.session_state.query = st.session_state.input
st.session_state.input = ''
model = Const.MODEL_NAME
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
def display():
for i in range(len(st.session_state['generated']) - 1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
st.text_input("Query: ", key="input", on_change=submit)
if 'messages' not in st.session_state:
st.session_state['messages'] = get_initial_message()
st.session_state['messages'] = update_chat(st.session_state['messages'], "system", st.session_state['prompt'])
if st.session_state.query:
with st.spinner("generating..."):
messages = st.session_state['messages']
messages = update_chat(messages, "user", st.session_state.query)
# st.write("Before making the API call")
# st.write(messages)
response = get_chatgpt_response(messages, model)
messages = update_chat(messages, "assistant", response)
st.session_state.past.append(st.session_state.query)
st.session_state.generated.append(response)
display()
# st.experimental_rerun()
def editor():
def filename_display(filename: str):
filename, ext = os.path.splitext(filename)
split_filename = filename.split("_")
mod_filename = ""
for part in split_filename:
mod_filename = mod_filename + part.capitalize() + " "
mod_filename = mod_filename.strip()
return mod_filename
def update_text_area(selected_rag_contents_pass):
        guild_rules_clean = Const.GUILD_RULES_NAME.lower().replace('-', '_')
template = Template(st.session_state["text_widget"].replace(Const.GUILD_RULES_NAME, guild_rules_clean))
prompt_template_jinja_variable = guild_rules_clean
dict_to_render = {prompt_template_jinja_variable: selected_rag_contents_pass}
st.session_state.text_widget = template.render(dict_to_render)
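        # The prompt file is treated as a Jinja template: the placeholder named after
        # GUILD_RULES_NAME is filled with the full text of the selected rules file
        # (plain context stuffing rather than retrieval), and the rendered result
        # becomes the system prompt used on the Chat page.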
st.session_state.prompt = st.session_state.text_widget
# print("Prompt session variable after callback")
# print(st.session_state.prompt)
# print("Text Widget session variable after callback")
# print(st.session_state.text_widget)
if 'generated' in st.session_state:
st.session_state.bug_flag = 1
# st.warning("Oops, your previous message was sent to the model again", icon = "🤖")
else:
st.session_state.bug_flag = 0
if 'prompt' not in st.session_state:
st.session_state.prompt = ""
if 'text_widget' not in st.session_state:
st.session_state.text_widget = ""
script_dir = os.path.dirname(os.path.abspath(__file__))
# Path to /prompts subdirectory
prompts_dir = os.path.join(script_dir, 'prompts')
# List all files in the /prompts directory
files = [f for f in os.listdir(prompts_dir) if os.path.isfile(os.path.join(prompts_dir, f))]
# Add a select box for all files
selected_file = st.selectbox('Select a file:', files, format_func=filename_display)
selected_file_path = os.path.join(prompts_dir, selected_file)
with open(selected_file_path, 'r') as prompt_template_file:
prompt_template_file_content = prompt_template_file.read()
# template = Template(prompt_template_file_content.replace(Const.GUILD_RULES_NAME,
# Const.GUILD_RULES_NAME.lower().lower().replace('-',
# '_')))
# st.session_state.prompt = prompt_template_file_content
# List all files in the /rules directory
rules_dir = os.path.join(script_dir, 'rules')
files = [f for f in os.listdir(rules_dir) if os.path.isfile(os.path.join(rules_dir, f))]
with st.expander("Select data source"):
selected_rag_file = st.selectbox('', files, format_func=filename_display, index=0)
selected_rag_path = os.path.join(rules_dir, selected_rag_file)
with open(selected_rag_path, "r") as file:
selected_rag_contents = file.read()
st.text_area(label="Write your prompt here:", height=200, key="text_widget", value=prompt_template_file_content)
if st.button("Save Prompt", on_click=update_text_area, args=(selected_rag_contents,)):
st.success("Prompt saved successfully!")
st.markdown(
'Like it? Want to talk? Get in touch! <a href="mailto:[email protected]">Leonid Sokolov !</a> // '
'<a href="mailto:[email protected]">Imke Bewersdorf</a>',
unsafe_allow_html=True)
if __name__ == "__main__":
main()
| [
"text_widget"
] |
2024-01-10 | ahmad2b/cinematch | Home.py | import json
import streamlit as st
from pydantic import ValidationError
from db import create_database_connection, UserOperations, MovieOperations, UserBase
from tmdb_api import MovieDB, MovieResponse
from openai_api import AsyncManager, OpenAIBot, MessageItem
database_engine = create_database_connection()
user_operations = UserOperations(database_engine)
movie_operations = MovieOperations(database_engine)
if "username" not in st.session_state:
st.session_state["username"] = ""
if "user" not in st.session_state:
st.session_state["user"] = None
async_manager = AsyncManager()
movie_database = MovieDB()
openai_bot = OpenAIBot()
st.title("Cinematch: Your Movie Mood Matcher :popcorn:")
st.markdown("---")
st.write(
"""
Cinematch is an interactive movie recommendation system built using Streamlit and the OpenAI/Gemini API.
It leverages the power of AI to provide personalized movie recommendations based on user preferences.
"""
)
sidebar = st.sidebar
# Sidebar for user login
with sidebar:
if st.session_state["username"] != "":
st.subheader(f"Welcome, {st.session_state['username']}! :wave:")
if st.button("Logout"):
st.session_state["username"] = ""
st.header("Your Watch-list 🎬")
wishlist = movie_operations.get_movies_for_user(st.session_state["user"].id)
for movie in wishlist:
st.markdown("---")
st.image(f"https://image.tmdb.org/t/p/w500{movie.image}", width=80)
st.markdown(f"**{movie.title}**")
if st.button("Remove ❌", key=f"delete_{movie.id}"):
movie_operations.delete_movie_by_id(
st.session_state["user"].id, movie.id
)
st.success(f"**{movie.title}** has been removed from your watch-list.")
else:
if "show_form" not in st.session_state:
st.session_state["show_form"] = "login"
if st.button("Join Cinematch! 🎉"):
st.session_state["show_form"] = "signup"
if st.button("Already a member? Login 🔑"):
st.session_state["show_form"] = "login"
if st.session_state["show_form"] == "login":
with st.form(key="login_form"):
st.subheader("Login")
st.write("Please enter your username and password to login.")
username = st.text_input("👤 Username")
password = st.text_input("🔒 Password", type="password")
submit_button = st.form_submit_button("🚀 Login")
if submit_button:
# Authenticate user
response = user_operations.authenticate_user(username, password)
if response["status"] == "success":
st.session_state["username"] = username
st.session_state["user"] = response["user"]
st.success(f"{username} Logged in successfully! 🎉")
else:
st.error(response["message"])
elif st.session_state["show_form"] == "signup":
with st.form(key="signup_form"):
st.subheader("Sign Up")
st.write("Please fill in the following details to create an account.")
username = st.text_input("👤 Username")
password = st.text_input("🔒 Password", type="password")
submit_button = st.form_submit_button(":door: Sign Up")
if submit_button:
try:
# Create user
user_data = UserBase(username=username, password=password)
response = user_operations.register_new_user(user_data)
if response["status"] == "success":
st.success(
f"{username} Registered successfully! \n Please login to continue"
)
else:
st.error(response["message"])
except ValidationError as e:
for error in e.errors():
st.error(
f"Validation error in field '{error['loc'][0]}': {error['msg']}"
)
# User input for movie preferences
st.header("Help us help you find your next movie :tv:")
available_movie_genres = movie_database.get_movie_genres()
list_of_genres = [genre.name for genre in available_movie_genres.genres]
with st.form(key="movie_preferences_form"):
selected_movie_genres = st.multiselect(
"What's your flavor for the day? :icecream:",
list_of_genres,
placeholder="Pick a genre, any genre!",
)
user_movie_preference = st.text_input(
"Got a movie idea in mind? :thinking_face:", placeholder="Type it here!"
)
movie_rating_range = st.slider(
"How picky are we feeling today? :sunglasses:", 0.0, 10.0, (0.0, 10.0)
)
submit_button = st.form_submit_button("Find my movie match! :heart:")
# Button to start the recommendation process
if submit_button:
# get keywords and query from the openai bot
message = f"{[selected_movie_genres]} + {[user_movie_preference]} + {[str(movie_rating_range[0]), str(movie_rating_range[1])]}"
query = openai_bot.send_message(message)
# print("query: ", query)
if openai_bot.isCompleted():
# print("completed: ")
_response: MessageItem = openai_bot.get_lastest_response()
# print("response: ", _response.content)
content = _response.content.strip("`").replace("json", "").strip()
params = json.loads(content)
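        # The assistant reply is expected to be a JSON object of TMDB "discover" query
        # parameters, e.g. (hypothetical shape, depending on the prompt inside OpenAIBot):
        # {"with_genres": "28,12", "vote_average.gte": 6.0, "vote_average.lte": 8.5}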
# st.markdown(params)
movies = movie_database.discover_movies_with_params(params)
# print("movies: ", movies)
# Store the search results in the session state
st.session_state["search_results"] = movies.results
if "search_results" in st.session_state:
for movie in st.session_state["search_results"]:
st.markdown(f"<div >", unsafe_allow_html=True)
# Display movie poster
st.image(
f"https://image.tmdb.org/t/p/w500{movie.poster_path}", width=340
) # Adjust the width as needed
# Display movie title
st.markdown(
f"<div style='font-size: 20px; font-weight: bold; overflow: hidden; text-overflow: ellipsis'>{movie.title}</div>",
unsafe_allow_html=True,
)
# Display movie details
st.markdown(
f"<div style='color: #f4a261; font-size: 16px; margin-top: 8px; margin-bottom: 4px'>Rating: <b>{movie.vote_average}</b></div>",
unsafe_allow_html=True,
)
st.write(movie.release_date)
if st.button(
"Add to my Watch-list! :popcorn:",
key=f"watchlist_button_{movie.title}",
):
if (
"user" in st.session_state
and st.session_state["user"] is not None
):
movie_operations.add_movie_for_user(
st.session_state["user"].id, movie.title, movie.poster_path
)
st.success(
f"**{movie.title}** is on your watch-list! :partying_face:"
)
st.balloons()
else:
st.error(
"You must be logged in to add movies to your watchlist."
)
st.markdown("---")
# Get the movies from the API
movies = movie_database.discover_movies()
st.header("Here are some movies you might fancy :film_projector:")
# Display each movie in a grid
for i in range(
0, len(movies.results), 2
): # Adjust the step size to change the number of columns
cols = st.columns(2) # Adjust the number of columns as needed
for j in range(2): # Adjust the range to match the number of columns
if i + j < len(movies.results):
movie = movies.results[i + j]
with cols[j]:
st.markdown(f"<div >", unsafe_allow_html=True)
# Display movie poster
st.image(
f"https://image.tmdb.org/t/p/w500{movie.poster_path}", width=340
) # Adjust the width as needed
# Display movie title
st.markdown(
f"<div style='font-size: 20px; font-weight: bold; overflow: hidden; text-overflow: ellipsis'>{movie.title}</div>",
unsafe_allow_html=True,
)
# Display movie details
st.markdown(
f"<div style='color: #f4a261; font-size: 16px; margin-top: 8px; margin-bottom: 4px'>Rating: <b>{movie.vote_average}</b></div>",
unsafe_allow_html=True,
)
st.write(movie.release_date)
# st.markdown(f"<div style='color: #808080; font-size: 16px;'>{movie.overview}</div>", unsafe_allow_html=True)
if st.button(
"Add to my Watch-list! :popcorn:",
key=f"watchlist_button_{movie.id}",
):
if st.session_state["user"] is not None:
movie_operations.add_movie_for_user(
st.session_state["user"].id, movie.title, movie.poster_path
)
st.success(
f"**{movie.title}** is on your watch-list! :partying_face:"
)
st.balloons()
else:
st.error(
"You must be logged in to add movies to your watchlist."
)
| [] |
2024-01-10 | herlesupreeth/OAI-5G | targets~TEST~OAI~case04.py | #******************************************************************************
# OpenAirInterface
# Copyright(c) 1999 - 2014 Eurecom
# OpenAirInterface is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenAirInterface is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with OpenAirInterface.The full GNU General Public License is
# included in this distribution in the file called "COPYING". If not,
# see <http://www.gnu.org/licenses/>.
# Contact Information
# OpenAirInterface Admin: [email protected]
# OpenAirInterface Tech : [email protected]
# OpenAirInterface Dev : [email protected]
# Address : Eurecom, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia Antipolis cedex, FRANCE
#*******************************************************************************/
# \file case04.py
# \brief test case 04 for OAI: executions
# \author Navid Nikaein
# \date 2013 - 2015
# \version 0.1
# @ingroup _test
import time
import random
import log
import openair
import core
NUM_UE=2
NUM_eNB=1
NUM_TRIALS=3
def execute(oai, user, pw, host, logfile,logdir,debug):
case = '04'
oai.send('cd $OPENAIR1_DIR;')
oai.send('cd SIMULATION/LTE_PHY;')
try:
log.start()
test = '00'
name = 'Perf oai.dlsim.sanity'
conf = '-a -A AWGN -n 100'
diag = 'dlsim is not running normally (Segmentation fault / Exiting / FATAL), debugging might be needed'
trace = logdir + '/log_' + host + case + test + '_1.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./dlsim.rel8.' + host + ' ' + conf + tee, 'Segmentation fault', 30)
trace = logdir + '/log_' + host + case + test + '_2.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./dlsim.rel8.' + host + ' ' + conf + tee, 'Exiting', 30)
trace = logdir + '/log_' + host + case + test + '_3.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./dlsim.rel8.' + host + ' ' + conf + tee, 'FATAL', 30)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '01'
name = 'Perf oai.dlsim.test1'
diag = 'Test 1, 10 MHz, R2.FDD (MCS 5), EVA5, -1dB'
conf = '-m5 -gF -s-1 -w1.0 -f.2 -n500 -B50 -c2 -z2 -O70 -L'
trace = logdir + '/log_' + host + case + test +'.txt'
tee = ' 2>&1 | tee ' + trace
cmd = 'taskset -c 0 ./dlsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
# try:
# test = '05'
# name = 'Perf oai.dlsim.test5'
# diag = 'Test 5, 1.4 MHz, R4.FDD (MCS 4), EVA5, 0dB (70%)'
# conf = '-m4 -gF -s0 -w1.0 -f.2 -n500 -B6 -c4 -z2 -O70'
# trace = logdir + '/log_' + host + case + test + '.txt'
# tee = ' 2>&1 | tee ' + trace
# cmd = 'taskset -c 0 ./dlsim.rel8.' + host + ' ' + conf + tee
# oai.send_expect(cmd, 'passed', 150)
# except log.err, e:
# log.fail(case, test, name, conf, e.value, diag, logfile,trace)
# else:
# log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '06'
name = 'Perf oai.dlsim.test6'
diag = 'Test 6, 10 MHz, R3.FDD (MCS 15), EVA5, 6.7dB (70%)'
conf = '-m15 -gF -s6.7 -w1.0 -f.2 -n500 -B50 -c2 -z2 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
cmd = 'taskset -c 0 ./dlsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '06b'
name = 'Perf oai.dlsim.test6b'
diag = 'Test 6b, 5 MHz, R3-1.FDD (MCS 15), EVA5, 6.7dB (70%)'
conf = '-m14 -gF -s6.7 -w1.0 -f.2 -n500 -B25 -c3 -z2 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
cmd = 'taskset -c 0 ./dlsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '07'
name = 'Perf oai.dlsim.test7'
        diag = 'Test 7, 10 MHz, R3.FDD (MCS 15), EVA5, 6.7dB (30%)'
conf = '-m15 -gG -s6.7 -w1.0 -f.2 -n500 -B50 -c2 -z2 -O30 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
cmd = 'taskset -c 0 ./dlsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '07b'
name = 'Perf oai.dlsim.test7b'
diag = 'Test 7b, 5 MHz, R3-1.FDD (MCS 15), ETU70, 1.4 dB (30%)'
conf = '-m14 -gG -s1.4 -w1.0 -f.2 -n500 -B25 -c3 -z2 -O30 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
cmd = 'taskset -c 0 ./dlsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '10'
name = 'Perf oai.dlsim.test10'
diag = 'Test 10, 5 MHz, R6.FDD (MCS 25), EVA5, 17.4 dB (70%)'
conf = '-m25 -gF -s17.4 -w1.0 -f.2 -n500 -B25 -c3 -z2 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
cmd = 'taskset -c 0 ./dlsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '10b'
name = 'Perf oai.dlsim.test10b'
diag = 'Test 10b, 5 MHz, R6-1.FDD (MCS 24,18 PRB), EVA5, 17.5dB (70%)'
conf = '-m25 -gF -s17.5 -w1.0 -f.2 -n500 -B25 -c3 -z2 -r1022 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
cmd = 'taskset -c 0 ./dlsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '11'
name = 'Perf oai.dlsim.test11'
diag = 'Test 11, 10 MHz, R7.FDD (MCS 25), EVA5, 17.7dB (70%)'
conf = '-m26 -gF -s17.7 -w1.0 -f.2 -n500 -B50 -c2 -z2 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
cmd = 'taskset -c 0 ./dlsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
| [] |
2024-01-10 | herlesupreeth/OAI-5G | targets~TEST~OAI~case01.py | #******************************************************************************
# OpenAirInterface
# Copyright(c) 1999 - 2014 Eurecom
# OpenAirInterface is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenAirInterface is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with OpenAirInterface.The full GNU General Public License is
# included in this distribution in the file called "COPYING". If not,
# see <http://www.gnu.org/licenses/>.
# Contact Information
# OpenAirInterface Admin: [email protected]
# OpenAirInterface Tech : [email protected]
# OpenAirInterface Dev : [email protected]
# Address : Eurecom, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia Antipolis cedex, FRANCE
#*******************************************************************************/
# \file case01.py
# \brief test case 01 for OAI: compilations
# \author Navid Nikaein
# \date 2013 - 2015
# \version 0.1
# @ingroup _test
import log
import openair
import core
makerr1 = '***'
makerr2 = 'Error 1'
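# These marker strings are what the build steps below grep for in the make output:
# send_expect_false treats the appearance of the marker ('***' or 'Error 1' from make)
# before the timeout as a failure, while send_expect requires the given string to appear.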
def execute(oai, user, pw, host, logfile,logdir,debug,timeout):
case = '01'
rv = 1
oai.send_recv('cd $OPENAIR_TARGETS;')
try:
log.start()
test = '00'
name = 'Check oai.svn.add'
conf = 'svn st -q | grep makefile'
diag = 'Makefile(s) changed. If you are adding a new file, make sure that it is added to the svn'
rsp = oai.send_recv('svn st -q | grep -i makefile;')
for item in rsp.split("\n"):
if "Makefile" in item:
rsp2=item.strip() + '\n'
oai.find_false_re(rsp,'Makefile')
except log.err, e:
diag = diag + "\n" + rsp2
#log.skip(case, test, name, conf, e.value, logfile)
log.skip(case, test, name, conf, '', diag, logfile)
else:
log.ok(case, test, name, conf, '', logfile)
oai.send('cd SIMU/USER;')
oai.send('mkdir ' + logdir + ';')
try:
log.start()
test = '01'
name = 'Compile oai.rel8.make'
conf = 'make'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = "check the compilation errors for oai"
oai.send('make cleanall;')
oai.send('make cleanasn1;')
oai.send('rm -f ./oaisim.rel8.'+host)
oai.send_expect_false('make -j4 JF=1' + tee, makerr1, timeout)
oai.send('cp ./oaisim ./oaisim.rel8.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
rv = 0
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '02'
name = 'Compile oai.rel8.nas.make'
conf = 'make nasmesh_fix; make LINK_ENB_PDCP_TO_IP_DRIVER=1'
diag = 'check the compilation errors for oai and nas driver'
oai.send('make cleanall;')
oai.send('rm -f ./oaisim.rel8.nas'+host)
oai.send('rm -f ./nasmesh;')
oai.send('make nasmesh_clean;')
trace = logdir + '/log_' + case + test + '_1.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('make nasmesh_fix' + tee, makerr1, 60)
oai.send('cp $OPENAIR2_DIR/NAS/DRIVER/MESH/nasmesh.ko .')
trace = logdir + '/log_' + case + test + '_2.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('make LINK_ENB_PDCP_TO_IP_DRIVER=1 JF=1 -j4' + tee, makerr1, timeout)
oai.send('cp ./oaisim ./oaisim.rel8.nas.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
rv = 0
else:
log.ok(case, test, name, conf, '', logfile)
oai.send('cd $OPENAIR_TARGETS;')
oai.send('cd RT/USER;')
try:
log.start()
test = '03'
name = 'Compile oai.rel8.rf.make'
conf = 'make RTAI=0 EXMIMO=1 Rel8=1'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'check the compilation errors for Rel8'
oai.send('make cleanall;')
oai.send('rm -f ./oaisim.rel8.rf.'+host)
oai.send_expect_false('make RTAI=0 EXMIMO=1 -j4' + tee, makerr1, timeout)
oai.send('cp ./oaisim ./oaisim.rel8.rf.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
oai.send('cd $OPENAIR1_DIR;')
oai.send('cd SIMULATION/LTE_PHY;')
try:
log.start()
test = '04'
name = 'Compile oai.rel8.phy.dlsim.make'
conf = 'make dlsim'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'check the compilation errors for dlsim in $OPENAIR1_DIR/SIMULATION/LTE_PHY'
oai.send('make clean;')
oai.send('rm -f ./dlsim.rel8.'+host)
oai.send_expect_false('make dlsim -j4 PERFECT_CE=1' + tee, makerr1, timeout)
oai.send('cp ./dlsim ./dlsim.rel8.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
rv = 0
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '05'
name = 'Compile oai.rel8.phy.ulsim.make'
conf = 'make ulsim'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'check the compilation errors for ulsim in $OPENAIR1_DIR/SIMULATION/LTE_PHY'
oai.send('make clean;')
oai.send('rm -f ./ulsim.rel8.'+host)
oai.send_expect_false('make ulsim -j4' + tee, makerr1, timeout)
oai.send('cp ./ulsim ./ulsim.rel8.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
rv = 0
else:
log.ok(case, test, name, conf, '', logfile)
oai.send('cd $OPENAIR_TARGETS;')
oai.send('cd SIMU/USER;')
try:
log.start()
test = '06'
name = 'Compile oai.rel8.itti.make'
conf = 'make DISABLE_XER_PRINT=1 ENABLE_ITTI=1 Rel8=1'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'check the compilation errors for ITTI Rel8'
oai.send('make clean;')
oai.send('rm -f ./oaisim.rel8.itti.'+host)
oai.send_expect_false('make DISABLE_XER_PRINT=1 ENABLE_ITTI=1 Rel8=1 -j4' + tee, makerr1, timeout)
oai.send('cp ./oaisim ./oaisim.rel8.itti.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
rv = 0
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '07'
name = 'Compile oai.rel10.make'
conf = 'make RLC_STOP_ON_LOST_PDU=1 Rel10=1'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'check the compilation errors for Rel10'
oai.send('make clean;')
oai.send('make cleanall;')
oai.send('make cleanasn1;')
oai.send('rm -f ./oaisim.rel10.'+host)
oai.send_expect_false('make RLC_STOP_ON_LOST_PDU=1 Rel10=1 -j4' + tee, makerr1, timeout)
oai.send('cp ./oaisim ./oaisim.rel10.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
rv = 0
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '08'
name = 'Compile oai.rel10.itti.make'
conf = 'make DISABLE_XER_PRINT=1 ENABLE_ITTI=1 RLC_STOP_ON_LOST_PDU=1 Rel10=1'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'check the compilation errors for ITTI Rel10'
oai.send('make cleanall;')
oai.send('rm -f ./oaisim.rel10.itti.'+host)
oai.send_expect_false('make DISABLE_XER_PRINT=1 ENABLE_ITTI=1 RLC_STOP_ON_LOST_PDU=1 Rel10=1 -j4' + tee, makerr1, timeout)
oai.send('cp ./oaisim ./oaisim.rel10.itti.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
rv = 0
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '13'
name = 'Compile oai_nw_ether IP driver'
conf = 'make oai_nw_drv'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'check the compilation errors for ITTI Rel8'
oai.send('make clean;')
oai.send('make cleanall;')
oai.send('make cleanasn1;')
oai.send('rm -f ./oai_nw_drv;')
oai.send('make oai_nw_drv_clean;')
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('make oai_nw_drv' + tee, makerr1, 60)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '14'
name = 'Compile oai.rel8.itti.ral.make'
conf = 'make DISABLE_XER_PRINT=1 LINK_ENB_PDCP_TO_IP_DRIVER=1 OAI_NW_DRIVER_TYPE_ETHERNET=1 ENABLE_ITTI=1 USER_MODE=1 OPENAIR2=1 ENABLE_RAL=1 MIH_C_MEDIEVAL_EXTENSIONS=1 RLC_STOP_ON_LOST_PDU=1 Rel8=1'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'check the compilation errors for ITTI Rel8'
oai.send('make clean;')
oai.send('make cleanall;')
oai.send('make cleanasn1;')
oai.send('rm -f ./oaisim.rel8.itti.ral.'+host)
oai.send_expect_false('make DISABLE_XER_PRINT=1 LINK_ENB_PDCP_TO_IP_DRIVER=1 OAI_NW_DRIVER_TYPE_ETHERNET=1 ENABLE_ITTI=1 USER_MODE=1 OPENAIR2=1 ENABLE_RAL=1 MIH_C_MEDIEVAL_EXTENSIONS=1 RLC_STOP_ON_LOST_PDU=1 Rel8=1 -j4' + tee, makerr1, timeout)
oai.send('cp ./oaisim ./oaisim.rel8.itti.ral.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '15'
name = 'Compile oai.rel10.itti.ral.make'
conf = 'make DISABLE_XER_PRINT=1 LINK_ENB_PDCP_TO_IP_DRIVER=1 OAI_NW_DRIVER_TYPE_ETHERNET=1 ENABLE_ITTI=1 USER_MODE=1 OPENAIR2=1 ENABLE_RAL=1 MIH_C_MEDIEVAL_EXTENSIONS=1 RLC_STOP_ON_LOST_PDU=1 Rel10=1'
trace = logdir + '/log_' + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'check the compilation errors for ITTI Rel10'
oai.send('make clean;')
oai.send('make cleanall;')
oai.send('make cleanasn1;')
oai.send('rm -f ./oaisim.rel10.itti.ral.'+host)
oai.send_expect_false('make DISABLE_XER_PRINT=1 LINK_ENB_PDCP_TO_IP_DRIVER=1 OAI_NW_DRIVER_TYPE_ETHERNET=1 ENABLE_ITTI=1 USER_MODE=1 OPENAIR2=1 ENABLE_RAL=1 MIH_C_MEDIEVAL_EXTENSIONS=1 RLC_STOP_ON_LOST_PDU=1 Rel10=1 -j4' + tee, makerr1, timeout)
oai.send('cp ./oaisim ./oaisim.rel10.itti.ral.'+host)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
return rv
| [] |
2024-01-10 | herlesupreeth/OAI-5G | targets~TEST~OAI~case05.py | #******************************************************************************
# OpenAirInterface
# Copyright(c) 1999 - 2014 Eurecom
# OpenAirInterface is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenAirInterface is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with OpenAirInterface.The full GNU General Public License is
# included in this distribution in the file called "COPYING". If not,
# see <http://www.gnu.org/licenses/>.
# Contact Information
# OpenAirInterface Admin: [email protected]
# OpenAirInterface Tech : [email protected]
# OpenAirInterface Dev : [email protected]
# Address : Eurecom, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia Antipolis cedex, FRANCE
#*******************************************************************************/
# \file case05.py
# \brief test case 05 for OAI: executions
# \author Navid Nikaein
# \date 2013 - 2015
# \version 0.1
# @ingroup _test
import time
import random
import log
import openair
import core
NUM_UE=2
NUM_eNB=1
NUM_TRIALS=3
def execute(oai, user, pw, host,logfile,logdir,debug):
case = '04'
oai.send('cd $OPENAIR1_DIR;')
oai.send('cd SIMULATION/LTE_PHY;')
try:
log.start()
test = '00'
name = 'Perf oai.ulsim.sanity'
conf = '-a -A AWGN -n 100'
diag = 'ulsim is not running normally (Segmentation fault / Exiting / FATAL), debugging might be needed'
trace = logdir + '/log_' + host + case + test + '_1.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./ulsim.rel8.' + host + ' ' + conf + tee, 'Segmentation fault', 30)
trace = logdir + '/log_' + host + case + test + '_2.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./ulsim.rel8.' + host + ' ' + conf + tee, 'Exiting', 30)
trace = logdir + '/log_' + host + case + test + '_3.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./ulsim.rel8.' + host + ' ' + conf + tee, 'FATAL', 30)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '01'
name = 'Perf oai.ulsim.test1'
diag = 'Test 1, 5 MHz, FDD (MCS 5), AWGN, 6dB'
conf = '-B25 -m5 -y1 -gN -x1 -s6 -w1.0 -e.1 -P -n500 -O70 -L'
trace = logdir + '/log_' + host + case + test +'.txt'
tee = ' 2>&1 | tee ' + trace
#print test + 'not performed'
cmd = 'taskset -c 0 ./ulsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '02'
name = 'Perf oai.ulsim.test2'
diag = 'Test 2, 5 MHz, FDD (MCS 16), AWGN , 12dB (70%)'
conf = '-B25 -m16 -y1 -gN -x1 -s12 -w1.0 -e.1 -P -n500 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
#print test + 'not performed'
cmd = 'taskset -c 0 ./ulsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '03'
name = 'Perf oai.ulsim.test3'
diag = 'Test 3, 10 MHz, R3.FDD (MCS 5), AWGN, 6dB (70%)'
conf = '-B50 -m5 -y1 -gN -x1 -s6 -w1.0 -e.1 -P -n500 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
#print test + 'not performed'
cmd = 'taskset -c 0 ./ulsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '04'
name = 'Perf oai.ulsim.test4'
diag = 'Test 4, 10 MHz, R3-1.FDD (MCS 16), AWGN, 12dB (70%)'
conf = '-B50 -m16 -y1 -gN -x1 -s12 -w1.0 -e.1 -P -n500 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
#print test + 'not performed'
cmd = 'taskset -c 0 ./ulsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '05'
        name = 'Perf oai.ulsim.test5'
diag = 'Test 5, 20 MHz, FDD (MCS 5), AWGN, 6dB (70%)'
conf = '-B100 -m5 -y1 -gN -x1 -s6 -w1.0 -e.1 -P -n500 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
#print test + 'not performed'
cmd = 'taskset -c 0 ./ulsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '06'
        name = 'Perf oai.ulsim.test6'
diag = 'Test 06, 20 MHz, FDD (MCS 16), AWGN, 12 dB (70%)'
conf = '-B100 -m16 -y1 -gN -x1 -s12 -w1.0 -e.1 -P -n500 -O70 -L'
trace = logdir + '/log_' + host + case + test + '.txt'
tee = ' 2>&1 | tee ' + trace
#print test + 'not performed'
cmd = 'taskset -c 0 ./ulsim.rel8.' + host + ' ' + conf + tee
oai.send_expect(cmd, 'passed', 150)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
| [] |
2024-01-10 | herlesupreeth/OAI-5G | targets~TEST~OAI~case02.py | #******************************************************************************
# OpenAirInterface
# Copyright(c) 1999 - 2014 Eurecom
# OpenAirInterface is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenAirInterface is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with OpenAirInterface.The full GNU General Public License is
# included in this distribution in the file called "COPYING". If not,
# see <http://www.gnu.org/licenses/>.
# Contact Information
# OpenAirInterface Admin: [email protected]
# OpenAirInterface Tech : [email protected]
# OpenAirInterface Dev : [email protected]
# Address : Eurecom, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia Antipolis cedex, FRANCE
#*******************************************************************************/
# \file case02.py
# \brief test case 02 for OAI: executions
# \author Navid Nikaein
# \date 2013 - 2015
# \version 0.1
# @ingroup _test
import time
import random
import log
import openair
import core
NUM_UE=2
NUM_eNB=1
NUM_TRIALS=3
def execute(oai, user, pw, host, logfile,logdir,debug):
case = '02'
oai.send('cd $OPENAIR_TARGETS;')
oai.send('cd SIMU/USER;')
try:
log.start()
test = '00'
name = 'Run oai.rel8.sf'
conf = '-a -A AWGN -n 100'
diag = 'OAI is not running normally (Segmentation fault / Exiting / FATAL), debugging might be needed'
trace = logdir + '/log_' + host + case + test + '_1.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./oaisim.rel8.' + host + ' ' + conf + tee, 'Segmentation fault', 30)
trace = logdir + '/log_' + host + case + test + '_2.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./oaisim.rel8.' + host + ' ' + conf + tee, 'Exiting', 30)
trace = logdir + '/log_' + host + case + test + '_3.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./oaisim.rel8.' + host + ' ' + conf + tee, 'FATAL', 30)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '01'
name = 'Run oai.rel8.err'
conf = '-a -A AWGN -n 100 '
trace = logdir + '/log_' + host + case + test + '_1.txt;'
tee = ' 2>&1 | tee ' + trace
diag = '[E] Error(s) found during the execution, check the execution logs'
oai.send_expect_false('./oaisim.rel8.'+ host + ' ' + conf, '[E]', 30)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '02'
name = 'Run oai.rel8.tdd.5MHz.rrc.abs'
diag = 'RRC procedure is not finished completely, check the execution logs and trace BCCH, CCCH, and DCCH channels'
for i in range(NUM_UE) :
for j in range(NUM_eNB) :
conf = '-a -A AWGN -n' + str((i+1+j) * 50) + ' -u' + str(i+1) +' -b'+ str(j+1)
trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel8.' + host + ' ' + conf + tee, ' Received RRCConnectionReconfigurationComplete from UE ' + str(i), (i+1) * 100)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '03'
name = 'Run oai.rel8.tdd.5MHz.rrc.itti.abs'
diag = 'RRC procedure is not finished completely, check the eNB config file (default is enb.band7.generic.conf), in addition to the execution logs and trace BCCH, CCCH, and DCCH channels'
for i in range(NUM_UE) :
for j in range(NUM_eNB) :
log_name = logdir + '/log_' + host + case + test + '_' + str(i) + str(j)
itti_name = log_name + '.log'
trace_name = log_name + '.txt'
conf = '-a -l7 -A AWGN --enb-conf ../../PROJECTS/GENERIC-LTE-EPC/CONF/enb.band7.generic.conf -n' + str((i+1+j) * 50) + ' -u' + str(i+1) +' -b'+ str(j+1) + ' -K' + itti_name
tee = ' 2>&1 | tee ' + trace_name
command = './oaisim.rel8.itti.' + host + ' ' + conf
oai.send('echo ' + command + ' > ' + trace_name + ';')
oai.send_expect(command + tee, ' Received RRCConnectionReconfigurationComplete from UE ' + str(i), (i+1) * 500)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile, trace_name)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test='04'
name = 'Run oai.rel8.tdd.5MHz.abs.ocg.otg'
diag = 'Check the scenario if the test 0202 is passed.'
conf = '-a -c26'
trace = logdir + '/log_' + host + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel8.' + host + ' ' + conf + tee, ' DL and UL loss rate below 10 ', 500)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test='05'
name = 'Run oai.rel8.fdd.5MHz.abs.ocg.otg'
diag = 'Check the template 26 and the results of test 0202.'
conf = '-a -F -c26'
trace = logdir + '/log_' + host + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel8.' + host + ' ' + conf + tee, ' DL and UL loss rate below 10 ', 500)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '06'
name = 'Run oai.rel8.tdd.5MHz.abs.ping'
diag = 'Data-plane is not working normally, check the OAI protocol stack, OAI driver, and normal operation of the OS'
oai.driver(oai,user,pw)
for i in range(NUM_eNB) :
for j in range(NUM_UE) :
conf = '-a -A AWGN -u' + str(j+1) +' -b'+ str(i+1)
trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt;'
tee = ' 2>&1 > ' + trace
if user == 'root' :
oai.send_nowait('./oaisim.rel8.nas.' + host + ' ' + conf + ' &')
else :
oai.send_nowait('echo '+pw+ ' | sudo -S -E ./oaisim.rel8.nas.'+ host + ' ' + conf + tee + ' &')
time.sleep(10)
for k in range(NUM_TRIALS) :
trace_ping = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + str(k) + '_ping.txt;'
tee_ping = ' 2>&1 | tee ' + trace_ping
oai.send_expect('ping 10.0.'+str(j+1)+'.'+str(NUM_eNB+i+1) + ' -c ' + str(random.randint(2, 10))+ ' -s ' + str(random.randint(128, 1500)) + tee_ping, ' 0% packet loss', 20)
if user == 'root' :
oai.send('pkill -f oaisim.rel8.nas.'+host)
time.sleep(1)
oai.send('pkill -f oaisim.rel8.nas.'+host)
else :
oai.send('pkill -f oaisim.rel8.nas.'+host)
time.sleep(1)
oai.send('echo '+pw+ ' | sudo -S pkill -f oaisim.rel8.nas.'+host)
time.sleep(1)
oai.send('echo '+pw+ ' | sudo -S pkill -f oaisim.rel8.nas.'+host)
time.sleep(1)
oai.rm_driver(oai,user,pw)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '07'
name = 'Run oai.rel8.tdd.5MHz.phy.rrc'
diag = 'RRC procedure is not finished completely, check the execution logs and trace BCCH, CCCH, and DCCH channels'
for i in range(NUM_UE) :
for j in range(NUM_eNB) :
conf = '-A AWGN -n' + str((i+1+j) * 100) + ' -u' + str(i+1) +' -b'+ str(j+1) + ' -x1'
trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel8.' + host + ' ' + conf + tee, ' Received RRCConnectionReconfigurationComplete from UE ' + str(i), (i+1) * 500)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '08'
name = 'Run oai.rel8.fdd.5MHz.phy.rrc'
diag = 'RRC procedure is not finished completely in FDD mode, check the execution logs and trace BCCH, CCCH, and DCCH channels'
for i in range(NUM_UE) :
for j in range(NUM_eNB) :
conf = '-A AWGN -F -n' + str((i+1+j) * 100) + ' -u' + str(i+1) +' -b'+ str(j+1) + ' -x1'
trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel8.' + host + ' ' + conf + tee, ' Received RRCConnectionReconfigurationComplete from UE ' + str(i), (i+1) * 500)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '09'
name = 'Run oai.rel8.fdd.10MHz.phy.rrc'
diag = 'RRC procedure is not finished completely, check the execution logs and trace BCCH, CCCH, and DCCH channels and the results of test 0204'
conf = '-A AWGN -F -R 50 -n 150 -u 1 -b 1 -x1'
trace = logdir + '/log_' + host + case + test + '_1.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel8.' + host + ' ' + conf + tee, ' Received RRCConnectionReconfigurationComplete from UE 0', 600)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '10'
name = 'Run oai.rel8.fdd.20MHz.phy.rrc'
diag = 'RRC procedure is not finished completely, check the execution logs and trace BCCH, CCCH, and DCCH channels and the results of test 0204'
conf = '-A AWGN -F -R 100 -n 200 -u 1 -b 1 -x1'
trace = logdir + '/log_' + host + case + test + '_1.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel8.' + host + ' ' + conf + tee, ' Received RRCConnectionReconfigurationComplete from UE 0', 700)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
| [] |
2024-01-10 | herlesupreeth/OAI-5G | targets~TEST~OAI~case13.py | #******************************************************************************
# Eurecom OpenAirInterface
# Copyright(c) 1999 - 2013 Eurecom
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
# Contact Information
# Openair Admin: [email protected]
# Openair Tech : [email protected]
# Forums : http://forums.eurecom.fsr/openairinterface
# Address : Eurecom, Campus SophiaTech 450, route des chappes, 06451 Biot, France
#*****************************************************************************
# \file case13.py
# \brief test case 13 for OAI: executions
# \author Navid Nikaein
# \date 2013
# \version 0.1
# @ingroup _test
import time
import random
import log
import openair
import core
import os
import shutil # copy file
NUM_UE=1
NUM_eNB=1
NUM_TRIALS=3
PRB=[25,50,100]
MCS=[3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28]
#MCS=[0,4,9,10,13,16,17,22,27]
#PRB=[100]
#MCS=[16]
ANT_TX=2 # 2
ANT_RX=2 # 2
CHANNEL=["N"]
#CHANNEL=["C","E","F","G","H","I","L","M"] # A,B,C,D,E,F,
TX_MODE=2 # 2,
MIN_SNR=10
MAX_SNR=40
PERF=75
OPT="-L"
FRAME=2000
#OPT="-L -d" # 8bit decoder , activate dci decoding at UE
def execute(oai, user, pw, host,logfile,logdir,debug,cpu):
case = '10'
oai.send('cd $OPENAIR_TARGETS;')
oai.send('cd bin;')
oai.send('cp ./ulsim.Rel10 ./ulsim.Rel10.'+host)
try:
log.start()
test = '300'
name = 'Run oai.ulsim.sanity'
conf = '-a -n 100'
diag = 'ulsim is not running normally (Segmentation fault / Exiting / FATAL), debugging might be needed'
trace = logdir + '/log_' + host + case + test + '_1.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./ulsim.Rel10.'+ host + ' ' + conf + tee, 'Segmentation fault', 30)
trace = logdir + '/log_' + host + case + test + '_2.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./ulsim.Rel10.'+ host + ' ' + conf + tee, 'Exiting', 30)
trace = logdir + '/log_' + host + case + test + '_3.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./ulsim.Rel10.'+ host + ' ' + conf + tee, 'FATAL', 30)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = 310
name = 'Run oai.ulsim.perf.'+str(PERF)+'%'
diag = 'no diagnostic is available, check the log file'
for i in range(len(PRB)):
for o in range(len(CHANNEL)):
MIN_SNR=10
for j in range(len(MCS)):
for m in range (1,ANT_RX):
for p in range(1,TX_MODE):
for r in range(5,PRB[i]):
for q in range(MIN_SNR,MAX_SNR):
if r ==7 or r ==11 or r ==14 or r == 17 or r==19 or r == 21 or r == 23 or r == 26 or r == 28 :
continue
conf = '-B' + str(PRB[i]) + ' -r'+str(r) + ' -m'+str(MCS[j]) + ' -y'+str(m) + ' -g'+str(CHANNEL[o]) + ' -x'+str(p) + ' -s'+str(q) + ' -w1.0 -e.1 -P -n'+str(FRAME)+' -O'+str(PERF)+' '+ OPT
trace = logdir + '/time_meas' + '_prb'+str(PRB[i])+ '_rb'+str(r)+'_mcs'+ str(MCS[j])+ '_antrx' + str(m) + '_channel' +str(CHANNEL[o]) + '_tx' +str(p) + '_snr' +str(q)+'.'+case+str(test)+ '.log'
tee = ' 2>&1 | tee ' + trace
if cpu > -1 :
cmd = 'taskset -c ' + str(cpu) + ' ./ulsim.Rel10.'+ host + ' ' + conf + tee
else :
cmd = './ulsim.Rel10.'+ host + ' ' + conf + tee
if debug :
print cmd
match = oai.send_expect_re(cmd, 'passed', 0, 1000)
#match =1
if match :
log.ok(case, str(test), name, conf, '', logfile)
MIN_SNR = q - 1 # just to speed up the test
test+=1
break # found the smallest snr
else :
if q == MAX_SNR -1 :
log.skip(case,str(test), name, conf,'','',logfile)
test+=1
break
try:
if os.path.isfile(trace) :
os.remove(trace)
except OSError, e: ## if failed, report it back to the user ##
print ("Error: %s - %s." % (e.filename,e.strerror))
except log.err, e:
log.fail(case, str(test), name, conf, e.value, diag, logfile,trace)
#else:
# log.ok(case, test, name, conf, '', logfile)
| [] |
2024-01-10 | herlesupreeth/OAI-5G | targets~TEST~OAI~case03.py | #******************************************************************************
# OpenAirInterface
# Copyright(c) 1999 - 2014 Eurecom
# OpenAirInterface is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenAirInterface is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with OpenAirInterface.The full GNU General Public License is
# included in this distribution in the file called "COPYING". If not,
# see <http://www.gnu.org/licenses/>.
# Contact Information
# OpenAirInterface Admin: [email protected]
# OpenAirInterface Tech : [email protected]
# OpenAirInterface Dev : [email protected]
# Address : Eurecom, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia Antipolis cedex, FRANCE
#*******************************************************************************/
# \file case03.py
# \brief test case 03 for OAI: executions
# \author Navid Nikaein
# \date 2013
# \version 0.1
# @ingroup _test
import time
import random
import log
import openair
import core
NUM_UE=2
NUM_eNB=1
NUM_TRIALS=3
def execute(oai, user, pw, host, logfile,logdir,debug):
case = '03'
oai.send('cd $OPENAIR_TARGETS;')
oai.send('cd SIMU/USER;')
try:
log.start()
test = '00'
name = 'Run oai.rel10.sf'
conf = '-a -A AWGN -l7 -n 100'
diag = 'OAI is not running normally (Segmentation fault / Exiting / FATAL), debugging might be needed'
trace = logdir + '/log_' + host + case + test + '_1.txt'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./oaisim.rel10.' + host + ' ' + conf + tee, 'Segmentation fault', 30)
trace = logdir + '/log_' + host + case + test + '_2.txt'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./oaisim.rel10.' + host + ' ' + conf + tee, 'Exiting', 30)
trace = logdir + '/log_' + host + case + test + '_3.txt'
tee = ' 2>&1 | tee ' + trace
oai.send_expect_false('./oaisim.rel10.' + host + ' ' + conf + tee, 'FATAL', 30)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '01'
name = 'Run oai.rel10.err'
conf = '-a -A AWGN -l7 -n 100'
trace = logdir + '/log_' + host + case + test + '.txt;'
tee = ' 2>&1 | tee ' + trace
diag = 'Error(s) found in the execution, check the execution logs'
oai.send_expect_false('./oaisim.rel10.' + host + ' ' + conf + tee, '[E]', 30)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '02'
name = 'Run oai.rel10.tdd.5MHz.abs.rrc'
diag = 'RRC procedure is not finished completely, check the execution logs and trace BCCH, CCCH, and DCCH channels'
for i in range(NUM_UE) :
for j in range(NUM_eNB) :
conf = '-a -l7 -A AWGN -n' + str((i+1+j) * 50) + ' -u' + str(i+1) +' -b'+ str(j+1)
trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel10.' + host + ' ' + conf + tee, ' Received RRCConnectionReconfigurationComplete from UE ' + str(i), (i+1) * 100)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '03'
name = 'Run oai.rel10.tdd.5MHz.phy.rrc'
diag = 'RRC procedure is not finished completely, check the execution logs and trace BCCH, CCCH, and DCCH channels'
for i in range(NUM_UE) :
for j in range(NUM_eNB) :
conf = '-A AWGN -l7 -x 1 -n' + str((i+1+j) * 100) + ' -u' + str(i+1) +' -b'+ str(j+1)
trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel10.' + host + ' ' + conf + tee, ' Received RRCConnectionReconfigurationComplete from UE ' + str(i), (i+1) * 500)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
try:
log.start()
test = '04'
name = 'Run oai.rel10.fdd.5MHz.phy.rrc'
diag = 'RRC procedure is not finished completely in FDD mode, check the execution logs and trace BCCH, CCCH, and DCCH channels'
for i in range(NUM_UE) :
for j in range(NUM_eNB) :
conf = '-A AWGN -l7 -F -x 1 -n' + str((i+1+j) * 100) + ' -u' + str(i+1) +' -b'+ str(j+1)
trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt;'
tee = ' 2>&1 | tee ' + trace
oai.send_expect('./oaisim.rel10.' + host + ' ' + conf + tee, ' Received RRCConnectionReconfigurationComplete from UE ' + str(i), (i+1) * 500)
except log.err, e:
log.fail(case, test, name, conf, e.value, diag, logfile,trace)
else:
log.ok(case, test, name, conf, '', logfile)
# try:
# test = '05'
# name = 'Run oai.rel10.phy.eMBMS.MCCH'
# diag = 'eMBMS procedure is not finished completely, make sure that the SIB13/MCCH have been correctly received by UEs'
# for i in range(NUM_UE) :
# for j in range(NUM_eNB) :
# conf = '-A AWGN -l7 -x 1 -Q3 -n' + str((i+1+j) * 50) + ' -u' + str(i+1) +' -b'+ str(j+1)
# trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt'
# tee = ' 2>&1 | tee ' + trace
# oai.send_expect('./oaisim.rel10.' + host + ' ' + conf + tee, ' Found MBSFNAreaConfiguration from eNB ' + str(j), (i+1) * 200)
# except log.err, e:
# log.fail(case, test, name, conf, e.value, diag, logfile,trace)
# else:
# log.ok(case, test, name, conf, '', logfile)
# try:
# test = '06'
# name = 'Run oai.rel10.phy.eMBMS.OTG'
# diag = 'eMBMS multicast/broadcast data is not received, make sure that the SIB13/MCCH/MTCH have been correctly received by UEs'
# for i in range(NUM_UE) :
# for j in range(NUM_eNB) :
# conf = '-A AWGN -l7 -x 1 -T mscbr -Q3 -n' + str((i+1+j) * 100) + ' -u' + str(i+1) +' -b'+ str(j+1)
# trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt'
# tee = ' 2>&1 | tee ' + trace
# oai.send_expect('./oaisim.rel10.' + host + ' ' + conf + tee, ' Received a multicast packet', (i+1) * 200)
# except log.err, e:
# log.fail(case, test, name, conf, e.value, diag, logfile,trace)
# else:
# log.ok(case, test, name, conf, 'Note: check the packet loss from the OTG stats', logfile)
# try:
# test = '07'
# name = 'Run oai.rel10.phy.eMBMS.OTG.fdd'
# diag = 'eMBMS multicast/broadcast data is not received in fdd mode, make sure that the SIB13/MCCH/MTCH have been correctly received by UEs'
# for i in range(NUM_UE) :
# for j in range(NUM_eNB) :
# conf = '-A AWGN -l7 -F -x 1 -T mscbr -Q3 -n' + str((i+1+j) * 100) + ' -u' + str(i+1) +' -b'+ str(j+1)
# trace = logdir + '/log_' + host + case + test + '_' + str(i) + str(j) + '.txt'
# tee = ' 2>&1 | tee ' + trace
# oai.send_expect('./oaisim.rel10.' + host + ' ' + conf + tee, ' Received a multicast packet', (i+1) * 200)
# except log.err, e:
# log.fail(case, test, name, conf, e.value, diag, logfile,trace)
# else:
# log.ok(case, test, name, conf, 'Note: check the packet loss from the OTG stats', logfile)
# try:
# test = '08'
# name = 'Run oai.rel10.phy.eMBMS.Relay.OTG.fdd'
# diag = 'eMBMS multicast/broadcast DF relaying is not working properly in fdd mode, make sure that the SIB13/MCCH/MTCH have been correctly received by UEs'
# conf = '-c43 -F -T mbvbr -Q4 -j1 -n120'
# tee = ' | tee ' + logs_dir + '/log_' + case + test + '.txt'
# oai.send_expect('./oaisim.rel10 ' + conf + tee, ' MTCH for sync area 1', 100)
# except log.err, e:
# log.fail(case, test, name, conf, e.value, diag, logfile)
# else:
# log.ok(case, test, name, conf, 'Note: check the packet loss from the OTG stats', logfile)
# try:
# test = '09'
# name = 'Run oai.rel10.itti.phy.eMBMS.MCCH'
# diag = 'eMBMS procedure is not finished completely, check the eNB config file (enb.band7.generic.conf), and make sure that the SIB13/MCCH have been correctly received by UEs'
# for i in range(NUM_UE) :
# for j in range(NUM_eNB) :
# log_name = logdir + '/log_' + host + case + test + '_' + str(i) + str(j)
# itti_name = log_name + '.log'
# trace_name = log_name + '.txt'
# conf = '-A AWGN -l7 -x 1 -Q3 --enb-conf ../../PROJECTS/GENERIC-LTE-EPC/CONF/enb.band7.generic.conf -n' + str((i+1+j) * 50) + ' -u' + str(i+1) +' -b'+ str(j+1) + ' -K' + itti_name
# tee = ' 2>&1 | tee -a ' + trace_name
# command = './oaisim.rel10.itti.' + host + ' ' + conf
# oai.send('echo ' + command + ' > ' + trace_name + ';')
# oai.send_expect(command + tee, ' Found MBSFNAreaConfiguration from eNB ' + str(j), (i+1) * 200)
# except log.err, e:
# log.fail(case, test, name, conf, e.value, diag, logfile, trace_name)
# else:
# log.ok(case, test, name, conf, '', logfile)
| [] |
2024-01-10 | thomashirtz/soft-actor-critic | soft_actor_critic~policy.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
import numpy as np
from typing import Optional
from typing import Sequence
from soft_actor_critic.utilities import weight_initialization
from soft_actor_critic.utilities import get_multilayer_perceptron
class StochasticPolicy(nn.Module):
def __init__(self, input_dims: int, num_actions: int, hidden_units: Optional[Sequence[int]] = None,
action_space=None, epsilon: float = 10e-6, log_sigma_max: float = 2, log_sigma_min: float = -20):
super(StochasticPolicy, self).__init__()
if hidden_units is None:
hidden_units = [256, 256]
self.input_dims = input_dims
self.num_actions = num_actions
self.hidden_units = list(hidden_units)
self.epsilon = epsilon
self.log_sigma_max = log_sigma_max
self.log_sigma_min = log_sigma_min
units = [input_dims] + list(hidden_units)
self.multilayer_perceptron = get_multilayer_perceptron(units, keep_last_relu=True)
self.mean_linear = nn.Linear(units[-1], num_actions)
self.log_std_linear = nn.Linear(units[-1], num_actions)
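# action_scale / action_bias rescale the tanh-squashed action from [-1, 1] onto the environment's action range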
if action_space is None:
self.action_scale = torch.tensor(1.)
self.action_bias = torch.tensor(0.)
else:
self.action_scale = torch.FloatTensor((action_space.high - action_space.low) / 2.)
self.action_bias = torch.FloatTensor((action_space.high + action_space.low) / 2.)
self.apply(weight_initialization)
def forward(self, x): # todo maybe merge forward and evaluate + maybe add the "act" from openai
x = self.multilayer_perceptron(x)
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
log_std_clamped = torch.clamp(log_std, min=self.log_sigma_min, max=self.log_sigma_max)
std = torch.exp(log_std_clamped)
return mean, std
def evaluate(self, state, deterministic: bool = False, with_log_probability: bool = True):
mean, std = self.forward(state)
distribution = Normal(mean, std)
sample = distribution.rsample()
if deterministic:
action = mean
else:
action = torch.tanh(sample) # todo when sac working, multiply by action_scale and add action_bias
if with_log_probability:
# Implementation that I originally implemented
# the "_" are only here for now to debug the values and the shapes
# log_probability_ = distribution.log_prob(sample) - torch.log((1 - action.pow(2)) + self.epsilon)
# log_probability = log_probability_.sum(1, keepdim=True)
# OPENAI Implementation
# https://github.com/openai/spinningup/blob/038665d62d569055401d91856abb287263096178/spinup/algos/pytorch/sac/core.py#L59
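# uses the identity log(1 - tanh(x)^2) = 2 * (log(2) - x - softplus(-2x)) for numerical stability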
log_probability_ = distribution.log_prob(sample).sum(axis=-1, keepdim=True)
log_probability__ = (2 * (np.log(2) - sample - F.softplus(-2 * sample))).sum(axis=1).unsqueeze(1)
log_probability = log_probability_ - log_probability__
else:
log_probability = None
return action, log_probability
def act(self, observation, deterministic=False) -> np.array: # todo need to replace in the agent code
with torch.no_grad():
action, _ = self.evaluate(observation, deterministic=deterministic, with_log_probability=False)
return action.cpu().numpy()
| [] |
2024-01-10 | IanSteenstra/MentalHealthLLM | mental_health_llm.py | import os
import time
from rank_bm25 import BM25Okapi
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from langchain.chat_models import ChatOpenAI
# Download necessary NLTK models and data
nltk.download('punkt')
nltk.download('stopwords')
# Initialize constants
CORPUS_FOLDER = "corpus"
MAX_FINAL_RESPONSE_SIZE = 200
MAX_CHUNK_RESPONSE_SIZE = 100
# Retrieve the set of English stopwords
stop_words = set(stopwords.words('english'))
# Initialize the LLM
llm = ChatOpenAI(model_name="gpt-3.5-turbo-1106")
def tokenize(document):
"""
Tokenize a document, remove stopwords and non-alphabetic tokens.
Args:
document (str): The document to tokenize.
Returns:
list: List of tokens from the document.
"""
# Tokenize the document
tokens = word_tokenize(document.lower())
# Remove stopwords and non-alphabetic tokens
filtered_tokens = [word for word in tokens if word.isalpha() and word not in stop_words]
return filtered_tokens
def load_documents():
"""
Load and tokenize documents from the "corpus" folder.
Returns:
list: List of tokenized documents.
"""
print("Loading and tokenizing documents...")
tokenized_documents = []
for filename in os.listdir(CORPUS_FOLDER):
if filename.endswith('.txt'): # Ensure we're reading text files
with open(os.path.join(CORPUS_FOLDER, filename), 'r', encoding='iso-8859-1') as file:
# Read and tokenize the document
text = file.read()
tokens = tokenize(text)
tokenized_documents.append(tokens)
print(f"Loaded and tokenized {len(tokenized_documents)} documents.")
return tokenized_documents
def retrieve_documents_with_bm25(query, tokenized_documents, top_k, remove_top_2=False):
"""
Retrieve top documents using the BM25 algorithm.
Args:
query (str): The user query.
tokenized_documents (list): List of tokenized documents.
top_k (int): Number of top documents to retrieve.
remove_top_2 (bool): If true, exclude the top 2 documents from the result.
Returns:
list: List of top documents based on BM25 scores.
"""
print(f"Retrieving top-{top_k} documents with BM25...")
bm25 = BM25Okapi(tokenized_documents)
query_tokens = tokenize(query)
doc_scores = bm25.get_scores(query_tokens)
# If remove_top_2 is True, adjust the slicing to exclude the top 2 documents
if remove_top_2:
start_index = 2
else:
start_index = 0
top_doc_indices = sorted(range(len(doc_scores)), key=lambda i: doc_scores[i], reverse=True)[start_index:top_k]
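# note: when remove_top_2 is True, this keeps the documents ranked 3..top_k, i.e. top_k - 2 documents in total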
top_documents = [tokenized_documents[i] for i in top_doc_indices]
print(f"{len(top_documents)} documents retrieved.")
return top_documents
def generate_prompts(document, user_query, max_token_limit=16385):
"""
Generate document chunks as prompts for the LLM.
Args:
document (str): The document to chunk.
user_query (str): The user query for context.
max_token_limit (int): Maximum token limit for each chunk.
Returns:
list: List of generated prompts.
"""
prompt_intro = f"The user is seeking information related to '{user_query}'. Given this context, carefully analyze the following text. Identify and summarize the key points that directly address the user's query in less than 100 tokens, especially focusing on any relevant facts, insights, or advice. Highlight critical details and provide a concise summary that would be most helpful and informative to the user:\n\n"
# Estimate the number of tokens in the prompt
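# (rough heuristic: about 4 characters per token for English text)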
estimated_prompt_tokens = len(prompt_intro) // 4
# Calculate the maximum size for each chunk in terms of tokens
max_words_per_chunk = (max_token_limit - estimated_prompt_tokens - MAX_CHUNK_RESPONSE_SIZE) // 2
# Split the document into words
words = document.split()
# Create document chunks based on max_words_per_chunk and convert to individual prompts
prompt_chunks = []
for i in range(0, len(words), int(max_words_per_chunk)):
chunk = ' '.join(words[i:i + int(max_words_per_chunk)])
prompt_chunks.append(prompt_intro + chunk)
return prompt_chunks
def batch_process(prompts):
"""
Process prompts in batches. A 5 second gap between batches is used because of rate limiting.
Args:
prompts (list): List of prompts to process.
Returns:
str: Concatenated responses from all batches.
"""
responses = ""
for idx, i in enumerate(range(0, len(prompts), 3)):
print(f"Processing Batch {idx+1}/{(len(prompts)//3)+1}")
# Extract a batch of 3 prompts
batch = prompts[i:i+3]
# Send the batch to the LLM
response_batch = llm.batch(batch, max_tokens=MAX_CHUNK_RESPONSE_SIZE)
# Extract each text response from the batch
for response in response_batch:
responses += response.content
# Wait for 5 seconds before sending the next batch
time.sleep(5)
return responses
def run_system():
"""
Main function to run the document retrieval and summarization system.
"""
start = time.time()
user_query = input("Please enter your mental health-related query: ")
tokenized_documents = load_documents()
top_k = 5 # Change here for top k documents
remove_top_2 = False # Change here to test removing top 2 documents
# Retrieve top k documents using BM25
top_documents = retrieve_documents_with_bm25(user_query, tokenized_documents, top_k, remove_top_2)
all_prompts = []
# Generate prompts from top-K documents
for tokenized_doc in top_documents:
doc_string = ' '.join(tokenized_doc)
all_prompts += generate_prompts(doc_string, user_query)
# Batch process prompts
combined_responses = batch_process(all_prompts)
final_prompt = (
f"Summary of Information: {combined_responses}\n\n"
"Based on this earlier summary about cognitive-behavioral therapy (CBT) techniques, "
f"apply these strategies directly through a normal conversational style of a counselor in 200 tokens or less to help a user's current situation and questions: {user_query}"
)
final_response = llm.predict(final_prompt, max_tokens=MAX_FINAL_RESPONSE_SIZE)
print(f"Response: {final_response}")
end = time.time()
print(f"Response Duration: {end-start}")
if __name__ == "__main__":
run_system() | [
"0",
"[]",
"The user is seeking information related to 'PLACEHOLDER'. Given this context, carefully analyze the following text. Identify and summarize the key points that directly address the user's query in less than 100 tokens, especially focusing on any relevant facts, insights, or advice. Highlight critical details and provide a concise summary that would be most helpful and informative to the user:\n\n",
"Summary of Information: PLACEHOLDER\n\nBased on this earlier summary about cognitive-behavioral therapy (CBT) techniques, apply these strategies directly through a normal conversational style of a counselor in 200 tokens or less to help a user's current situation and questions: PLACEHOLDER"
] |
2024-01-10 | mali404/chatbot | web_scrapping~url_crawling.py | from langchain.document_loaders import RecursiveUrlLoader
import pandas as pd
from urllib.parse import urlparse
from tqdm import tqdm
import os
class url_crawl(RecursiveUrlLoader):
def __init__(self, base_url, depth):
super().__init__(url=base_url, max_depth=depth)
self.base_url = base_url
self.max_depth = depth
def get_child_urls(self):
# Initialize a set to store visited URLs
visited = set()
# Initialize a list to store the collected URLs
self.child_urls = []
# Call the _get_child_links_recursive method to start crawling
for document in tqdm(self._get_child_links_recursive(self.base_url, visited)):
self.child_urls.append(document.metadata['source'])
return self.child_urls
def filter_urls(self):
""" Filter out URLs containing a question mark
because these urls are not useful for our purpose
such urls mostly contain search results, css files, etc.
two things are done here:
i. filter out urls containing a question mark
ii. sort the urls in alphabetical order"""
filtered_urls = (url for url in self.child_urls if '?' not in url)
#sorting URLS in alphabetical order
self.sorted_urls = sorted(filtered_urls)
return self.sorted_urls
def process_urls(self):
""""there are some urls especially in bulletin.iit.edu that
have duplicate content. One in html form and other in pdf form.
Here we are doing 2 things mainly:
1. remove the pdf urls with duplicate content
2. remove the duplicate urls that result after the first step"""
# performing step 1
processed_urls_1 = (
url.rsplit('/', 1)[0] if url.endswith('.pdf') and
urlparse(url).path.split('/')[-1].replace('.pdf', '') == urlparse(url).path.split('/')[-2]
else url
for url in self.sorted_urls
)
# performing step 2
self.processed_urls_2 = set(url.rstrip('/') for url in processed_urls_1)
return self
def store_urls(self):
# export to csv
pd.DataFrame(list(self.processed_urls_2), columns=['urls']).to_csv('urls_iit_edu.csv')
class MultiCrawler:
def __init__(self, urls_with_depths):
# Remove duplicates based on the base URL
base_urls = {}
for url, depth in urls_with_depths:
base_url = urlparse(url).scheme + "://" + urlparse(url).netloc
if base_url not in base_urls:
base_urls[base_url] = depth
self.urls_with_depths = list(base_urls.items())
self.all_urls = []
def crawl(self):
for url, depth in self.urls_with_depths:
crawler = url_crawl(base_url=url, depth=depth)
crawler.get_child_urls()
crawler.filter_urls()
crawler.process_urls()
# process_urls() stores the deduplicated URLs on the crawler, so collect them from there
self.all_urls.extend(crawler.processed_urls_2)
def get_all_urls(self):
return self.all_urls
if __name__ == '__main__':
crawler = url_crawl(base_url='https://www.iit.edu/', depth=3)
crawler.get_child_urls()
crawler.filter_urls()
crawler.process_urls()
crawler.store_urls()
| [] |
2024-01-10 | joowon-dm-snu/fastcampus-chatgpt-intro-frameworks | part06~ch04_semantic_kernel~gen2~init_kernel.py | import os
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
def init_kernel():
kernel = sk.Kernel()
kernel.add_chat_service(
"gpt-3.5-turbo",
OpenAIChatCompletion(
"gpt-3.5-turbo",
api_key=os.getenv("OPENAI_API_KEY"),
),
)
skills = {}
skills["AnswerSkill"] = kernel.import_semantic_skill_from_directory(
CUR_DIR, "AnswerSkill"
)
skills["IntentSkill"] = kernel.import_semantic_skill_from_directory(
CUR_DIR, "IntentSkill"
)
return kernel, skills
| [] |
2024-01-10 | joowon-dm-snu/fastcampus-chatgpt-intro-frameworks | part06~ch02~upload~database~langchain~upload.py | import os
from dotenv import load_dotenv
from langchain.document_loaders import (
NotebookLoader,
TextLoader,
UnstructuredMarkdownLoader,
)
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
load_dotenv()
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(os.path.dirname(CUR_DIR), "dataset")
SK_CODE_DIR = os.path.join(DATA_DIR, "semantic-kernel", "python")
SK_SAMPLE_DIR = os.path.join(
DATA_DIR, "semantic-kernel", "samples", "notebooks", "python"
)
SK_DOC_DIR = os.path.join(DATA_DIR, "semantic-kernel-docs", "semantic-kernel")
CHROMA_PERSIST_DIR = os.path.join(CUR_DIR, "chroma-persist")
CHROMA_COLLECTION_NAME = "fastcampus-bot"
LOADER_DICT = {
"py": TextLoader,
"md": UnstructuredMarkdownLoader,
"ipynb": NotebookLoader,
}
def upload_embedding_from_file(file_path):
loader = LOADER_DICT.get(file_path.split(".")[-1])
if loader is None:
raise ValueError("Not supported file type")
documents = loader(file_path).load()
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=100)
docs = text_splitter.split_documents(documents)
Chroma.from_documents(
docs,
OpenAIEmbeddings(),
collection_name=CHROMA_COLLECTION_NAME,
persist_directory=CHROMA_PERSIST_DIR,
)
def upload_embeddings_from_dir(dir_path):
failed_upload_files = []
for root, dirs, files in os.walk(dir_path):
for file in files:
if file.endswith(".py") or file.endswith(".md") or file.endswith(".ipynb"):
file_path = os.path.join(root, file)
try:
upload_embedding_from_file(file_path)
print("SUCCESS: ", file_path)
except Exception as e:
print("FAILED: ", file_path + f"by({e})")
failed_upload_files.append(file_path)
if __name__ == "__main__":
upload_embeddings_from_dir(SK_CODE_DIR)
upload_embeddings_from_dir(SK_SAMPLE_DIR)
upload_embeddings_from_dir(SK_DOC_DIR)
| [] |
2024-01-10 | joowon-dm-snu/fastcampus-chatgpt-intro-frameworks | part07~ch03_langchain~gen3~web_search.py | import os
from chains import search_compression_chain, search_value_check_chain
from dotenv import load_dotenv
from langchain.tools import Tool
from langchain.utilities import GoogleSearchAPIWrapper
load_dotenv()
search = GoogleSearchAPIWrapper(
google_api_key=os.getenv("GOOGLE_API_KEY"),
google_cse_id=os.getenv("GOOGLE_CSE_ID"),
)
search_tool = Tool(
name="Google Search",
description="Search Google for recent results.",
func=search.run,
)
def query_web_search(user_message: str) -> str:
context = {"user_message": user_message}
context["related_web_search_results"] = search_tool.run(user_message)
has_value = search_value_check_chain.run(context)
print(has_value)
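# the value-check chain is expected to answer "Y" or "N" depending on whether the search results are useful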
if has_value == "Y":
return search_compression_chain.run(context)
else:
return ""
| [] |
2024-01-10 | joowon-dm-snu/fastcampus-chatgpt-intro-frameworks | part06~ch02~upload~database~semantic_kernel~upload.py | import json
import os
from uuid import uuid4
import markdown
import nbformat
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAITextEmbedding
from semantic_kernel.connectors.memory.chroma import ChromaMemoryStore
from semantic_kernel.text.text_chunker import (
split_markdown_paragraph,
split_plaintext_paragraph,
)
load_dotenv()
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(os.path.dirname(CUR_DIR), "dataset")
SK_CODE_DIR = os.path.join(DATA_DIR, "semantic-kernel", "python")
SK_SAMPLE_DIR = os.path.join(
DATA_DIR, "semantic-kernel", "samples", "notebooks", "python"
)
SK_DOC_DIR = os.path.join(DATA_DIR, "semantic-kernel-docs", "semantic-kernel")
CHROMA_PERSIST_DIR = os.path.join(CUR_DIR, "chroma-persist")
CHROMA_COLLECTION_NAME = "fastcampus-bot"
kernel = Kernel()
kernel.add_text_embedding_generation_service(
"ada",
OpenAITextEmbedding(
"text-embedding-ada-002",
os.getenv("OPENAI_API_KEY"),
),
)
kernel.register_memory_store(
memory_store=ChromaMemoryStore(persist_directory=CHROMA_PERSIST_DIR)
)
def read_file(file_path):
with open(file_path, "r") as f:
if file_path.endswith(".ipynb"):
nb = nbformat.read(f, as_version=4)
contents = ""
for cell in nb["cells"]:
if cell["cell_type"] in ["code", "markdown"]:
contents += cell["source"] + "\n"
else:
raise ValueError(f"Unknown cell type: {cell['cell_type']}")
else:
contents = f.read()
if file_path.endswith(".ipynb") or file_path.endswith(".md"):
contents = markdown.markdown(contents)
soup = BeautifulSoup(contents, "html.parser")
contents = soup.get_text()
return contents
async def upload_embedding_from_file(file_path):
contents = read_file(file_path)
if file_path.endswith(".ipynb") or file_path.endswith(".md"):
chunks = split_markdown_paragraph([contents], max_tokens=500)
else:
chunks = split_plaintext_paragraph([contents], max_tokens=500)
for chunk_id, chunk in enumerate(chunks):
await kernel.memory.save_information_async(
collection=CHROMA_COLLECTION_NAME,
text=chunk,
id=str(uuid4()),
description=os.path.relpath(file_path, DATA_DIR),
additional_metadata=json.dumps({"chunk_id": chunk_id}),
)
async def upload_embeddings_from_dir(dir_path):
failed_upload_files = []
for root, dirs, files in os.walk(dir_path):
for file in files:
if file.endswith(".py") or file.endswith(".md") or file.endswith(".ipynb"):
file_path = os.path.join(root, file)
try:
await upload_embedding_from_file(file_path)
print("SUCCESS: ", file_path)
except Exception:
print("FAILED: ", file_path)
failed_upload_files.append(file_path)
if __name__ == "__main__":
import asyncio
asyncio.run(upload_embeddings_from_dir(SK_CODE_DIR))
asyncio.run(upload_embeddings_from_dir(SK_SAMPLE_DIR))
asyncio.run(upload_embeddings_from_dir(SK_DOC_DIR))
| [] |
2024-01-10 | joowon-dm-snu/fastcampus-chatgpt-intro-frameworks | part06~ch03_langchain~gen3~database.py | import os
from typing import List
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
CHROMA_PERSIST_DIR = os.path.join(CUR_DIR, "database", "chroma-persist")
CHROMA_COLLECTION_NAME = "fastcampus-bot"
_db = Chroma(
persist_directory=CHROMA_PERSIST_DIR,
embedding_function=OpenAIEmbeddings(),
collection_name=CHROMA_COLLECTION_NAME,
)
_retriever = _db.as_retriever()
def query_db(query: str, use_retriever: bool = False) -> List[str]:
if use_retriever:
docs = _retriever.get_relevant_documents(query)
else:
docs = _db.similarity_search(query)
str_docs = [doc.page_content for doc in docs]
return str_docs
| [] |